diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
new file mode 100644
index 0000000000..0535ccd7dc
--- /dev/null
+++ b/.github/workflows/CI.yml
@@ -0,0 +1,33 @@
+name: CI
+
+on: [push, pull_request]
+
+jobs:
+ java-8:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK 1.8
+ uses: actions/setup-java@v1
+ with:
+ java-version: 1.8
+ - name: Test
+ run: |
+ cd h2
+ echo $JAVA_OPTS
+ export JAVA_OPTS=-Xmx512m
+ ./build.sh jar testCI
+ java-11:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up JDK 11
+ uses: actions/setup-java@v1
+ with:
+ java-version: 11
+ - name: Test
+ run: |
+ cd h2
+ echo $JAVA_OPTS
+ export JAVA_OPTS=-Xmx512m
+ ./build.sh jar testCI
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..4e63e5a68d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,21 @@
+/bin/
+/.settings/
+/.classpath
+/.project
+/dist/
+/build/
+/nbproject/
+/data/
+/error.lock
+/error.txt
+/test.out.txt
+.DS_Store
+/.idea/
+*.iml
+*.ipr
+*.iws
+.checkstyle
+/temp/
+/h2web/
+.pmd
+docs/html/testOutput.html
diff --git a/.lift.toml b/.lift.toml
new file mode 100644
index 0000000000..3c7beccf52
--- /dev/null
+++ b/.lift.toml
@@ -0,0 +1,8 @@
+# Config file for Sonatype Lift analysis tool
+#
+# config reference here: https://help.sonatype.com/lift/configuration-reference
+#
+
+# Tell sonatype where our pom file lives, so it can build it again
+#
+build = "maven -f h2/pom.xml compile"
\ No newline at end of file
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000000..eed8e4b1a1
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,552 @@
+H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License
+Version 2.0) or under the EPL 1.0 (Eclipse Public License).
+
+-------------------------------------------------------------------------------
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+ 1.1. “Contributor”
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+ 1.2. “Contributor Version”
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+ 1.3. “Contribution”
+ means Covered Software of a particular Contributor.
+
+ 1.4. “Covered Software”
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form,
+ and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+ 1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms
+ of a Secondary License.
+
+ 1.6. “Executable Form”
+ means any form of the work other than Source Code Form.
+
+ 1.7. “Larger Work”
+ means a work that combines Covered Software with other material,
+ in a separate file or files, that is not Covered Software.
+
+ 1.8. “License”
+ means this document.
+
+ 1.9. “Licensable”
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently,
+ any and all of the rights conveyed by this License.
+
+ 1.10. “Modifications”
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+ 1.11. “Patent Claims” of a Contributor
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+ 1.12. “Secondary License”
+ means either the GNU General Public License, Version 2.0, the
+ GNU Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those licenses.
+
+ 1.13. “Source Code Form”
+ means the form of the work preferred for making modifications.
+
+ 1.14. “You” (or “Your”)
+ means an individual or a legal entity exercising rights under this License.
+ For legal entities, “You” includes any entity that controls,
+ is controlled by, or is under common control with You. For purposes of
+ this definition, “control” means (a) the power, direct or indirect,
+ to cause the direction or management of such entity, whether by contract
+ or otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+2. License Grants and Conditions
+
+ 2.1. Grants
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications,
+ or as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell,
+ offer for sale, have made, import, and otherwise transfer either
+ its Contributions or its Contributor Version.
+
+ 2.2. Effective Date
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor
+ first distributes such Contribution.
+
+ 2.3. Limitations on Grant Scope
+ The licenses granted in this Section 2 are the only rights granted
+ under this License. No additional rights or licenses will be implied
+ from the distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted
+ by a Contributor:
+
+ a. for any code that a Contributor has removed from
+ Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its
+ Contributor Version); or
+
+ c. under Patent Claims infringed by Covered Software in the
+ absence of its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+ 2.4. Subsequent Licenses
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License
+ (if permitted under the terms of Section 3.3).
+
+ 2.5. Representation
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights
+ to grant the rights to its Contributions conveyed by this License.
+
+ 2.6. Fair Use
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing,
+ or other equivalents.
+
+ 2.7. Conditions
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the
+ licenses granted in Section 2.1.
+
+3. Responsibilities
+
+ 3.1. Distribution of Source Form
+ All distribution of Covered Software in Source Code Form, including
+ any Modifications that You create or to which You contribute, must be
+ under the terms of this License. You must inform recipients that the
+ Source Code Form of the Covered Software is governed by the terms
+ of this License, and how they can obtain a copy of this License.
+ You may not attempt to alter or restrict the recipients’ rights
+ in the Source Code Form.
+
+ 3.2. Distribution of Executable Form
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more than
+ the cost of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients’ rights in the Source Code Form under this License.
+
+ 3.3. Distribution of a Larger Work
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of
+ Covered Software with a work governed by one or more Secondary Licenses,
+ and the Covered Software is not Incompatible With Secondary Licenses,
+ this License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the
+ Covered Software under the terms of either this License or such
+ Secondary License(s).
+
+ 3.4. Notices
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty,
+ or limitations of liability) contained within the Source Code Form of
+ the Covered Software, except that You may alter any license notices to
+ the extent required to remedy known factual inaccuracies.
+
+ 3.5. Application of Additional Terms
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of
+ Covered Software. However, You may do so only on Your own behalf,
+ and not on behalf of any Contributor. You must make it absolutely clear
+ that any such warranty, support, indemnity, or liability obligation is
+ offered by You alone, and You hereby agree to indemnify every Contributor
+ for any liability incurred by such Contributor as a result of warranty,
+ support, indemnity or liability terms You offer. You may include
+ additional disclaimers of warranty and limitations of liability
+ specific to any jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+If it is impossible for You to comply with any of the terms of this License
+with respect to some or all of the Covered Software due to statute,
+judicial order, or regulation then You must: (a) comply with the terms of
+this License to the maximum extent possible; and (b) describe the limitations
+and the code they affect. Such description must be placed in a text file
+included with all distributions of the Covered Software under this License.
+Except to the extent prohibited by statute or regulation, such description
+must be sufficiently detailed for a recipient of ordinary skill
+to be able to understand it.
+
+5. Termination
+
+ 5.1. The rights granted under this License will terminate automatically
+ if You fail to comply with any of its terms. However, if You become
+ compliant, then the rights granted under this License from a particular
+ Contributor are reinstated (a) provisionally, unless and until such
+ Contributor explicitly and finally terminates Your grants, and (b) on an
+ ongoing basis, if such Contributor fails to notify You of the
+ non-compliance by some reasonable means prior to 60 days after You have
+ come back into compliance. Moreover, Your grants from a particular
+ Contributor are reinstated on an ongoing basis if such Contributor
+ notifies You of the non-compliance by some reasonable means,
+ this is the first time You have received notice of non-compliance with
+ this License from such Contributor, and You become compliant prior to
+ 30 days after Your receipt of the notice.
+
+ 5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted
+ to You by any and all Contributors for the Covered Software under
+ Section 2.1 of this License shall terminate.
+
+ 5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+ end user license agreements (excluding distributors and resellers) which
+ have been validly granted by You or Your distributors under this License
+ prior to termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+Covered Software is provided under this License on an “as is” basis, without
+warranty of any kind, either expressed, implied, or statutory, including,
+without limitation, warranties that the Covered Software is free of defects,
+merchantable, fit for a particular purpose or non-infringing. The entire risk
+as to the quality and performance of the Covered Software is with You.
+Should any Covered Software prove defective in any respect, You
+(not any Contributor) assume the cost of any necessary servicing, repair,
+or correction. This disclaimer of warranty constitutes an essential part of
+this License. No use of any Covered Software is authorized under this
+License except under this disclaimer.
+
+7. Limitation of Liability
+
+Under no circumstances and under no legal theory, whether tort
+(including negligence), contract, or otherwise, shall any Contributor, or
+anyone who distributes Covered Software as permitted above, be liable to
+You for any direct, indirect, special, incidental, or consequential damages
+of any character including, without limitation, damages for lost profits,
+loss of goodwill, work stoppage, computer failure or malfunction, or any and
+all other commercial damages or losses, even if such party shall have been
+informed of the possibility of such damages. This limitation of liability
+shall not apply to liability for death or personal injury resulting from
+such party’s negligence to the extent applicable law prohibits such
+limitation. Some jurisdictions do not allow the exclusion or limitation of
+incidental or consequential damages, so this exclusion and limitation may
+not apply to You.
+
+8. Litigation
+
+Any litigation relating to this License may be brought only in the courts of
+a jurisdiction where the defendant maintains its principal place of business
+and such litigation shall be governed by laws of that jurisdiction, without
+reference to its conflict-of-law provisions. Nothing in this Section shall
+prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+This License represents the complete agreement concerning the subject matter
+hereof. If any provision of this License is held to be unenforceable,
+such provision shall be reformed only to the extent necessary to make it
+enforceable. Any law or regulation which provides that the language of a
+contract shall be construed against the drafter shall not be used to construe
+this License against a Contributor.
+
+10. Versions of the License
+
+ 10.1. New Versions
+ Mozilla Foundation is the license steward. Except as provided in
+ Section 10.3, no one other than the license steward has the right to
+ modify or publish new versions of this License. Each version will be
+ given a distinguishing version number.
+
+ 10.2. Effect of New Versions
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published
+ by the license steward.
+
+ 10.3. Modified Versions
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+ 10.4. Distributing Source Code Form that is
+ Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this
+ License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the terms of the
+ Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed
+ with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to
+look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible With Secondary Licenses”,
+ as defined by the Mozilla Public License, v. 2.0.
+
+-------------------------------------------------------------------------------
+
+Eclipse Public License, Version 1.0 (EPL-1.0)
+
+THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC
+LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM
+CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
+
+1. DEFINITIONS
+
+"Contribution" means:
+
+ a) in the case of the initial Contributor, the initial code and
+ documentation distributed under this Agreement, and
+
+ b) in the case of each subsequent Contributor:
+ i) changes to the Program, and
+ ii) additions to the Program;
+
+where such changes and/or additions to the Program originate from and are
+distributed by that particular Contributor. A Contribution 'originates'
+from a Contributor if it was added to the Program by such Contributor itself
+or anyone acting on such Contributor's behalf. Contributions do not include
+additions to the Program which: (i) are separate modules of software
+distributed in conjunction with the Program under their own license agreement,
+and (ii) are not derivative works of the Program.
+
+"Contributor" means any person or entity that distributes the Program.
+
+"Licensed Patents " mean patent claims licensable by a Contributor which are
+necessarily infringed by the use or sale of its Contribution alone or
+when combined with the Program.
+
+"Program" means the Contributions distributed in accordance with
+this Agreement.
+
+"Recipient" means anyone who receives the Program under this Agreement,
+including all Contributors.
+
+2. GRANT OF RIGHTS
+
+ a) Subject to the terms of this Agreement, each Contributor hereby grants
+ Recipient a non-exclusive, worldwide, royalty-free copyright license to
+ reproduce, prepare derivative works of, publicly display, publicly
+ perform, distribute and sublicense the Contribution of such
+ Contributor, if any, and such derivative works,
+ in source code and object code form.
+
+ b) Subject to the terms of this Agreement, each Contributor hereby grants
+ Recipient a non-exclusive, worldwide, royalty-free patent license under
+ Licensed Patents to make, use, sell, offer to sell, import and
+ otherwise transfer the Contribution of such Contributor, if any,
+ in source code and object code form. This patent license shall apply
+ to the combination of the Contribution and the Program if, at the time
+ the Contribution is added by the Contributor, such addition of the
+ Contribution causes such combination to be covered by the
+ Licensed Patents. The patent license shall not apply to any other
+ combinations which include the Contribution.
+ No hardware per se is licensed hereunder.
+
+ c) Recipient understands that although each Contributor grants the
+ licenses to its Contributions set forth herein, no assurances are
+ provided by any Contributor that the Program does not infringe the
+ patent or other intellectual property rights of any other entity.
+ Each Contributor disclaims any liability to Recipient for claims
+ brought by any other entity based on infringement of intellectual
+ property rights or otherwise. As a condition to exercising the
+ rights and licenses granted hereunder, each Recipient hereby assumes
+ sole responsibility to secure any other intellectual property rights
+ needed, if any. For example, if a third party patent license is
+ required to allow Recipient to distribute the Program, it is
+ Recipient's responsibility to acquire that license
+ before distributing the Program.
+
+ d) Each Contributor represents that to its knowledge it has sufficient
+ copyright rights in its Contribution, if any, to grant the copyright
+ license set forth in this Agreement.
+
+3. REQUIREMENTS
+
+A Contributor may choose to distribute the Program in object code form under
+its own license agreement, provided that:
+
+ a) it complies with the terms and conditions of this Agreement; and
+
+ b) its license agreement:
+
+ i) effectively disclaims on behalf of all Contributors all warranties
+ and conditions, express and implied, including warranties or
+ conditions of title and non-infringement, and implied warranties or
+ conditions of merchantability and fitness for a particular purpose;
+
+ ii) effectively excludes on behalf of all Contributors all liability
+ for damages, including direct, indirect, special, incidental and
+ consequential damages, such as lost profits;
+
+ iii) states that any provisions which differ from this Agreement are
+ offered by that Contributor alone and not by any other party; and
+
+ iv) states that source code for the Program is available from such
+ Contributor, and informs licensees how to obtain it in a reasonable
+ manner on or through a medium customarily used for software exchange.
+
+When the Program is made available in source code form:
+
+ a) it must be made available under this Agreement; and
+ b) a copy of this Agreement must be included with each copy of the Program.
+
+Contributors may not remove or alter any copyright notices contained
+within the Program.
+
+Each Contributor must identify itself as the originator of its Contribution,
+if any, in a manner that reasonably allows subsequent Recipients to
+identify the originator of the Contribution.
+
+4. COMMERCIAL DISTRIBUTION
+
+Commercial distributors of software may accept certain responsibilities with
+respect to end users, business partners and the like. While this license is
+intended to facilitate the commercial use of the Program, the Contributor who
+includes the Program in a commercial product offering should do so in a manner
+which does not create potential liability for other Contributors. Therefore,
+if a Contributor includes the Program in a commercial product offering,
+such Contributor ("Commercial Contributor") hereby agrees to defend and
+indemnify every other Contributor ("Indemnified Contributor") against any
+losses, damages and costs (collectively "Losses") arising from claims,
+lawsuits and other legal actions brought by a third party against the
+Indemnified Contributor to the extent caused by the acts or omissions of
+such Commercial Contributor in connection with its distribution of the Program
+in a commercial product offering. The obligations in this section do not apply
+to any claims or Losses relating to any actual or alleged intellectual
+property infringement. In order to qualify, an Indemnified Contributor must:
+a) promptly notify the Commercial Contributor in writing of such claim,
+and b) allow the Commercial Contributor to control, and cooperate with the
+Commercial Contributor in, the defense and any related settlement
+negotiations. The Indemnified Contributor may participate in any such
+claim at its own expense.
+
+For example, a Contributor might include the Program in a commercial product
+offering, Product X. That Contributor is then a Commercial Contributor.
+If that Commercial Contributor then makes performance claims, or offers
+warranties related to Product X, those performance claims and warranties
+are such Commercial Contributor's responsibility alone. Under this section,
+the Commercial Contributor would have to defend claims against the other
+Contributors related to those performance claims and warranties, and if a
+court requires any other Contributor to pay any damages as a result,
+the Commercial Contributor must pay those damages.
+
+5. NO WARRANTY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR
+IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+Each Recipient is solely responsible for determining the appropriateness of
+using and distributing the Program and assumes all risks associated with its
+exercise of rights under this Agreement , including but not limited to the
+risks and costs of program errors, compliance with applicable laws, damage to
+or loss of data, programs or equipment, and unavailability
+or interruption of operations.
+
+6. DISCLAIMER OF LIABILITY
+
+EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY
+CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION
+LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE
+EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+7. GENERAL
+
+If any provision of this Agreement is invalid or unenforceable under
+applicable law, it shall not affect the validity or enforceability of the
+remainder of the terms of this Agreement, and without further action by
+the parties hereto, such provision shall be reformed to the minimum extent
+necessary to make such provision valid and enforceable.
+
+If Recipient institutes patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Program itself
+(excluding combinations of the Program with other software or hardware)
+infringes such Recipient's patent(s), then such Recipient's rights granted
+under Section 2(b) shall terminate as of the date such litigation is filed.
+
+All Recipient's rights under this Agreement shall terminate if it fails to
+comply with any of the material terms or conditions of this Agreement and
+does not cure such failure in a reasonable period of time after becoming
+aware of such noncompliance. If all Recipient's rights under this
+Agreement terminate, Recipient agrees to cease use and distribution of the
+Program as soon as reasonably practicable. However, Recipient's obligations
+under this Agreement and any licenses granted by Recipient relating to the
+Program shall continue and survive.
+
+Everyone is permitted to copy and distribute copies of this Agreement,
+but in order to avoid inconsistency the Agreement is copyrighted and may
+only be modified in the following manner. The Agreement Steward reserves
+the right to publish new versions (including revisions) of this Agreement
+from time to time. No one other than the Agreement Steward has the right to
+modify this Agreement. The Eclipse Foundation is the initial
+Agreement Steward. The Eclipse Foundation may assign the responsibility to
+serve as the Agreement Steward to a suitable separate entity. Each new version
+of the Agreement will be given a distinguishing version number. The Program
+(including Contributions) may always be distributed subject to the version
+of the Agreement under which it was received. In addition, after a new version
+of the Agreement is published, Contributor may elect to distribute the Program
+(including its Contributions) under the new version. Except as expressly
+stated in Sections 2(a) and 2(b) above, Recipient receives no rights or
+licenses to the intellectual property of any Contributor under this Agreement,
+whether expressly, by implication, estoppel or otherwise. All rights in the
+Program not expressly granted under this Agreement are reserved.
+
+This Agreement is governed by the laws of the State of New York and the
+intellectual property laws of the United States of America. No party to
+this Agreement will bring a legal action under this Agreement more than one
+year after the cause of action arose. Each party waives its rights to a
+jury trial in any resulting litigation.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..70de378686
--- /dev/null
+++ b/README.md
@@ -0,0 +1,40 @@
+[![CI](https://github.com/h2database/h2database/workflows/CI/badge.svg)](https://github.com/h2database/h2database/actions?query=workflow%3ACI)
+# Welcome to H2, the Java SQL database.
+
+## The main features of H2 are:
+
+* Very fast, open source, JDBC API
+* Embedded and server modes; disk-based or in-memory databases
+* Transaction support, multi-version concurrency
+* Browser based Console application
+* Encrypted databases
+* Fulltext search
+* Pure Java with small footprint: around 2.5 MB jar file size
+* ODBC driver
+
+More information: https://h2database.com
+
+## Downloads
+
+[Download latest version](https://h2database.com/html/download.html) or add to `pom.xml`:
+
+```XML
+<dependency>
+    <groupId>com.h2database</groupId>
+    <artifactId>h2</artifactId>
+    <version>...</version>
+</dependency>
+```
Advanced
Two Phase Commit
Compatibility
+Keywords / Reserved Words
Standards Compliance
Run as Windows Service
ODBC Driver
-Using H2 in Microsoft .NET
ACID
@@ -81,8 +81,6 @@ Advanced
Pluggable File System
Split File System
-Database Upgrade
Java Objects Serialization
@@ -94,7 +92,10 @@ Result Sets
Statements that Return a Result Set
-The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP.
+The following statements return a result set: SELECT, TABLE, VALUES,
+EXPLAIN, CALL, SCRIPT, SHOW, HELP.
+EXECUTE may return either a result set or an update count.
+Result of a WITH statement depends on inner command.
All other statements return an update count.
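Because a statement such as EXECUTE may produce either kind of result, JDBC code can use Statement.execute() and then ask which one was produced. A minimal sketch, assuming an in-memory database (the statement text is illustrative):

```java
import java.sql.*;

public class ResultOrUpdateCount {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "");
             Statement stat = conn.createStatement()) {
            // execute() returns true if a result set was produced, false for an update count
            boolean isResultSet = stat.execute("SELECT 1");
            if (isResultSet) {
                try (ResultSet rs = stat.getResultSet()) {
                    while (rs.next()) {
                        System.out.println(rs.getInt(1));
                    }
                }
            } else {
                System.out.println("Update count: " + stat.getUpdateCount());
            }
        }
    }
}
```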
Limiting the Number of Rows
Server side cursors are not supported currently.
If only the first few rows are interesting for the application, then the
result set size should be limited to improve the performance.
-This can be done using LIMIT in a query
-(example: SELECT * FROM TEST LIMIT 100),
+This can be done using FETCH in a query
+(example: SELECT * FROM TEST FETCH FIRST 100 ROWS ONLY),
or by using Statement.setMaxRows(max).
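A minimal JDBC sketch of both approaches, assuming an in-memory database; the TEST table is illustrative:

```java
import java.sql.*;

public class LimitRows {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "");
             Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE TEST AS SELECT X AS ID FROM SYSTEM_RANGE(1, 1000)");
            // Option 1: limit the result in SQL
            try (ResultSet rs = stat.executeQuery("SELECT * FROM TEST FETCH FIRST 100 ROWS ONLY")) {
                while (rs.next()) { /* at most 100 rows */ }
            }
            // Option 2: limit on the JDBC statement
            stat.setMaxRows(100);
            try (ResultSet rs = stat.executeQuery("SELECT * FROM TEST")) {
                while (rs.next()) { /* also at most 100 rows */ }
            }
        }
    }
}
```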
By default, this database stores large LOB (CLOB and BLOB) objects separate
from the main table data. Small LOB objects are stored in-place, the threshold
can be set using
-MAX_LENGTH_INPLACE_LOB,
+MAX_LENGTH_INPLACE_LOB,
but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB
should never be used for columns with a maximum size below about 200 bytes.
The best threshold depends on the use case; reading in-place objects is faster
@@ -145,18 +146,6 @@
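As a sketch, the threshold can be adjusted per database with a SET statement; the 1024-byte value below is arbitrary:

```java
import java.sql.*;

public class LobThreshold {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "");
             Statement stat = conn.createStatement()) {
            // store LOB values up to 1024 bytes in the row itself
            stat.execute("SET MAX_LENGTH_INPLACE_LOB 1024");
            stat.execute("CREATE TABLE DOC(ID INT PRIMARY KEY, CONTENT CLOB)");
            stat.execute("INSERT INTO DOC VALUES(1, 'small value, stored in-place')");
        }
    }
}
```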
-The following feature is only available for the PageStore storage engine.
-For the MVStore engine (the default for H2 version 1.4.x),
-append ;COMPRESS=TRUE to the database URL instead.
-CLOB and BLOB values can be compressed by using SET COMPRESS_LOB.
-The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write
-operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files,
-then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster.
This database supports linked tables, which means tables that don't exist
in the current database but
@@ -184,7 +173,7 @@
h2.shareLinkedConnections=false.
-The statement CREATE LINKED TABLE
+The statement CREATE LINKED TABLE
supports an optional schema name parameter.
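A hedged sketch of the statement with the optional schema name parameter; the driver (left empty for H2), target URL, credentials, schema, and table are placeholders for a database that must already exist:

```java
import java.sql.*;

public class LinkedTable {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "");
             Statement stat = conn.createStatement()) {
            // Link REMOTE_T to table T in schema PUBLIC of another (existing) database.
            stat.execute("CREATE LINKED TABLE REMOTE_T("
                    + "'', 'jdbc:h2:~/other', 'sa', 'sa', 'PUBLIC', 'T')");
            // REMOTE_T can now be queried like a local table.
        }
    }
}
```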
@@ -219,72 +208,72 @@
Please note that most data definition language (DDL) statements, such as
"create table", commit the current transaction.
-See the Grammar for details.
+See the Commands for details.
Transaction isolation is provided for all data manipulation language (DML) statements.
-Please note MVCC is enabled in version 1.4.x by default, when using the MVStore.
-In this case, table level locking is not used.
-Instead, rows are locked for update, and read committed is used in all cases
-(changing the isolation level has no effect).
-This database supports the following transaction isolation levels:
+H2 supports read uncommitted, read committed, repeatable read, snapshot,
+and serializable (partially, see below) isolation levels:
+SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ UNCOMMITTED
-SET LOCK_MODE 3, or append ;LOCK_MODE=3 to the database URL: jdbc:h2:~/test;LOCK_MODE=3
-SET LOCK_MODE 1, or append ;LOCK_MODE=1 to the database URL: jdbc:h2:~/test;LOCK_MODE=1
-SET LOCK_MODE 0, or append ;LOCK_MODE=0 to the database URL: jdbc:h2:~/test;LOCK_MODE=0
+Dirty reads aren't possible; non-repeatable reads and phantom reads are possible.
+To enable, execute the SQL statement
+SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED
+SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL REPEATABLE READ
+SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SNAPSHOT
+SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE
-When using the isolation level 'serializable', dirty reads, non-repeatable reads,
-and phantom reads are prohibited.
-The database allows multiple concurrent connections to the same database.
-To make sure all connections only see consistent data, table level locking is used by default.
-This mechanism does not allow high concurrency, but is very fast.
-Shared locks and exclusive locks are supported.
-Before reading from a table, the database tries to add a shared lock to the table
-(this is only possible if there is no exclusive lock on the object by another connection).
-If the shared lock is added successfully, the table can be read. It is allowed that
-other connections also have a shared lock on the same object. If a connection wants
-to write to a table (update or delete a row), an exclusive lock is required. To get the
-exclusive lock, other connection must not have any locks on the object. After the
-connection commits, all locks are released.
-This database keeps all locks in memory.
-When a lock is released, and multiple connections are waiting for it, one of them is picked at random.
+Insert and update operations only issue a shared lock on the table.
+An exclusive lock is still used when adding or removing columns or when dropping the table.
+Connections only 'see' committed data, and own changes. That means, if connection A updates
+a row but doesn't commit this change yet, connection B will see the old value.
+Only when the change is committed, the new value is visible by other connections
+(read committed). If multiple connections concurrently try to lock or update the same row, the
+database waits until it can apply the change, but at most until the lock timeout expires.
-The MVCC feature allows higher concurrency than using (table level or row level) locks.
-When using MVCC in this database, delete, insert and update operations will only issue a
-shared lock on the table. An exclusive lock is still used when adding or removing columns,
-when dropping the table, and when using SELECT ... FOR UPDATE.
-Connections only 'see' committed data, and own changes. That means, if connection A updates
-a row but doesn't commit this change yet, connection B will see the old value.
-Only when the change is committed, the new value is visible by other connections
-(read committed). If multiple connections concurrently try to update the same row, the
-database waits until it can apply the change, but at most until the lock timeout expires.
-To use the MVCC feature, append ;MVCC=TRUE to the database URL:
-jdbc:h2:~/test;MVCC=TRUE
-The setting must be specified in the first connection (the one that opens the database).
-It is not possible to enable or disable this setting while the database is already open.
-If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect.
-The MVCC mode can not be used together with MULTI_THREADED=TRUE;
-the complete undo log (the list of uncommitted changes) must fit in memory
-when using multi-version concurrency.
-The setting MAX_MEMORY_UNDO has no effect.
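A minimal JDBC sketch of selecting an isolation level, either through the standard JDBC API or with the SQL statement shown above:

```java
import java.sql.*;

public class IsolationLevel {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "")) {
            // standard JDBC API...
            conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
            // ...or the equivalent SQL statement
            try (Statement stat = conn.createStatement()) {
                stat.execute("SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL READ COMMITTED");
            }
        }
    }
}
```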
This database supports a simple clustering / high availability mechanism.
The architecture is:
@@ -409,7 +361,7 @@
-SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME='CLUSTER'
+SELECT SETTING_VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'CLUSTER'
If the result is '' (two single quotes), then the cluster mode is disabled.
Otherwise, the list of
@@ -436,12 +388,12 @@
-executed with care: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(),
+executed with care: UUID(), RANDOM_UUID(), SECURE_RAND(), SESSION_ID(),
MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND()
[when not using a seed].
Those functions should not be used directly in modifying statements
(for example INSERT, UPDATE, MERGE). However, they can be used
in read-only statements and the result can then be used for modifying statements.
-Using auto-increment and identity columns is currently not supported.
+Identity columns aren't supported.
Instead, sequence values need to be manually requested and then used
to insert data (using two statements).
Transaction Commit when Autocommit is On
Other database engines may commit the transaction in this case when the result set is closed.
-Keywords / Reserved Words
-Keywords / Reserved Words
+Keywords / Reserved Words
There is a list of keywords that can't be used as identifiers (table names, column names and so on),
-unless they are quoted (surrounded with double quotes). The list is currently:
-CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE,
-FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL,
-NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY,
-TRUE, UNION, UNIQUE, WHERE
-Certain words of this list are keywords because they are functions that can be used without '()' for compatibility,
-for example CURRENT_TIMESTAMP.
+unless they are quoted (surrounded with double quotes).
+The following tokens are keywords in H2:
+Keyword            H2     2016   2011   2008   2003   1999   92
+ALL                +      +      +      +      +      +      +
+AND                +      +      +      +      +      +      +
+ANY                +      +      +      +      +      +      +
+ARRAY              +      +      +      +      +      +
+AS                 +      +      +      +      +      +      +
+ASYMMETRIC         +      +      +      +      +      NR
+AUTHORIZATION      +      +      +      +      +      +      +
+BETWEEN            +      +      +      +      +      NR     +
+BOTH               CS     +      +      +      +      +      +
+CASE               +      +      +      +      +      +      +
+CAST               +      +      +      +      +      +      +
+CHECK              +      +      +      +      +      +      +
+CONSTRAINT         +      +      +      +      +      +      +
+CROSS              +      +      +      +      +      +      +
+CURRENT_CATALOG    +      +      +      +
+CURRENT_DATE       +      +      +      +      +      +      +
+CURRENT_PATH       +      +      +      +      +      +
+CURRENT_ROLE       +      +      +      +      +      +
+CURRENT_SCHEMA     +      +      +      +
+CURRENT_TIME       +      +      +      +      +      +      +
+CURRENT_TIMESTAMP  +      +      +      +      +      +      +
+CURRENT_USER       +      +      +      +      +      +      +
+DAY                +      +      +      +      +      +      +
+DEFAULT            +      +      +      +      +      +      +
+DISTINCT           +      +      +      +      +      +      +
+ELSE               +      +      +      +      +      +      +
+END                +      +      +      +      +      +      +
+EXCEPT             +      +      +      +      +      +      +
+EXISTS             +      +      +      +      +      NR     +
+FALSE              +      +      +      +      +      +      +
+FETCH              +      +      +      +      +      +      +
+FILTER             CS     +      +      +      +
+FOR                +      +      +      +      +      +      +
+FOREIGN            +      +      +      +      +      +      +
+FROM               +      +      +      +      +      +      +
+FULL               +      +      +      +      +      +      +
+GROUP              +      +      +      +      +      +      +
+GROUPS             CS     +      +
+HAVING             +      +      +      +      +      +      +
+HOUR               +      +      +      +      +      +      +
+IF                 +
+ILIKE              CS
+IN                 +      +      +      +      +      +      +
+INNER              +      +      +      +      +      +      +
+INTERSECT          +      +      +      +      +      +      +
+INTERVAL           +      +      +      +      +      +      +
+IS                 +      +      +      +      +      +      +
+JOIN               +      +      +      +      +      +      +
+KEY                +      NR     NR     NR     NR     +      +
+LEADING            CS     +      +      +      +      +      +
+LEFT               +      +      +      +      +      +      +
+LIKE               +      +      +      +      +      +      +
+LIMIT              MS                                 +
+LOCALTIME          +      +      +      +      +      +
+LOCALTIMESTAMP     +      +      +      +      +      +
+MINUS              MS
+MINUTE             +      +      +      +      +      +      +
+MONTH              +      +      +      +      +      +      +
+NATURAL            +      +      +      +      +      +      +
+NOT                +      +      +      +      +      +      +
+NULL               +      +      +      +      +      +      +
+OFFSET             +      +      +      +
+ON                 +      +      +      +      +      +      +
+OR                 +      +      +      +      +      +      +
+ORDER              +      +      +      +      +      +      +
+OVER               CS     +      +      +      +
+PARTITION          CS     +      +      +      +
+PRIMARY            +      +      +      +      +      +      +
+QUALIFY            +
+RANGE              CS     +      +      +      +
+REGEXP             CS
+RIGHT              +      +      +      +      +      +      +
+ROW                +      +      +      +      +      +
+ROWNUM             +
+ROWS               CS     +      +      +      +      +      +
+SECOND             +      +      +      +      +      +      +
+SELECT             +      +      +      +      +      +      +
+SESSION_USER       +      +      +      +      +      +      +
+SET                +      +      +      +      +      +      +
+SOME               +      +      +      +      +      +      +
+SYMMETRIC          +      +      +      +      +      NR
+SYSTEM_USER        +      +      +      +      +      +      +
+TABLE              +      +      +      +      +      +      +
+TO                 +      +      +      +      +      +      +
+TOP                MS CS
+TRAILING           CS     +      +      +      +      +      +
+TRUE               +      +      +      +      +      +      +
+UESCAPE            +      +      +      +      +
+UNION              +      +      +      +      +      +      +
+UNIQUE             +      +      +      +      +      +      +
+UNKNOWN            +      +      +      +      +      +      +
+USER               +      +      +      +      +      +      +
+USING              +      +      +      +      +      +      +
+VALUE              +      +      +      +      +      +      +
+VALUES             +      +      +      +      +      +      +
+WHEN               +      +      +      +      +      +      +
+WHERE              +      +      +      +      +      +      +
+WINDOW             +      +      +      +      +
+WITH               +      +      +      +      +      +      +
+YEAR               +      +      +      +      +      +      +
+_ROWID_            +
+Mode-sensitive keywords (MS) are keywords only in some compatibility modes.
+
+- LIMIT is a keyword only in Regular, Legacy, DB2, HSQLDB, MariaDB, MySQL, and PostgreSQL compatibility modes.
+It is an identifier in Strict, Derby, MSSQLServer, and Oracle compatibility modes.
+
- MINUS is a keyword only in Regular, Legacy, DB2, HSQLDB, and Oracle compatibility modes.
+It is an identifier in Strict, Derby, MSSQLServer, MariaDB, MySQL, and PostgreSQL compatibility modes.
+
- TOP is a context-sensitive keyword (can be either keyword or identifier)
+only in Regular, Legacy, HSQLDB, and MSSQLServer compatibility modes.
+It is an identifier unconditionally in Strict, Derby, DB2, MariaDB, MySQL, Oracle, and PostgreSQL compatibility modes.
+
+
+Context-sensitive keywords (CS) can be used as identifiers in some places,
+but cannot be used as identifiers in others.
+Normal keywords (+) are always treated as keywords.
+
+Most keywords in H2 are also reserved (+) or non-reserved (NR) words in the SQL Standard.
+Newer versions of H2 may have more keywords than older ones.
+Reserved words from the SQL Standard are potential candidates for keywords in future versions.
+
+
+There is a compatibility setting
+SET NON_KEYWORDS
+that can be used as a temporary workaround for applications that use keywords as unquoted identifiers.
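A small sketch of quoting the keyword VALUE so it can serve as a column name (quoted identifiers are case-sensitive):

```java
import java.sql.*;

public class QuotedKeyword {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo", "sa", "");
             Statement stat = conn.createStatement()) {
            // VALUE is a keyword, so it must be double-quoted when used as a column name
            stat.execute("CREATE TABLE ITEM(ID INT PRIMARY KEY, \"VALUE\" INT)");
            stat.execute("INSERT INTO ITEM(ID, \"VALUE\") VALUES(1, 42)");
            try (ResultSet rs = stat.executeQuery("SELECT \"VALUE\" FROM ITEM")) {
                while (rs.next()) {
                    System.out.println(rs.getInt(1));
                }
            }
        }
    }
}
```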
Standards Compliance
@@ -525,7 +714,7 @@
Run as Windows Service
Using a native wrapper / adapter, Java applications can be run as a Windows Service.
There are various tools available to do that. The Java Service Wrapper from
-Tanuki Software, Inc.
+Tanuki Software, Inc.
is included in the installation. Batch files are provided to install, start, stop and uninstall the
H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application.
The batch files are located in the directory h2/service
.
@@ -534,7 +723,7 @@
Run as Windows Service
The service wrapper bundled with H2 is a 32-bit version.
To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper,
for example the one from
-
+
Simon Krenger.
@@ -600,7 +789,7 @@
ODBC Driver
first run c:/windows/syswow64/odbcad32.exe
.
At this point you set up your DSN just like you would on any other system.
See also:
-Re: ODBC Driver on Windows 64 bit
+Re: ODBC Driver on Windows 64 bit
ODBC Installation
@@ -608,7 +797,7 @@ ODBC Installation
First, the ODBC driver must be installed.
Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*
) or newer is recommended.
The Windows version of the PostgreSQL ODBC driver is available at
-http://www.postgresql.org/ftp/odbc/versions/msi.
+https://www.postgresql.org/ftp/odbc/versions/msi/.
Starting the Server
@@ -714,55 +903,6 @@ Using Microsoft Access
Tools - Options - Edit/Find - ODBC fields.
-Using H2 in Microsoft .NET
-
-The database can be used from Microsoft .NET even without using Java, by using IKVM.NET.
-You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface.
-
-
-Using the ADO.NET API on .NET
-
-An implementation of the ADO.NET interface is available in the open source project
-H2Sharp.
-
-
-Using the JDBC API on .NET
-- Install the .NET Framework from Microsoft.
- Mono has not yet been tested.
-
- Install IKVM.NET.
-
- Copy the
h2*.jar
file to ikvm/bin
- - Run the H2 Console using:
-
ikvm -jar h2*.jar
- - Convert the H2 Console to an
.exe
file using:
- ikvmc -target:winexe h2*.jar
.
- You may ignore the warnings.
- - Create a
.dll
file using (change the version accordingly):
- ikvmc.exe -target:library -version:1.0.69.0 h2*.jar
-
-
-If you want your C# application use H2, you need to add the h2.dll
and the
-IKVM.OpenJDK.ClassLibrary.dll
to your C# solution. Here some sample code:
-
-
-using System;
-using java.sql;
-
-class Test
-{
- static public void Main()
- {
- org.h2.Driver.load();
- Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "sa");
- Statement stat = conn.createStatement();
- ResultSet rs = stat.executeQuery("SELECT 'Hello World'");
- while (rs.next())
- {
- Console.WriteLine(rs.getString(1));
- }
- }
-}
-
-
ACID
In the database world, ACID stands for:
@@ -790,7 +930,8 @@
Isolation
For H2, as with most other database systems, the default isolation level is 'read committed'.
This provides better performance, but also means that transactions are not completely isolated.
-H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'.
+H2 supports the transaction isolation levels 'read uncommitted', 'read committed', 'repeatable read',
+and 'serializable'.
Durability
@@ -851,9 +992,9 @@ Ways to (Not) Achieve Durability
FileChannel.force()
,
data is not always persisted to the hard drive, because most hard drives do not obey
fsync()
: see
-Your Hard Drive Lies to You.
+Your Hard Drive Lies to You.
In Mac OS X, fsync
does not flush hard drive buffers. See
-Bad fsync?.
+Bad fsync?.
So the situation is confusing, and tests prove there is a problem.
@@ -900,7 +1041,8 @@
Using the Recover Tool
For each database in the current directory, a text file will be created.
This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate
the schema of the database. This file can be executed using the RunScript
tool or a
-RUNSCRIPT FROM
SQL statement. The script includes at least one
+RUNSCRIPT
SQL statement.
+The script includes at least one
CREATE USER
statement. If you run the script against a database that was created with the same
user, or if there are conflicting users, running the script will fail. Consider running the script
against a database that was created with a user name that is not in the script.
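A hedged sketch using the Recover and RunScript tools from org.h2.tools; the directory, database name, script file, and credentials are illustrative, and the RunScript overload shown assumes the Charset-based signature:

```java
import java.nio.charset.StandardCharsets;
import org.h2.tools.Recover;
import org.h2.tools.RunScript;

public class RecoverDatabase {
    public static void main(String[] args) throws Exception {
        // writes a test.h2.sql recovery script for database 'test' in ~/db
        Recover.execute("~/db", "test");
        // run the script against a fresh database opened by a different user
        RunScript.execute("jdbc:h2:~/recovered", "other_user", "secret",
                "~/db/test.h2.sql", StandardCharsets.UTF_8, false);
    }
}
```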
@@ -1063,7 +1205,6 @@ Passwords: Using Char Arrays instead of Strings
import java.util.*;
public class Test {
public static void main(String[] args) throws Exception {
- Class.forName("org.h2.Driver");
String url = "jdbc:h2:~/test";
Properties prop = new Properties();
prop.setProperty("user", "sa");
@@ -1081,7 +1222,6 @@ Passwords: Using Char Arrays instead of Strings
}
-This example requires Java 1.6.
When using Swing, use javax.swing.JPasswordField
.
@@ -1221,7 +1361,7 @@ Protection against Remote Access
If you enable remote access using -tcpAllowOthers or -pgAllowOthers,
-please also consider using the options -baseDir, -ifExists,
+please also consider using the options -baseDir,
so that remote users can not create new databases
or access existing databases with weak passwords.
When using the option -baseDir, only databases within that directory may be accessed.
@@ -1230,9 +1370,10 @@ Protection against Remote Access
If you enable remote access using -webAllowOthers,
please ensure the web server can only be accessed from trusted networks.
-The options -baseDir, -ifExists don't protect
-access to the tools section, prevent remote shutdown of the web server,
-changes to the preferences, the saved connection settings,
+If this option is specified, -webExternalNames should be also specified with
+comma-separated list of external names or addresses of this server.
+The options -baseDir don't protect
+access to the saved connection settings,
or access to other databases accessible from the system.
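A sketch of starting a TCP server with those restrictions via org.h2.tools.Server; the base directory is illustrative:

```java
import org.h2.tools.Server;

public class RestrictedServer {
    public static void main(String[] args) throws Exception {
        // remote connections are allowed, but only to databases below ~/dbs
        Server server = Server.createTcpServer(
                "-tcpAllowOthers", "-baseDir", "~/dbs").start();
        System.out.println("TCP server running at " + server.getURL());
        // call server.stop() to shut it down
    }
}
```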
@@ -1378,7 +1519,7 @@ TLS Connections
To use your own keystore, set the system properties javax.net.ssl.keyStore
and
javax.net.ssl.keyStorePassword
before starting the H2 server and client.
-See also
+See also
Customizing the Default Key and Trust Stores, Store Types, and Store Passwords
for more information.
@@ -1395,7 +1536,7 @@ Universally Unique Identifiers (UUID)
Standardized randomly generated UUIDs have 122 random bits.
4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz).
This database supports generating such UUIDs using the built-in function
-RANDOM_UUID()
.
+RANDOM_UUID()
or UUID()
.
Here is a small program to estimate the probability of having two identical UUIDs
after generating a number of values:
@@ -1430,52 +1571,45 @@ Universally Unique Identifiers (UUID)
Spatial Features
-H2 supports the geometry data type and spatial indexes if
-the JTS Topology Suite
-is in the classpath.
-To run the H2 Console tool with the JTS tool, you need to download the
-JTS 1.13 jar file
-and place it in the h2 bin directory. Then edit the h2.sh
file as follows:
-
-
-#!/bin/sh
-dir=$(dirname "$0")
-java -cp "$dir/h2.jar:jts-1.13.jar:$H2DRIVERS:$CLASSPATH" org.h2.tools.Console "$@"
-
-
+H2 supports the geometry data type and spatial indexes.
Here is an example SQL script to create a table with a spatial column and index:
-CREATE TABLE GEO_TABLE(GID SERIAL, THE_GEOM GEOMETRY);
+CREATE TABLE GEO_TABLE(
+ GID BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY,
+ THE_GEOM GEOMETRY);
INSERT INTO GEO_TABLE(THE_GEOM) VALUES
('POINT(500 505)'),
('LINESTRING(550 551, 525 512, 565 566)'),
('POLYGON ((550 521, 580 540, 570 564, 512 566, 550 521))');
-CREATE SPATIAL INDEX GEO_TABLE_SPATIAL_INDEX ON GEO_TABLE(THE_GEOM);
+CREATE SPATIAL INDEX GEO_TABLE_SPATIAL_INDEX
+ ON GEO_TABLE(THE_GEOM);
To query the table using geometry envelope intersection,
-use the operation &&
, as in PostGIS:
+use the operation &&
, as in PostGIS:
SELECT * FROM GEO_TABLE
-WHERE THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
+ WHERE THE_GEOM &&
+ 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
You can verify that the spatial index is used using the "explain plan" feature:
EXPLAIN SELECT * FROM GEO_TABLE
-WHERE THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
+ WHERE THE_GEOM &&
+ 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';
-- Result
SELECT
- GEO_TABLE.GID,
- GEO_TABLE.THE_GEOM
-FROM PUBLIC.GEO_TABLE
- /* PUBLIC.GEO_TABLE_SPATIAL_INDEX:
- THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))' */
-WHERE INTERSECTS(THE_GEOM,
- 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))')
+ "PUBLIC"."GEO_TABLE"."GID",
+ "PUBLIC"."GEO_TABLE"."THE_GEOM"
+FROM "PUBLIC"."GEO_TABLE"
+ /* PUBLIC.GEO_TABLE_SPATIAL_INDEX: THE_GEOM &&
+ GEOMETRY 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))' */
+WHERE "THE_GEOM" &&
+ GEOMETRY 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))'
For persistent databases, the spatial index is stored on disk;
@@ -1512,7 +1646,7 @@
Recursive Queries
WITH LINK(ID, NAME, LEVEL) AS (
SELECT ID, NAME, 0 FROM FOLDER WHERE PARENT IS NULL
UNION ALL
- SELECT FOLDER.ID, IFNULL(LINK.NAME || '/', '') || FOLDER.NAME, LEVEL + 1
+ SELECT FOLDER.ID, COALESCE(LINK.NAME || '/', '') || FOLDER.NAME, LEVEL + 1
FROM LINK INNER JOIN FOLDER ON LINK.ID = FOLDER.PARENT
)
SELECT NAME FROM LINK WHERE NAME IS NOT NULL ORDER BY ID;
@@ -1561,7 +1695,7 @@ Settings Read from System Properties
For a complete list of settings, see
-SysProperties.
+SysProperties.
Setting the Server Bind Address
@@ -1577,21 +1711,31 @@ Pluggable File System
This database supports a pluggable file system API.
The file system implementation is selected using a file name prefix.
-Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7.
+Internally, the interfaces are very similar to the Java 7 NIO2 API.
The following file systems are included:
-zip:
read-only zip-file based file system. Format: zip:/zipFileName!/fileName
.
+file:
the default file system that uses FileChannel
.
+zip:
read-only zip-file based file system. Format: zip:~/zipFileName!/fileName
.
split:
file system that splits files in 1 GB files (stackable with other file systems).
-nio:
file system that uses FileChannel
instead of RandomAccessFile
(faster in some operating systems).
nioMapped:
file system that uses memory mapped files (faster in some operating systems).
- Please note that there currently is a file size limitation of 2 GB when using this file system when using a 32-bit JVM.
- To work around this limitation, combine it with the split file system: split:nioMapped:test
.
+ Please note that there currently is a file size limitation of 2 GB when using this file system.
+ To work around this limitation, combine it with the split file system: split:nioMapped:~/test
.
+async:
experimental file system that uses AsynchronousFileChannel
instead of FileChannel
(faster in some operating systems).
memFS:
in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself).
memLZF:
compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself).
+nioMemFS:
stores data outside of the VM's heap - useful for large memory DBs without incurring GC costs.
+
+-
+
nioMemLZF:
stores compressed data outside of the VM's heap -
+ useful for large memory DBs without incurring GC costs.
+ Use "nioMemLZF:12:" to tweak the % of blocks that are stored uncompressed.
+ If you size this to your working set correctly,
+ compressed storage is roughly the same performance as uncompressed.
+ The default value is 1%.
-As an example, to use the nio file system, use the following database URL:
-jdbc:h2:nio:~/test.
+As an example, to use the async: file system,
+use the following database URL: jdbc:h2:async:~/test.
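A sketch of stacking file system prefixes in a database URL; the path is illustrative:

```java
import java.sql.*;

public class StackedFileSystems {
    public static void main(String[] args) throws SQLException {
        // 'split:' splits the data into 1 GB files; 'nioMapped:' uses memory mapped files
        String url = "jdbc:h2:split:nioMapped:~/bigdb";
        try (Connection conn = DriverManager.getConnection(url, "sa", "")) {
            // use the database as usual
        }
    }
}
```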
To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase
,
@@ -1619,43 +1763,10 @@
Split File System
However this can be changed if required, by specifying the block size in the file name.
The file name format is: split:<x>:<fileName>
where the file size per block is 2^x.
For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB).
-The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db
.
+The following file name means the logical file is split into 1 MiB blocks: split:20:~/test.h2.db
.
An example database URL for this case is jdbc:h2:split:20:~/test
.
-Database Upgrade
-
-In version 1.2, H2 introduced a new file store implementation which is incompatible to the one used in versions < 1.2.
-To automatically convert databases to the new file store, it is necessary to include an additional jar file.
-The file can be found at http://h2database.com/h2mig_pagestore_addon.jar .
-If this file is in the classpath, every connect to an older database will result in a conversion process.
-
-
-The conversion itself is done internally via 'script to'
and 'runscript from'
. After the conversion process, the files will be
-renamed from
-
-dbName.data.db
to dbName.data.db.backup
-dbName.index.db
to dbName.index.db.backup
-
-by default. Also, the temporary script will be written to the database directory instead of a temporary directory.
-Both defaults can be customized via
-
-org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean)
-org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean)
-
-prior opening a database connection.
-
-
-Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database.
-The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1:
-(the JDBC driver class is org.h2.upgrade.v1_1.Driver
).
-If the database should automatically connect using the old version if a database with the old format exists
-(without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE
-to the database URL.
-Please note the old driver did not process the system property "h2.baseDir"
correctly,
-so that using this setting is not supported when upgrading.
-
-
Java Objects Serialization
Java objects serialization is enabled by default for columns of type OTHER
, using standard Java serialization/deserialization semantics.
@@ -1664,7 +1775,9 @@
Java Objects Serialization
To disable this feature set the system property h2.serializeJavaObject=false
(default: true).
-Serialization and deserialization of java objects is customizable both at system level and at database level providing a JavaObjectSerializer implementation:
+Serialization and deserialization of java objects is customizable both at system level and at database level providing a
+JavaObjectSerializer implementation:
+
-
At system level set the system property
h2.javaObjectSerializer
with the
@@ -1680,7 +1793,6 @@ Java Objects Serialization
-
Limits and Limitations
@@ -1697,28 +1809,29 @@
Limits and Limitations
An example database URL is: jdbc:h2:split:~/test
.
- The maximum number of rows per table is 2^64.
- The maximum number of open transactions is 65535.
+
- The maximum number of columns in a table or expressions in a SELECT statement is 16384.
+The actual possible number can be smaller if their definitions are too long.
+
- The maximum length of an identifier (table name, column name, and so on) is 256 characters.
+
- The maximum length of CHARACTER, CHARACTER VARYING and VARCHAR_IGNORECASE values and columns
+is 1048576 characters.
+
- The maximum length of BINARY, BINARY VARYING, JAVA_OBJECT, GEOMETRY, and JSON values and columns
+is 1048576 bytes.
+
- The maximum precision of NUMERIC and DECFLOAT values and columns is 100000.
+
- The maximum length of an ENUM value is 1048576 characters, the maximum number of ENUM values is 65536.
+
- The maximum cardinality of an ARRAY value or column is 65536.
+
- The maximum degree of a ROW value or column is 16384.
+
- The maximum index of parameter is 100000.
- Main memory requirements: The larger the database, the more main memory is required.
- With the current storage mechanism (the page store),
- the minimum main memory required is around 1 MB for each 8 GB database file size.
- Limit on the complexity of SQL statements.
-Statements of the following form will result in a stack overflow exception:
-
-SELECT * FROM DUAL WHERE X = 1
-OR X = 2 OR X = 2 OR X = 2 OR X = 2 OR X = 2
--- repeat previous line 500 times --
-
+Very complex expressions may result in a stack overflow exception.
- There is no limit for the following entities, except the memory and storage capacity:
- maximum identifier length (table name, column name, and so on);
- maximum number of tables, columns, indexes, triggers, and other database objects;
- maximum statement length, number of parameters per statement, tables per statement, expressions
- in order by, group by, having, and so on;
+ maximum number of tables, indexes, triggers, and other database objects;
+ maximum statement length, tables per statement;
maximum rows per query;
- maximum columns per table, columns per index, indexes per table, lob columns per table, and so on;
- maximum row length, index row length, select row length;
- maximum length of a varchar column, decimal column, literal in a statement.
+ maximum indexes per table, lob columns per table, and so on;
+ maximum row length, index row length, select row length.
- Querying from the metadata tables is slow if there are many tables (thousands).
-
- For limitations on data types, see the documentation of the respective Java data type
- or the data type documentation of this database.
+
- For other limitations on data types, see the data type documentation of this database.
Glossary and Links
@@ -1730,60 +1843,53 @@ Glossary and Links
AES-128
A block encryption algorithm. See also: Wikipedia:
- AES
+ href="https://en.wikipedia.org/wiki/Advanced_Encryption_Standard">Wikipedia:
+ Advanced Encryption Standard
Birthday Paradox
Describes the higher than expected probability that two
persons in a room have the same birthday. Also valid for randomly
generated UUIDs. See also: Wikipedia:
- Birthday Paradox
+ href="https://en.wikipedia.org/wiki/Birthday_problem">Wikipedia:
+ Birthday problem
Digest
Protocol to protect a password (but not to protect data).
- See also: RFC
+ See also: RFC
2617: HTTP Digest Access Authentication
-
- GCJ
- Compiler for Java. GNU
- Compiler for the Java and NativeJ
- (commercial)
-
HTTPS
A protocol to provide security to HTTP connections. See
- also: RFC 2818:
+ also: RFC 2818:
HTTP Over TLS
Modes of Operation
Wikipedia:
- Block cipher modes of operation
+ href="https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation">Wikipedia:
+ Block cipher mode of operation
Salt
Random number to increase the security of passwords. See
- also: Wikipedia:
+ also: Wikipedia:
Key derivation function
SHA-256
A cryptographic one-way hash function. See also: Wikipedia: SHA
- hash functions
+ href="https://en.wikipedia.org/wiki/Secure_Hash_Algorithms">Wikipedia:
+ Secure Hash Algorithms
SQL Injection
A security vulnerability where an application embeds user input in SQL
statements or expressions. See also: Wikipedia:
- SQL Injection
+ href="https://en.wikipedia.org/wiki/SQL_injection">Wikipedia:
+ SQL injection
Watermark Attack
@@ -1795,7 +1901,7 @@ Glossary and Links
SSL/TLS
Secure Sockets Layer / Transport Layer Security. See also:
- Java Secure Socket
+ Java Secure Socket
Extension (JSSE)
diff --git a/h2/src/docsrc/html/architecture.html b/h2/src/docsrc/html/architecture.html
index 9875867357..af4ccdca18 100644
--- a/h2/src/docsrc/html/architecture.html
+++ b/h2/src/docsrc/html/architecture.html
@@ -1,7 +1,7 @@
@@ -50,6 +50,7 @@ Introduction
Top-down Overview
Working from the top down, the layers look like this:
+
- JDBC driver.
- Connection/session management.
- SQL Parser.
@@ -59,7 +60,6 @@
Top-down Overview
- B-tree engine and page-based storage allocation.
- Filesystem abstraction.
-
JDBC Driver
@@ -69,6 +69,7 @@
JDBC Driver
Connection/session management
The primary classes of interest are:
+
Package Description
org.h2.engine.Database the root/global class
@@ -79,14 +80,13 @@ Connection/session management
org.h2.engine.SessionRemote
remote session
-
Parser
The parser lives in org.h2.command.Parser
. It uses a straightforward recursive-descent design.
-See Wikipedia Recursive-descent parser page.
+See Wikipedia Recursive descent parser page.
@@ -95,14 +95,15 @@ Command execution and planning
Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query.
The parser class directly generates a command execution object.
Then we run some optimisation steps over the command to possibly generate a more efficient command.
-
+
+
The primary packages of interest are:
+
Package Description
org.h2.command.ddl Commands that modify schema data structures
org.h2.command.dml Commands that modify data
-
Table/Index/Constraints
@@ -110,18 +111,18 @@
Table/Index/Constraints
The primary packages of interest are:
+
Package Description
org.h2.table Implementations of different kinds of tables
org.h2.index Implementations of different kinds of indices
-
Undo log, redo log, and transactions layer
We have a transaction log, which is shared among all sessions. See also
-http://en.wikipedia.org/wiki/Transaction_log
-http://h2database.com/html/grammar.html#set_log
+https://en.wikipedia.org/wiki/Transaction_log
+https://h2database.com/html/grammar.html#set_log
We also have an undo log, which is per session, to undo an operation (an update that fails for example)
diff --git a/h2/src/docsrc/html/build.html b/h2/src/docsrc/html/build.html
index 19294814ca..87a588d72b 100644
--- a/h2/src/docsrc/html/build.html
+++ b/h2/src/docsrc/html/build.html
@@ -1,7 +1,7 @@
@@ -18,15 +18,13 @@
H2 Database Engine Cheat Sheet
Using H2
-- H2 is
- open source,
+
- H2 is
+ open source,
free to use and distribute.
-
- Download:
- jar,
- installer (Windows),
- zip.
+
- Download:
+ jar,
+ installer (Windows),
+ zip.
- To start the
H2 Console tool, double click the jar file, or
run
java -jar h2*.jar
, h2.bat
, or h2.sh
.
- A new database is automatically created
- by default.
+ by default if an embedded URL is used.
- Closing the last connection closes the database.
@@ -148,19 +148,24 @@ Database URLs
Embedded
jdbc:h2:~/test
'test' in the user home directory
jdbc:h2:/data/test
'test' in the directory /data
-jdbc:h2:test
in the current(!) working directory
+jdbc:h2:./test
in the current(!) working directory
In-Memory
-jdbc:h2:mem:test
multiple connections in one process
+jdbc:h2:mem:test
multiple connections in one process,
+database is removed when all connections are closed
+jdbc:h2:mem:test;DB_CLOSE_DELAY=-1
multiple connections in one process,
+database is not removed when all connections are closed
+(may create a memory leak)
jdbc:h2:mem:
unnamed private; one connection
Server Mode
jdbc:h2:tcp://localhost/~/test
user home dir
-jdbc:h2:tcp://localhost//data/test
absolute dir
+jdbc:h2:tcp://localhost//data/test
or jdbc:h2:tcp://localhost/D:/data/test
absolute dir
Server start:java -cp *.jar org.h2.tools.Server
Settings
-jdbc:h2:..;MODE=MySQL
compatibility (or HSQLDB,...)
+jdbc:h2:..;MODE=MySQL;DATABASE_TO_LOWER=TRUE
+compatibility (or HSQLDB,...)
jdbc:h2:..;TRACE_LEVEL_FILE=3
log to *.trace.db
@@ -169,7 +174,6 @@ Database URLs
Using the JDBC API
-Class.forName("org.h2.Driver");
Connection conn = DriverManager.
getConnection("jdbc:h2:~/test");
conn.close();
diff --git a/h2/src/docsrc/html/commands.html b/h2/src/docsrc/html/commands.html
new file mode 100644
index 0000000000..cb236b61b0
--- /dev/null
+++ b/h2/src/docsrc/html/commands.html
@@ -0,0 +1,181 @@
+
+
+
+
+
+
+Commands
+
+
+
+
+
+
+
+
+Commands
+Index
+Commands (Data Manipulation)
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Commands (Data Definition)
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Commands (Other)
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Details
+
+Click on the header of the command to switch between railroad diagram and BNF.
+
+Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red;
+don't use it unless you need it for compatibility with other databases or old versions of H2.
+
+Commands (Data Manipulation)
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+
+${item.example}
+
+
+Commands (Data Definition)
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+
+${item.example}
+
+
+Commands (Other)
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+
+${item.example}
+
+
+
+
+
diff --git a/h2/src/docsrc/html/datatypes.html b/h2/src/docsrc/html/datatypes.html
index 2f784612a0..367ab2d00d 100644
--- a/h2/src/docsrc/html/datatypes.html
+++ b/h2/src/docsrc/html/datatypes.html
@@ -1,7 +1,7 @@
@@ -18,7 +18,7 @@
Data Types
-Index
+Index
-
Details
-Click on the header to switch between railroad diagram and BNF.
+
+Click on the header of the data type to switch between railroad diagram and BNF.
+Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red;
+don't use it unless you need it for compatibility with other databases or old versions of H2.
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Interval Data Types
+
+
${item.topic}
Downloads
-Version ${version} (${versionDate}), Beta
+Version ${version} (${versionDate})
-Windows Installer
+Windows Installer
-Platform-Independent Zip
+Platform-Independent Zip
-Version ${stableVersion} (${stableVersionDate}), Last Stable
+Version 2.0.206 (2022-01-04)
-Windows Installer
-Platform-Independent Zip
+Windows Installer
+(SHA1 checksum: 982dff9c88412b00b3ced52b6870753e0133be07)
+Platform-Independent Zip
+(SHA1 checksum: 85d6d8f552661c2f8e1b86c10a12ab4bb6b0d29b)
-Download Mirror and Older Versions
+Archive Downloads
-Platform-Independent Zip
+Archive Downloads
-Jar File
+Maven (Binary JAR, Javadoc, and Source)
-Maven.org
-Sourceforge.net
-Latest Automated Build (not released)
+Binary JAR
+Javadoc
+Sources
-Maven (Binary, Javadoc, and Source)
+Git Source Repository
-
-Database Upgrade Helper File
-
-Upgrade database from 1.1 to the current version
-
-
-Subversion Source Repository
-
@@ -68,9 +58,9 @@
Subversion Source Repository
News and Project Information
-Atom Feed
-RSS Feed
-DOAP File (what is this)
+Atom Feed
+RSS Feed
+DOAP File (what is this)
diff --git a/h2/src/docsrc/html/faq.html b/h2/src/docsrc/html/faq.html
index 5bfdf8ea55..932ef197ac 100644
--- a/h2/src/docsrc/html/faq.html
+++ b/h2/src/docsrc/html/faq.html
@@ -1,7 +1,7 @@
@@ -17,7 +17,7 @@
-Frequently Asked Questions
+Frequently Asked Questions
I Have a Problem or Feature Request
@@ -47,8 +47,6 @@ Frequently Asked Questions
Column Names are Incorrect?
Float is Double?
-
- Is the GCJ Version Stable? Faster?
How to Translate this Project?
@@ -69,29 +67,20 @@ Are there Known Bugs? When is the Next Release?
will differ. This is not a problem within regions that use the same rules (such as within
the USA, or within Europe), even if the timezone itself is different. As a workaround, export the
database to a SQL script using the old timezone, and create a new database in the new
- timezone. This problem does not occur when using the system property "h2.storeLocalTime"
- (however such database files are not compatible with older versions of H2).
-Apache Harmony: there seems to be a bug in Harmony that affects H2.
- See HARMONY-6505.
- Tomcat and Glassfish 3 set most static fields (final or non-final) to null
when
- unloading a web application. This can cause a NullPointerException
in H2 versions
- 1.1.107 and older, and may still not work in newer versions. Please report it if you
- run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the
- system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false
,
- however Tomcat may then run out of memory. A known workaround is to
- put the h2*.jar
file in a shared lib
directory
+ timezone.
+ Old versions of Tomcat and Glassfish 3 set most static fields (final or non-final) to null
when
+ unloading a web application. This can cause a NullPointerException
.
+ In Tomcat >= 6.0 this behavior can be disabled by setting the
+ system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false
.
+ A known workaround is to put the h2*.jar
file in a shared lib
directory
(common/lib
).
+ Tomcat 8.5 and newer versions don't clear fields and don't have such a property.
Some problems have been found with right outer join. Internally, it is converted
to left outer join, which does not always produce the same results as other databases
when used in combination with other joins. This problem is fixed in H2 version 1.3.
- When using Install4j before 4.1.4 on Linux and enabling pack200
,
- the h2*.jar
becomes corrupted by the install process, causing application failure.
- A workaround is to add an empty file h2*.jar.nopack
- next to the h2*.jar
file.
- This problem is solved in Install4j 4.1.4.
-For a complete list, see Open Issues.
+For a complete list, see Open Issues.
Is this Database Engine Open Source?
@@ -102,13 +91,13 @@ Is this Database Engine Open Source?
Is Commercial Support Available?
-Yes, commercial support is available,
-see Commercial Support.
+No, currently commercial support is not available.
How to Create a New Database?
-By default, a new database is automatically created if it does not yet exist.
+By default, a new database is automatically created if it does not yet exist when
+an embedded URL is used.
See Creating New Databases.
@@ -119,7 +108,6 @@ How to Connect to a Database?
To connect to a database using JDBC, use the following code:
-Class.forName("org.h2.Driver");
Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
@@ -130,15 +118,17 @@ Where are the Database Files Stored?
For Windows, this is usually
C:\Documents and Settings\<userName>
or
C:\Users\<userName>
.
-If the base directory is not set (as in jdbc:h2:test
),
+If the base directory is not set (as in jdbc:h2:./test
),
the database files are stored in the directory where the application is started
-(the current working directory). When using the H2 Console application from the start menu,
+(the current working directory).
+When using the H2 Console application from the start menu,
this is <Installation Directory>/bin
.
-The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL
-jdbc:h2:file:data/sample
, the database is stored in the directory
+The base directory can be set in the database URL.
+A fixed or relative path can be used. When using the URL
+jdbc:h2:file:./data/sample
, the database is stored in the directory
data
(relative to the current working directory).
-The directory is created automatically if it does not yet exist. It is also possible to use the
-fully qualified directory name (and for Windows, drive name).
+The directory is created automatically if it does not yet exist.
+It is also possible to use the fully qualified directory name (and for Windows, drive name).
Example: jdbc:h2:file:C:/data/test
@@ -175,10 +165,9 @@ Is it Reliable?
is well tested (if possible with automated test cases). The areas that are not well tested are:
-- Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7
+
- Platforms other than Windows, Linux, Mac OS X, or runtime environments other than Oracle / OpenJDK 7, 8, 9.
- The features
AUTO_SERVER
and AUTO_RECONNECT
.
- Cluster mode, 2-phase commit, savepoints.
-
- 24/7 operation.
- Fulltext search.
- Operations on LOBs over 2 GB.
- The optimizer may not always select the best plan.
@@ -191,7 +180,6 @@
Is it Reliable?
- The PostgreSQL server
- Clustering (there are cases where transaction isolation can be broken
due to timing issues, for example one session overtaking another session).
-
- Multi-threading within the engine using
SET MULTI_THREADED=1
.
- Compatibility modes for other databases (only some features are implemented).
- The soft reference cache (
CACHE_TYPE=SOFT_LRU
). It might not improve performance,
and out of memory issues have been reported.
@@ -246,13 +234,12 @@ Column Names are Incorrect?
return X
. What's wrong?
-This is not a bug. According the the JDBC specification, the method
+This is not a bug. According to the JDBC specification, the method
ResultSetMetaData.getColumnName()
should return the name of the column
and not the alias name. If you need the alias name, use
-ResultSetMetaData.getColumnLabel()
.
+ResultSetMetaData.getColumnLabel()
.
Some other databases don't work like this yet (they don't follow the JDBC specification).
-If you need compatibility with those databases, use the Compatibility Mode,
-or append ;ALIAS_COLUMN_NAME=TRUE
to the database URL.
+If you need compatibility with those databases, use the Compatibility Mode.
This also applies to DatabaseMetaData calls that return a result set.
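
For example (an illustrative sketch; the table and alias names are hypothetical):

import java.sql.*;

public class ColumnLabelExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", "");
             Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE TEST(ID INT)");
            ResultSet rs = stat.executeQuery("SELECT ID AS X FROM TEST");
            ResultSetMetaData meta = rs.getMetaData();
            System.out.println(meta.getColumnName(1));  // "ID" - the column name
            System.out.println(meta.getColumnLabel(1)); // "X" - the alias
        }
    }
}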
@@ -266,20 +253,12 @@
Float is Double?
return a java.lang.Float
. What's wrong?
-This is not a bug. According the the JDBC specification, the JDBC data type FLOAT
+This is not a bug. According to the JDBC specification, the JDBC data type FLOAT
is equivalent to DOUBLE
, and both are mapped to java.lang.Double
.
See also
-
+
Mapping SQL and Java Types - 8.3.10 FLOAT.
-
-Is the GCJ Version Stable? Faster?
-
-The GCJ version is not as stable as the Java version.
-When running the regression test with the GCJ version, sometimes the application just stops
-at what seems to be a random point without error message.
-Currently, the GCJ version is also slower than when using the Sun VM.
-However, the startup of the GCJ version is faster than when using a VM.
-
+Use the REAL or FLOAT(24) data type for java.lang.Float
values.
How to Translate this Project?
@@ -297,7 +276,7 @@
How to Contribute to this Project?
code coverage (the target code coverage for this project is 90%, higher is better).
You will have to develop, build and run the tests.
Once you are familiar with the code, you could implement missing features from the
-feature request list.
+feature request list.
I suggest starting with very small features that are easy to implement.
Keep in mind to provide test cases as well.
diff --git a/h2/src/docsrc/html/features.html b/h2/src/docsrc/html/features.html
index e8c963431e..5d1f7c7f22 100644
--- a/h2/src/docsrc/html/features.html
+++ b/h2/src/docsrc/html/features.html
@@ -1,7 +1,7 @@
@@ -21,8 +21,6 @@ Features
Feature List
-
- Comparison to Other Database Engines
H2 in Use
@@ -69,8 +67,8 @@ Features
Read Only Databases
Read Only Databases in Zip or Jar File
-
- Computed Columns / Function Based Index
+
+ Generated Columns (Computed Columns) / Function Based Index
Multi-Dimensional Indexes
@@ -83,6 +81,8 @@ Features
Compacting a Database
Cache Settings
+
+ External Authentication (Experimental)
Feature List
Main Features
@@ -100,8 +100,8 @@ Main Features
Additional Features
- Disk based or in-memory databases and tables, read-only database support, temporary tables
-
- Transaction support (read committed), 2-phase-commit
-
- Multiple connections, table level locking
+
- Transaction support (read uncommitted, read committed, repeatable read, snapshot), 2-phase-commit
+
- Multiple connections, row-level locking
- Cost based optimizer, using a genetic algorithm for complex queries, zero-administration
- Scrollable and updatable result set support, large result set, external result sorting,
functions can return a result set
@@ -116,8 +116,10 @@
SQL Support
- Triggers and Java functions / stored procedures
- Many built-in functions, including XML and lossless data compression
- Wide range of data types including large objects (BLOB/CLOB) and arrays
-
- Sequence and autoincrement columns, computed columns (can be used for function based indexes)
-
ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP
+- Sequences and identity columns, generated columns (can be used for function based indexes)
+
- ORDER BY, GROUP BY, HAVING, UNION, OFFSET / FETCH (including PERCENT and WITH TIES), LIMIT, TOP,
+ DISTINCT / DISTINCT ON (...)
+
- Window functions
- Collation support, including support for the ICU4J library
- Support for users and roles
- Compatibility modes for IBM DB2, Apache Derby, HSQLDB,
@@ -140,7 +142,7 @@
Security Features
Other Features and Tools
-- Small footprint (smaller than 1.5 MB), low memory requirements
+
- Small footprint (around 2.5 MB), low memory requirements
- Multiple index types (b-tree, tree, hash)
- Support for multi-dimensional indexes
- CSV (comma separated values) file support
@@ -157,241 +159,10 @@
Other Features and Tools
- Well tested (high code coverage, randomized stress tests)
-Comparison to Other Database Engines
-
-This comparison is based on
-H2 1.3,
-Apache Derby version 10.8,
-HSQLDB 2.2,
-MySQL 5.5,
-PostgreSQL 9.0.
-
-
-
-Feature
-H2
-Derby
-HSQLDB
-MySQL
-PostgreSQL
-
-Pure Java
-Yes
-Yes
-Yes
-No
-No
-
-Embedded Mode (Java)
-Yes
-Yes
-Yes
-No
-No
-
-In-Memory Mode
-Yes
-Yes
-Yes
-No
-No
-
-
-Explain Plan
-Yes
-Yes *12
-Yes
-Yes
-Yes
-
-Built-in Clustering / Replication
-Yes
-Yes
-No
-Yes
-Yes
-
-Encrypted Database
-Yes
-Yes *10
-Yes *10
-No
-No
-
-Linked Tables
-Yes
-No
-Partially *1
-Partially *2
-No
-
-ODBC Driver
-Yes
-No
-No
-Yes
-Yes
-
-Fulltext Search
-Yes
-Yes
-No
-Yes
-Yes
-
-Domains (User-Defined Types)
-Yes
-No
-Yes
-Yes
-Yes
-
-Files per Database
-Few
-Many
-Few
-Many
-Many
-
-Row Level Locking
-Yes *9
-Yes
-Yes *9
-Yes
-Yes
-
-Multi Version Concurrency
-Yes
-No
-Yes
-Yes
-Yes
-
-Multi-Threaded Statement Processing
-No *11
-Yes
-Yes
-Yes
-Yes
-
-Role Based Security
-Yes
-Yes *3
-Yes
-Yes
-Yes
-
-Updatable Result Sets
-Yes
-Yes *7
-Yes
-Yes
-Yes
-
-Sequences
-Yes
-Yes
-Yes
-No
-Yes
-
-Limit and Offset
-Yes
-Yes *13
-Yes
-Yes
-Yes
-
-Window Functions
-No *15
-No *15
-No
-No
-Yes
-
-Temporary Tables
-Yes
-Yes *4
-Yes
-Yes
-Yes
-
-Information Schema
-Yes
-No *8
-Yes
-Yes
-Yes
-
-Computed Columns
-Yes
-Yes
-Yes
-No
-Yes *6
-
-Case Insensitive Columns
-Yes
-Yes *14
-Yes
-Yes
-Yes *6
-
-Custom Aggregate Functions
-Yes
-No
-Yes
-Yes
-Yes
-
-CLOB/BLOB Compression
-Yes
-No
-No
-No
-Yes
-
-Footprint (jar/dll size)
-~1.5 MB *5
-~3 MB
-~1.5 MB
-~4 MB
-~6 MB
-
-
-
-*1 HSQLDB supports text tables.
-*2 MySQL supports linked MySQL tables under the name 'federated tables'.
-*3 Derby support for roles based security and password checking as an option.
-*4 Derby only supports global temporary tables.
-*5 The default H2 jar file contains debug information, jar files for other databases do not.
-*6 PostgreSQL supports functional indexes.
-*7 Derby only supports updatable result sets if the query is not sorted.
-*8 Derby doesn't support standard compliant information schema tables.
-*9 When using MVCC (multi version concurrency).
-*10 Derby and HSQLDB
- don't hide data patterns well.
-*11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC.
-*12 Derby doesn't support the EXPLAIN
statement, but it supports runtime statistics and retrieving statement execution plans.
-*13 Derby doesn't support the syntax LIMIT .. [OFFSET ..]
, however it supports FETCH FIRST .. ROW[S] ONLY
.
-*14 Using collations.
-*15 Derby and H2 support ROW_NUMBER() OVER()
.
-
-
-DaffodilDb and One$Db
-
-It looks like the development of this database has stopped. The last release was February 2006.
-
-
-McKoi
-
-It looks like the development of this database has stopped. The last release was August 2004.
-
-
H2 in Use
For a list of applications that work with or use H2, see:
-Links.
+Links.
Connection Modes
@@ -413,6 +184,15 @@ Embedded Mode
There is no limit on the number of databases open concurrently,
or on the number of open connections.
+
+In embedded mode I/O operations can be performed by the application's threads that execute a SQL command.
+The application must not interrupt these threads; doing so can lead to database corruption,
+because the JVM closes the I/O handle during thread interruption.
+Consider other ways to control the execution of your application.
+When interrupts are possible, the async:
+file system can be used as a workaround, but full safety is not guaranteed.
+It is recommended to use the client-server model instead; the client side may interrupt its own threads.
+
@@ -492,7 +272,7 @@ Database URL Overview
Server mode (remote connections)
using TLS
- jdbc:h2:ssl://<server>[:<port>]/<databaseName>
+ jdbc:h2:ssl://<server>[:<port>]/[<path>]<databaseName>
jdbc:h2:ssl://localhost:8085/~/sample;
@@ -507,7 +287,7 @@ Database URL Overview
File locking methods
- jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO}
+ jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|FS|NO}
jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET
@@ -568,7 +348,7 @@ Database URL Overview
Compatibility mode
jdbc:h2:<url>;MODE=<databaseType>
- jdbc:h2:~/test;MODE=MYSQL
+ jdbc:h2:~/test;MODE=MYSQL;DATABASE_TO_LOWER=TRUE
@@ -639,19 +419,30 @@ In-Memory Databases
To keep the database open, add ;DB_CLOSE_DELAY=-1
to the database URL.
To keep the content of an in-memory database as long as the virtual machine is alive, use
jdbc:h2:mem:test;DB_CLOSE_DELAY=-1
.
+This may create a memory leak; when you need to remove the database, use
+the SHUTDOWN command.
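+
+A hedged sketch of this lifecycle (the table name is illustrative):
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+
+public class InMemoryLifecycle {
+    public static void main(String[] args) throws Exception {
+        // DB_CLOSE_DELAY=-1 keeps the database alive after the last connection closes
+        Connection c1 = DriverManager.getConnection(
+                "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1", "sa", "");
+        c1.createStatement().execute("CREATE TABLE T(ID INT)");
+        c1.close();
+        // the table is still visible to a new connection
+        Connection c2 = DriverManager.getConnection("jdbc:h2:mem:test", "sa", "");
+        c2.createStatement().execute("SHUTDOWN"); // removes the in-memory database
+    }
+}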
Database Files Encryption
-The database files can be encrypted. Two encryption algorithm AES is supported.
+The database files can be encrypted.
+Three encryption algorithms are supported:
+
+
+- "AES" - also known as Rijndael, only AES-128 is implemented.
+- "XTEA" - the 32 round version.
+- "FOG" - pseudo-encryption only useful for hiding data from a text editor.
+
+
To use file encryption, you need to specify the encryption algorithm (the 'cipher')
and the file password (in addition to the user password) when connecting to the database.
Creating a New Database with File Encryption
-By default, a new database is automatically created if it does not exist yet.
-To create an encrypted database, connect to it as it would already exist.
+By default, a new database is automatically created if it does not exist yet
+when an embedded URL is used.
+To create an encrypted database, connect to it as if it already existed locally, using the embedded URL.
Connecting to an Encrypted Database
@@ -663,7 +454,6 @@ Connecting to an Encrypted Database
password-encrypted database:
-Class.forName("org.h2.Driver");
String url = "jdbc:h2:~/test;CIPHER=AES";
String user = "sa";
String pwds = "filepwd userpwd";
@@ -722,7 +512,8 @@ Database File Locking
Opening a Database Only if it Already Exists
By default, when an application calls DriverManager.getConnection(url, ...)
-and the database specified in the URL does not yet exist, a new (empty) database is created.
+with an embedded URL and the database specified in the URL does not yet exist,
+a new (empty) database is created.
In some situations, it is better to restrict creating new databases, and only allow opening
existing databases. To do this, add ;IFEXISTS=TRUE
to the database URL. In this case, if the database does not already exist, an exception is thrown when
@@ -814,7 +605,7 @@
Changing Other Settings when Opening a Connection
Adding ;setting=value
at the end of a database URL is the
same as executing the statement SET setting value
just after
connecting. For a list of supported settings, see SQL Grammar
-or the DbSettings javadoc.
+or the DbSettings javadoc.
Custom File Access Mode
@@ -853,36 +644,24 @@ Multiple Connections to the Same Database: Client/Server
Multithreading Support
-This database is multithreading-safe. That means, if an application is multi-threaded, it does not need
-to worry about synchronizing access to the database. Internally, most requests to the same database
-are synchronized. That means an application can use multiple threads that access the same database
-at the same time, however if one thread executes a long running query, the other threads
-need to wait.
+This database is multithreading-safe.
+If an application is multi-threaded, it does not need to worry about synchronizing access to the database.
+An application should normally use one connection per thread.
+This database synchronizes access to the same connection, but other databases may not do this.
+To get higher concurrency, you need to use multiple connections.
-An application should normally use one connection per thread. This database synchronizes
-access to the same connection, but other databases may not do this.
+An application can use multiple threads that access the same database at the same time.
+Threads that use different connections can use the database concurrently.
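+
+A minimal sketch of the one-connection-per-thread pattern described above (the database name is illustrative):
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+
+public class PerThreadConnections {
+    public static void main(String[] args) throws Exception {
+        Runnable worker = () -> {
+            // each thread opens its own connection instead of sharing one
+            try (Connection conn = DriverManager.getConnection(
+                    "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1", "sa", "")) {
+                conn.createStatement().execute("SELECT 1");
+            } catch (Exception e) {
+                e.printStackTrace();
+            }
+        };
+        Thread t1 = new Thread(worker), t2 = new Thread(worker);
+        t1.start(); t2.start();
+        t1.join(); t2.join();
+    }
+}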
Locking, Lock-Timeout, Deadlocks
-Please note MVCC is enabled in version 1.4.x by default, when using the MVStore.
-In this case, table level locking is not used.
-
-If multi-version concurrency is not used,
-the database uses table level locks to give each connection a consistent state of the data.
-There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks).
-All locks are released when the transaction commits or rolls back.
-When using the default transaction isolation level 'read committed', read locks are already released after each statement.
-
-If a connection wants to reads from a table, and there is no write lock on the table,
-then a read lock is added to the table. If there is a write lock, then this connection waits
-for the other connection to release the lock. If a connection cannot get a lock for a specified time,
-then a lock timeout exception is thrown.
-
Usually, SELECT
statements will generate read locks. This includes subqueries.
-Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data,
+Statements that modify data use write locks on the modified rows.
+It is also possible to issue write locks without modifying data,
using the statement SELECT ... FOR UPDATE
.
+Data definition statements may issue exclusive locks on tables.
The statements COMMIT
and
ROLLBACK
release all open locks.
The commands SAVEPOINT
and
@@ -903,18 +682,18 @@
Locking, Lock-Timeout, Deadlocks
SCRIPT;
- Write
+ Write (row-level)
SELECT * FROM TEST WHERE 1=0 FOR UPDATE;
- Write
+ Write (row-level)
INSERT INTO TEST VALUES(1, 'Hello');
INSERT INTO TEST SELECT * FROM TEST;
UPDATE TEST SET NAME='Hi';
DELETE FROM TEST;
- Write
+ Exclusive
ALTER TABLE TEST ...;
CREATE INDEX ... ON TEST ...;
DROP INDEX ...;
@@ -928,13 +707,6 @@ Locking, Lock-Timeout, Deadlocks
SET DEFAULT_LOCK_TIMEOUT <milliseconds>
. The default lock timeout is persistent.
-Avoiding Deadlocks
-
-To avoid deadlocks, ensure that all transactions lock the tables in the same order
-(for example in alphabetical order), and avoid upgrading read locks to write locks.
-Both can be achieved using explicitly locking tables using SELECT ... FOR UPDATE
.
-
-
Database File Layout
The following files are created for persistent databases:
@@ -942,14 +714,32 @@
Database File Layout
File Name Description Number of Files
- test.h2.db
+ test.mv.db
Database file.
Contains the transaction log, indexes, and data for all tables.
- Format: <database>.h2.db
+ Format: <database>.mv.db
1 per database
+
+ test.newFile
+
+ Temporary file for database compaction.
+ Contains the new MVStore file.
+ Format: <database>.newFile
+
+ 0 or 1 per database
+
+
+ test.tempFile
+
+ Temporary file for database compaction.
+ Contains the temporary MVStore file.
+ Format: <database>.tempFile
+
+ 0 or 1 per database
+
test.lock.db
@@ -965,19 +755,10 @@ Database File Layout
Trace file (if the trace option is enabled).
Contains trace information.
Format: <database>.trace.db
- Renamed to <database>.trace.db.old
is too big.
+ Renamed to <database>.trace.db.old
if too big.
0 or 1 per database
-
- test.lobs.db/*
-
- Directory containing one file for each
- BLOB or CLOB value larger than a certain size.
- Format: <id>.t<tableId>.lob.db
-
- 1 per large object
-
test.123.temp.db
@@ -1030,31 +811,101 @@ Compatibility
(example: jdbc:h2:~/test;IGNORECASE=TRUE
).
-Compatibility Modes
+Compatibility Modes
For certain features, this database can emulate the behavior of specific databases.
However, only a small subset of the differences between databases are implemented in this way.
Here is the list of currently supported modes and the differences to the regular mode:
+REGULAR Compatibility mode
+
+This mode is used by default.
+
+- Empty IN predicate is allowed.
+
- TOP clause in SELECT is allowed.
+
- OFFSET/LIMIT clauses are allowed.
+
- MINUS can be used instead of EXCEPT.
+
- IDENTITY can be used as a data type.
+
- Legacy SERIAL and BIGSERIAL data types are supported.
+
- AUTO_INCREMENT clause can be used instead of GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY.
+
+
+STRICT Compatibility Mode
+
+To use the STRICT mode, use the database URL jdbc:h2:~/test;MODE=STRICT
+or the SQL statement SET MODE STRICT
.
+In this mode some deprecated features are disabled.
+
+
+If your application or library uses only H2, or generates different SQL for different database systems,
+it is recommended to use this compatibility mode in unit tests
+to reduce the possibility of accidental misuse of such features.
+This mode cannot be used as an SQL validator, however.
+
+
+It is not recommended to enable this mode in production builds of libraries,
+because this mode may become more restrictive in future releases of H2, which may break your library
+if it is used together with a newer version of H2.
+
+- Empty IN predicate is disallowed.
+
- TOP and OFFSET/LIMIT clauses are disallowed, only OFFSET/FETCH can be used.
+
- MINUS cannot be used instead of EXCEPT.
+
- IDENTITY cannot be used as a data type and AUTO_INCREMENT clause cannot be specified.
+Use GENERATED BY DEFAULT AS IDENTITY clause instead.
+
- SERIAL and BIGSERIAL data types are disallowed.
+Use INTEGER GENERATED BY DEFAULT AS IDENTITY or BIGINT GENERATED BY DEFAULT AS IDENTITY instead.
+
+
+LEGACY Compatibility Mode
+
+To use the LEGACY mode, use the database URL jdbc:h2:~/test;MODE=LEGACY
+or the SQL statement SET MODE LEGACY
.
+In this mode some compatibility features for applications written for H2 1.X are enabled.
+This mode doesn't provide full compatibility with H2 1.X.
+
+- Empty IN predicate is allowed.
+
- TOP clause in SELECT is allowed.
+
- OFFSET/LIMIT clauses are allowed.
+
- MINUS can be used instead of EXCEPT.
+
- IDENTITY can be used as a data type.
+
- MS SQL Server-style IDENTITY clause is supported.
+
- Legacy SERIAL and BIGSERIAL data types are supported.
+
- AUTO_INCREMENT clause can be used instead of GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY.
+
+- If a value for an identity column was specified in an INSERT command,
+the base value of the sequence generator of this column is updated if the current value of the generator was smaller
+(larger for generators with negative increment) than the inserted value.
+
+- Identity columns have an implicit DEFAULT ON NULL clause.
+It means a NULL value may be specified for this column in an INSERT command and it will be treated as DEFAULT.
+
- Oracle-style CURRVAL and NEXTVAL can be used on sequences.
+
- TOP clause can be used in DELETE and UPDATE.
+
- Non-standard Oracle-style WHERE clause can be used in standard MERGE command.
+
+- An attempt to reference a non-unique set of columns from a referential constraint
+will create a UNIQUE constraint on them automatically.
+
- Unsafe comparison operators between numeric and boolean values are allowed.
+
+- IDENTITY() and SCOPE_IDENTITY() are supported, but both are implemented like SCOPE_IDENTITY().
+
+
DB2 Compatibility Mode
-To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2
+To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2;DEFAULT_NULL_ORDERING=HIGH
or the SQL statement SET MODE DB2
.
- For aliased columns,
ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns
null
.
- - Support for the syntax
[OFFSET .. ROW] [FETCH ... ONLY]
- as an alternative for LIMIT .. OFFSET
.
- - Concatenating
NULL
with another value
- results in the other value.
- Support the pseudo-table SYSIBM.SYSDUMMY1.
+
- Timestamps with dash between date and time are supported.
+
- Datetime value functions return the same value within a command.
+
- Second and third arguments of TRANSLATE() function are swapped.
+
- LIMIT / OFFSET clauses are supported.
+
- MINUS can be used instead of EXCEPT.
+
- Unsafe comparison operators between numeric and boolean values are allowed.
Derby Compatibility Mode
-To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby
+To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby;DEFAULT_NULL_ORDERING=HIGH
or the SQL statement SET MODE Derby
.
- For aliased columns,
ResultSetMetaData.getColumnName()
@@ -1062,24 +913,22 @@ Derby Compatibility Mode
null
.
- For unique indexes,
NULL
is distinct.
That means only one row with NULL
in one of the columns is allowed.
- - Concatenating
NULL
with another value
- results in the other value.
- Support the pseudo-table SYSIBM.SYSDUMMY1.
+
- Datetime value functions return the same value within a command.
HSQLDB Compatibility Mode
-To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB
+To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB;DEFAULT_NULL_ORDERING=FIRST
or the SQL statement SET MODE HSQLDB
.
-- For aliased columns,
ResultSetMetaData.getColumnName()
- returns the alias name and getTableName()
returns
- null
.
- - When converting the scale of decimal data, the number is only converted if the new scale is
- smaller than the current scale. Usually, the scale is converted and 0s are added if required.
-
- For unique indexes,
NULL
is distinct.
- That means only one row with NULL
in one of the columns is allowed.
- - Text can be concatenated using '+'.
+
- Text can be concatenated using '+'.
+
- NULL value works like DEFAULT value in assignments to identity columns.
+
- Datetime value functions return the same value within a command.
+
- TOP clause in SELECT is supported.
+
- LIMIT / OFFSET clauses are supported.
+
- MINUS can be used instead of EXCEPT.
+
- Unsafe comparison operators between numeric and boolean values are allowed.
MS SQL Server Compatibility Mode
@@ -1093,28 +942,92 @@ MS SQL Server Compatibility Mode
- Identifiers may be quoted using square brackets as in
[Test]
.
- For unique indexes,
NULL
is distinct.
That means only one row with NULL
in one of the columns is allowed.
- - Concatenating
NULL
with another value
- results in the other value.
- Text can be concatenated using '+'.
+
- Arguments of LOG() function are swapped.
+
- MONEY data type is treated like NUMERIC(19, 4) data type. SMALLMONEY data type is treated like NUMERIC(10, 4)
+ data type.
+
IDENTITY
can be used for automatic id generation on column level.
+- Table hints are discarded. Example:
SELECT * FROM table WITH (NOLOCK)
.
+ - Datetime value functions return the same value within a command.
+
- 0x literals are parsed as binary string literals.
+
- TRUNCATE TABLE restarts next values of generated columns.
+
- TOP clause in SELECT, UPDATE, and DELETE is supported.
+
- Unsafe comparison operators between numeric and boolean values are allowed.
+MariaDB Compatibility Mode
+
+To use the MariaDB mode, use the database URL jdbc:h2:~/test;MODE=MariaDB;DATABASE_TO_LOWER=TRUE
.
+When case-insensitive identifiers are needed, append ;CASE_INSENSITIVE_IDENTIFIERS=TRUE
to the URL.
+Do not change the value of DATABASE_TO_LOWER after creation of the database.
+
+- Creating indexes in the
CREATE TABLE
statement is allowed using
+ INDEX(..)
or KEY(..)
.
+ Example: create table test(id int primary key, name varchar(255), key idx_name(name));
+ - When converting a floating point number to an integer, the fractional
+ digits are not truncated, but the value is rounded.
+
- ON DUPLICATE KEY UPDATE is supported in INSERT statements; due to this feature, VALUES has a special non-standard
+ meaning in some contexts.
+
- INSERT IGNORE is partially supported and may be used to skip rows with duplicate keys if ON DUPLICATE KEY
+ UPDATE is not specified.
+
- REPLACE INTO is partially supported.
+
- Spaces are trimmed from the right side of CHAR values.
+
- REGEXP_REPLACE() uses \ for back-references.
+
- Datetime value functions return the same value within a command.
+
- 0x literals are parsed as binary string literals.
+
- Unrelated expressions in ORDER BY clause of DISTINCT queries are allowed.
+
- Some MariaDB-specific ALTER TABLE commands are partially supported.
+
- TRUNCATE TABLE restarts next values of generated columns.
+
- NEXT VALUE FOR returns different values when invoked multiple times within the same row.
+
+- If the value of an identity column was manually specified, its sequence is updated to generate values after
+the inserted value.
+
- NULL value works like DEFAULT value in assignments to identity columns.
+
- LIMIT / OFFSET clauses are supported.
+
- AUTO_INCREMENT clause can be used.
+
- YEAR data type is treated like SMALLINT data type.
+
- GROUP BY clause can contain 1-based positions of expressions from the SELECT list.
+
- Unsafe comparison operators between numeric and boolean values are allowed.
+
+
+Text comparison in MariaDB is case insensitive by default, while in H2 it is case sensitive (as in most other databases).
+H2 does support case insensitive text comparison, but it needs to be set separately,
+using SET IGNORECASE TRUE
.
+This affects comparison using =, LIKE, REGEXP
.
+
+
MySQL Compatibility Mode
-To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL
-or the SQL statement SET MODE MySQL
.
+To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL;DATABASE_TO_LOWER=TRUE
.
+When case-insensitive identifiers are needed, append ;CASE_INSENSITIVE_IDENTIFIERS=TRUE
to the URL.
+Do not change the value of DATABASE_TO_LOWER after creation of the database.
-- When inserting data, if a column is defined to be
NOT NULL
- and NULL
is inserted,
- then a 0 (or empty string, or the current timestamp for timestamp columns) value is used.
- Usually, this operation is not allowed and an exception is thrown.
- - Creating indexes in the
CREATE TABLE
statement is allowed using
+- Creating indexes in the
CREATE TABLE
statement is allowed using
INDEX(..)
or KEY(..)
.
Example: create table test(id int primary key, name varchar(255), key idx_name(name));
- - Meta data calls return identifiers in lower case.
- When converting a floating point number to an integer, the fractional
digits are not truncated, but the value is rounded.
-
- Concatenating
NULL
with another value
- results in the other value.
+ - ON DUPLICATE KEY UPDATE is supported in INSERT statements; due to this feature, VALUES has a special non-standard
+ meaning in some contexts.
+
- INSERT IGNORE is partially supported and may be used to skip rows with duplicate keys if ON DUPLICATE KEY
+ UPDATE is not specified.
+
- REPLACE INTO is partially supported.
+
- Spaces are trimmed from the right side of CHAR values.
+
- REGEXP_REPLACE() uses \ for back-references.
+
- Datetime value functions return the same value within a command.
+
- 0x literals are parsed as binary string literals.
+
- Unrelated expressions in ORDER BY clause of DISTINCT queries are allowed.
+
- Some MySQL-specific ALTER TABLE commands are partially supported.
+
- TRUNCATE TABLE restarts next values of generated columns.
+
- If the value of an identity column was manually specified, its sequence is updated to generate values after
+the inserted value.
+
- NULL value works like DEFAULT value in assignments to identity columns.
+
- Referential constraints don't require an existing primary key or unique constraint on referenced columns
+and create a unique constraint automatically if such a constraint doesn't exist.
+
- LIMIT / OFFSET clauses are supported.
+
- AUTO_INCREMENT clause can be used.
+
- YEAR data type is treated like SMALLINT data type.
+
- GROUP BY clause can contain 1-based positions of expressions from the SELECT list.
+
- Unsafe comparison operators between numeric and boolean values are allowed.
Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases).
@@ -1125,7 +1038,7 @@
MySQL Compatibility Mode
Oracle Compatibility Mode
-To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle
+To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle;DEFAULT_NULL_ORDERING=HIGH
or the SQL statement SET MODE Oracle
.
- For aliased columns,
ResultSetMetaData.getColumnName()
@@ -1134,24 +1047,50 @@ Oracle Compatibility Mode
- When using unique indexes, multiple rows with
NULL
in all columns are allowed, however it is not allowed to have multiple rows with the
same values otherwise.
- - Concatenating
NULL
with another value
+ - Empty strings are treated like
NULL
values, concatenating NULL
with another value
results in the other value.
- - Empty strings are treated like
NULL
values.
+ - REGEXP_REPLACE() uses \ for back-references.
+
- RAWTOHEX() converts character strings to hexadecimal representation of their UTF-8 encoding.
+
- HEXTORAW() decodes a hexadecimal character string to a binary string.
+
- DATE data type is treated like TIMESTAMP(0) data type.
+
- Datetime value functions return the same value within a command.
+
- ALTER TABLE MODIFY COLUMN command is partially supported.
+
- SEQUENCE.NEXTVAL and SEQUENCE.CURRVAL are supported and return values with DECIMAL/NUMERIC data type.
+
- Merge when matched clause may have WHERE clause.
+
- MINUS can be used instead of EXCEPT.
PostgreSQL Compatibility Mode
-To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL
-or the SQL statement SET MODE PostgreSQL
.
+To use the PostgreSQL mode, use the database URL
+jdbc:h2:~/test;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE;DEFAULT_NULL_ORDERING=HIGH
.
+Do not change the value of DATABASE_TO_LOWER after creation of the database.
- For aliased columns,
ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns
null
.
- When converting a floating point number to an integer, the fractional
digits are not truncated, but the value is rounded.
-
- The system columns
CTID
and
- OID
are supported.
+ - The system columns
ctid
and
+ oid
are supported.
- LOG(x) is base 10 in this mode.
+
- REGEXP_REPLACE():
+
+ - uses \ for back-references;
+ - does not throw an exception when the
flagsString
parameter contains a 'g';
+ - replaces only the first matched substring in the absence of the 'g' flag in the
flagsString
parameter.
+
+ - LIMIT / OFFSET clauses are supported.
+
- Legacy SERIAL and BIGSERIAL data types are supported.
+
- ON CONFLICT DO NOTHING is supported in INSERT statements.
+
- Spaces are trimmed from the right side of CHAR values, but CHAR values in result sets are right-padded with
+ spaces to the declared length.
+
- MONEY data type is treated like NUMERIC(19, 2) data type.
+
- Datetime value functions return the same value within a transaction.
+
- ARRAY_SLICE() out of bounds parameters are silently corrected.
+
- EXTRACT function with DOW field returns (0-6), Sunday is 0.
+
- UPDATE with FROM is supported.
+
- GROUP BY clause can contain 1-based positions of expressions from the SELECT list.
Auto-Reconnect
@@ -1194,7 +1133,7 @@ Automatic Mixed Mode
which is faster than the server mode. Therefore the main application should open
the database first if possible. The first connection automatically starts a server on a random port.
This server allows remote connections, however only to this database (to ensure that,
-the client reads .lock.db
file and sends the the random key that is stored there to the server).
+the client reads the .lock.db
file and sends the random key that is stored there to the server).
When the first connection is closed, the server stops. If other (remote) connections are still
open, one of them will then start a server (auto-reconnect is enabled automatically).
@@ -1224,10 +1163,11 @@ Automatic Mixed Mode
Page Size
-The page size for new databases is 2 KB (2048), unless the page size is set
+The page size for new databases is 4 KiB (4096 bytes), unless the page size is set
explicitly in the database URL using PAGE_SIZE=
when
the database is created. The page size of existing databases can not be changed,
so this property needs to be set when the database is created.
+The page size of encrypted databases must be a multiple of 4096 (4096, 8192, …).
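+
+For example, to create a new database with an 8 KiB page size (a sketch; as noted above,
+the setting only takes effect when the database is created):
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+
+public class PageSizeExample {
+    public static void main(String[] args) throws Exception {
+        // PAGE_SIZE takes effect only if ~/test does not exist yet
+        try (Connection conn = DriverManager.getConnection(
+                "jdbc:h2:~/test;PAGE_SIZE=8192", "sa", "")) {
+            conn.createStatement().execute("SELECT 1");
+        }
+    }
+}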
Using the Trace Options
@@ -1293,7 +1233,7 @@ Java Code Generation
12-20 20:58:09 jdbc[0]:
/**/dbMeta3.getURL();
12-20 20:58:09 jdbc[0]:
-/**/dbMeta3.getTables(null, "", null, new String[]{"TABLE", "VIEW"});
+/**/dbMeta3.getTables(null, "", null, new String[]{"BASE TABLE", "VIEW"});
...
@@ -1319,7 +1259,7 @@
Using Other Logging APIs
facility as the application, for example Log4j. To do that, this database supports SLF4J.
-SLF4J is a simple facade for various logging APIs
+SLF4J is a simple facade for various logging APIs
and allows plugging in the desired implementation at deployment time.
SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL),
Java logging, x4juli, and Simple Log.
@@ -1382,7 +1322,7 @@
Read Only Databases in Zip or Jar File
If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files,
because random access in compressed files is not possible.
-See also the sample application ReadOnlyDatabaseInZip.
+See also the sample application ReadOnlyDatabaseInZip.
Opening a Corrupted Database
@@ -1392,26 +1332,32 @@ Opening a Corrupted Database
The exceptions are logged, but opening the database will continue.
-Computed Columns / Function Based Index
+Generated Columns (Computed Columns) / Function Based Index
-A computed column is a column whose value is calculated before storing.
+Each column is either a base column or a generated column.
+A generated column is a column whose value is calculated before storing and cannot be assigned directly.
The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated.
One use case is to automatically update the last-modification time:
-CREATE TABLE TEST(ID INT, NAME VARCHAR, LAST_MOD TIMESTAMP AS NOW());
+CREATE TABLE TEST(
+ ID INT,
+ NAME VARCHAR,
+ LAST_MOD TIMESTAMP WITH TIME ZONE
+ GENERATED ALWAYS AS CURRENT_TIMESTAMP
+);
Function indexes are not directly supported by this database, but they can be emulated
-by using computed columns. For example, if an index on the upper-case version of
-a column is required, create a computed column with the upper-case version of the original column,
+by using generated columns. For example, if an index on the upper-case version of
+a column is required, create a generated column with the upper-case version of the original column,
and create an index for this column:
CREATE TABLE ADDRESS(
ID INT PRIMARY KEY,
NAME VARCHAR,
- UPPER_NAME VARCHAR AS UPPER(NAME)
+ UPPER_NAME VARCHAR GENERATED ALWAYS AS UPPER(NAME)
);
CREATE INDEX IDX_U_NAME ON ADDRESS(UPPER_NAME);
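
Queries should then filter on the generated column so that the index can be used.
A hedged sketch (the data values are illustrative):

import java.sql.*;

public class FunctionIndexQuery {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test", "sa", "");
             Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE ADDRESS(ID INT PRIMARY KEY, NAME VARCHAR,"
                    + " UPPER_NAME VARCHAR GENERATED ALWAYS AS UPPER(NAME))");
            stat.execute("CREATE INDEX IDX_U_NAME ON ADDRESS(UPPER_NAME)");
            stat.execute("INSERT INTO ADDRESS(ID, NAME) VALUES(1, 'Smith')");
            // filter on the generated column so IDX_U_NAME can be used
            try (PreparedStatement prep = conn.prepareStatement(
                    "SELECT ID, NAME FROM ADDRESS WHERE UPPER_NAME = ?")) {
                prep.setString(1, "SMITH");
                prep.executeQuery();
            }
        }
    }
}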
@@ -1436,7 +1382,7 @@ Multi-Dimensional Indexes
Currently, Z-order (also called N-order or Morton-order) is used;
Hilbert curve could also be used, but the implementation is more complex.
The algorithm to convert the multi-dimensional value is called bit-interleaving.
-The scalar value is indexed using a B-Tree index (usually using a computed column).
+The scalar value is indexed using a B-Tree index (usually using a generated column).
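+
+A hedged sketch of two-dimensional bit-interleaving (illustrative only; the engine's own
+implementation may differ):
+
+public class BitInterleave {
+    // bit i of x goes to bit 2*i, bit i of y goes to bit 2*i+1
+    static long interleave(int x, int y) {
+        long z = 0;
+        for (int i = 0; i < 32; i++) {
+            z |= ((long) (x >>> i & 1)) << (2 * i);
+            z |= ((long) (y >>> i & 1)) << (2 * i + 1);
+        }
+        return z;
+    }
+
+    public static void main(String[] args) {
+        System.out.println(Long.toBinaryString(interleave(0b101, 0b011))); // 11011
+    }
+}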
The method can result in a drastic performance improvement
over just using an index on the first column. Depending on the
@@ -1486,18 +1432,20 @@
Referencing a Compiled Method
Declaring Functions as Source Code
When defining a function alias with source code, the database tries to compile
-the source code using the Sun Java compiler (the class com.sun.tools.javac.Main
)
-if the tools.jar
is in the classpath. If not, javac
is run as a separate process.
+the source code using the Java compiler (obtained via javax.tools.ToolProvider.getSystemJavaCompiler()
)
+if it is in the classpath. If not, javac
is run as a separate process.
Only the source code is stored in the database; the class is compiled each time the database is re-opened.
-Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well.
+Source code can be passed as dollar quoted text ($$source code$$
) to avoid escaping problems.
+If you use some third-party script processing tool, use standard single quotes instead and remember to double
+each single quotation mark within the source code.
Example:
-CREATE ALIAS NEXT_PRIME AS $$
+CREATE ALIAS NEXT_PRIME AS '
String nextPrime(String value) {
return new BigInteger(value).nextProbablePrime().toString();
}
-$$;
+';
By default, the three packages java.util, java.math, java.sql
are imported.
@@ -1507,13 +1455,13 @@
Declaring Functions as Source Code
and separated with the tag @CODE
:
-CREATE ALIAS IP_ADDRESS AS $$
+CREATE ALIAS IP_ADDRESS AS '
import java.net.*;
@CODE
String ipAddress(String host) throws Exception {
return InetAddress.getByName(host).getHostAddress();
}
-$$;
+';
The following template is used to create a complete Java class:
@@ -1647,6 +1595,7 @@
Pluggable or User-Defined Tables
In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine
interface, e.g.
something like this:
+
package acme;
public static class MyTableEngine implements org.h2.api.TableEngine {
@@ -1660,12 +1609,13 @@ Pluggable or User-Defined Tables
}
}
+
and then create the table from SQL like this:
+
CREATE TABLE TEST(ID INT, NAME VARCHAR)
ENGINE "acme.MyTableEngine";
-
It is also possible to pass in parameters to the table engine, like so:
@@ -1675,6 +1625,15 @@ Pluggable or User-Defined Tables
In which case the parameters are passed down in the tableEngineParams field of the CreateTableData object.
+
+It is also possible to specify default table engine params on schema creation:
+
+
+CREATE SCHEMA TEST_SCHEMA WITH "param1", "param2";
+
+
+Params from the schema are used when a CREATE TABLE statement issued on this schema does not have its own engine params specified.
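+
+A hedged sketch combining both options (assuming the acme.MyTableEngine class from above
+is on the classpath):
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+
+public class TableEngineParams {
+    public static void main(String[] args) throws Exception {
+        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test", "sa", "");
+             Statement stat = conn.createStatement()) {
+            // default engine params for tables created in this schema
+            stat.execute("CREATE SCHEMA TEST_SCHEMA WITH \"param1\", \"param2\"");
+            // no WITH clause here, so param1/param2 from the schema are used
+            stat.execute("CREATE TABLE TEST_SCHEMA.TEST(ID INT, NAME VARCHAR)"
+                    + " ENGINE \"acme.MyTableEngine\"");
+        }
+    }
+}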
+
Triggers
@@ -1732,7 +1691,7 @@
Triggers
import org.h2.tools.TriggerAdapter;
...
-public class TriggerSample implements TriggerAdapter {
+public class TriggerSample extends TriggerAdapter {
public void fire(Connection conn, ResultSet oldRow, ResultSet newRow)
throws SQLException {
@@ -1782,7 +1741,7 @@ Cache Settings
is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE
overrides this value (even if larger than the physical memory).
To get the current used maximum cache size, use the query
-SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE'
+SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE SETTING_NAME = 'info.CACHE_MAX_SIZE'
An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available.
To enable it, append ;CACHE_TYPE=TQ
to the database URL.
@@ -1801,6 +1760,79 @@
Cache Settings
call SELECT * FROM INFORMATION_SCHEMA.SETTINGS
. The number of pages read / written
is listed.
+External authentication (Experimental)
+
+External authentication allows user credentials to be optionally validated externally (JAAS, LDAP, custom classes).
+It is also possible to temporarily assign roles to externally authenticated users. This feature is experimental and subject to change.
+
+The master user cannot be externally authenticated.
+
+To enable external authentication on a database, execute the statement SET AUTHENTICATOR TRUE
. This setting is persisted in the database.
+
+
+To connect to a database using external credentials, the client must append AUTHREALM=H2
to the database URL. H2
+is the identifier of the authentication realm (see below).
+
+External authentication requires the password to be sent to the server. For this reason it works only on local connections or on remote connections over SSL.
+
+By default external authentication is performed through the JAAS login interface (the configuration name is h2
).
+To configure JAAS, add the JVM argument -Djava.security.auth.login.config=jaas.conf.
+Here is an example of a
+JAAS login configuration file:
+
+
+h2 {
+ com.sun.security.auth.module.LdapLoginModule REQUIRED \
+ userProvider="ldap://127.0.0.1:10389" authIdentity="uid={USERNAME},ou=people,dc=example,dc=com" \
+ debug=true useSSL=false ;
+};
+
+
+It is possible to specify custom authentication settings by using the
+JVM argument -Dh2auth.configurationFile={urlOfH2Auth.xml}
. Here is an example h2auth.xml file:
+
+
+<h2Auth allowUserRegistration="false" createMissingRoles="true">
+
+ <!-- realm: DUMMY authenticate users named DUMMY[0-9] with a static password -->
+ <realm name="DUMMY"
+ validatorClass="org.h2.security.auth.impl.FixedPasswordCredentialsValidator">
+ <property name="userNamePattern" value="DUMMY[0-9]" />
+ <property name="password" value="mock" />
+ </realm>
+
+ <!-- realm LDAPEXAMPLE:perform credentials validation on LDAP -->
+ <realm name="LDAPEXAMPLE"
+ validatorClass="org.h2.security.auth.impl.LdapCredentialsValidator">
+ <property name="bindDnPattern" value="uid=%u,ou=people,dc=example,dc=com" />
+ <property name="host" value="127.0.0.1" />
+ <property name="port" value="10389" />
+ <property name="secure" value="false" />
+ </realm>
+
+ <!-- realm JAAS: perform credentials validation by using JAAS api -->
+ <realm name="JAAS"
+ validatorClass="org.h2.security.auth.impl.JaasCredentialsValidator">
+ <property name="appName" value="H2" />
+ </realm>
+
+ <!--Assign to each user role @{REALM} -->
+ <userToRolesMapper class="org.h2.security.auth.impl.AssignRealmNameRole"/>
+
+ <!--Assign to each user role REMOTEUSER -->
+ <userToRolesMapper class="org.h2.security.auth.impl.StaticRolesMapper">
+ <property name="roles" value="REMOTEUSER"/>
+ </userToRolesMapper>
+</h2Auth>
+
+
+Custom credentials validators must implement the interface
+org.h2.api.CredentialsValidator.
+
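+Below is a minimal sketch of a custom validator. The method names (configure,
+validateCredentials) and the ConfigProperties helper are assumptions based on
+the built-in validators listed above:
+
+import org.h2.api.CredentialsValidator;
+import org.h2.security.auth.AuthenticationInfo;
+import org.h2.security.auth.ConfigProperties;
+
+public class StaticPasswordValidator implements CredentialsValidator {
+    private String password = "changeit";
+
+    @Override
+    public void configure(ConfigProperties configProperties) {
+        // reads the "password" property of the <realm> element, if present
+        // (getStringValue is an assumed helper of ConfigProperties)
+        password = configProperties.getStringValue("password", password);
+    }
+
+    @Override
+    public boolean validateCredentials(AuthenticationInfo authenticationInfo) throws Exception {
+        return password.equals(authenticationInfo.getPassword());
+    }
+}
+
+Such a class is then referenced from the validatorClass attribute of a realm in h2auth.xml.
+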
+
+Custom criteria for role assignments must implement the interface
+org.h2.api.UserToRolesMapper.
+
diff --git a/h2/src/docsrc/html/fragments.html b/h2/src/docsrc/html/fragments.html
index f82a5e3bbb..b35432e0f1 100644
--- a/h2/src/docsrc/html/fragments.html
+++ b/h2/src/docsrc/html/fragments.html
@@ -1,6 +1,6 @@
@@ -27,7 +27,7 @@
@@ -71,31 +71,35 @@
Installation
Tutorial
Features
+Security
Performance
Advanced
Reference
-SQL Grammar
+Commands
Functions
+• Aggregate
+• Window
+
Data Types
+SQL Grammar
+System Tables
Javadoc
-PDF (1 MB)
+PDF (2 MB)
Support
FAQ
Error Analyzer
-Google Group (English)
-Google Group (Japanese)
-Google Group (Chinese)
+Google Group
Appendix
-History & Roadmap
+History
License
Build
Links
-JaQu
MVStore
Architecture
+Migration to 2.0
@@ -116,7 +120,7 @@
document.getElementById('translate').style.display='';
var script=document.createElement('script');
script.setAttribute("type","text/javascript");
- script.setAttribute("src", "http://translate.google.com/translate_a/element.js?cb=googleTranslateElementInit");
+ script.setAttribute("src", "https://translate.google.com/translate_a/element.js?cb=googleTranslateElementInit");
document.getElementsByTagName("head")[0].appendChild(script);
}
function googleTranslateElementInit() {
diff --git a/h2/src/docsrc/html/frame.html b/h2/src/docsrc/html/frame.html
index 0395283c25..42c7d4932f 100644
--- a/h2/src/docsrc/html/frame.html
+++ b/h2/src/docsrc/html/frame.html
@@ -1,6 +1,6 @@
diff --git a/h2/src/docsrc/html/functions-aggregate.html b/h2/src/docsrc/html/functions-aggregate.html
new file mode 100644
index 0000000000..dd40bca0d3
--- /dev/null
+++ b/h2/src/docsrc/html/functions-aggregate.html
@@ -0,0 +1,326 @@
+
+
+
+
+
+
+Aggregate Functions
+
+
+
+
+
+
+
+
+Aggregate Functions
+Index
+General Aggregate Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Binary Set Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Ordered Aggregate Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Hypothetical Set Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Inverse Distribution Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+JSON Aggregate Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Details
+
+Click on the header of the function to switch between railroad diagram and BNF.
+
+Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red;
+don't use it unless you need it for compatibility with other databases or old versions of H2.
+
+General Aggregate Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Binary Set Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Ordered Aggregate Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Hypothetical Set Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Inverse Distribution Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+JSON Aggregate Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+
+
+
+
diff --git a/h2/src/docsrc/html/functions-window.html b/h2/src/docsrc/html/functions-window.html
new file mode 100644
index 0000000000..f7ad4e5933
--- /dev/null
+++ b/h2/src/docsrc/html/functions-window.html
@@ -0,0 +1,277 @@
+
+
+
+
+
+
+Window Functions
+
+
+
+
+
+
+
+
+Window Functions
+Index
+Row Number Function
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Rank Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Lead or Lag Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Nth Value Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Other Window Functions
+
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
+Details
+
+Click on the header of the function to switch between railroad diagram and BNF.
+
+Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red;
+don't use it unless you need it for compatibility with other databases or old versions of H2.
+
+Row Number Function
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Rank Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Lead or Lag Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Nth Value Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Other Window Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+
+
+
+
diff --git a/h2/src/docsrc/html/functions.html b/h2/src/docsrc/html/functions.html
index b31d1eee2d..d62066ff5d 100644
--- a/h2/src/docsrc/html/functions.html
+++ b/h2/src/docsrc/html/functions.html
@@ -1,7 +1,7 @@
@@ -18,11 +18,11 @@
Functions
-Index
-Aggregate Functions
+Index
+Numeric Functions
-Numeric Functions
+String Functions
-String Functions
+Time and Date Functions
-Time and Date Functions
+System Functions
-System Functions
+JSON Functions
+Table Functions
+
+
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+ ${item.topic}
+
+
+
+
+
+
Details
-Click on the header to switch between railroad diagram and BNF.
+
+Click on the header of the function to switch between railroad diagram and BNF.
+
+Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red;
+don't use it unless you need it for compatibility with other databases or old versions of H2.
+
+Numeric Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+String Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+${item.text}
+Example:
+${item.example}
+
+
+Time and Date Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+System Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
+
+JSON Functions
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
-
+Table Functions
+
${item.topic}
-
-
-
- ${item.topic}
-
-
-
- ${item.topic}
-
-
-
+
${item.topic}
@@ -131,18 +103,15 @@ Other Grammar
-System Tables
-
-Information Schema
-Range Table
-
-
-
Details
-Click on the header to switch between railroad diagram and BNF.
+
+Click on the header of the grammar element to switch between railroad diagram and BNF.
+Non-standard syntax is marked in green. Compatibility-only non-standard syntax is marked in red;
+don't use it unless you need it for compatibility with other databases or old versions of H2.
-
+Literals
+
${item.topic}
-
+Datetime fields
+
${item.topic}
+
+
${item.railroad}
+
${item.text}
Example:
-${item.example}
-
-
-Information Schema
-
-The system tables in the schema INFORMATION_SCHEMA
contain the meta data
-of all tables in the database as well as the current settings.
-
-
-Table Columns
-
-
- ${item.topic}
- ${item.syntax}
-
+
+${item.example}
-
-Range Table
-
-The range table is a dynamic system table that contains all values from a start to an end value.
-The table contains one column called X. Both the start and end values are included in the result.
-The table is used as follows:
-
-Example:
+Other Grammar
+
+${item.topic}
+
+
+
+${item.railroad}
+
+
+
+${item.text}
+Example:
+${item.example}
+
diff --git a/h2/src/docsrc/html/history.html b/h2/src/docsrc/html/history.html
index 4446f91a64..b5068a54c6 100644
--- a/h2/src/docsrc/html/history.html
+++ b/h2/src/docsrc/html/history.html
@@ -1,7 +1,7 @@
@@ -17,11 +17,9 @@
-History and Roadmap
+History
Change Log
-
- Roadmap
History of this Database Engine
@@ -31,25 +29,15 @@ History and Roadmap
Change Log
-The up-to-date change log is available at
-
-http://www.h2database.com/html/changelog.html
-
-
-
-Roadmap
-
-The current roadmap is available at
-
-http://www.h2database.com/html/roadmap.html
-
+The up-to-date change log is available
+here
History of this Database Engine
The development of H2 was started in May 2004,
but it was first published on December 14th 2005.
-The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL.
+The original author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL.
In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database.
At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed
to continue to work on the Hypersonic SQL codebase.
@@ -90,20 +78,28 @@
Why Java
Supporters
Many thanks for those who reported bugs, gave valuable feedback,
-spread the word, and translated this project. Also many thanks to the donors.
+spread the word, and translated this project.
+
+
+Also many thanks to the donors.
To become a donor, use PayPal (at the very bottom of the main web page).
+Donors are:
-- xso; xBase Software Ontwikkeling, Netherlands
-
- Cognitect, USA
-
- Code 42 Software, Inc., Minneapolis
-
- Martin Wildam, Austria
-
- Code Lutin, France
+
+- Martin Wildam, Austria
+
- tagtraum industries incorporated, USA
+
- TimeWriter, Netherlands
+
- Cognitect, USA
+
- Code 42 Software, Inc., Minneapolis
+
- Code Lutin, France
- NetSuxxess GmbH, Germany
-
- Poker Copilot, Steve McLeod, Germany
-
- SkyCash, Poland
-
- Lumber-mill, Inc., Japan
-
- StockMarketEye, USA
-
- Eckenfelder GmbH & Co.KG, Germany
+
- Poker Copilot, Steve McLeod, Germany
+
- SkyCash, Poland
+
- Lumber-mill, Inc., Japan
+
- StockMarketEye, USA
+
- Eckenfelder GmbH & Co.KG, Germany
+
- Jun Iyama, Japan
+
- Steven Branda, USA
- Anthony Goubard, Netherlands
- Richard Hickey, USA
- Alessio Jacopo D'Adamo, Italy
@@ -111,14 +107,13 @@
Supporters
- Donald Bleyl, USA
- Frank Berger, Germany
- Florent Ramiere, France
-
- Jun Iyama, Japan
- Antonio Casqueiro, Portugal
- Oliver Computing LLC, USA
- Harpal Grover Consulting Inc., USA
- Elisabetta Berlini, Italy
- William Gilbert, USA
- Antonio Dieguez Rojas, Chile
-
- Ontology Works, USA
+
- Ontology Works, USA
- Pete Haidinyak, USA
- William Osmond, USA
- Joachim Ansorg, Germany
@@ -131,7 +126,6 @@
Supporters
- Gustav Trede, Sweden
- Joonas Pulakka, Finland
- Bjorn Darri Sigurdsson, Iceland
-
- Iyama Jun, Japan
- Gray Watson, USA
- Erik Dick, Germany
- Pengxiang Shao, China
@@ -155,6 +149,21 @@
Supporters
- Ladislav Jech, Czech Republic
- Dimitrijs Fedotovs, Latvia
- Richard Manley-Reeve, United Kingdom
+
- Daniel Cyr, ThirdHalf.com, LLC, USA
+
- Peter Jünger, Germany
+
- Dan Keegan, USA
+
- Rafel Israels, Germany
+
- Fabien Todescato, France
+
- Cristan Meijer, Netherlands
+
- Adam McMahon, USA
+
+- Fábio Gomes Lisboa Gomes, Brazil
+
- Lyderic Landry, England
+
- Mederp, Morocco
+
- Joaquim Golay, Switzerland
+
- Clemens Quoss, Germany
+
- Kervin Pierre, USA
+
- Jake Bellotti, Australia
+
- Arun Chittanoor, USA
diff --git a/h2/src/docsrc/html/installation.html b/h2/src/docsrc/html/installation.html
index 0b365ae045..f787f957ed 100644
--- a/h2/src/docsrc/html/installation.html
+++ b/h2/src/docsrc/html/installation.html
@@ -1,7 +1,7 @@
@@ -36,7 +36,7 @@ Requirements
Database Engine
- Windows XP or Vista, Mac OS X, or Linux
-
- Sun Java 6 or newer
+
- Oracle Java 8 or newer
- Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB)
@@ -47,11 +47,8 @@ H2 Console
Supported Platforms
As this database is written in Java, it can run on many different platforms.
-It is tested with Java 6 and 7.
-Currently, the database is developed and tested on Windows 8
-and Mac OS X using Java 6, but it also works in many other operating systems
-and using other Java runtime environments.
-All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported.
+It is tested with Java 8 and 11.
+All major operating systems (Windows, Mac OS X, Linux, ...) are supported.
Installing the Software
diff --git a/h2/src/docsrc/html/jaqu.html b/h2/src/docsrc/html/jaqu.html
deleted file mode 100644
index 0f072afa1f..0000000000
--- a/h2/src/docsrc/html/jaqu.html
+++ /dev/null
@@ -1,340 +0,0 @@
-
-
-
-
-
-
-JaQu
-
-
-
-
-
-
-
-
-JaQu
-
- What is JaQu
-
- Differences to Other Data Access Tools
-
- Current State
-
- Building the JaQu Library
-
- Requirements
-
- Example Code
-
- Configuration
-
- Natural Syntax
-
- Other Ideas
-
- Similar Projects
-
-What is JaQu
-
-Note: This project is currently in maintenance mode.
-A friendly fork of JaQu is
-available under the name iciql.
-
-
-JaQu stands for Java Query and allows to access databases using pure Java.
-JaQu provides a fluent interface (or internal DSL).
-JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a
-Microsoft .NET technology). The following JaQu code:
-
-
-Product p = new Product();
-List<Product> soldOutProducts =
- db.from(p).where(p.unitsInStock).is(0).select();
-
-
-stands for the SQL statement:
-
-
-SELECT * FROM PRODUCTS P
-WHERE P.UNITS_IN_STOCK = 0
-
-
-Differences to Other Data Access Tools
-
-Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java,
-auto-complete in the IDE is supported. Type checking is performed by the compiler.
-JaQu fully protects against SQL injection.
-
-
-JaQu is meant as replacement for JDBC and SQL and not as much as a replacement for tools like Hibernate.
-With JaQu, you don't write SQL statements as strings.
-JaQu is much smaller and simpler than other persistence frameworks such as Hibernate,
-but it also does not provide all the features of those.
-Unlike iBatis and Hibernate, no XML or annotation based configuration is required;
-instead the configuration (if required at all) is done in pure Java, within the application.
-
-
-JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis,
-JaQu provides full control over when and what SQL statements are executed
-(but without having to write SQL statements as strings).
-
-
-Restrictions
-
-Primitive types (eg. boolean, int, long, double
) are not supported.
-Use java.lang.Boolean, Integer, Long, Double
instead.
-
-
-Why in Java?
-
-Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy)
-in the same application is complicated: you would need to split the application and database code,
-and write adapter / wrapper code.
-
-
-Current State
-
-Currently, JaQu is only tested with the H2 database. The API may change in future versions.
-JaQu is not part of the h2 jar file, however the source code is included in H2, under:
-
-src/test/org/h2/test/jaqu/*
(samples and tests)
-src/tools/org/h2/jaqu/*
(framework)
-
-
-Building the JaQu Library
-
-To create the JaQu jar file, run: build jarJaqu
. This will create the file bin/h2jaqu.jar
.
-
-
-Requirements
-
-JaQu requires Java 6. Annotations are not need.
-Currently, JaQu is only tested with the H2 database engine, however in theory it should
-work with any database that supports the JDBC API.
-
-
-Example Code
-
-package org.h2.test.jaqu;
-import java.math.BigDecimal;
-import java.util.List;
-import org.h2.jaqu.Db;
-import static org.h2.jaqu.Function.*;
-
-public class Test {
- Db db;
-
- public static void main(String[] args) throws Exception {
- new SamplesTest().test();
- }
-
- public void test() throws Exception {
- db = Db.open("jdbc:h2:mem:", "sa", "sa");
- db.insertAll(Product.getProductList());
- db.insertAll(Customer.getCustomerList());
- db.insertAll(Order.getOrderList());
- testLength();
- testCount();
- testGroup();
- testSelectManyCompoundFrom2();
- testWhereSimple4();
- testSelectSimple2();
- testAnonymousTypes3();
- testWhereSimple2();
- testWhereSimple3();
- db.close();
- }
-
- private void testWhereSimple2() throws Exception {
- Product p = new Product();
- List<Product> soldOutProducts =
- db.from(p).
- where(p.unitsInStock).is(0).
- orderBy(p.productId).select();
- }
-
- private void testWhereSimple3() throws Exception {
- Product p = new Product();
- List<Product> expensiveInStockProducts =
- db.from(p).
- where(p.unitsInStock).bigger(0).
- and(p.unitPrice).bigger(3.0).
- orderBy(p.productId).select();
- }
-
- private void testWhereSimple4() throws Exception {
- Customer c = new Customer();
- List<Customer> waCustomers =
- db.from(c).
- where(c.region).is("WA").
- select();
- }
-
- private void testSelectSimple2() throws Exception {
- Product p = new Product();
- List<String> productNames =
- db.from(p).
- orderBy(p.productId).select(p.productName);
- }
-
- public static class ProductPrice {
- public String productName;
- public String category;
- public Double price;
- }
-
- private void testAnonymousTypes3() throws Exception {
- final Product p = new Product();
- List<ProductPrice> productInfos =
- db.from(p).orderBy(p.productId).
- select(new ProductPrice() {{
- productName = p.productName;
- category = p.category;
- price = p.unitPrice;
- }});
- }
-
- public static class CustOrder {
- public String customerId;
- public Integer orderId;
- public BigDecimal total;
- }
-
- private void testSelectManyCompoundFrom2() throws Exception {
- final Customer c = new Customer();
- final Order o = new Order();
- List<CustOrder> orders =
- db.from(c).
- innerJoin(o).on(c.customerId).is(o.customerId).
- where(o.total).smaller(new BigDecimal("500.00")).
- orderBy(1).
- select(new CustOrder() {{
- customerId = c.customerId;
- orderId = o.orderId;
- total = o.total;
- }});
- }
-
- private void testLength() throws Exception {
- Product p = new Product();
- List<Integer> lengths =
- db.from(p).
- where(length(p.productName)).smaller(10).
- orderBy(1).
- selectDistinct(length(p.productName));
- }
-
- private void testCount() throws Exception {
- long count = db.from(new Product()).selectCount();
- }
-
- public static class ProductGroup {
- public String category;
- public Long productCount;
- }
-
- private void testGroup() throws Exception {
- final Product p = new Product();
- List<ProductGroup> list =
- db.from(p).
- groupBy(p.category).
- orderBy(1).
- select(new ProductGroup() {{
- category = p.category;
- productCount = count();
- }});
- }
-
-}
-
-
-Configuration
-
-JaQu does not require any configuration when using the default field to column mapping.
-To define table indices, or if you want to map a class to a table with a different name,
-or a field to a column with another name, create a function called define
in the data class.
-Example:
-
-
-import static org.h2.jaqu.Define.*;
-
-public class Product implements Table {
-
- public Integer productId;
- public String productName;
- public String category;
- public Double unitPrice;
- public Integer unitsInStock;
-
- public void define() {
- tableName("Product");
- primaryKey(productId);
- index(productName, category);
- }
-
-}
-
-
-The method define()
contains the mapping definition. It is called once
-when the class is used for the first time. Like annotations, the mapping is defined in the class itself.
-Unlike when using annotations, the compiler can check the syntax even for multi-column
-objects (multi-column indexes, multi-column primary keys and so on).
-Because the definition is written in Java, the configuration can be set at runtime,
-which is not possible using annotations.
-Unlike XML mapping configuration, the configuration is integrated in the class itself.
-
-
-Natural Syntax
-The plan is to support more natural (pure Java) syntax in conditions.
-To do that, the condition class is de-compiled to a SQL condition.
-A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome).
-The planned syntax is:
-
-
-long count = db.from(co).
- where(new Filter() { public boolean where() {
- return co.id == x
- && co.name.equals(name)
- && co.value == new BigDecimal("1")
- && co.amount == 1L
- && co.birthday.before(new java.util.Date())
- && co.created.before(java.sql.Timestamp.valueOf("2005-05-05 05:05:05"))
- && co.time.before(java.sql.Time.valueOf("23:23:23"));
- } }).selectCount();
-
-
-Other Ideas
-
-This project has just been started, and nothing is fixed yet.
-Some ideas are:
-
-- Support queries on collections (instead of using a database).
-
- Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA).
-
- Internally use a JPA implementation (for example Hibernate) instead of SQL directly.
-
- Use PreparedStatements and cache them.
-
-
-Similar Projects
-
-iciql (a friendly fork of JaQu)
-Cement Framework
-Dreamsource ORM
-Empire-db
-JEQUEL: Java Embedded QUEry Language
-Joist
-jOOQ
-JoSQL
-LIQUidFORM
-Quaere (Alias implementation)
-Quaere
-Querydsl
-Squill
-
-
-
-
diff --git a/h2/src/docsrc/html/license.html b/h2/src/docsrc/html/license.html
index 699609dd50..1f228df8f1 100644
--- a/h2/src/docsrc/html/license.html
+++ b/h2/src/docsrc/html/license.html
@@ -1,7 +1,7 @@
@@ -30,8 +30,8 @@ License
Summary and License FAQ
-H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0)
-or under the EPL 1.0 (Eclipse Public License).
+H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0)
+or under the EPL 1.0 (Eclipse Public License).
There is a license FAQ for both the MPL and the EPL.
@@ -47,7 +47,7 @@ Summary and License FAQ
However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2.
This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it,
hiding the fact that it was in fact just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the
-Wayback Machine and visit old web pages of http://www.bungisoft.com
.
+Wayback Machine and visit old web pages of http://www.bungisoft.com
.
About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same
copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code.
@@ -62,11 +62,11 @@
Summary and License FAQ
This software contains unmodified binary redistributions for
-H2 database engine (http://www.h2database.com/),
+H2 database engine (https://h2database.com/),
which is dual licensed and available under the MPL 2.0
(Mozilla Public License) or under the EPL 1.0 (Eclipse Public License).
An original copy of the license agreement can be found at:
-http://www.h2database.com/html/license.html
+https://h2database.com/html/license.html
Mozilla Public License Version 2.0
@@ -158,7 +158,7 @@ Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the terms of the Mozilla
Public License, v. 2.0. If a copy of the MPL was not distributed
-with this file, you can obtain one at http://mozilla.org/MPL/2.0
+with this file, you can obtain one at https://mozilla.org/MPL/2.0
If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
@@ -395,9 +395,9 @@ 7. GENERAL
Export Control Classification Number (ECCN)
-As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002
.
+As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002
.
However, for legal reasons, we can make no warranty that this information is correct.
-For details, see also the Apache Software Foundation Export Classifications page.
+For details, see also the Apache Software Foundation Export Classifications page.
diff --git a/h2/src/docsrc/html/links.html b/h2/src/docsrc/html/links.html
index 91dc087b9e..98cf0cad6a 100644
--- a/h2/src/docsrc/html/links.html
+++ b/h2/src/docsrc/html/links.html
@@ -1,7 +1,7 @@
@@ -21,8 +21,6 @@ Links
If you want to add a link, please send it to the support email address or post it to the group.
-
- Commercial Support
Quotes
@@ -36,23 +34,9 @@ Links
Products and Projects
-Commercial Support
-
-Commercial support for H2 is available
-from Steve McLeod (steve dot mcleod at gmail dot com).
-Please note he is not one of the main developers of H2. He describes himself as follows:
-
-- I'm a long time user of H2, routinely working with H2 databases several gigabytes in size.
-
- I'm the creator of popular commercial desktop software that uses H2.
-
- I'm a certified Java developer (SCJP).
-
- I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany.
-
- I'm based in Germany, and willing to travel within Europe.
- I can work remotely with teams in the USA and other locations."
-
-
Quotes
-
+
Quote:
"This is by far the easiest and fastest database that I have ever used.
Originally the web application that I am working on is using SQL server.
@@ -61,34 +45,34 @@
Quotes
Books
-
+
Seam In Action
Extensions
-
+
Grails H2 Database Plugin
-
+
h2osgi: OSGi for the H2 Database
-
+
H2Sharp: ADO.NET interface for the H2 database engine
A spatial extension of the H2 database.
Blog Articles, Videos
-
+
Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2
Analyzing CSVs with H2 in under 10 minutes (2009-12-07)
-
+
Efficient sorting and iteration on large databases (2009-06-15)
Porting Flexive to the H2 Database (2008-12-05)
H2 Database with GlassFish (2008-11-24)
-
+
H2 Database - Performance Tracing (2008-04-30)
Open Source Databases Comparison (2007-09-11)
@@ -102,13 +86,13 @@ Blog Articles, Videos
The Codist: Write Your Own Database, Again (2006-11-13)
Project Pages
-
+
Ohloh
-
+
Freshmeat Project Page
-
+
Wikipedia
-
+
Java Source Net
Linux Package Manager
@@ -125,7 +109,7 @@ Database Frontends / Tools
SQL query tool.
-
+
DbVisualizer
Database tool.
@@ -135,7 +119,7 @@ Database Frontends / Tools
Database utility written in Java.
-
+
Flyway
The agile database migration framework for Java.
@@ -156,17 +140,17 @@ Database Frontends / Tools
HenPlus is a SQL shell written in Java.
-
+
JDBC lint
Helps write correct and efficient code when using the JDBC API.
-
+
OpenOffice
Base is OpenOffice.org's database application. It provides access to relational data sources.
-
+
RazorSQL
An SQL query tool, database browser, SQL editor, and database administration tool.
@@ -176,7 +160,7 @@ Database Frontends / Tools
Universal Database Frontend.
-
+
SQL Workbench/J
Free DBMS-independent SQL tool.
@@ -186,7 +170,7 @@ Database Frontends / Tools
Graphical tool to view the structure of a database, browse the data, issue SQL commands etc.
-
+
SQuirreL DB Copy Plugin
Tool to copy data from one database to another.
@@ -198,7 +182,7 @@ Products and Projects
Visual business process modeling and simulation software for business users.
-
+
Adeptia BPM
A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows.
@@ -208,7 +192,7 @@ Products and Projects
Process-centric, services-based application integration suite.
-
+
Aejaks
A server-side scripting environment to build AJAX enabled web applications.
@@ -218,17 +202,17 @@ Products and Projects
A web framework that lets you write dynamic web applications with Zen-like simplicity.
-
+
Apache Cayenne
Open source persistence framework providing object-relational mapping (ORM) and remoting services.
-
+
Apache Jackrabbit
Open source implementation of the Java Content Repository API (JCR).
-
+
Apache OpenJPA
Open source implementation of the Java Persistence API (JPA).
@@ -238,7 +222,7 @@ Products and Projects
Helps building web applications.
-
+
BGBlitz
The Swiss army knife of Backgammon.
@@ -254,7 +238,7 @@ Products and Projects
JSR 168 compliant bookmarks management portlet application.
-
+
Claros inTouch
Ajax communication suite with mail, addresses, notes, IM, and rss reader.
@@ -285,7 +269,7 @@ Products and Projects
Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets).
-
+
District Health Information Software 2 (DHIS)
The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data,
tailored (but not limited) to integrated health information management activities.
@@ -296,7 +280,7 @@
Products and Projects
Open source Java Object Relational Mapping tool.
-
+
Eclipse CDO
The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models,
and a fast server-based O/R mapping solution.
@@ -307,7 +291,7 @@
Products and Projects
Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org).
-
+
FIT4Data
A testing framework for data management applications built on the Java implementation of FIT.
@@ -322,7 +306,7 @@ Products and Projects
GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing.
-
+
GBIF Integrated Publishing Toolkit (IPT)
The GBIF IPT is an open source, Java based web application that connects and serves
three types of biodiversity data: taxon primary occurrence data,
@@ -339,7 +323,7 @@
Products and Projects
Fun-to-play games with a simple interface.
-
+
GridGain
GridGain is easy to use Cloud Application Platform that enables development of
highly scalable distributed Java and Scala applications
@@ -356,12 +340,12 @@
Products and Projects
High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver.
-
+
Hibernate
Relational persistence for idiomatic Java (O-R mapping tool).
-
+
Hibicius
Online Banking Client for the HBCI protocol.
@@ -383,12 +367,12 @@ Products and Projects
Java Spatial. Jaspa potentially brings around 200 spatial functions.
-
+
Java Simon
Simple Monitoring API.
-
+
JBoss jBPM
A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration.
@@ -409,7 +393,7 @@ Products and Projects
Free, multi platform, open source GIS based on the GIS framework of uDig.
-
+
Jena
Java framework for building Semantic Web applications.
@@ -419,8 +403,8 @@ Products and Projects
Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern.
-
-jOOQ (Java Object Oriented Querying)
+
+jOOQ (JOOQ Object Oriented Querying)
jOOQ is a fluent API for typesafe SQL query construction and execution
@@ -429,7 +413,7 @@ Products and Projects
A Scala-based, secure, developer friendly web framework.
-
+
LiquiBase
A tool to manage database changes and refactorings.
@@ -439,7 +423,7 @@ Products and Projects
Build automation and management tool.
-
+
localdb
A tool that locates the full file path of the folder containing the database files.
@@ -465,7 +449,7 @@ Products and Projects
Java web app that provides dynamic web content and Java libraries access from JavaScript.
-
+
MyTunesRss
MyTunesRSS lets you listen to your music wherever you are.
@@ -501,7 +485,7 @@ Products and Projects
understand the application structure.
-
+
Ontology Works
This company provides semantic technologies including deductive
information repositories (the Ontology Works Knowledge Servers),
@@ -526,7 +510,7 @@
Products and Projects
OpenGroove is a groupware program that allows users to synchronize data.
-
+
OpenSocial Development Environment (OSDE)
Development tool for OpenSocial application.
@@ -538,10 +522,10 @@ Products and Projects
P5H2
-A library for the Processing programming language and environment.
+A library for the Processing programming language and environment.
-
+
Phase-6
A computer based learning software.
@@ -561,7 +545,7 @@ Products and Projects
Open source database benchmark.
-
+
Poormans
Very basic CMS running as a SWT application and generating static html pages.
@@ -572,7 +556,7 @@ Products and Projects
programmed in CFML into Java bytecode and executes it on a servlet engine.
-
+
Razuna
Open source Digital Asset Management System with integrated Web Content Management.
@@ -592,7 +576,7 @@ Products and Projects
ETL (Extract-Transform-Load) and script execution tool.
-
+
Sesar
Dependency Injection Container with Aspect Oriented Programming.
@@ -607,7 +591,7 @@ Products and Projects
A free, light-weight, java data access framework.
-
+
ShapeLogic
Toolkit for declarative programming, image processing and computer vision.
@@ -632,7 +616,7 @@ Products and Projects
A web-enabled, database independent, data synchronization/replication software.
-
+
SmartFoxServer
Platform for developing multiuser applications and games with Macromedia Flash.
@@ -647,7 +631,7 @@ Products and Projects
Simple object relational mapping.
-
+
Springfuse
Code generation For Spring, Spring MVC & Hibernate.
@@ -674,10 +658,10 @@ Products and Projects
Event (stream) processing kernel.
-
+
SUSE Manager, part of Linux Enterprise Server 11
The SUSE Manager
-
+
eases the burden of compliance with regulatory requirements and corporate policies.
@@ -686,7 +670,15 @@ Products and Projects
Easy-to-use backup solution for your iTunes library.
-
+
+TimeWriter
+TimeWriter is a very flexible program for time administration / time tracking.
+The older versions used dBase tables.
+The new version 5 is completely rewritten, now using the H2 database.
+TimeWriter is delivered in Dutch and English.
+
+
+
weblica
Desktop CMS.
@@ -696,7 +688,7 @@ Products and Projects
Collaborative and realtime interactive media platform for the web.
-
+
Werkzeugkasten
Minimum Java Toolset.
@@ -707,7 +699,7 @@ Products and Projects
for building applications composed from server components - view providers.
-
+
Volunteer database
A database front end to register volunteers, partnership and donation for a Non Profit organization.
diff --git a/h2/src/docsrc/html/main.html b/h2/src/docsrc/html/main.html
index 4aa9d265f8..ea060a9132 100644
--- a/h2/src/docsrc/html/main.html
+++ b/h2/src/docsrc/html/main.html
@@ -1,7 +1,7 @@
diff --git a/h2/src/docsrc/html/mainWeb.html b/h2/src/docsrc/html/mainWeb.html
index 3c2c95e168..07f12b2267 100644
--- a/h2/src/docsrc/html/mainWeb.html
+++ b/h2/src/docsrc/html/mainWeb.html
@@ -1,7 +1,7 @@
@@ -13,8 +13,8 @@
H2 Database Engine
-
-
+
+
@@ -29,7 +29,7 @@ H2 Database Engine
Very fast, open source, JDBC API
Embedded and server modes; in-memory databases
Browser based Console application
- Small footprint: around 1.5 MB jar file size
+ Small footprint: around 2.5 MB jar file size
@@ -37,17 +37,17 @@ H2 Database Engine
Download
- Version ${version} (${versionDate}), Beta
+ Version ${version} (${versionDate})
-
+
- Windows Installer (5 MB)
+ Windows Installer (6.7 MB)
-
+
- All Platforms (zip, 8 MB)
+ All Platforms (zip, 9.5 MB)
All Downloads
@@ -60,9 +60,8 @@ Download
Support
- Stack Overflow (tag H2)
- Google Group English,
- Japanese
+ Stack Overflow (tag H2)
+ Google Group
For non-technical issues, use:
+
+
+
+
+Contents
+
+ Introduction
+
+ Upgrading
+
+ File Format
+
+ Data types
+
+ Identity columns and sequences
+
+ INFORMATION_SCHEMA
+
+ General
+
+Introduction
+
+
+Between version 1.4.200 and version 2.0.202 there have been considerable changes, such that a simple update is
+not possible.
+
+
+
+It would have been nice to write some kind of migration tool, or to auto-detect the old file and upgrade it. Unfortunately, this
+is purely a volunteer-run project, so this is just the way it has to be. There is a migration tool, H2MigrationTool, available
+on GitHub, but it hasn't been tested by our team. Use it at
+your own risk.
+
+
+Upgrading
+
+
+The official way to upgrade is to export the database into a SQL script with the
+SCRIPT command
+USING YOUR CURRENT VERSION OF H2.
+
+
+
+Then create a fresh database USING THE NEW VERSION OF H2, and perform a
+RUNSCRIPT to load your data.
+You may need to specify the FROM_1X flag; see the documentation of this command for details.
+
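+A sketch of the two steps through JDBC (the database paths are hypothetical;
+each block must be run with the corresponding H2 jar on the classpath):
+
+// step 1: run with your CURRENT (old) version of H2
+try (Connection conn = DriverManager.getConnection("jdbc:h2:~/mydb");
+        Statement stat = conn.createStatement()) {
+    stat.execute("SCRIPT TO 'backup.sql'");
+}
+
+// step 2: run with the NEW version of H2, against a fresh database
+try (Connection conn = DriverManager.getConnection("jdbc:h2:~/mydb-v2");
+        Statement stat = conn.createStatement()) {
+    stat.execute("RUNSCRIPT FROM 'backup.sql' FROM_1X");
+}
+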
+
+MVStore file format
+
+
+The MVStore file format we use (i.e. the default) is still mostly the same, but some subtle changes have been made
+to the undo logs
+to improve crash safety and read/write performance.
+
+
+Data types
+
+
+The maximum length of CHARACTER
+and CHARACTER VARYING data types
+is now 1,048,576 characters. For larger values use
+CHARACTER LARGE OBJECT.
+
+
+
+BINARY
+and BINARY VARYING
+are now different data types. BINARY is a fixed-length data type and its default length is 1.
+The maximum length of binary strings is 1,048,576 bytes. For larger values use
+BINARY LARGE OBJECT.
+
+
+
+NUMERIC / DECIMAL / DEC without parameters
+now have scale 0. For a variable-scale data type see
+DECFLOAT.
+Negative scale isn't allowed for these data types any more.
+The maximum precision is now 100,000.
+
+
+
+ENUM values now have 1-based ordinal numbers.
+
+
+
+Arrays are now typed.
+Arrays with mixed types of elements aren't supported.
+In some cases they can be replaced with a new ROW
+data type.
+
+
+
+All non-standard data types, with the exception of TINYINT, JAVA_OBJECT, ENUM, GEOMETRY, JSON, and UUID, are deprecated.
+
+
+Identity columns and sequences
+
+
+Various legacy vendor-specific declarations and expressions are deprecated
+and may not work at all, depending on the compatibility mode.
+
+
+
+Identity columns should normally be declared with GENERATED BY DEFAULT AS IDENTITY or GENERATED ALWAYS AS IDENTITY
+clauses; options may also be specified.
+GENERATED ALWAYS AS IDENTITY columns cannot be assigned a user-provided value
+unless OVERRIDING SYSTEM VALUE is specified.
+
+
+
+NULL cannot be specified as a value for an IDENTITY column to force identity generation
+(with the exception of some compatibility modes).
+Use DEFAULT or simply exclude this column from the insert column list.
+
+
+
+IDENTITY() and SCOPE_IDENTITY() aren't available in Regular mode. If you need to get a generated value,
+use data change delta tables
+or Statement.getGeneratedKeys().
+
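+For example, with JDBC (a minimal sketch; table and column names are illustrative):
+
+try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
+        Statement stat = conn.createStatement()) {
+    stat.execute("CREATE TABLE TEST(ID BIGINT GENERATED BY DEFAULT AS IDENTITY"
+            + " PRIMARY KEY, NAME VARCHAR)");
+    try (PreparedStatement prep = conn.prepareStatement(
+            "INSERT INTO TEST(NAME) VALUES (?)", Statement.RETURN_GENERATED_KEYS)) {
+        prep.setString(1, "Hello");
+        prep.executeUpdate();
+        try (ResultSet keys = prep.getGeneratedKeys()) {
+            keys.next();
+            System.out.println("generated ID: " + keys.getLong(1));
+        }
+    }
+}
+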
+
+
+Undocumented Oracle-style .NEXTVAL and .CURRVAL expressions are restricted to Oracle compatibility mode.
+Other functions are deprecated for Regular mode.
+Use a sequence value expression instead.
+
+
+INFORMATION_SCHEMA
+
+
+INFORMATION_SCHEMA in H2 is now compliant with the SQL Standard and other database systems,
+but it isn't compliant with previous versions of H2.
+You may need to update your queries.
+
+
+General
+
+
+There are a lot more SQL keywords now. Many SQL statements feature far better support of SQL-Standard behaviour.
+There is a NON_KEYWORDS setting that
+can be used as a temporary workaround if your application uses these keywords as unquoted identifiers.
+
+
+
+Numeric and boolean values aren't comparable. This means you need to use TRUE, FALSE, or UNKNOWN (NULL)
+as boolean literals. 1 and 0 don't work any more (with the exception of some compatibility modes).
+
+
+
+Some other non-standard SQL syntax has been restricted to related compatibility modes.
+Since H2 2.0.204 there is a LEGACY compatibility mode that provides some limited compatibility with previous versions.
+
+
+
+Various deprecated grammar elements are marked in red in the documentation. Please avoid using them.
+
+
+
+Migrating an old database to the new version works most of the time. However, there are a couple of important changes in the new version to keep in mind:
+
+
+
+- Oracle-style units were never officially supported outside of Oracle compatibility mode, although some worked before. For example, the length of the VARCHAR data type can no longer be specified using CHAR or BYTE; use CHARACTERS or OCTETS instead. CHAR and BYTE can only be used in Oracle compatibility mode.
+
+- IDENTITY syntax changed when a type is specified: if the type for IDENTITY is specified, then the clause needs to be expanded as INTEGER GENERATED ALWAYS AS IDENTITY. Using just INTEGER IDENTITY no longer works.
+
+- LOG connection setting removed: PageStore was removed from H2, so the "LOG=0" setting at the end of the URL (like
+"jdbc:h2:file:/tmp/test;LOG=0") is no longer available.
+
+
+
diff --git a/h2/src/docsrc/html/mvstore.html b/h2/src/docsrc/html/mvstore.html
index 2d85f48011..a5fd229d05 100644
--- a/h2/src/docsrc/html/mvstore.html
+++ b/h2/src/docsrc/html/mvstore.html
@@ -1,7 +1,7 @@
@@ -59,7 +59,7 @@ MVStore
Overview
The MVStore is a persistent, log structured key-value store.
-It is planned to be the next storage subsystem of H2,
+It is used as default storage subsystem of H2,
but it can also be used directly within an application, without using JDBC or SQL.
- MVStore stands for "multi-version store".
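+A minimal usage sketch of the standalone API (the file name is illustrative):
+
+import org.h2.mvstore.MVMap;
+import org.h2.mvstore.MVStore;
+
+// open the store (or create it if it does not exist yet)
+MVStore s = MVStore.open("data.mv");
+MVMap<Integer, String> map = s.openMap("data");
+map.put(1, "Hello");
+map.put(2, "World");
+s.commit();
+System.out.println(map.get(1)); // Hello
+s.close();
+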
@@ -172,7 +172,7 @@
Maps
including access to the first and last key, iterate over some or all keys, and so on.
Also supported, and very uncommon for maps, is fast index lookup:
-the entries of the map can be be efficiently accessed like a random-access list
+the entries of the map can be efficiently accessed like a random-access list
(get the entry at the given index), and the index of a key can be calculated efficiently.
That also means getting the median of two keys is very fast,
and a range of keys can be counted very quickly.
@@ -234,7 +234,7 @@
Transactions
To support multiple concurrent open transactions, a transaction utility is included,
the TransactionStore
.
-The tool supports PostgreSQL style "read committed" transaction isolation
+The tool supports "read committed" transaction isolation
with savepoints, two-phase commit, and other features typically available in a database.
There is no limit on the size of a transaction
(the log is written to disk for large or long running transactions).
@@ -295,8 +295,7 @@
R-Tree and Pluggable Map Implementations
The map implementation is pluggable.
In addition to the default MVMap
(multi-version map),
-there is a map that supports concurrent write operations,
-and a multi-version R-tree map implementation for spatial operations.
+there is a multi-version R-tree map implementation for spatial operations.
Concurrent Operations and Caching
@@ -467,8 +466,6 @@ Storage Engine for H2
For older versions, append
;MV_STORE=TRUE
to the database URL.
-Even though it can be used with the default table level locking,
-by default the MVCC mode is enabled when using the MVStore.
File Format
@@ -478,7 +475,7 @@ File Format
The file headers are one block each; a block is 4096 bytes.
Each chunk is at least one block, but typically 200 blocks or more.
Data is stored in the chunks in the form of a
-log structured storage.
+log structured storage.
There is one chunk for every version.
@@ -496,7 +493,7 @@ File Format
}
s.commit();
for (int i = 0; i < 100; i++) {
- map.put(0, "Hi");
+ map.put(i, "Hi");
}
s.commit();
s.close();
@@ -512,7 +509,7 @@ File Format
Chunk 2:
-- Page 4: (root) node with 2 entries pointing to page 3 and 5
+- Page 4: (root) node with 2 entries pointing to page 5 and 3
- Page 5: leaf with 140 entries (keys 0 - 139)
@@ -536,18 +533,18 @@
File Header
The data is stored in the form of a key-value pair.
Each value is stored as a hexadecimal number. The entries are:
-
- H: The entry "H:2" stands for the the H2 database.
+
- H: The entry "H:2" stands for the H2 database.
- block: The block number where one of the newest chunks starts
(but not necessarily the newest).
- blockSize: The block size of the file; currently always hex 1000, which is decimal 4096,
- to match the disk sector
+ to match the disk sector
length of modern hard disks.
- chunk: The chunk id, which is normally the same value as the version;
however, the chunk id might roll over to 0, while the version doesn't.
- created: The number of milliseconds since 1970 when the file was created.
- format: The file format number. Currently 1.
- version: The version number of the chunk.
-
- fletcher: The
+
- fletcher: The
Fletcher-32 checksum of the header.
@@ -604,11 +601,11 @@
Chunk Format
If an entry in a map is changed, removed, or added, then the respective page is copied,
modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented.
This mechanism is called copy-on-write, and is similar to how the
-Btrfs file system works.
+Btrfs file system works.
Chunks without live pages are marked as free, so the space can be re-used by more recent chunks.
Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk
for some time (until a small chunk is written or the chunks are compacted).
-There is a
+There is a
delay of 45 seconds (by default) before a free chunk is overwritten,
to ensure new versions are persisted first.
@@ -630,14 +627,14 @@ Chunk Format
Page Format
-Each map is a B-tree,
+Each map is a B-tree,
and the map data is stored in (B-tree-) pages.
There are leaf pages that contain the key-value pairs of the map,
and internal nodes, which only contain keys and pointers to leaf pages.
The root of a tree is either a leaf or an internal node.
Unlike file header and chunk header and footer, the page data is not human readable.
Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes),
-and variable size int and long
+and variable size int and long
(1 to 5 / 10 bytes). The page format is:
- length (int): Length of the page in bytes.
@@ -681,7 +678,7 @@
Page Format
The total number of entries in child pages are kept to allow efficient range counting,
lookup by index, and skip operations.
-The pages form a counted B-tree.
+The pages form a counted B-tree.
Data compression: The data after the page type are optionally compressed using the LZF algorithm.
diff --git a/h2/src/docsrc/html/navigation.js b/h2/src/docsrc/html/navigation.js
index aeb90d3e14..1262d1bf5f 100644
--- a/h2/src/docsrc/html/navigation.js
+++ b/h2/src/docsrc/html/navigation.js
@@ -1,7 +1,7 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
- * * Initial Developer: H2 Group
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
*/
function scroll() {
diff --git a/h2/src/docsrc/html/performance.html b/h2/src/docsrc/html/performance.html
index 81a37159be..54d1b4ba15 100644
--- a/h2/src/docsrc/html/performance.html
+++ b/h2/src/docsrc/html/performance.html
@@ -1,7 +1,7 @@
@@ -52,54 +52,54 @@
Performance Comparison
Embedded
Test Case Unit H2 HSQLDB Derby
-Simple: Init ms 1019 1907 8280
-Simple: Query (random) ms 1304 873 1912
-Simple: Query (sequential) ms 835 1839 5415
-Simple: Update (sequential) ms 961 2333 21759
-Simple: Delete (sequential) ms 950 1922 32016
-Simple: Memory Usage MB 21 10 8
-BenchA: Init ms 919 2133 7528
-BenchA: Transactions ms 1219 2297 8541
-BenchA: Memory Usage MB 12 15 7
-BenchB: Init ms 905 1993 8049
-BenchB: Transactions ms 1091 583 1165
-BenchB: Memory Usage MB 17 11 8
-BenchC: Init ms 2491 4003 8064
-BenchC: Transactions ms 1979 803 2840
-BenchC: Memory Usage MB 19 22 9
-Executed statements # 1930995 1930995 1930995
-Total time ms 13673 20686 105569
-Statements per second # 141226 93347 18291
+Simple: Init ms 1021 2510 6762
+Simple: Query (random) ms 513 653 2035
+Simple: Query (sequential) ms 1344 2210 7665
+Simple: Update (sequential) ms 1642 3040 7034
+Simple: Delete (sequential) ms 1697 2310 9981
+Simple: Memory Usage MB 18 15 13
+BenchA: Init ms 801 2877 6576
+BenchA: Transactions ms 1369 2629 4987
+BenchA: Memory Usage MB 12 15 9
+BenchB: Init ms 966 2544 7161
+BenchB: Transactions ms 341 2316 815
+BenchB: Memory Usage MB 14 10 10
+BenchC: Init ms 2630 3144 7420
+BenchC: Transactions ms 1732 1742 2735
+BenchC: Memory Usage MB 19 34 11
+Executed statements # 2222032 2222032 2222032
+Total time ms 14056 25975 63171
+Statements per second #/s 158084 85545 35174
Client-Server
-Test Case Unit H2 (Server) HSQLDB Derby PostgreSQL MySQL
-Simple: Init ms 16338 17198 27860 30156 29409
-Simple: Query (random) ms 3399 2582 6190 3315 3342
-Simple: Query (sequential) ms 21841 18699 42347 30774 32611
-Simple: Update (sequential) ms 6913 7745 28576 32698 11350
-Simple: Delete (sequential) ms 8051 9751 42202 44480 16555
-Simple: Memory Usage MB 22 11 9 0 1
-BenchA: Init ms 12996 14720 24722 26375 26060
-BenchA: Transactions ms 10134 10250 18452 21453 15877
-BenchA: Memory Usage MB 13 15 9 0 1
-BenchB: Init ms 15264 16889 28546 31610 29747
-BenchB: Transactions ms 3017 3376 1842 2771 1433
-BenchB: Memory Usage MB 17 12 11 1 1
-BenchC: Init ms 14020 10407 17655 19520 17532
-BenchC: Transactions ms 5076 3160 6411 6063 4530
-BenchC: Memory Usage MB 19 21 11 1 1
-Executed statements # 1930995 1930995 1930995 1930995 1930995
-Total time ms 117049 114777 244803 249215 188446
-Statements per second # 16497 16823 7887 7748 10246
+Test Case Unit H2 HSQLDB Derby PostgreSQL MySQL
+Simple: Init ms 27989 48055 47142 32972 109482
+Simple: Query (random) ms 4821 5984 14741 4089 15140
+Simple: Query (sequential) ms 33656 49112 95999 35676 143536
+Simple: Update (sequential) ms 9878 23565 31418 26113 50676
+Simple: Delete (sequential) ms 13056 28584 43955 20985 64647
+Simple: Memory Usage MB 18 15 15 2 4
+BenchA: Init ms 20993 42525 38335 27794 107723
+BenchA: Transactions ms 16549 29255 28995 23113 65036
+BenchA: Memory Usage MB 12 18 11 1 4
+BenchB: Init ms 26785 48772 39756 32369 115398
+BenchB: Transactions ms 898 10046 1916 818 1794
+BenchB: Memory Usage MB 16 11 12 2 5
+BenchC: Init ms 18266 26865 39325 24547 70531
+BenchC: Transactions ms 6569 7783 9412 8916 19150
+BenchC: Memory Usage MB 17 35 13 2 7
+Executed statements # 2222032 2222032 2222032 2222032 2222032
+Total time ms 179460 320546 390994 237392 763113
+Statements per second #/s 12381 6932 5683 9360 2911
Benchmark Results and Comments
H2
-Version 1.4.177 (2014-04-12) was used for the test.
+Version 2.0.202 (2021-11-25) was used for the test.
For most operations, the performance of H2 is about the same as for HSQLDB.
One situation where H2 is slow is large result sets, because they are buffered to
disk if more than a certain number of records are returned.
@@ -108,14 +108,14 @@
H2
HSQLDB
-Version 2.3.2 was used for the test.
+Version 2.5.1 was used for the test.
Cached tables are used in this test (hsqldb.default_table_type=cached
),
and the write delay is 1 second (SET WRITE_DELAY 1
).
Derby
-Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test.
+Version 10.14.2.0 was used for the test. Derby is clearly the slowest embedded database in this test.
This seems to be a structural problem, because all operations are really slow.
It will be hard for the developers of Derby to improve the performance to a reasonable level.
A few problems have been identified: leaving autocommit on is a problem for Derby.
@@ -132,33 +132,42 @@
Derby
PostgreSQL
-Version 9.1.5 was used for the test.
+Version 13.4 was used for the test.
The following options were changed in postgresql.conf:
-fsync = off, commit_delay = 1000
.
+fsync = off, commit_delay = 100000 (microseconds).
PostgreSQL is run in server mode.
The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
MySQL
-Version 5.1.65-log was used for the test.
+Version 8.0.27 was used for the test.
MySQL was run with the InnoDB backend.
-The setting innodb_flush_log_at_trx_commit
-(found in the my.ini / my.cnf
file) was set to 0. Otherwise (and by default), MySQL is slow
-(around 140 statements per second in this test) because it tries to flush the data to disk for each commit.
+The settings innodb_flush_log_at_trx_commit
and sync_binlog
+(found in the my.ini / community-mysql-server.cnf
file) were set to 0. Otherwise
+(and by default), MySQL is slow (around 140 statements per second in this test)
+because it tries to flush the data to disk for each commit.
For small transactions (when autocommit is on) this is really slow.
But many use cases use small or relatively small transactions.
Too bad these settings are not listed in the configuration wizard,
and they are always overwritten when using the wizard.
-You need to change this setting manually in the file my.ini / my.cnf
, and then restart the service.
+You need to change those settings manually in the file my.ini / community-mysql-server.cnf
,
+and then restart the service.
The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
+SQLite
+
+SQLite 3.36.0.2 was tested, but the results are not published currently,
+because it's about 50 times slower than H2 in embedded mode.
+Any tips on how to configure SQLite for higher performance are welcome.
+
+
Firebird
-Firebird 1.5 (default installation) was tested, but the results are not published currently.
-It is possible to run the performance test with the Firebird database,
-and any information on how to configure Firebird for higher performance are welcome.
+Firebird 3.0 (default installation) was tested, but failed on the multi-threaded part of the test.
+It is likely possible to run the performance test with the Firebird database,
+and any information on how to configure Firebird for this is welcome.
Why Oracle / MS SQL Server / DB2 are Not Listed
@@ -166,7 +175,6 @@ Why Oracle / MS SQL Server / DB2 are Not Listed
The license of these databases does not allow publishing benchmark results.
This doesn't mean that they are fast. They are in fact quite slow,
and need a lot of memory. But you will need to test this yourself.
-SQLite was not tested because the JDBC driver doesn't support transactions.
About this Benchmark
@@ -210,8 +218,7 @@ Comparing Embedded with Server Databases
Test Platform
-This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled.
-The JVM used is Sun JDK 1.6.
+This test was run on Fedora 34 with Oracle JVM 1.8 and an SSD drive.
Multiple Runs
@@ -371,6 +378,22 @@ Index Usage
For other columns, indexes need to be created manually using the CREATE INDEX
statement.
+Index Hints
+
+If you have determined that H2 is not using the optimal index for your query, you can use index hints to force
+H2 to use specific indexes.
+
+
+SELECT * FROM TEST USE INDEX (index_name_1, index_name_2) WHERE X=1
+
+Only indexes in the list will be considered when choosing an index for the given table.
+The order of the indexes in the list is not significant.
+
+It is possible that no index in the list is chosen, in which case a full table scan will be used.
+
+An empty list of index names forces a full table scan to be performed.
+Each index in the list must exist.
+
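+For example, to force a full table scan, an empty index list can be used
+(a sketch based on the rules above, reusing the TEST table from the example):
+
+SELECT * FROM TEST USE INDEX () WHERE X=1
+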
How Data is Stored Internally
For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT,
@@ -385,7 +408,7 @@ How Data is Stored Internally
then this column is used as the key of the data b-tree.
If no primary key has been specified, if the primary key column is of another data type,
or if the primary key contains more than one column,
-then a hidden auto-increment column of type BIGINT is added to the table,
+then a hidden identity column of type BIGINT is added to the table,
which is used as the key for the data b-tree.
All other columns of the table are stored within the data area of this data b-tree
(except for large BLOB, CLOB columns, which are stored externally).
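
As an illustrative sketch (hypothetical table names, not from the manual):

-- ID itself becomes the key of the data b-tree:
CREATE TABLE T1(ID BIGINT PRIMARY KEY, NAME VARCHAR);
-- no single-column integer primary key, so a hidden BIGINT key is added:
CREATE TABLE T2(A VARCHAR, B VARCHAR, PRIMARY KEY(A, B));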
@@ -474,8 +497,8 @@ Prepared Statements and IN(...)
PreparedStatement prep = conn.prepareStatement(
- "SELECT * FROM TABLE(X INT=?) T INNER JOIN TEST ON T.X=TEST.ID");
-prep.setObject(1, new Object[] { "1", "2" });
+ "SELECT * FROM TEST WHERE ID = ANY(?)");
+prep.setObject(1, new Long[] { 1L, 2L });
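+// the Long[] array is bound as a single array-valued parameter for ANY(?)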
ResultSet rs = prep.executeQuery();
@@ -496,7 +519,7 @@ Data Types
Each data type has different storage and performance characteristics:
- The DECIMAL/NUMERIC type is slower
- and requires more storage than the REAL and DOUBLE types.
+ and requires more storage than the REAL and DOUBLE PRECISION types.
- Text types are slower to read, write, and compare than numeric types and generally require more storage.
- See Large Objects for information on BINARY vs. BLOB
@@ -582,7 +605,7 @@ Database Profiling
For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2).
The easiest way to set the trace level is to append the setting to the database URL, for example:
jdbc:h2:~/test;TRACE_LEVEL_FILE=2 or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2.
-As an example, execute the the following script using the H2 Console:
+As an example, execute the following script using the H2 Console:
SET TRACE_LEVEL_FILE 2;
@@ -733,7 +756,8 @@ How Data is Stored and How Indexes Work
Access by row id is fast because the data is sorted by this key.
-Please note the row id is not available until after the row was added (that means, it can not be used in computed columns or constraints).
+Please note the row id is not available until after the row was added
+(that means it cannot be used in generated columns or constraints).
If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned.
A table scan iterates over all rows in the table, in the order of the row id.
To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT:
@@ -856,19 +880,6 @@ Using Multiple Indexes
Fast Database Import
-To speed up large imports, consider using the following options temporarily:
-
-SET LOG 0
(disabling the transaction log)
-SET CACHE_SIZE
(a large cache is faster)
-SET LOCK_MODE 0
(disable locking)
-SET UNDO_LOG 0
(disable the session undo log)
-
-
-These options can be set in the database URL:
-jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0
.
-Most of those options are not recommended for regular use, that means you need to reset them after use.
-
-
If you have to import a lot of rows, use a PreparedStatement or use CSV import.
Please note that CREATE TABLE(...) ... AS SELECT ...
is faster than CREATE TABLE(...); INSERT INTO ... SELECT ....
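
As a sketch of the difference (hypothetical table names):

-- faster: create and populate in a single statement
CREATE TABLE TEST_COPY AS SELECT * FROM TEST;
-- slower: create first, then insert
CREATE TABLE TEST_COPY2(ID INT PRIMARY KEY, NAME VARCHAR);
INSERT INTO TEST_COPY2 SELECT ID, NAME FROM TEST;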
diff --git a/h2/src/docsrc/html/quickstart.html b/h2/src/docsrc/html/quickstart.html
index 74d6bf38d7..5bb4fc0a41 100644
--- a/h2/src/docsrc/html/quickstart.html
+++ b/h2/src/docsrc/html/quickstart.html
@@ -1,7 +1,7 @@
diff --git a/h2/src/docsrc/html/roadmap.html b/h2/src/docsrc/html/roadmap.html
deleted file mode 100644
index 17ba4b1ac4..0000000000
--- a/h2/src/docsrc/html/roadmap.html
+++ /dev/null
@@ -1,591 +0,0 @@
-
-
-
-
-
-
-
-Roadmap
-
-
-
-
-
-
-
-
-Roadmap
-
-New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests.
-Of course, patches are always welcome, but are not always applied as is.
-See also Providing Patches.
-
-
-Version 1.5.x: Planned Changes
-- Replace file password hash with file encryption key; validate encryption key when connecting.
-
- Remove "set binary collation" feature.
-
- Remove the encryption algorithm XTEA.
-
- Disallow referencing other tables in a table (via constraints for example).
-
- Remove PageStore features like compress_lob.
-
-
-Version 1.4.x: Planned Changes
-- Change license to MPL 2.0.
-
- Automatic migration from 1.3 databases to 1.4.
-
- Option to disable the file name suffix somehow (issue 447).
-
-
-Priority 1
-- Bugfixes.
-
- More tests with MULTI_THREADED=1 (and MULTI_THREADED with MVCC):
- Online backup (using the 'backup' statement).
-
- Server side cursors.
-
-
-Priority 2
-- Support hints for the optimizer (which index to use, enforce the join order).
-
- Full outer joins.
-
- Access rights: remember the owner of an object.
- Create, alter and drop privileges.
- COMMENT: allow owner of object to change it.
- Issue 208: Access rights for schemas.
-
- Test multi-threaded in-memory db access.
-
- MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes.
-
- Support GRANT SELECT, UPDATE ON [schemaName.] *.
-
- Migrate database tool (also from other database engines). For Oracle, maybe use
- DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL.
-
- Clustering: support mixed clustering mode (one embedded, others in server mode).
-
- Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3).
-
- Window functions: RANK() and DENSE_RANK(), partition using OVER().
- select *, count(*) over() as fullCount from ... limit 4;
-
- PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables.
-
- Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211.
-
- Test very large databases and LOBs (up to 256 GB).
-
- Store all temp files in the temp directory.
-
- Don't use temp files, specially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory).
- Also to allow opening client / server (remote) connections when using LOBs.
-
- Make DDL (Data Definition) operations transactional.
-
- Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED).
-
- Groovy Stored Procedures: http://groovy.codehaus.org/GSQL
-
- Add a migration guide (list differences between databases).
-
- Optimization: automatic index creation suggestion using the trace file?
-
- Fulltext search Lucene: analyzer configuration, mergeFactor.
-
- Compression performance: don't allocate buffers, compress / expand in to out buffer.
-
- Rebuild index functionality to shrink index size and improve performance.
-
- Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA).
-
- Test performance again with SQL Server, Oracle, DB2.
-
- Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification.
-
- Write more tests and documentation for MVCC (Multi Version Concurrency Control).
-
- Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after.
-
- Implement, test, document XAConnection and so on.
-
- Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption).
-
- CHECK: find out what makes CHECK=TRUE slow, move to CHECK2.
-
- Drop with invalidate views (so that source code is not lost). Check what other databases do exactly.
-
- Index usage for (ID, NAME)=(1, 'Hi'); document.
-
- Set a connection read only (Connection.setReadOnly) or using a connection parameter.
-
- Access rights: finer grained access control (grant access for specific functions).
-
- ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]).
-
- Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP).
-
- Web server classloader: override findResource / getResourceFrom.
-
- Cost for embedded temporary view is calculated wrong, if result is constant.
-
- Count index range query (count(*) where id between 10 and 20).
-
- Performance: update in-place.
-
- Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log).
-
- Database file name suffix: a way to use no or a different suffix (for example using a slash).
-
- Eclipse plugin.
-
- Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait].
- See also MS SQL Server "Query Notification".
-
- Fulltext search (native): reader / tokenizer / filter.
-
- Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files.
-
- iReport to support H2.
-
- Include SMTP (mail) client (alert on cluster failure, low disk space,...).
-
- Option for SCRIPT to only process one or a set of schemas or tables, and append to a file.
-
- JSON parser and functions.
-
- Copy database: tool with config GUI and batch mode, extensible (example: compare).
-
- Document, implement tool for long running transactions using user-defined compensation statements.
-
- Support SET TABLE DUAL READONLY.
-
- GCJ: what is the state now?
-
- Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html
-
- Optimization: simpler log compression.
-
- Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif
-
- Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN.
-
- Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R).
-
- Custom class loader to reload functions on demand.
-
- Test http://mysql-je.sourceforge.net/
-
- H2 Console: the webclient could support more features like phpMyAdmin.
-
- Support Oracle functions: TO_DATE, TO_NUMBER.
-
- Work on the Java to C converter.
-
- The HELP information schema can be directly exposed in the Console.
-
- Maybe use the 0x1234 notation for binary fields, see MS SQL Server.
-
- Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html
-
- SQL Server 2005, Oracle: support COUNT(*) OVER(). See http://www.orafusion.com/art_anlytc.htm
-
- SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip
-
- Version column (number/sequence and timestamp based).
-
- Optimize getGeneratedKey: send last identity after each execute (server).
-
- Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID).
-
- Max memory rows / max undo log size: use block count / row size not row count.
-
- Implement point-in-time recovery.
-
- Support PL/SQL (programming language / control flow statements).
-
- LIKE: improved version for larger texts (currently using naive search).
-
- Throw an exception when the application calls getInt on a Long (optional).
-
- Default date format for input and output (local date constants).
-
- Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery).
-
- File system that writes to two file systems (replication, replicating file system).
-
- Standalone tool to get relevant system properties and add it to the trace output.
-
- Support 'call proc(1=value)' (PostgreSQL, Oracle).
-
- Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?).
-
- Console: autocomplete Ctrl+Space inserts template.
-
- Option to encrypt .trace.db file.
-
- Auto-Update feature for database, .jar file.
-
- ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp.
-
- Partial indexing (see PostgreSQL).
-
- Add GUI to build a custom version (embedded, fulltext,...) using build flags.
-
- http://rubyforge.org/projects/hypersonic/
-
- Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app).
-
- Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility).
-
- Backup tool should work with other databases as well.
-
- Console: -ifExists doesn't work for the console. Add a flag to disable other dbs.
-
- Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess).
-
- Java static code analysis: http://pmd.sourceforge.net/
-
- Java static code analysis: http://www.eclipse.org/tptp/
-
- Compatibility for CREATE SCHEMA AUTHORIZATION.
-
- Implement Clob / Blob truncate and the remaining functionality.
-
- Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ...
-
- File locking: writing a system property to detect concurrent access from the same VM (different classloaders).
-
- Pure SQL triggers (example: update parent table if the child table is changed).
-
- Add H2 to Gem (Ruby install system).
-
- Support linked JCR tables.
-
- Native fulltext search: min word length; store word positions.
-
- Add an option to the SCRIPT command to generate only portable / standard SQL.
-
- Updatable views: create 'instead of' triggers automatically if possible (simple cases first).
-
- Improve create index performance.
-
- Compact databases without having to close the database (vacuum).
-
- Implement more JDBC 4.0 features.
-
- Support TRANSFORM / PIVOT as in MS Access.
-
- SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...).
-
- Support updatable views with join on primary keys (to extend a table).
-
- Public interface for functions (not public static).
-
- Support reading the transaction log.
-
- Feature matrix as in i-net software.
-
- Updatable result set on table without primary key or unique index.
-
- Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221.
-
- Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString')
-
- Support data type INTERVAL
-
- Support nested transactions (possibly using savepoints internally).
-
- Add a benchmark for bigger databases, and one for many users.
-
- Compression in the result set over TCP/IP.
-
- Support curtimestamp (like curtime, curdate).
-
- Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options.
-
- Release locks (shared or exclusive) on demand
-
- Support OUTER UNION
-
- Support parameterized views (similar to CSVREAD, but using just SQL for the definition)
-
- A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object
-
- Support dynamic linked schema (automatically adding/updating/removing tables)
-
- Clustering: adding a node should be very fast and without interrupting clients (very short lock)
-
- Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific
-
- Run benchmarks with Android, Java 7, java -server
-
- Optimizations: faster hash function for strings.
-
- DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality
-
- Benchmark: add a graph to show how databases scale (performance/database size)
-
- Implement a SQLData interface to map your data over to a custom object
-
- In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true)
-
- Support multiple directories (on different hard drives) for the same database
-
- Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response
-
- Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server)
-
- Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML
-
- Support triggers with a string property or option: SpringTrigger, OSGITrigger
-
- MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id;
-
- Ability to resize the cache array when resizing the cache
-
- Time based cache writing (one second after writing the log)
-
- Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185
-
- Index usage for REGEXP LIKE.
-
- Compatibility: add a role DBA (like ADMIN).
-
- Better support multiple processors for in-memory databases.
-
- Support N'text'
-
- Support compatibility for jdbc:hsqldb:res:
-
- HSQLDB compatibility: automatically convert to the next 'higher' data type.
- Example: cast(2000000000 as int) + cast(2000000000 as int);
- (HSQLDB: long; PostgreSQL: integer out of range)
-
- Provide an Java SQL builder with standard and H2 syntax
-
- Trace: write OS, file system, JVM,... when opening the database
-
- Support indexes for views (probably requires materialized views)
-
- Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters
-
- Server: use one listener (detect if the request comes from an PG or TCP client)
-
- Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200
-
- Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html
-
- DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates.
-
- Support a special trigger on all tables to allow building a transaction log reader.
-
- File system with a background writer thread; test if this is faster
-
- Better document the source code (high level documentation).
-
- Support select * from dual a left join dual b on b.x=(select max(x) from dual)
-
- Optimization: don't lock when the database is read-only
-
- Issue 146: Support merge join.
-
- Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download
-
- Cluster: hot deploy (adding a node at runtime).
-
- Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts.
-
- Oracle: support DECODE method (convert to CASE WHEN).
-
- Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping
-
- Improve documentation of access rights.
-
- Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation().
-
- Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others).
-
- Remember the user defined data type (domain) of a column.
-
- MVCC: support multi-threaded kernel with multi-version concurrency.
-
- Auto-server: add option to define the port range or list.
-
- Support Jackcess (MS Access databases)
-
- Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World')
-
- Improve time to open large databases (see mail 'init time for distributed setup')
-
- Move Maven 2 repository from hsql.sf.net to h2database.sf.net
-
- Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...)
-
- Optimize A=? OR B=? to UNION if the cost is lower.
-
- Javadoc: document design patterns used
-
- Support custom collators, for example for natural sort (for text that contains numbers).
-
- Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt)
-
- Convert SQL-injection-2.txt to html document, include SQLInjection.java sample
-
- Support OUT parameters in user-defined procedures.
-
- Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp
-
- HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA;
- CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC
-
- Translation: use ${.} in help.csv
-
- Translated .pdf
-
- Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file
-
- Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates.
- This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys.
- Also support it when using INSERT ... SELECT.
-
- RECOVER=2 to backup the database, run recovery, open the database
-
- Recovery should work with encrypted databases
-
- Corruption: new error code, add help
-
- Space reuse: after init, scan all storages and free those that don't belong to a live database object
-
- Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects)
-
- Support NOCACHE table option (Oracle).
-
- Support table partitioning.
-
- Add regular javadocs (using the default doclet, but another css) to the homepage.
-
- The database should be kept open for a longer time when using the server mode.
-
- Javadocs: for each tool, add a copy & paste sample in the class level.
-
- Javadocs: add @author tags.
-
- Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start();
-
- MySQL compatibility: real SQL statement for DESCRIBE TEST
-
- Use a default delay of 1 second before closing a database.
-
- Write (log) to system table before adding to internal data structures.
-
- Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup).
-
- Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case).
-
- MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem).
-
- Oracle compatibility: support NLS_DATE_FORMAT.
-
- Support for Thread.interrupt to cancel running statements.
-
- Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process).
-
- H2 Console: support CLOB/BLOB download using a link.
-
- Support flashback queries as in Oracle.
-
- Import / Export of fixed with text files.
-
- HSQLDB compatibility: automatic data type for SUM if value is the value is too big (by default use the same type as the data).
-
- Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn
-
- Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns).
-
- H2 Console: in-place autocomplete.
-
- Support large databases: split database files to multiple directories / disks (similar to tablespaces).
-
- H2 Console: support configuration option for fixed width (monospace) font.
-
- Native fulltext search: support analyzers (specially for Chinese, Japanese).
-
- Automatically compact databases from time to time (as a background process).
-
- Test Eclipse DTP.
-
- H2 Console: autocomplete: keep the previous setting
-
- executeBatch: option to stop at the first failed statement.
-
- Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5
-
- Support Oracle ROWID (unique identifier for each row).
-
- MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c);
-
- Server mode: improve performance for batch updates.
-
- Applets: support read-only databases in a zip file (accessed as a resource).
-
- Long running queries / errors / trace system table.
-
- H2 Console should support JaQu directly.
-
- Better document FTL_SEARCH, FTL_SEARCH_DATA.
-
- Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL.
-
- Index creation using deterministic functions.
-
- ANALYZE: for unique indexes that allow null, count the number of null.
-
- MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html
-
- AUTO_SERVER: support changing IP addresses (disable a network while the database is open).
-
- Avoid using java.util.Calendar internally because it's slow, complicated, and buggy.
-
- Support TRUNCATE .. CASCADE like PostgreSQL.
-
- Fulltext search: lazy result generation using SimpleRowSource.
-
- Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello').
-
- MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73.
-
- MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2
-
- Docs: add a one line description for each functions and SQL statements at the top (in the link section).
-
- Javadoc search: weight for titles should be higher ('random' should list Functions as the best match).
-
- Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes.
-
- Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete.
-
- MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL)
-
- Support a data type "timestamp with timezone" using java.util.Calendar.
-
- Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62
-
- Add database creation date and time to the database.
-
- Support ASSERTION.
-
- MySQL compatibility: support comparing 1='a'
-
- Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html
-
- PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver.
-
- RunScript should be able to read from system in (or quite mode for Shell).
-
- Natural join: support select x from dual natural join dual.
-
- Support using system properties in database URLs (may be a security problem).
-
- Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b
-
- Use the Java service provider mechanism to register file systems and function libraries.
-
- MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL).
-
- Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)).
-
- Optimization for EXISTS: convert to inner join or IN(..) if possible.
-
- Functions: support hashcode(value); cryptographic and fast
-
- Serialized file lock: support long running queries.
-
- Network: use 127.0.0.1 if other addresses don't work.
-
- Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication.
-
- Support reading JCR data: one table per node type; query table; cache option
-
- OSGi: create a sample application, test, document.
-
- help.csv: use complete examples for functions; run as test case.
-
- Functions to calculate the memory and disk space usage of a table, a row, or a value.
-
- Re-implement PooledConnection; use a lightweight connection object.
-
- Doclet: convert tests in javadocs to a java class.
-
- Doclet: format fields like methods, but support sorting by name and value.
-
- Doclet: shrink the html files.
-
- MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56
-
- Allow to scan index backwards starting with a value (to better support ORDER BY DESC).
-
- Java Service Wrapper: try http://yajsw.sourceforge.net/
-
- Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE.
-
- MySQL compatibility: support ALTER TABLE .. MODIFY COLUMN.
-
- Use a lazy and auto-close input stream (open resource when reading, close on eof).
-
- Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true).
-
- Improve SQL documentation, see http://www.w3schools.com/sql/
-
- MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL.
-
- MS SQL Server compatibility: support DATEPART syntax.
-
- Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83
-
- Support INTERVAL data type (see Oracle and others).
-
- Combine Server and Console tool (only keep Server).
-
- Store the Lucene index in the database itself.
-
- Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29
-
- Oracle compatibility: support DECODE(x, ...).
-
- MVCC: compare concurrent update behavior with PostgreSQL and Oracle.
-
- HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface).
-
- HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0)
-
- Support comma as the decimal separator in the CSV tool.
-
- Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz
-
- Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation.
-
- CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache.
-
- Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601
-
- PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG.
-
- Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html
-
- IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence.
-
- Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer).
-
- Oracle compatibility: support CREATE SYNONYM table FOR schema.table.
-
- FTP: document the server, including -ftpTask option to execute / kill remote processes
-
- FTP: problems with multithreading?
-
- FTP: implement SFTP / FTPS
-
- FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file).
-
- More secure default configuration if remote access is enabled.
-
- Improve database file locking (maybe use native file locking). The current approach seems to be problematic
- if the file system is on a remote share (see Google Group 'Lock file modification time is in the future').
-
- Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE.
-
- Issue 107: Prefer using the ORDER BY index if LIMIT is used.
-
- An index on (id, name) should be used for a query: select * from t where s=? order by i
-
- Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}).
- See PostgreSQL.
-
- Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true).
-
- Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2).
-
- Fast alter table add column.
-
- Improve concurrency for in-memory database operations.
-
- Issue 122: Support for connection aliases for remote tcp connections.
-
- Fast scrambling (strong encryption doesn't help if the password is included in the application).
-
- H2 Console: support -webPassword to require a password to access preferences or shutdown.
-
- Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number.
-
- Issue 127: Support activation/deactivation of triggers
-
- Issue 130: Custom log event listeners
-
- Issue 131: IBM DB2 compatibility: sysibm.sysdummy1
-
- Issue 132: Use Java enum trigger type.
-
- Issue 134: IBM DB2 compatibility: session global variables.
-
- Cluster: support load balance with values for each server / auto detect.
-
- FTL_SET_OPTION(keyString, valueString) with key stopWords at first.
-
- Pluggable access control mechanism.
-
- Fulltext search (Lucene): support streaming CLOB data.
-
- Document/example how to create and read an encrypted script file.
-
- Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins).
-
- Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible.
-
- Support a way to create or read compressed encrypted script files using an API.
-
- Scripting language support (Javascript).
-
- The network client should better detect if the server is not an H2 server and fail early.
-
- H2 Console: support CLOB/BLOB upload.
-
- Database file lock: detect hibernate / standby / very slow threads (compare system time).
-
- Automatic detection of redundant indexes.
-
- Maybe reject join without "on" (except natural join).
-
- Implement GiST (Generalized Search Tree for Secondary Storage).
-
- Function to read a number of bytes/characters from an BLOB or CLOB.
-
- Issue 156: Support SELECT ? UNION SELECT ?.
-
- Automatic mixed mode: support a port range list (to avoid firewall problems).
-
- Support the pseudo column rowid, oid, _rowid_.
-
- H2 Console / large result sets: stream early instead of keeping a whole result in-memory
-
- Support TRUNCATE for linked tables.
-
- UNION: evaluate INTERSECT before UNION (like most other database except Oracle).
-
- Delay creating the information schema, and share metadata columns.
-
- TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks.
-
- Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user).
-
- Support CREATE DATABASE LINK (a custom JDBC driver is already supported).
-
- Support large GROUP BY operations. Issue 216.
-
- Issue 163: Allow to create foreign keys on metadata types.
-
- Logback: write a native DBAppender.
-
- Cache size: don't use more cache than what is available.
-
- Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread.
-
- Tree index: Instead of an AVL tree, use a general balanced trees or a scapegoat tree.
-
- User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database.
-
- Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL.
-
- Optimizer: WHERE X=? AND Y IN(?), it always uses the index on Y. Should be cost based.
-
- Common Table Expression (CTE) / recursive queries: support parameters. Issue 314.
-
- Oracle compatibility: support INSERT ALL.
-
- Issue 178: Optimizer: index usage when both ascending and descending indexes are available.
-
- Issue 179: Related subqueries in HAVING clause.
-
- IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero.
-
- Creating primary key: always create a constraint.
-
- Maybe use a different page layout: keep the data at the head of the page, and ignore the tail
- (don't store / read it). This may increase write / read performance depending on the file system.
-
- Indexes of temporary tables are currently kept in-memory. Is this how it should be?
-
- The Shell tool should support the same built-in commands as the H2 Console.
-
- Maybe use PhantomReference instead of finalize.
-
- Database file name suffix: should only have one dot by default. Example: .h2db
-
- Issue 196: Function based indexes
-
- ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName.
-
- Fix the disk space leak (killing the process at the exact right moment will increase
- the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java
-
- ROWNUM: Oracle compatibility when used within a subquery. Issue 198.
-
- Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way.
-
- ODBC: encrypted databases are not supported because the ;CIPHER= can not be set.
-
- Support CLOB and BLOB update, specially conn.createBlob().setBinaryStream(1);
-
- Optimizer: index usage when both ascending and descending indexes are available. Issue 178.
-
- Issue 306: Support schema specific domains.
-
- Triggers: support user defined execution order. Oracle:
- CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT
- ON TEST FOR EACH ROW FOLLOWS TEST_1.
- SQL specifies that multiple triggers should be fired in time-of-creation order.
- PostgreSQL uses name order, which was judged to be more convenient.
- Derby: triggers are fired in the order in which they were created.
-
- PostgreSQL compatibility: combine "users" and "roles". See:
- http://www.postgresql.org/docs/8.1/interactive/user-manag.html
-
- Improve documentation of system properties: only list the property names, default values, and description.
-
- Support running totals / cumulative sum using SUM(..) OVER(..).
-
- Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize)
-
- Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others).
-
- Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219.
-
- Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217.
-
- Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218.
-
- Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220.
-
- Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222.
-
- Log long running transactions (similar to long running statements).
-
- Parameter data type is data type of other operand. Issue 205.
-
- Some combinations of nested join with right outer join are not supported.
-
- DatabaseEventListener.openConnection(id) and closeConnection(id).
-
- Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server,
- or to prevent to login with the same username and password from different IPs.
- Possibly using the DatabaseEventListener API, or a new API.
-
- Compatibility for data type CHAR (Derby, HSQLDB). Issue 212.
-
- Compatibility with MySQL TIMESTAMPDIFF. Issue 209.
-
- Optimizer: use a histogram of the data, specially for non-normal distributions.
-
- Trigger: allow declaring as source code (like functions).
-
- User defined aggregate: allow declaring as source code (like functions).
-
- The error "table not found" is sometimes caused by using the wrong database.
- Add "(this database is empty)" to the exception message if applicable.
-
- MySQL + PostgreSQL compatibility: support string literal escape with \n.
-
- PostgreSQL compatibility: support string literal escape with double \\.
-
- Document the TCP server "management_db". Maybe include the IP address of the client.
-
- Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main
-
- If a database object was not found in the current schema, but one with the same name existed in another schema, included that in the error message.
-
- Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?)
-
- Issue 302: Support optimizing queries with both inner and outer joins, as in:
- select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1
- (the optimizer should swap a and b here).
- See also TestNestedJoins, tag "swapInnerJoinTables".
-
- JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool).
-
- Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...;
-
- nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example).
-
- Column as parameter of function table. Issue 228.
-
- Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set,
- disable autocommit for all connections.
-
- Compatibility with MS Access: support "&" to concatenate text.
-
- The BACKUP statement should not synchronize on the database, and therefore should not block other users.
-
- Document the database file format.
-
- Support reading LOBs.
-
- Require appending DANGEROUS=TRUE when using certain dangerous settings such as
- LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,...
-
- Support UDT (user defined types) similar to how Apache Derby supports it:
- check constraint, allow to use it in Java functions as parameters (return values already seem to work).
-
- Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file,
- optional compatibility with current encrypted database files).
-
- Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes.
-
- GROUP BY queries should use a temporary table if there are too many rows.
-
- BLOB: support random access when reading.
-
- CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form).
-
- Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...).
-
- Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...).
-
- Compatibility with MySQL: support non-strict mode (sql_mode = "") any data
- that is too large for the column will just be truncated or set to the default value.
-
- The full condition should be sent to the linked table, not just the indexed condition.
- Example: TestLinkedTableFullCondition
-
- Compatibility with IBM DB2: CREATE PROCEDURE.
-
- Compatibility with IBM DB2: SQL cursors.
-
- Single-column primary key values are always stored explicitly. This is not required.
-
- Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8).
-
- CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true.
-
- Optimization for large lists for column IN(1, 2, 3, 4,...) - currently an list is used, could potentially use a hash set
- (maybe only for a part of the values - the ones that can be evaluated).
-
- Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]).
-
- PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']]
-
- PostgreSQL compatibility: UPDATE with FROM.
-
- Issue 297: Oracle compatibility for "at time zone".
-
- IBM DB2 compatibility: IDENTITY_VAL_LOCAL().
-
- Support SQL/XML.
-
- Support concurrent opening of databases.
-
- Improved error message and diagnostics in case of network configuration problems.
-
- TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases).
-
- Adding a primary key should make the columns 'not null' unless if there is a row with null
- (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby).
-
- ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported).
-
- MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html
-
- The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/
-
- Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)".
-
- MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id.
-
- Issue 283: Improve performance of H2 on Android.
-
- Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s).
-
- Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d
-
- PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID).
-
- MS SQL Server compatibility: support @@ROWCOUNT.
-
- PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x).
-
- Issue 311: Serialized lock mode: executeQuery of write operations fails.
-
- PostgreSQL compatibility: support PgAdmin III (specially the function current_setting).
-
- MySQL compatibility: support TIMESTAMPADD.
-
- Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-
- Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-
- Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase).
-
- TRANSACTION_ID() for in-memory databases.
-
- TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL).
-
- Support [INNER | OUTER] JOIN USING(column [,...]).
-
- Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle)
-
- GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby).
-
- Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped.
-
- Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1.
-
- PHP support: H2 should support PDO, or test with PostgreSQL PDO.
-
- Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query.
-
- Cluster: allow using auto-increment and identity columns by ensuring executed in lock-step.
-
- MySQL compatibility: index names only need to be unique for the given table.
-
- Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported,
- and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases.
-
- Oracle compatibility: support MEDIAN aggregate function.
-
- Issue 348: Oracle compatibility: division should return a decimal result.
-
- Read rows on demand: instead of reading the whole row, only read up to that column that is requested.
- Keep an pointer to the data area and the column id that is already read.
-
- Long running transactions: log session id when detected.
-
- Optimization: "select id from test" should use the index on id even without "order by".
-
- Issue 362: LIMIT support for UPDATE statements (MySQL compatibility).
-
- Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ...
-
- Use Java 6 SQLException subclasses.
-
- Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR
-
- Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,..
-
-
-Not Planned
-
-- HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
-
- String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively.
-
- In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements.
-
-
-
-
diff --git a/h2/src/docsrc/html/search.js b/h2/src/docsrc/html/search.js
index 33644612c9..6d32a658d3 100644
--- a/h2/src/docsrc/html/search.js
+++ b/h2/src/docsrc/html/search.js
@@ -1,7 +1,7 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
- * * Initial Developer: H2 Group
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
*/
var pages = new Array();
diff --git a/h2/src/docsrc/html/security.html b/h2/src/docsrc/html/security.html
new file mode 100644
index 0000000000..fe8d29f841
--- /dev/null
+++ b/h2/src/docsrc/html/security.html
@@ -0,0 +1,73 @@
+
+
+
+
+
+
+Features
+
+
+
+
+
+
+
+
+Securing your H2
+
+
+ Introduction
+
+ Network exposed
+
+ Alias / Stored Procedures
+
+ Grants / Roles / Permissions
+
+ Encrypted storage
+
+Introduction
+
+H2 is not designed to be run in an adversarial environment. You should absolutely not expose your H2 server to untrusted connections.
+
+
+Running H2 in embedded mode is the best choice - it is not externally exposed.
+
+
+Network exposed
+
+When running an H2 server in TCP mode, first prize is to have it listen only for connections on localhost (i.e. 127.0.0.1).
+
+
+Second prize is listening on restricted ports on a secured network.
+
+
+If you expose H2 to the broader Internet, you can secure the connection with SSL, but this is a rather tricky thing to get right, between JVM bugs, certificates and choosing a decent cipher.
+
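+As a sketch of the first option (the port number here is arbitrary), the TCP server can be started
+without the -tcpAllowOthers flag, in which case only connections from the local machine are accepted:
+
+java -cp h2-*.jar org.h2.tools.Server -tcp -tcpPort 9092
+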
+
+Alias / Stored procedures
+
+Anything created with CREATE ALIAS can do anything the JVM can do,
+which includes reading/writing from the filesystem on the machine the JVM is running on.
+
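+As a sketch of why this matters, an alias is just a Java method exposed to SQL;
+for example, calling into the standard library:
+
+CREATE ALIAS GET_SYSTEM_PROPERTY FOR "java.lang.System.getProperty";
+CALL GET_SYSTEM_PROPERTY('user.dir');
+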
+
+Grants / Roles / Permissions
+
+GRANT / REVOKE
TODO
+
+
+Encrypted storage
+
+Encrypting your on-disk database will provide a small measure of security to your stored data.
+You should not assume, however, that this provides any real security against a determined opponent,
+since there are many repeated data structures that would allow someone with resources and time to extract the secret key.
+
+
+Also the secret key is visible to anything that can read the memory of the process.
+
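+As a reminder of how this is configured (the path is hypothetical): an encrypted database is opened with a
+CIPHER setting in the URL and a two-part password, the file password followed by the user password:
+
+jdbc:h2:~/secure;CIPHER=AES (password: "filepwd userpwd")
+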
+
+
+
diff --git a/h2/src/docsrc/html/source.html b/h2/src/docsrc/html/source.html
index d6b31efe5d..5b8f130680 100644
--- a/h2/src/docsrc/html/source.html
+++ b/h2/src/docsrc/html/source.html
@@ -1,33 +1,36 @@
-
-Source Code Viewer
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
diff --git a/h2/src/docsrc/html/sourceError.html b/h2/src/docsrc/html/sourceError.html
index d181391f1b..84538c4bce 100644
--- a/h2/src/docsrc/html/sourceError.html
+++ b/h2/src/docsrc/html/sourceError.html
@@ -1,7 +1,7 @@
@@ -40,6 +40,9 @@
function getVersion(build) {
if (build == 64) {
return '1.0/version-1.0.' + build;
+ } else if (build > 200) {
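+ // builds above 200 are 2.x releases, e.g. 206 -> '2.0.206', 210 -> '2.1.210'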
+ var b = build + 1;
+ return Math.floor(b / 100) + '.' + Math.floor(b % 100 / 10) + '.' + build;
} else if (build >= 177) {
return '1.4.' + build;
} else if (build >= 146 && build != 147) {
@@ -67,7 +70,7 @@
code = code.replace('HY', '50');
code = code.replace('C', '1');
code = code.replace('T', '2');
- get('more').src = 'http://h2database.com/javadoc/org/h2/constant/ErrorCode.html#c' + code;
+ get('more').src = 'https://h2database.com/javadoc/org/h2/constant/ErrorCode.html#c' + code;
}
function go(file, line) {
@@ -82,13 +85,12 @@
get('file').innerHTML = file;
get('code').src = url;
} else {
+ url = 'https://github.com/h2database/h2database/tree/';
if (build && build > 0) {
- var tag = 'tags/version-' + getVersion(build) + '/h2';
+ url += 'version-' + getVersion(parseInt(build)) + '/h2';
} else {
- var tag = 'trunk/h2';
+ url += 'master/h2';
}
- url = 'http://code.google.com/p/h2database/source/browse/';
- url += tag;
url += '/src/main/';
url += file;
url += '#';
@@ -115,7 +117,7 @@
hasData = true;
idx = errorCode.indexOf("-");
build = parseInt(errorCode.substring(idx + 1));
- get('version').innerHTML = getVersion(build);
+ get('version').innerHTML = getVersion(parseInt(build));
errorCode = errorCode.substring(0, idx);
while (errorCode.length > 1 && errorCode.charAt(0) == '0') {
errorCode = errorCode.substring(1);
diff --git a/h2/src/docsrc/html/stylesheet.css b/h2/src/docsrc/html/stylesheet.css
index faec065a6b..a30f4d5adc 100644
--- a/h2/src/docsrc/html/stylesheet.css
+++ b/h2/src/docsrc/html/stylesheet.css
@@ -1,7 +1,7 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
- * * Initial Developer: H2 Group
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
*/
td, input, select, textarea, body, code, pre, td, th {
@@ -283,6 +283,23 @@ td.index {
vertical-align: top;
}
+div.ruleCompat code {
+ border-color: coral;
+ background-color: mistyrose;
+}
+
+div.ruleH2 code {
+ border-color: lightseagreen;
+}
+
+span.ruleCompat {
+ color: darkred;
+}
+
+span.ruleH2 {
+ color: green;
+}
+
.c {
padding: 1px 3px;
margin: 0px 0px;
diff --git a/h2/src/docsrc/html/stylesheetPdf.css b/h2/src/docsrc/html/stylesheetPdf.css
index 0977e2c5e3..dacc282997 100644
--- a/h2/src/docsrc/html/stylesheetPdf.css
+++ b/h2/src/docsrc/html/stylesheetPdf.css
@@ -1,21 +1,21 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
- * * Initial Developer: H2 Group
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
*/
td, input, select, textarea, body, code, pre, td, th {
- font: 9pt Tahoma, Arial, Helvetica, sans-serif;
+ font: 14pt Tahoma, Arial, Helvetica, sans-serif;
font-weight: normal;
}
h1, h2, h3, h4, h5 {
- font: 9pt Arial, Helvetica, sans-serif;
+ font: 14pt Arial, Helvetica, sans-serif;
font-weight: bold;
}
td, input, select, textarea, body, code, pre {
- font-size: 9pt;
+ font-size: 14pt;
}
pre {
@@ -32,26 +32,27 @@ body {
margin: 0px;
}
-h1 {
+h1, p.title {
background-color: #0000bb;
padding: 2px 4px 2px 4px;
color: #fff;
- font-size: 15pt;
+ font-size: 24pt;
+ font-weight: bold;
line-height: normal;
}
h2 {
- font-size: 13pt;
+ font-size: 18pt;
margin-top: 1.5em;
}
h3 {
- font-size: 11pt;
+ font-size: 16pt;
margin-top: 1.5em;
}
h4 {
- font-size: 9pt;
+ font-size: 14pt;
margin-top: 1.5em;
}
@@ -69,17 +70,16 @@ table {
}
th {
- font-size: 9pt;
- font-weight: normal;
+ font-size: 14pt;
+ font-weight: bold;
text-align: left;
- background-color: #ece9d8;
border: 1px solid #aca899;
padding: 2px;
}
td {
background-color: #ffffff;
- font-size: 9pt;
+ font-size: 14pt;
text-align: left;
vertical-align: top;
border: 1px solid #aca899;
@@ -152,3 +152,11 @@ td.index {
border-collapse: collapse;
vertical-align: top;
}
+
+span.ruleCompat {
+ color: darkred;
+}
+
+span.ruleH2 {
+ color: green;
+}
diff --git a/h2/src/docsrc/html/systemtables.html b/h2/src/docsrc/html/systemtables.html
new file mode 100644
index 0000000000..fa19549629
--- /dev/null
+++ b/h2/src/docsrc/html/systemtables.html
@@ -0,0 +1,104 @@
+
+
+
+
+
+
+System Tables
+
+
+
+
+
+
+
+
+System Tables
+
+Index
+
+
+
+
+
+
+
+
+
+ ${item.table}
+
+
+
+ ${item.table}
+
+
+
+ ${item.table}
+
+
+
+
+
+
+
+Range Table
+
+
+Information Schema
+
+The system tables and views in the schema INFORMATION_SCHEMA contain the meta data
+of all tables, views, domains, and other objects in the database, as well as the current settings.
+This documentation describes the default new version of INFORMATION_SCHEMA for H2 2.0.
+Old TCP clients (1.4.200 and below) see the legacy version of INFORMATION_SCHEMA,
+because they can't work with the new one. The legacy version is not documented.
+
+
+
+${item.table}
+${item.description}
+
+
+${item.columns}
+
+
+
+
+Range Table
+
+The range table is a dynamic system table that contains all values from a start to an end value.
+A non-zero step value may also be specified; the default is 1.
+The start value, the end value, and the optional step value are converted to the BIGINT data type.
+The table contains one column called X.
+If the start value is greater than the end value and the step is positive, the result is empty.
+If the start value is less than the end value and the step is negative, the result is also empty.
+If the start value is equal to the end value, the result contains only the start value.
+The start value, the start value plus step, the start value plus step multiplied by two, and so on are included in the result.
+If the step is positive, the last value is less than or equal to the specified end value.
+If the step is negative, the last value is greater than or equal to the specified end value.
+The table is used as follows:
+
+Examples:
+
+SELECT X FROM SYSTEM_RANGE(1, 10);
+-- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
+SELECT X FROM SYSTEM_RANGE(1, 10, 2);
+-- 1, 3, 5, 7, 9
+SELECT X FROM SYSTEM_RANGE(1, 10, -1);
+-- No rows
+SELECT X FROM SYSTEM_RANGE(10, 2, -2);
+-- 10, 8, 6, 4, 2
+
+
+
diff --git a/h2/src/docsrc/html/tutorial.html b/h2/src/docsrc/html/tutorial.html
index 6f71a25b80..3dadf0f822 100644
--- a/h2/src/docsrc/html/tutorial.html
+++ b/h2/src/docsrc/html/tutorial.html
@@ -1,7 +1,7 @@
@@ -18,7 +18,7 @@
+
+If the console startup procedure is unable to locate the default system web browser,
+an error message may be displayed. It is possible to explicitly tell H2 which
+program/script to use when opening a system web browser by setting either the BROWSER
+environment variable or the h2.browser Java property.
+
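+For example (assuming a Unix-like shell with Firefox installed), set BROWSER=firefox in the
+environment before starting the Console, or pass -Dh2.browser=firefox to the JVM.
+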
Firewall
@@ -295,7 +299,7 @@
Special H2 Console Syntax
@attributes, @best_row_identifier, @catalogs, @columns,
@column_privileges, @cross_references, @exported_keys,
@imported_keys, @index_info, @primary_keys, @procedures,
- @procedure_columns, @schemas, @super_tables, @super_types,
+ @procedure_columns, @pseudo_columns, @schemas, @super_tables, @super_types,
@tables, @table_privileges, @table_types, @type_info, @udts,
@version_columns
@@ -311,10 +315,13 @@ Special H2 Console Syntax
- @generated insert into test() values();
+ @generated insert into test() values();
+ @generated(1) insert into test() values();
+ @generated(ID, "TIMESTAMP") insert into test() values();
Show the result of Statement.getGeneratedKeys().
+ Names or one-based indexes of required columns can be optionally specified.
@@ -436,6 +443,7 @@ Settings of the H2 Console
webAllowOthers: allow other computers to connect.
webPort: the port of the H2 Console
webSSL: use encrypted TLS (HTTPS) connections.
+webAdminPassword: password to access preferences and tools of H2 Console.
In addition to those settings, the properties of the last recently used connection
@@ -456,7 +464,6 @@
Connecting to a Database using JDBC
public class Test {
public static void main(String[] a)
throws Exception {
- Class.forName("org.h2.Driver");
Connection conn = DriverManager.
getConnection("jdbc:h2:~/test", "sa", "");
// add application code here
@@ -465,8 +472,7 @@ Connecting to a Database using JDBC
}
-This code first loads the driver (Class.forName(...))
-and then opens a connection (using DriverManager.getConnection()).
+This code opens a connection (using DriverManager.getConnection()).
The driver name is "org.h2.Driver".
The database URL always needs to start with jdbc:h2: to be recognized by this database.
The second parameter in the getConnection() call
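A minimal, self-contained sketch of this pattern (the query is illustrative and assumed, not part of the tutorial):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class Test {
    public static void main(String[] args) throws Exception {
        // The driver is loaded automatically via the JDBC 4 service loader
        try (Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
             Statement stat = conn.createStatement();
             ResultSet rs = stat.executeQuery("SELECT X FROM SYSTEM_RANGE(1, 3)")) {
            while (rs.next()) {
                System.out.println(rs.getLong(1));
            }
        }
    }
}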
@@ -476,14 +482,61 @@
Connecting to a Database using JDBC
Creating New Databases
-By default, if the database specified in the URL does not yet exist, a new (empty)
-database is created automatically. The user that created the database automatically becomes
-the administrator of this database.
+By default, if the database specified in the embedded URL does not yet exist,
+a new (empty) database is created automatically.
+The user that created the database automatically becomes the administrator of this database.
-Auto-creating new database can be disabled, see
+Auto-creation of databases can be disabled, see
Opening a Database Only if it Already Exists.
+
+The H2 Console does not allow creation of databases unless a browser window was opened by the Console during its
+startup or from its icon in the system tray, and remote access is not enabled.
+A context menu of the tray icon can also be used to create a new database.
+
+
+You can also create a new local database from the command line with the Shell tool:
+
+
+> java -cp h2-*.jar org.h2.tools.Shell
+
+Welcome to H2 Shell
+Exit with Ctrl+C
+[Enter] jdbc:h2:mem:2
+URL jdbc:h2:./path/to/database
+[Enter] org.h2.Driver
+Driver
+[Enter] sa
+User your_username
+Password (hidden)
+Type the same password again to confirm database creation.
+Password (hidden)
+Connected
+
+sql> quit
+Connection closed
+
+
+By default, remote creation of databases from a TCP connection or a web interface is not allowed.
+Enabling remote creation of databases is not recommended for security reasons.
+A user who creates a new database becomes its administrator and therefore gets the same access to your JVM as H2 has
+and the same access to your operating system as Java and your system account allow.
+It's recommended to create all databases locally using an embedded URL, the local H2 Console, or the Shell tool.
+
+
+If you really need to allow remote database creation, you can pass the -ifNotExists parameter to
+the TCP, PG, or Web servers (but not to the Console tool).
+Its combination with -tcpAllowOthers, -pgAllowOthers, or -webAllowOthers
+effectively creates a remote security hole in your system; if you use it, always guard your ports with a firewall
+or some other solution, and use such a combination of settings only in trusted networks.
+
+
+The H2 Servlet also supports this option.
+When you use it, always protect the servlet with security constraints;
+see Using the H2 Console Servlet for an example,
+and don't forget to uncomment and adjust the security configuration for your needs.
+
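+For example, a TCP server that allows remote clients to create databases
+(only do this in a trusted network, as explained above) could be started like this:
+
+java -cp h2*.jar org.h2.tools.Server -tcpAllowOthers -ifNotExists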
Using the Server
@@ -545,13 +598,13 @@
Stopping a TCP Server from Another Process
To stop the server from the command line, run:
-java org.h2.tools.Server -tcpShutdown tcp://localhost:9092
+java org.h2.tools.Server -tcpShutdown tcp://localhost:9092 -tcpPassword password
To stop the server from a user application, use the following code:
-org.h2.tools.Server.shutdownTcpServer("tcp://localhost:9094");
+org.h2.tools.Server.shutdownTcpServer("tcp://localhost:9092", "password", false, false);
This function will only stop the TCP server.
@@ -559,18 +612,14 @@
Stopping a TCP Server from Another Process
To avoid recovery when the databases are opened the next time,
all connections to the databases should be closed before calling this method.
To stop a remote server, remote connections must be enabled on the server.
-Shutting down a TCP server can be protected using the option -tcpPassword
+Shutting down a TCP server is protected using the option -tcpPassword
(the same password must be used to start and stop the TCP server).
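A minimal sketch in Java of starting and then stopping a password-protected TCP server
(the port and password are illustrative):

import org.h2.tools.Server;

public class StartStop {
    public static void main(String[] args) throws Exception {
        // Start a TCP server protected by a shutdown password
        Server server = Server.createTcpServer(
                "-tcpPort", "9092", "-tcpPassword", "password").start();
        // ... use the server ...
        // Stop it again; the same password is required
        Server.shutdownTcpServer("tcp://localhost:9092", "password", false, false);
    }
}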
Using Hibernate
This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect,
-or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy.
-A patch
-for Hibernate has been submitted and is now applied.
-You can rename it to H2Dialect.java
and include this as a patch in your application,
-or upgrade to a version of Hibernate where this is fixed.
+or the native H2 Dialect.
When using Hibernate, try to use the H2Dialect if possible.
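For example, in hibernate.properties (a sketch; the dialect class name is Hibernate's own):

hibernate.dialect=org.hibernate.dialect.H2Dialect
hibernate.connection.driver_class=org.h2.Driver
hibernate.connection.url=jdbc:h2:~/test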
@@ -614,7 +663,7 @@
Using EclipseLink
To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform.
If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many cases.
-See also H2Platform.
+See also H2Platform.
Using Apache ActiveMQ
@@ -632,13 +681,9 @@ Using Apache ActiveMQ
Using H2 within NetBeans
-The project H2 Database Engine Support For NetBeans
-allows you to start and stop the H2 server from within the IDE.
-
-
There is a known issue when using the NetBeans SQL Execution Window:
before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run.
-This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL.
+This is a problem for queries that modify state, such as SELECT NEXT VALUE FOR SEQ.
In this case, two sequence values are allocated instead of just one.
@@ -656,7 +701,7 @@ Using H2 with jOOQ
then run the jOOQ code generator on the command line using this command:
-java -cp jooq.jar;jooq-meta.jar;jooq-codegen.jar;h2-1.3.158.jar;.
+java -cp jooq.jar;jooq-meta.jar;jooq-codegen.jar;h2-1.4.199.jar;.
org.jooq.util.GenerationTool /codegen.xml
@@ -664,7 +709,7 @@
Using H2 with jOOQ
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<configuration xmlns="http://www.jooq.org/xsd/jooq-codegen-2.3.0.xsd">
+<configuration xmlns="http://www.jooq.org/xsd/jooq-codegen-3.11.0.xsd">
<jdbc>
<driver>org.h2.Driver</driver>
<url>jdbc:h2:~/test</url>
@@ -672,14 +717,11 @@ Using H2 with jOOQ
<password></password>
</jdbc>
<generator>
- <name>org.jooq.util.DefaultGenerator</name>
<database>
- <name>org.jooq.util.h2.H2Database</name>
<includes>.*</includes>
<excludes></excludes>
<inputSchema>PUBLIC</inputSchema>
</database>
- <generate></generate>
<target>
<packageName>org.jooq.h2.generated</packageName>
<directory>./src</directory>
@@ -691,16 +733,16 @@ Using H2 with jOOQ
Using the generated source, you can query the database as follows:
-Factory create = new H2Factory(connection);
+DSLContext dsl = DSL.using(connection);
Result<UserRecord> result =
-create.selectFrom(USER)
+dsl.selectFrom(USER)
.where(NAME.like("Johnny%"))
.orderBy(ID)
.fetch();
-See more details on jOOQ Homepage
-and in the jOOQ Tutorial
+See more details on jOOQ Homepage
+and in the jOOQ Tutorial
Using Databases in Web Applications
@@ -747,6 +789,15 @@ Using a Servlet Listener to Start and Stop a Database
</listener>
+If your servlet container is already Servlet 5-compatible, use the following
+snippet instead:
+
+
+<listener>
+ <listener-class>org.h2.server.web.JakartaDbStarter</listener-class>
+</listener>
+
+
For details on how to access the database, see the file DbStarter.java.
By default this tool opens an embedded connection
using the database URL jdbc:h2:~/test,
@@ -786,10 +837,10 @@
Using a Servlet Listener to Start and Stop a Database
If the TCP server is started within the DbStarter, it will also be stopped automatically.
-Using the H2 Console Servlet
+Using the H2 Console Servlet
The H2 Console is a standalone application and includes its own web server, but it can be
-used as a servlet as well. To do that, include the the h2*.jar file in your application, and
+used as a servlet as well. To do that, include the h2*.jar file in your application, and
add the following configuration to your web.xml:
@@ -812,68 +863,34 @@ Using the H2 Console Servlet
<servlet-name>H2Console</servlet-name>
<url-pattern>/console/*</url-pattern>
</servlet-mapping>
+<!--
+<security-role>
+ <role-name>admin</role-name>
+</security-role>
+<security-constraint>
+ <web-resource-collection>
+ <web-resource-name>H2 Console</web-resource-name>
+ <url-pattern>/console/*</url-pattern>
+ </web-resource-collection>
+ <auth-constraint>
+ <role-name>admin</role-name>
+ </auth-constraint>
+</security-constraint>
+-->
For details, see also src/tools/WEB-INF/web.xml.
-To create a web application with just the H2 Console, run the following command:
-
-
-build warConsole
-
-
-Android
-
-You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite.
-So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite,
-except for opening and closing a database, which is not yet optimized in H2
-(H2 takes about 0.2 seconds, and SQLite about 0.02 seconds).
-Read operations seem to be a bit faster than SQLite, and write operations seem to be slower.
-So far, only very few tests have been run, and everything seems to work as expected.
-Fulltext search was not yet tested, however the native fulltext search should work.
-
-
-Reasons to use H2 instead of SQLite are:
-
-- Full Unicode support including UPPER() and LOWER().
-
- Streaming API for BLOB and CLOB data.
-
- Fulltext search.
-
- Multiple connections.
-
- User defined functions and triggers.
-
- Database file encryption.
-
- Reading and writing CSV files (this feature can be used outside the database as well).
-
- Referential integrity and check constraints.
-
- Better data type and SQL support.
-
- In-memory databases, read-only databases, linked tables.
-
- Better compatibility with other databases which simplifies porting applications.
-
- Possibly better performance (so far for read operations).
-
- Server mode (accessing a database on a different machine over TCP/IP).
-
-
-Currently only the JDBC API is supported (it is planned to support the Android database API in future releases).
-Both the regular H2 jar file and the smaller h2small-*.jar
can be used.
-To create the smaller jar file, run the command ./build.sh jarSmall
(Linux / Mac OS)
-or build.bat jarSmall
(Windows).
+If your application is already Servlet 5-compatible, use the servlet class
+org.h2.server.web.JakartaWebServlet instead.
-The database files needs to be stored in a place that is accessible for the application.
-Example:
+To create a web application with just the H2 Console, run the following command:
-String url = "jdbc:h2:/data/data/" +
- "com.example.hello" +
- "/data/hello" +
- ";FILE_LOCK=FS" +
- ";PAGE_SIZE=1024" +
- ";CACHE_SIZE=8192";
-Class.forName("org.h2.Driver");
-conn = DriverManager.getConnection(url);
-...
+build warConsole
-
-Limitations: Using a connection pool is currently not supported, because the required javax.sql.
classes are not available on Android.
-
CSV (Comma Separated Values) Support
@@ -1008,6 +1025,15 @@
Restore from a Script
need to be available on the server side.
+
+If the script was generated by H2 1.4.200 or an older version, add the VARIABLE_BINARY
+option to import it into a more recent version.
+
+
+
+java org.h2.tools.RunScript -url jdbc:h2:~/test -user sa -script test.zip -options compression zip variable_binary
+
+
Online Backup
The BACKUP SQL statement and the Backup tool both create a zip file
@@ -1135,7 +1161,7 @@
Using OpenOffice Base
This can be done by creating it using the NetBeans OpenOffice plugin.
-See also Extensions Development.
+See also Extensions Development.
Java Web Start / JNLP
@@ -1157,9 +1183,9 @@ Using a Connection Pool
For H2, opening a connection is fast if the database is already open.
Still, using a connection pool improves performance if you open and close connections a lot.
A simple connection pool is included in H2. It is based on the
-Mini Connection Pool Manager
+Mini Connection Pool Manager
from Christian d'Heureuse. There are other, more complex, open source connection pools available,
-for example the Apache Commons DBCP.
+for example the Apache Commons DBCP.
For H2, it is about twice as fast to get a connection from the built-in connection pool as to get
one using DriverManager.getConnection(). The built-in connection pool is used as follows:
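A minimal sketch of the built-in pool (org.h2.jdbcx.JdbcConnectionPool; the URL and
credentials are illustrative):

import java.sql.Connection;
import org.h2.jdbcx.JdbcConnectionPool;

public class PoolExample {
    public static void main(String[] args) throws Exception {
        // Create the pool once and reuse it for the lifetime of the application
        JdbcConnectionPool pool = JdbcConnectionPool.create("jdbc:h2:~/test", "sa", "");
        try (Connection conn = pool.getConnection()) {
            // use the connection; closing it returns it to the pool
        }
        pool.dispose();
    }
}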
@@ -1242,11 +1268,10 @@ Using the Native Fulltext Search
org.h2.fulltext.FullText.searchData(conn, text, limit, offset);
-Using the Lucene Fulltext Search
+Using the Apache Lucene Fulltext Search
-To use the Lucene full text search, you need the Lucene library in the classpath.
-Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x,
-and Lucene version 3.x is used by default for H2 version 1.3.x.
+To use the Apache Lucene full text search, you need the Lucene library in the classpath.
+Apache Lucene 8.5.2 or a binary-compatible version is required.
How to do that depends on the application; if you use the H2 Console, you can add the Lucene
jar file to the environment variables H2DRIVERS or CLASSPATH.
@@ -1320,13 +1345,6 @@
Using the Lucene Fulltext Search
SELECT * FROM FTL_SEARCH_DATA('LAST_NAME:John', 0, 0);
CALL FTL_DROP_ALL();
-
-The Lucene fulltext search implementation is not synchronized internally.
-If you update the database and query the fulltext search concurrently
-(directly using the Java API of H2 or Lucene itself), you need to ensure
-operations are properly synchronized. If this is not the case, you may get
-exceptions such as org.apache.lucene.store.AlreadyClosedException: this IndexReader is closed
.
-
User-Defined Variables
@@ -1342,7 +1360,7 @@
User-Defined Variables
SET @TOTAL = NULL;
-SELECT X, SET(@TOTAL, IFNULL(@TOTAL, 1.) * X) F FROM SYSTEM_RANGE(1, 50);
+SELECT X, SET(@TOTAL, COALESCE(@TOTAL, 1.) * X) F FROM SYSTEM_RANGE(1, 50);
Variables that are not set evaluate to NULL.
@@ -1354,18 +1372,34 @@
User-Defined Variables
Date and Time
-Date, time and timestamp values support ISO 8601 formatting, including time zone:
+Date, time and timestamp values support standard literals:
-CALL TIMESTAMP '2008-01-01 12:00:00+01:00';
+VALUES (
+ DATE '2008-01-01',
+ TIME '12:00:00',
+ TIME WITH TIME ZONE '12:00:00+01:00',
+ TIMESTAMP '2008-01-01 12:00:00',
+ TIMESTAMP WITH TIME ZONE '2008-01-01 12:00:00+01:00'
+);
-If the time zone is not set, the value is parsed using the current time zone setting of the system.
-Date and time information is stored in H2 database files without time zone information.
-If the database is opened using another system time zone, the date and time will be the same.
-That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database
-and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'.
-Please note that changing the time zone after the H2 driver is loaded is not supported.
+ISO 8601-style datetime formats with T instead of a space between the date and time parts are also supported.
+
+
+TIME and TIMESTAMP values are preserved without time zone information as local time.
+That means if you store the value '2000-01-01 12:00:00' in one time zone and then change the time zone of the session,
+you will still get '2000-01-01 12:00:00'; the value is not adjusted to the new time zone,
+so its absolute value in UTC may be different.
+
+
+TIME WITH TIME ZONE and TIMESTAMP WITH TIME ZONE values preserve the specified time zone offset:
+if you store the value '2008-01-01 12:00:00+01:00', it remains the same
+even if you change the time zone of the session,
+and because it has a time zone offset, its absolute value in UTC stays the same.
+TIMESTAMP WITH TIME ZONE values may also be specified with a time zone name like '2008-01-01 12:00:00 Europe/Berlin'.
+In that case the name is converted into a time zone offset.
+Names of time zones are not stored.
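+For example (a sketch; the resulting offset assumes Berlin standard winter time):
+
+VALUES TIMESTAMP WITH TIME ZONE '2008-01-01 12:00:00 Europe/Berlin';
+-- 2008-01-01 12:00:00+01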
Using Spring
@@ -1386,53 +1420,13 @@ Using the TCP Server
The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server.
-Error Code Incompatibility
-
-There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer,
-because of a change in the error code. This will cause the JdbcTemplate to not detect
-a duplicate key condition, and so a DataIntegrityViolationException
is thrown instead of
-DuplicateKeyException
.
-See also the issue SPR-8235.
-The workaround is to add the following XML file to the root of the classpath:
-
-
-<beans
- xmlns="http://www.springframework.org/schema/beans"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation=
- "http://www.springframework.org/schema/beans
- http://www.springframework.org/schema/beans/spring-beans.xsd"
- >
- <import resource="classpath:org/springframework/jdbc/support/sql-error-codes.xml"/>
- <bean id = "H2" class="org.springframework.jdbc.support.SQLErrorCodes">
- <property name="badSqlGrammarCodes">
- <value>
- 42000,42001,42101,42102,42111,42112,42121,42122,42132
- </value>
- </property>
- <property name="duplicateKeyCodes">
- <value>23001,23505</value>
- </property>
- <property name="dataIntegrityViolationCodes">
- <value>22003,22012,22025,23000</value>
- </property>
- <property name="dataAccessResourceFailureCodes">
- <value>90046,90100,90117,90121,90126</value>
- </property>
- <property name="cannotAcquireLockCodes">
- <value>50200</value>
- </property>
- </bean>
-</beans>
-
-
OSGi
The standard H2 jar can be dropped in as a bundle in an OSGi container.
H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification.
The H2 Data Source Factory service is registered with the following properties:
OSGI_JDBC_DRIVER_CLASS=org.h2.Driver
-and OSGI_JDBC_DRIVER_NAME=H2.
+and OSGI_JDBC_DRIVER_NAME=H2 JDBC Driver.
The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is.
diff --git a/h2/src/docsrc/images/favicon.ico b/h2/src/docsrc/images/favicon.ico
index 6e0f78aeb1..fd5e73a416 100644
Binary files a/h2/src/docsrc/images/favicon.ico and b/h2/src/docsrc/images/favicon.ico differ
diff --git a/h2/src/docsrc/images/h2-16.png b/h2/src/docsrc/images/h2-16.png
index 2dee09e17f..747340ac7d 100644
Binary files a/h2/src/docsrc/images/h2-16.png and b/h2/src/docsrc/images/h2-16.png differ
diff --git a/h2/src/docsrc/images/h2-24.png b/h2/src/docsrc/images/h2-24.png
index 1d83623bd2..9f682d6861 100644
Binary files a/h2/src/docsrc/images/h2-24.png and b/h2/src/docsrc/images/h2-24.png differ
diff --git a/h2/src/docsrc/images/h2-32.png b/h2/src/docsrc/images/h2-32.png
index 7e6c3e8c9c..c7af904cf1 100644
Binary files a/h2/src/docsrc/images/h2-32.png and b/h2/src/docsrc/images/h2-32.png differ
diff --git a/h2/src/docsrc/images/h2-64.png b/h2/src/docsrc/images/h2-64.png
index 754cc59543..51a47e34cf 100644
Binary files a/h2/src/docsrc/images/h2-64.png and b/h2/src/docsrc/images/h2-64.png differ
diff --git a/h2/src/docsrc/images/h2-logo-2.png b/h2/src/docsrc/images/h2-logo-2.png
index d8025aa52d..218fe975bd 100644
Binary files a/h2/src/docsrc/images/h2-logo-2.png and b/h2/src/docsrc/images/h2-logo-2.png differ
diff --git a/h2/src/docsrc/images/h2-logo.png b/h2/src/docsrc/images/h2-logo.png
index 52ebd8e7f8..fb65afe0b5 100644
Binary files a/h2/src/docsrc/images/h2-logo.png and b/h2/src/docsrc/images/h2-logo.png differ
diff --git a/h2/src/docsrc/images/h2-logo.svg b/h2/src/docsrc/images/h2-logo.svg
index a73119867d..1beb7606f1 100644
--- a/h2/src/docsrc/images/h2-logo.svg
+++ b/h2/src/docsrc/images/h2-logo.svg
@@ -1,69 +1,23 @@
+
diff --git a/h2/src/docsrc/images/h2_v2_3_7.svg b/h2/src/docsrc/images/h2_v2_3_7.svg
new file mode 100644
index 0000000000..c2dc03d239
--- /dev/null
+++ b/h2/src/docsrc/images/h2_v2_3_7.svg
@@ -0,0 +1,61 @@
+
+
+
+
diff --git a/h2/src/docsrc/index.html b/h2/src/docsrc/index.html
index 41b0314be3..2e09c2fef2 100644
--- a/h2/src/docsrc/index.html
+++ b/h2/src/docsrc/index.html
@@ -1,7 +1,7 @@
diff --git a/h2/src/docsrc/javadoc/animate.js b/h2/src/docsrc/javadoc/animate.js
deleted file mode 100644
index b6ee70e9a4..0000000000
--- a/h2/src/docsrc/javadoc/animate.js
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
- * * Initial Developer: H2 Group
- */
-
-function on(id) {
- return switchTag(id, 'titleOff', 'detailOn');
-}
-
-function off(id) {
- return switchTag(id, '', 'detail');
-}
-
-function allDetails() {
- for (i = 0;; i++) {
- x = document.getElementById('_' + i);
- if (x == null) {
- break;
- }
- switchTag(i, 'titleOff', 'detailOn');
- }
- return false;
-}
-
-function switchTag(id, title, detail) {
- if (document.getElementById('__' + id) != null) {
- document.getElementById('__' + id).className = title;
- document.getElementById('_' + id).className = detail;
- }
- return false;
-}
-
-function openLink() {
- page = new String(self.document.location);
- var pos = page.lastIndexOf("#") + 1;
- if (pos == 0) {
- return;
- }
- var ref = page.substr(pos);
- link = decodeURIComponent(ref);
- el = document.getElementById(link);
- if (el.nodeName.toLowerCase() == 'h4') {
- // constant
- return true;
- }
- el = el.parentNode.parentNode;
- window.scrollTo(0, el.offsetTop);
- on(el.id.substr(2));
- return false;
-}
\ No newline at end of file
diff --git a/h2/src/docsrc/javadoc/classes.html b/h2/src/docsrc/javadoc/classes.html
deleted file mode 100644
index 56d26ae0b8..0000000000
--- a/h2/src/docsrc/javadoc/classes.html
+++ /dev/null
@@ -1,93 +0,0 @@
-
-
-
-
-
-
- H2 Documentation
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/h2/src/docsrc/javadoc/index.html b/h2/src/docsrc/javadoc/index.html
deleted file mode 100644
index 68cbb169df..0000000000
--- a/h2/src/docsrc/javadoc/index.html
+++ /dev/null
@@ -1,24 +0,0 @@
-
-
-
-
-
-
- H2 Documentation
-
-
-
-
-
- Sorry, Lynx is not supported
-
-
-
-
diff --git a/h2/src/docsrc/javadoc/overview.html b/h2/src/docsrc/javadoc/overview.html
deleted file mode 100644
index c54079fbd4..0000000000
--- a/h2/src/docsrc/javadoc/overview.html
+++ /dev/null
@@ -1,38 +0,0 @@
-
-
-
-
-
-
- API Overview
-
-
-
-
-
-
-
-
-API Overview
-
-JDBC API
-
-
-Use the JDBC API to connect to a database and execute queries.
-
-
-Tools API
-
-
-The Tools API can be used to do maintenance operations,
-such as deleting database files or changing the database file password,
-that do not require a connection to the database.
-
-
-
-
-
diff --git a/h2/src/docsrc/javadoc/stylesheet.css b/h2/src/docsrc/javadoc/stylesheet.css
deleted file mode 100644
index 5b4ff0cec6..0000000000
--- a/h2/src/docsrc/javadoc/stylesheet.css
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
- * * Initial Developer: H2 Group
- */
-
-td, input, select, textarea, body, code, pre, td, th {
- font: 13px/1.4 Arial, sans-serif;
- font-weight: normal;
-}
-
-pre {
- background-color: #ece9d8;
- border: 1px solid rgb(172, 168, 153);
- padding: 4px;
-}
-
-body {
- margin: 0px;
- max-width: 800px;
-}
-
-h1 {
- background-color: #0000bb;
- padding: 2px 4px 2px 4px;
- margin-top: 11px;
- color: #fff;
- font-size: 22px;
- line-height: normal;
-}
-
-h2 {
- font-size: 19px;
-}
-
-h3 {
- font-size: 16px;
-}
-
-h4 {
- font-size: 13px;
-}
-
-hr {
- color: #CCC;
- background-color: #CCC;
- height: 1px;
- border: 0px solid blue;
-}
-
-.menu {
- margin: 10px 10px 10px 10px;
-}
-
-.block {
- border: 0px;
-}
-
-.titleOff {
- display: none;
-}
-
-.detail {
- border: 0px;
- display: none;
-}
-
-.detailOn {
- border: 0px;
-}
-
-td.return {
- white-space:nowrap;
- width: 1%;
-}
-
-td.method {
- width: 99%;
-}
-
-.deprecated {
- text-decoration: line-through;
-}
-
-.methodText {
- color: #000000;
- font-weight: normal;
- margin: 0px 0px 0px 20px;
-}
-
-.method {
-}
-
-.fieldText {
- margin: 6px 20px 6px 20px;
-}
-
-.methodName {
- font-weight: bold;
-}
-
-.itemTitle {
-}
-
-.item {
- margin: 0px 0px 0px 20px;
-}
-
-table {
- background-color: #ffffff;
- border-collapse: collapse;
- border: 1px solid #aca899;
-}
-
-th {
- text-align: left;
- background-color: #ece9d8;
- border: 1px solid #aca899;
- padding: 2px;
-}
-
-td {
- background-color: #ffffff;
- text-align: left;
- vertical-align:top;
- border: 1px solid #aca899;
- padding: 2px;
-}
-
-
-ul, ol {
- list-style-position: outside;
- padding-left: 20px;
-}
-
-li {
- margin-top: 8px;
- line-height: 100%;
-}
-
-a {
- text-decoration: none;
- color: #0000ff;
-}
-
-a:hover {
- text-decoration: underline;
-}
-
-table.content {
- width: 100%;
- height: 100%;
- border: 0px;
-}
-
-tr.content {
- border:0px;
- border-left:1px solid #aca899;
-}
-
-td.content {
- border:0px;
- border-left:1px solid #aca899;
-}
-
-.contentDiv {
- margin:10px;
-}
-
-
-
diff --git a/h2/src/docsrc/text/_docs_en.utf8.txt b/h2/src/docsrc/text/_docs_en.utf8.txt
deleted file mode 100644
index 641108d54d..0000000000
--- a/h2/src/docsrc/text/_docs_en.utf8.txt
+++ /dev/null
@@ -1,11985 +0,0 @@
-@advanced_1000_h1
-Advanced
-
-@advanced_1001_a
- Result Sets
-
-@advanced_1002_a
- Large Objects
-
-@advanced_1003_a
- Linked Tables
-
-@advanced_1004_a
- Spatial Features
-
-@advanced_1005_a
- Recursive Queries
-
-@advanced_1006_a
- Updatable Views
-
-@advanced_1007_a
- Transaction Isolation
-
-@advanced_1008_a
- Multi-Version Concurrency Control (MVCC)
-
-@advanced_1009_a
- Clustering / High Availability
-
-@advanced_1010_a
- Two Phase Commit
-
-@advanced_1011_a
- Compatibility
-
-@advanced_1012_a
- Standards Compliance
-
-@advanced_1013_a
- Run as Windows Service
-
-@advanced_1014_a
- ODBC Driver
-
-@advanced_1015_a
- Using H2 in Microsoft .NET
-
-@advanced_1016_a
- ACID
-
-@advanced_1017_a
- Durability Problems
-
-@advanced_1018_a
- Using the Recover Tool
-
-@advanced_1019_a
- File Locking Protocols
-
-@advanced_1020_a
- Using Passwords
-
-@advanced_1021_a
- Password Hash
-
-@advanced_1022_a
- Protection against SQL Injection
-
-@advanced_1023_a
- Protection against Remote Access
-
-@advanced_1024_a
- Restricting Class Loading and Usage
-
-@advanced_1025_a
- Security Protocols
-
-@advanced_1026_a
- TLS Connections
-
-@advanced_1027_a
- Universally Unique Identifiers (UUID)
-
-@advanced_1028_a
- Settings Read from System Properties
-
-@advanced_1029_a
- Setting the Server Bind Address
-
-@advanced_1030_a
- Pluggable File System
-
-@advanced_1031_a
- Split File System
-
-@advanced_1032_a
- Database Upgrade
-
-@advanced_1033_a
- Java Objects Serialization
-
-@advanced_1034_a
- Limits and Limitations
-
-@advanced_1035_a
- Glossary and Links
-
-@advanced_1036_h2
-Result Sets
-
-@advanced_1037_h3
-Statements that Return a Result Set
-
-@advanced_1038_p
- The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP
. All other statements return an update count.
-
-@advanced_1039_h3
-Limiting the Number of Rows
-
-@advanced_1040_p
- Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT
in a query (example: SELECT * FROM TEST LIMIT 100
), or by using Statement.setMaxRows(max)
.
-
-@advanced_1041_h3
-Large Result Sets and External Sorting
-
-@advanced_1042_p
- For large result set, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS
. If ORDER BY
is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together.
-
-@advanced_1043_h2
-Large Objects
-
-@advanced_1044_h3
-Storing and Reading Large Objects
-
-@advanced_1045_p
- If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory, by using streams. To store a BLOB, use PreparedStatement.setBinaryStream
. To store a CLOB, use PreparedStatement.setCharacterStream
. To read a BLOB, use ResultSet.getBinaryStream
, and to read a CLOB, use ResultSet.getCharacterStream
. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side.
-
-@advanced_1046_h3
-When to use CLOB/BLOB
-
-@advanced_1047_p
- By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column.
-
-@advanced_1048_h3
-Large Object Compression
-
-@advanced_1049_p
- The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS=TRUE
to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster.
-
-@advanced_1050_h2
-Linked Tables
-
-@advanced_1051_p
- This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE
statement:
-
-@advanced_1052_p
- You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID=1
, then the following query is run against the PostgreSQL database: SELECT * FROM TEST WHERE ID=?
. The same happens for insert and update statements. Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible.
-
-@advanced_1053_p
- To view the statements that are executed against the target table, set the trace level to 3.
-
-@advanced_1054_p
- If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections=false
.
-
-@advanced_1055_p
- The statement CREATE LINKED TABLE supports an optional schema name parameter.
-
-@advanced_1056_p
- The following are not supported because they may result in a deadlock: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead).
-
-@advanced_1057_p
- Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns needs to be cast to a supported type.
-
-@advanced_1058_h2
-Updatable Views
-
-@advanced_1059_p
- By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows:
-
-@advanced_1060_p
- Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView
.
-
-@advanced_1061_h2
-Transaction Isolation
-
-@advanced_1062_p
- Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details.
-
-@advanced_1063_p
- Transaction isolation is provided for all data manipulation language (DML) statements.
-
-@advanced_1064_p
- Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect).
-
-@advanced_1065_p
- This database supports the following transaction isolation levels:
-
-@advanced_1066_b
-Read Committed
-
-@advanced_1067_li
- This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level.
-
-@advanced_1068_li
- To enable, execute the SQL statement SET LOCK_MODE 3
-
-@advanced_1069_li
- or append ;LOCK_MODE=3
to the database URL: jdbc:h2:~/test;LOCK_MODE=3
-
-@advanced_1070_b
-Serializable
-
-@advanced_1071_li
- Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1
-
-@advanced_1072_li
- or append ;LOCK_MODE=1
to the database URL: jdbc:h2:~/test;LOCK_MODE=1
-
-@advanced_1073_b
-Read Uncommitted
-
-@advanced_1074_li
- This level means that transaction isolation is disabled.
-
-@advanced_1075_li
- To enable, execute the SQL statement SET LOCK_MODE 0
-
-@advanced_1076_li
- or append ;LOCK_MODE=0
to the database URL: jdbc:h2:~/test;LOCK_MODE=0
-
-@advanced_1077_p
- When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited.
-
-@advanced_1078_b
-Dirty Reads
-
-@advanced_1079_li
- Means a connection can read uncommitted changes made by another connection.
-
-@advanced_1080_li
- Possible with: read uncommitted
-
-@advanced_1081_b
-Non-Repeatable Reads
-
-@advanced_1082_li
- A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result.
-
-@advanced_1083_li
- Possible with: read uncommitted, read committed
-
-@advanced_1084_b
-Phantom Reads
-
-@advanced_1085_li
- A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row.
-
-@advanced_1086_li
- Possible with: read uncommitted, read committed
-
-@advanced_1087_h3
-Table Level Locking
-
-@advanced_1088_p
- The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connection must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random.
-
-@advanced_1089_h3
-Lock Timeout
-
-@advanced_1090_p
- If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection.
-
-@advanced_1091_h2
-Multi-Version Concurrency Control (MVCC)
-
-@advanced_1092_p
- The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE
. Connections only 'see' committed data, and own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires.
-
-@advanced_1093_p
- To use the MVCC feature, append ;MVCC=TRUE
to the database URL:
-
-@advanced_1094_p
- The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open.
-
-@advanced_1095_p
- If MVCC is enabled, changing the lock mode (LOCK_MODE
) has no effect.
-
-@advanced_1096_div
- The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED=TRUE
; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO
has no effect. Clustering / High Availability
-
-@advanced_1097_p
- This database supports a simple clustering / high availability mechanism. The architecture is: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up.
-
-@advanced_1098_p
- Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster
tool without stopping the remaining server. Applications that are still connected are automatically disconnected, however when appending ;AUTO_RECONNECT=TRUE
, they will recover from that.
-
-@advanced_1099_p
- To initialize the cluster, use the following steps:
-
-@advanced_1100_li
-Create a database
-
-@advanced_1101_li
-Use the CreateCluster
tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data.
-
-@advanced_1102_li
-Start two servers (one for each copy of the database)
-
-@advanced_1103_li
-You are now ready to connect to the databases with the client application(s)
-
-@advanced_1104_h3
-Using the CreateCluster Tool
-
-@advanced_1105_p
- To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers.
-
-@advanced_1106_li
-Create two directories: server1, server2
. Each directory will simulate a directory on a computer.
-
-@advanced_1107_li
-Start a TCP server pointing to the first directory. You can do this using the command line:
-
-@advanced_1108_li
-Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. You can do this using the command line:
-
-@advanced_1109_li
-Use the CreateCluster
tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line:
-
-@advanced_1110_li
-You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc:h2:tcp://localhost:9101,localhost:9102/~/test
-
-@advanced_1111_li
-If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible.
-
-@advanced_1112_li
-To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster
tool.
-
-@advanced_1113_h3
-Detect Which Cluster Instances are Running
-
-@advanced_1114_p
- To find out which cluster nodes are currently running, execute the following SQL statement:
-
-@advanced_1115_p
- If the result is ''
(two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quote. Example: 'server1:9191,server2:9191'
.
-
-@advanced_1116_p
- It is also possible to get the list of servers by using Connection.getClientInfo().
-
-@advanced_1117_p
- The property list returned from getClientInfo()
contains a numServers
property that returns the number of servers that are in the connection list. To get the actual servers, getClientInfo()
also has properties server0
..serverX
, where serverX is the number of servers minus 1.
-
-@advanced_1118_p
- Example: To get the 2nd server in the connection list one uses getClientInfo('server1')
. Note: The serverX
property only returns IP addresses and ports and not hostnames.
-
-@advanced_1119_h3
-Clustering Algorithm and Limitations
-
-@advanced_1120_p
- Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. There is currently no load balancing made to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND()
[when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE
). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements).
-
-@advanced_1121_p
- When using the cluster modes, result sets are read fully in memory by the client, so that there is no problem if the server dies that executed the query. Result sets must fit in memory on the client side.
-
-@advanced_1122_p
- The SQL statement SET AUTOCOMMIT FALSE
is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false)
needs to be called.
-
-@advanced_1123_p
- It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row.
-
-@advanced_1124_h2
-Two Phase Commit
-
-@advanced_1125_p
- The two phase commit protocol is supported. 2-phase-commit works as follows:
-
-@advanced_1126_li
-Autocommit needs to be switched off
-
-@advanced_1127_li
-A transaction is started, for example by inserting a row
-
-@advanced_1128_li
-The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName
-
-@advanced_1129_li
-The transaction can now be committed or rolled back
-
-@advanced_1130_li
-If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt'
-
-@advanced_1131_li
-When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT
-
-@advanced_1132_li
-Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName
or ROLLBACK TRANSACTION transactionName
-
-@advanced_1133_li
-The database needs to be closed and re-opened to apply the changes
-
-@advanced_1134_h2
-Compatibility
-
-@advanced_1135_p
- This database is (up to a certain point) compatible to other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible.
-
-@advanced_1136_h3
-Transaction Commit when Autocommit is On
-
-@advanced_1137_p
- At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed.
-
-@advanced_1138_h3
-Keywords / Reserved Words
-
-@advanced_1139_p
- There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). The list is currently:
-
-@advanced_1140_code
- CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE
-
-@advanced_1141_p
- Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP
.
-
-@advanced_1142_h2
-Standards Compliance
-
-@advanced_1143_p
- This database tries to be as much standard compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date: SQL-92, SQL:1999, and SQL:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible to other databases.
-
-@advanced_1144_h3
-Supported Character Sets, Character Encoding, and Unicode
-
-@advanced_1145_p
- H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use.
-
-@advanced_1146_h2
-Run as Windows Service
-
-@advanced_1147_p
- Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service
.
-
-@advanced_1148_p
- The service wrapper bundled with H2 is a 32-bit version. To use a 64-bit version of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger.
-
-@advanced_1149_p
- When running the database as a service, absolute path should be used. Using ~
in the database URL is problematic in this case, because it means to use the home directory of the current user. The service might run without or with the wrong user, so that the database files might end up in an unexpected place.
-
-@advanced_1150_h3
-Install the Service
-
-@advanced_1151_p
- The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat
. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear.
-
-@advanced_1152_h3
-Start the Service
-
-@advanced_1153_p
- You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat
. Please note that the batch file does not print an error message if the service is not installed.
-
-@advanced_1154_h3
-Connect to the H2 Console
-
-@advanced_1155_p
- After installing and starting the service, you can connect to the H2 Console application using a browser. Double clicking on 3_start_browser.bat
to do that. The default port (8082) is hard coded in the batch file.
-
-@advanced_1156_h3
-Stop the Service
-
-@advanced_1157_p
- To stop the service, double click on 4_stop_service.bat
. Please note that the batch file does not print an error message if the service is not installed or started.
-
-@advanced_1158_h3
-Uninstall the Service
-
-@advanced_1159_p
- To uninstall the service, double click on 5_uninstall_service.bat
. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear.
-
-@advanced_1160_h3
-Additional JDBC drivers
-
-@advanced_1161_p
- To use other databases (for example MySQL), the location of the JDBC drivers of those databases need to be added to the environment variables H2DRIVERS
or CLASSPATH
before installing the service. Multiple drivers can be set; each entry needs to be separated with a ;
(Windows) or :
(other operating systems). Spaces in the path names are supported. The settings must not be quoted.
-
-@advanced_1162_h2
-ODBC Driver
-
-@advanced_1163_p
- This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications.
-
-@advanced_1164_p
- To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c:/windows/syswow64/odbcad32.exe
. At this point you set up your DSN just like you would on any other system. See also: Re: ODBC Driver on Windows 64 bit
-
-@advanced_1165_h3
-ODBC Installation
-
-@advanced_1166_p
- First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*
) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http://www.postgresql.org/ftp/odbc/versions/msi.
-
-@advanced_1167_h3
-Starting the Server
-
-@advanced_1168_p
- After installing the ODBC driver, start the H2 Server using the command line:
-
-@advanced_1169_p
- The PG Server (PG for PostgreSQL protocol) is started as well. By default, databases are stored in the current working directory where the server is started. Use -baseDir
to save databases in another directory, for example the user home directory:
-
-@advanced_1170_p
- The PG server can be started and stopped from within a Java application as follows:
-
-@advanced_1171_p
- By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers
when starting the server.
-
-@advanced_1172_p
- To map an ODBC database name to a different JDBC database name, use the option -key
when starting the server. Please note only one mapping is allowed. The following will map the ODBC database named TEST
to the database URL jdbc:h2:~/data/test;cipher=aes
:
-
-@advanced_1173_h3
-ODBC Configuration
-
-@advanced_1174_p
- After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe
to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini
file (which may be different from the GUI).
-
-@advanced_1175_th
-Property
-
-@advanced_1176_th
-Example
-
-@advanced_1177_th
-Remarks
-
-@advanced_1178_td
-Data Source
-
-@advanced_1179_td
-H2 Test
-
-@advanced_1180_td
-The name of the ODBC Data Source
-
-@advanced_1181_td
-Database
-
-@advanced_1182_td
-~/test;ifexists=true
-
-@advanced_1183_td
- The database name. This can include connections settings. By default, the database is stored in the current working directory where the Server is started except when the -baseDir setting is used. The name must be at least 3 characters.
-
-@advanced_1184_td
-Servername
-
-@advanced_1185_td
-localhost
-
-@advanced_1186_td
-The server name or IP address.
-
-@advanced_1187_td
-By default, only remote connections are allowed
-
-@advanced_1188_td
-Username
-
-@advanced_1189_td
-sa
-
-@advanced_1190_td
-The database user name.
-
-@advanced_1191_td
-SSL
-
-@advanced_1192_td
-false (disabled)
-
-@advanced_1193_td
-At this time, SSL is not supported.
-
-@advanced_1194_td
-Port
-
-@advanced_1195_td
-5435
-
-@advanced_1196_td
-The port where the PG Server is listening.
-
-@advanced_1197_td
-Password
-
-@advanced_1198_td
-sa
-
-@advanced_1199_td
-The database password.
-
-@advanced_1200_p
- To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare.
-
-@advanced_1201_p
- Afterwards, you may use this data source.
-
-@advanced_1202_h3
-PG Protocol Support Limitations
-
-@advanced_1203_p
- At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements can not be canceled when using the PG protocol. Also, H2 does not provide index meta over ODBC.
-
-@advanced_1204_p
- PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without password. This is a limitation of the ODBC driver.
-
-@advanced_1205_h3
-Security Considerations
-
-@advanced_1206_p
- Currently, the PG Server does not support challenge response or encrypt passwords. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important.
-
-@advanced_1207_p
- The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator.
-
-@advanced_1208_h3
-Using Microsoft Access
-
-@advanced_1209_p
- When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option: Tools - Options - Edit/Find - ODBC fields.
-
-@advanced_1210_h2
-Using H2 in Microsoft .NET
-
-@advanced_1211_p
- The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access a H2 database on .NET using the JDBC API, or using the ADO.NET interface.
-
-@advanced_1212_h3
-Using the ADO.NET API on .NET
-
-@advanced_1213_p
- An implementation of the ADO.NET interface is available in the open source project H2Sharp.
-
-@advanced_1214_h3
-Using the JDBC API on .NET
-
-@advanced_1215_li
-Install the .NET Framework from Microsoft. Mono has not yet been tested.
-
-@advanced_1216_li
-Install IKVM.NET.
-
-@advanced_1217_li
-Copy the h2*.jar
file to ikvm/bin
-
-@advanced_1218_li
-Run the H2 Console using: ikvm -jar h2*.jar
-
-@advanced_1219_li
-Convert the H2 Console to an .exe
file using: ikvmc -target:winexe h2*.jar
. You may ignore the warnings.
-
-@advanced_1220_li
-Create a .dll
file using (change the version accordingly): ikvmc.exe -target:library -version:1.0.69.0 h2*.jar
-
-@advanced_1221_p
- If you want your C# application use H2, you need to add the h2.dll
and the IKVM.OpenJDK.ClassLibrary.dll
to your C# solution. Here some sample code:
-
-@advanced_1222_h2
-ACID
-
-@advanced_1223_p
- In the database world, ACID stands for:
-
-@advanced_1224_li
-Atomicity: transactions must be atomic, meaning either all tasks are performed or none.
-
-@advanced_1225_li
-Consistency: all operations must comply with the defined constraints.
-
-@advanced_1226_li
-Isolation: transactions must be isolated from each other.
-
-@advanced_1227_li
-Durability: committed transactions will not be lost.
-
-@advanced_1228_h3
-Atomicity
-
-@advanced_1229_p
- Transactions in this database are always atomic.
-
-@advanced_1230_h3
-Consistency
-
-@advanced_1231_p
- By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled.
-
-@advanced_1232_h3
-Isolation
-
-@advanced_1233_p
- For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'.
-
-@advanced_1234_h3
-Durability
-
-@advanced_1235_p
- This database does not guarantee that all committed transactions survive a power failure. Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode.
-
-@advanced_1236_h2
-Durability Problems
-
-@advanced_1237_p
- Complete durability means all committed transactions survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test.
-
-@advanced_1238_h3
-Ways to (Not) Achieve Durability
-
-@advanced_1239_p
- Making sure that committed transactions are not lost is more complicated than it first seems. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile supports the modes rws and rwd:
-
-@advanced_1240_code
-rwd
-
-@advanced_1241_li
-: every update to the file's content is written synchronously to the underlying storage device.
-
-@advanced_1242_code
-rws
-
-@advanced_1243_li
-: in addition to rwd, every update to the metadata is written synchronously.
-
-@advanced_1244_p
- A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This mode does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. If the hard drive were able to write at this rate, the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. Given some overhead, the maximum write rate must be even lower than that.
-
-@advanced_1245_p
- Calling fsync flushes the buffers. There are two ways to do that in Java (a combined usage sketch follows the list):
-
-@advanced_1246_code
-FileDescriptor.sync()
-
-@advanced_1247_li
-. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium.
-
-@advanced_1248_code
-FileChannel.force()
-
-@advanced_1249_li
-. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it.
-
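-@advanced_1249a_p
- The following is a minimal sketch (not taken from the H2 code base; the file name and data are arbitrary) that combines the synchronous 'rwd' mode with both flushing methods described above:
-
-@advanced_1249b_code
-import java.io.FileDescriptor;
-import java.io.RandomAccessFile;
-import java.nio.channels.FileChannel;
-
-public class SyncTest {
-    public static void main(String[] args) throws Exception {
-        // 'rwd': content changes are written synchronously to the device
-        RandomAccessFile file = new RandomAccessFile("test.dat", "rwd");
-        file.write(0);
-        // force all system buffers for this descriptor to the device
-        FileDescriptor fd = file.getFD();
-        fd.sync();
-        // alternative: force updates through the file channel
-        FileChannel channel = file.getChannel();
-        channel.force(true);
-        file.close();
-    }
-}
-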
-@advanced_1250_p
- By default, MySQL calls fsync for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync() or FileChannel.force(), data is not always persisted to the hard drive, because most hard drives do not obey fsync(): see 'Your Hard Drive Lies to You'. In Mac OS X, fsync does not flush hard drive buffers: see 'Bad fsync?'. So the situation is confusing, and tests prove there is a problem.
-
-@advanced_1251_p
- Trying to flush hard drive buffers is hard, and if you do, performance is very poor. First you need to make sure that the hard drive actually flushes all buffers. Tests show that this cannot be done in a reliable way. Even then, the maximum number of transactions is around 60 per second. For those reasons, the default behavior of H2 is to delay writing committed transactions.
-
-@advanced_1252_p
- In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY and CHECKPOINT SYNC. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it.
-
-@advanced_1253_h3
-Running the Durability Test
-
-@advanced_1254_p
- To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff. Two computers with a network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then creates the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now switch off the power manually, then restart the computer, and run the application again. You will find that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application.
-
-@advanced_1255_h2
-Using the Recover Tool
-
-@advanced_1256_p
- The Recover tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). To run the tool, type on the command line:
-
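-@advanced_1256a_p
- A sketch of running the tool (the jar file name depends on the version; Recover.execute is assumed here to take a directory and a database name, where null means all databases):
-
-@advanced_1256b_code
-// Command line: java -cp h2*.jar org.h2.tools.Recover
-// The same from Java code, for all databases in the current directory:
-org.h2.tools.Recover.execute(".", null);
-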
-@advanced_1257_p
- For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript tool or a RUNSCRIPT FROM SQL statement. The script includes at least one CREATE USER statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script.
-
-@advanced_1258_p
- The Recover tool creates a SQL script from the database file. It also processes the transaction log.
-
-@advanced_1259_p
- To verify the database can recover at any time, append ;RECOVER_TEST=64 to the database URL in your test environment. This will simulate an application crash after every 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, which means it may require a larger heap setting.
-
-@advanced_1260_h2
-File Locking Protocols
-
-@advanced_1261_p
- Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at a time. Otherwise, the processes would overwrite each other's data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted.
-
-@advanced_1262_p
- In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. That means the existence of the lock file is not a safe protocol for file locking. However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are 'file' and 'socket'.
-
-@advanced_1263_p
- The file locking protocols (except the file locking method 'FS') have the following limitation: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep.
-
-@advanced_1264_h3
-File Locking Method 'File'
-
-@advanced_1265_p
- The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is:
-
-@advanced_1266_li
-If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition when one process deletes the lock file just after another one creates it, and a third process creates the file again. It does not occur if there are only two writers.
-
-@advanced_1267_li
- If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (once every second by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. The watchdog thread runs with high priority so that a change to the lock file does not go undetected even if the system is very busy. However, the watchdog thread uses very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it.
-
-@advanced_1268_li
- If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, it will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked.
-
-@advanced_1269_p
- This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. Using that many concurrent threads / processes is not the common use case anyway. Generally, an application should throw an error to the user if it cannot open a database, and not retry in a (fast) loop.
-
-@advanced_1270_h3
-File Locking Method 'Socket'
-
-@advanced_1271_p
- There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK=SOCKET to the database URL. The algorithm is:
-
-@advanced_1272_li
-If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. The port and IP address of the process that opened the database is written into the lock file.
-
-@advanced_1273_li
-If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method.
-
-@advanced_1274_li
-If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again.
-
-@advanced_1275_p
- This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is that if the file is stored on a network share, two processes (running on different computers) could still open the same database files if they do not have a direct TCP/IP connection.
-
-@advanced_1276_h3
-File Locking Method 'FS'
-
-@advanced_1277_p
- This is the default mode for version 1.4 and newer. This database file locking mechanism uses a native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow locking the same file multiple times within the same virtual machine, and on some systems native file locking is not supported or files are not unlocked after a power failure.
-
-@advanced_1278_p
- To enable this feature, append ;FILE_LOCK=FS to the database URL.
-
-@advanced_1279_p
- This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected.
-
-@advanced_1280_h2
-Using Passwords
-
-@advanced_1281_h3
-Using Secure Passwords
-
-@advanced_1282_p
- Remember that weak passwords can be broken regardless of the encryption and security protocols. Don't use passwords that can be found in a dictionary. Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example:
-
-@advanced_1283_code
-i'sE2rtPiUKtT
-
-@advanced_1284_p
- from the sentence it's easy to remember this password if you know the trick.
-
-@advanced_1285_h3
-Passwords: Using Char Arrays instead of Strings
-
-@advanced_1286_p
- Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system.
-
-@advanced_1287_p
- It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file.
-
-@advanced_1288_p
- This database supports using char arrays instead of strings to pass user and file passwords. The following code can be used to do that:
-
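-@advanced_1288a_p
- A minimal sketch of such code (the database URL and user name are examples):
-
-@advanced_1288b_code
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.util.Arrays;
-import java.util.Properties;
-
-public class PasswordTest {
-    public static void main(String[] args) throws Exception {
-        Properties prop = new Properties();
-        prop.setProperty("user", "sa");
-        // read the password into a char array, never into a String
-        char[] password = System.console().readPassword("Password? ");
-        prop.put("password", password);
-        Connection conn;
-        try {
-            conn = DriverManager.getConnection("jdbc:h2:~/test", prop);
-        } finally {
-            // clear the password from memory after use
-            Arrays.fill(password, (char) 0);
-        }
-        conn.close();
-    }
-}
-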
-@advanced_1289_p
- This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField.
-
-@advanced_1290_h3
-Passing the User Name and/or Password in the URL
-
-@advanced_1291_p
- Instead of passing the user name as a separate parameter as in Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "123");, the user name (and/or password) can be supplied in the URL itself: Connection conn = DriverManager.getConnection("jdbc:h2:~/test;USER=sa;PASSWORD=123");. The settings in the URL override the settings passed as separate parameters.
-
-@advanced_1292_h2
-Password Hash
-
-@advanced_1293_p
- Sometimes the database password needs to be stored in a configuration file (for example in the web.xml file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash.
-
-@advanced_1294_p
- To connect using the password hash instead of the plain text password, append ;PASSWORD_HASH=TRUE to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool: @password_hash <upperCaseUserName> <password>. As an example, if the user name is sa and the password is test, run the command @password_hash SA test. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, the user password and file password need to be hashed separately. To calculate the hash of the file password, run: @password_hash file <filePassword>.
-
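-@advanced_1294a_p
- A sketch of such a connection (the third argument is a placeholder for the hash printed by @password_hash, not a real hash):
-
-@advanced_1294b_code
-Connection conn = DriverManager.getConnection(
-        "jdbc:h2:~/test;PASSWORD_HASH=TRUE", "sa", "<resulting hash>");
-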
-@advanced_1295_h2
-Protection against SQL Injection
-
-@advanced_1296_h3
-What is SQL Injection
-
-@advanced_1297_p
- This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. Some applications build SQL statements with embedded user input such as:
-
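-@advanced_1297a_p
- A sketch of such vulnerable statement building (conn is an open JDBC connection, and pwd is unfiltered user input):
-
-@advanced_1297b_code
-// DO NOT do this: user input is embedded directly in the statement
-String sql = "SELECT * FROM USERS WHERE PASSWORD='" + pwd + "'";
-ResultSet rs = conn.createStatement().executeQuery(sql);
-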
-@advanced_1298_p
- If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password: ' OR ''='. In this case the statement becomes:
-
-@advanced_1299_p
- The condition then reads PASSWORD='' OR ''='', which is always true, no matter what password is stored in the database. For more information about SQL Injection, see Glossary and Links.
-
-@advanced_1300_h3
-Disabling Literals
-
-@advanced_1301_p
- SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement:
-
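-@advanced_1301a_p
- A sketch of the same query with a parameter instead of an embedded literal:
-
-@advanced_1301b_code
-PreparedStatement prep = conn.prepareStatement(
-        "SELECT * FROM USERS WHERE PASSWORD=?");
-prep.setString(1, pwd);
-ResultSet rs = prep.executeQuery();
-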
-@advanced_1302_p
- This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement:
-
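-@advanced_1302a_p
- From JDBC, the statement can be executed as follows (admin rights are required):
-
-@advanced_1302b_code
-// disable all text and number literals in SQL statements
-conn.createStatement().execute("SET ALLOW_LITERALS NONE");
-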
-@advanced_1303_p
- Afterwards, SQL statements with text and number literals are not allowed any more. That means SQL statements of the form WHERE NAME='abc' or WHERE CustomerId=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where only number literals are allowed: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). Literals can only be enabled or disabled by an administrator.
-
-@advanced_1304_h3
-Using Constants
-
-@advanced_1305_p
- Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT command. Constants can be defined only when literals are enabled, but used even when literals are disabled. To avoid name clashes with column names, constants can be defined in other schemas:
-
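-@advanced_1305a_p
- A sketch (the schema, constant, and table names are examples):
-
-@advanced_1305b_code
-Statement stat = conn.createStatement();
-// defining the constant in its own schema avoids column name clashes
-stat.execute("CREATE SCHEMA CONST");
-stat.execute("CREATE CONSTANT CONST.ACTIVE VALUE 'Active'");
-ResultSet rs = stat.executeQuery(
-        "SELECT * FROM USERS WHERE TYPE = CONST.ACTIVE");
-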
-@advanced_1306_p
- Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, and the source code is easier to understand and change.
-
-@advanced_1307_h3
-Using the ZERO() Function
-
-@advanced_1308_p
- It is not required to create a constant for the number 0, as there is already a built-in function ZERO():
-
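-@advanced_1308a_p
- For example (the table and column are hypothetical):
-
-@advanced_1308b_code
-// ZERO() stands in for the literal 0 when literals are disabled
-ResultSet rs = conn.createStatement().executeQuery(
-        "SELECT * FROM USERS WHERE FAILED_LOGINS = ZERO()");
-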
-@advanced_1309_h2
-Protection against Remote Access
-
-@advanced_1310_p
- By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers.
-
-@advanced_1311_p
- If you enable remote access using -tcpAllowOthers or -pgAllowOthers, please also consider using the options -baseDir and -ifExists, so that remote users cannot create new databases or access existing databases with weak passwords. When using the option -baseDir, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords.
-
-@advanced_1312_p
- If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir and -ifExists do not protect access to the tools section, nor do they prevent remote shutdown of the web server, changes to the preferences, changes to the saved connection settings, or access to other databases accessible from the system.
-
-@advanced_1313_h2
-Restricting Class Loading and Usage
-
-@advanced_1314_p
- By default there is no restriction on loading classes and executing Java code for admins. That means an admin may call system functions such as System.setProperty by executing:
-
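-@advanced_1314a_p
- For example, a sketch of the kind of call an admin could make (the alias name is arbitrary):
-
-@advanced_1314b_code
-Statement stat = conn.createStatement();
-// map a SQL function alias to a static Java method
-stat.execute("CREATE ALIAS SET_PROPERTY FOR \"java.lang.System.setProperty\"");
-stat.execute("CALL SET_PROPERTY('abc', '1')");
-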
-@advanced_1315_p
- To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses, in the form of a comma-separated list of classes or patterns (items ending with *). By default all classes are allowed. Example:
-
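-@advanced_1315a_p
- A sketch of such a setting (the class names are examples); on the JVM command line the equivalent is -Dh2.allowedClasses=java.lang.Math,com.acme.*:
-
-@advanced_1315b_code
-// must be set before the database engine is first used
-System.setProperty("h2.allowedClasses", "java.lang.Math,com.acme.*");
-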
-@advanced_1316_p
- This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console.
-
-@advanced_1317_h2
-Security Protocols
-
-@advanced_1318_p
- The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts who already know the underlying security primitives.
-
-@advanced_1319_h3
-User Password Encryption
-
-@advanced_1320_p
- When a user tries to connect to a database, the combination of user name, @, and password is hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker who reuses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication: Basic and Digest Access Authentication' for more information.
-
-@advanced_1321_p
- When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords.
-
-@advanced_1322_p
- The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all.
-
-@advanced_1323_h3
-File Encryption
-
-@advanced_1324_p
- The database files can be encrypted using the AES-128 algorithm.
-
-@advanced_1325_p
- When a user tries to connect to an encrypted database, the combination of file@ and the file password is hashed using SHA-256. This hash value is transmitted to the server.
-
-@advanced_1326_p
- When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords.
-
-@advanced_1327_p
- The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. The reason for using a secret IV is to protect against watermark attacks.
-
-@advanced_1328_p
- Before saving a block of data (each block is 8 bytes long), the following operations are executed: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm.
-
-@advanced_1329_p
- When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated and combined with the decrypted text using XOR.
-
-@advanced_1330_p
- Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block.
-
-@advanced_1331_p
- Database encryption is meant for securing the database while it is not in use (a stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. An attacker with write access can, for example, replace pieces of files with pieces of older versions and manipulate the data in this way.
-
-@advanced_1332_p
- File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode).
-
-@advanced_1333_h3
-Wrong Password / User Name Delay
-
-@advanced_1334_p
- To protect against remote brute force password attacks, the delay after each unsuccessful login doubles. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies for those using the wrong password. Normally there is no delay for a user that knows the correct password, with one exception: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized; this is also required to protect against parallel attacks.
-
-@advanced_1335_p
- There is only one exception message for both a wrong user name and a wrong password, to make it harder to get the list of user names. It is not possible to see from the stack trace whether the user name or the password was wrong.
-
-@advanced_1336_h3
-HTTPS Connections
-
-@advanced_1337_p
- The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-signed certificate to provide an easy starting point, but custom certificates are supported as well.
-
-@advanced_1338_h2
-TLS Connections
-
-@advanced_1339_p
- Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket). By default, anonymous TLS is enabled.
-
-@advanced_1340_p
- To use your own keystore, set the system properties javax.net.ssl.keyStore and javax.net.ssl.keyStorePassword before starting the H2 server and client. See also 'Customizing the Default Key and Trust Stores, Store Types, and Store Passwords' for more information.
-
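-@advanced_1340a_p
- A sketch of setting the keystore properties programmatically before starting the server (the path and password are placeholders):
-
-@advanced_1340b_code
-System.setProperty("javax.net.ssl.keyStore", "/path/to/keystore.jks");
-System.setProperty("javax.net.ssl.keyStorePassword", "keystorePassword");
-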
-@advanced_1341_p
- To disable anonymous TLS, set the system property h2.enableAnonymousTLS to false.
-
-@advanced_1342_h2
-Universally Unique Identifiers (UUID)
-
-@advanced_1343_p
- This database supports UUIDs, as well as a function to create new UUIDs using a cryptographically strong pseudo-random number generator. With random UUIDs, the chance of two having the same value can be calculated using probability theory (see also 'Birthday Paradox'). Standardized randomly generated UUIDs have 122 random bits: 4 bits are used for the version (randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function RANDOM_UUID(). Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values:
-
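-@advanced_1343a_p
- A small sketch of such a program, using the birthday-paradox approximation p = 1 - e^(-n^2 / (2 * 2^122)); it reproduces the values in the table below:
-
-@advanced_1343b_code
-public class UuidProbability {
-    public static void main(String[] args) {
-        // a randomly generated UUID has 122 random bits
-        double possibleValues = Math.pow(2, 122);
-        for (int exp = 36; exp <= 46; exp += 5) {
-            double n = Math.pow(2, exp);
-            // birthday approximation of the duplicate probability
-            double p = 1 - Math.exp(-(n * n) / (2 * possibleValues));
-            System.out.println("2^" + exp + " UUIDs: p = " + p);
-        }
-    }
-}
-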
-@advanced_1344_p
- Some values are:
-
-@advanced_1345_th
-Number of UUIDs
-
-@advanced_1346_th
-Probability of Duplicates
-
-@advanced_1347_td
-2^36=68'719'476'736
-
-@advanced_1348_td
-0.000'000'000'000'000'4
-
-@advanced_1349_td
-2^41=2'199'023'255'552
-
-@advanced_1350_td
-0.000'000'000'000'4
-
-@advanced_1351_td
-2^46=70'368'744'177'664
-
-@advanced_1352_td
-0.000'000'000'4
-
-@advanced_1353_p
- To help non-mathematicians understand what those numbers mean, here is a comparison: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, which means the probability is about 0.000'000'000'06.
-
-@advanced_1354_h2
-Spatial Features
-
-@advanced_1355_p
- H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with JTS support, you need to download the JTS 1.13 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows:
-
-@advanced_1356_p
- Here is an example SQL script to create a table with a spatial column and index:
-
-@advanced_1357_p
- To query the table using geometry envelope intersection, use the operation &&, as in PostGIS:
-
-@advanced_1358_p
- You can verify that the spatial index is used using the "explain plan" feature:
-
-@advanced_1359_p
- For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory.
-
-@advanced_1360_h2
-Recursive Queries
-
-@advanced_1361_p
- H2 has experimental support for recursive queries using so-called "common table expressions" (CTE). Examples:
-
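-@advanced_1361a_p
- A classic sketch that generates the numbers 1 to 10 (conn is an open JDBC connection):
-
-@advanced_1361b_code
-ResultSet rs = conn.createStatement().executeQuery(
-        "WITH RECURSIVE T(N) AS ("
-        + " SELECT 1"
-        + " UNION ALL"
-        + " SELECT N + 1 FROM T WHERE N < 10"
-        + ") SELECT * FROM T");
-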
-@advanced_1362_p
- Limitations: Recursive queries need to be of the type UNION ALL, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM with recursive queries are not supported. Parameters are only supported within the last SELECT statement (a workaround is to use session variables like @start within the table expression). The syntax is:
-
-@advanced_1363_h2
-Settings Read from System Properties
-
-@advanced_1364_p
- Some settings of the database can be set on the command line using -DpropertyName=value. It is usually not required to change those settings manually. The settings are case sensitive. Example:
-
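-@advanced_1364a_p
- A sketch of such a command line (the property and main class are examples):
-
-@advanced_1364b_code
-java -Dh2.baseDir=/temp org.h2.tools.Server
-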
-@advanced_1365_p
- The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS.
-
-@advanced_1366_p
- For a complete list of settings, see SysProperties.
-
-@advanced_1367_h2
-Setting the Server Bind Address
-
-@advanced_1368_p
- Usually, server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind to only one address, use the system property h2.bindAddress. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported.
-
-@advanced_1369_h2
-Pluggable File System
-
-@advanced_1370_p
- This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. The following file systems are included:
-
-@advanced_1371_code
-zip:
-
-@advanced_1372_li
- read-only zip-file based file system. Format: zip:/zipFileName!/fileName.
-
-@advanced_1373_code
-split:
-
-@advanced_1374_li
- file system that splits files in 1 GB files (stackable with other file systems).
-
-@advanced_1375_code
-nio:
-
-@advanced_1376_li
- file system that uses FileChannel instead of RandomAccessFile (faster in some operating systems).
-
-@advanced_1377_code
-nioMapped:
-
-@advanced_1378_li
- file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system with a 32-bit JVM. To work around this limitation, combine it with the split file system: split:nioMapped:test.
-
-@advanced_1379_code
-memFS:
-
-@advanced_1380_li
- in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself).
-
-@advanced_1381_code
-memLZF:
-
-@advanced_1382_li
- compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself).
-
-@advanced_1383_p
- As an example, to use the nio file system, use the following database URL: jdbc:h2:nio:~/test.
-
-@advanced_1384_p
- To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase, and call the method FilePath.register before using it.
-
-@advanced_1385_p
- For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example: jar:file:///c:/temp/example.zip!/org/example/nested.csv. To read a stream from the classpath, use the prefix classpath:, as in classpath:/org/h2/samples/newsfeed.sql.
-
-@advanced_1386_h2
-Split File System
-
-@advanced_1387_p
- The file system prefix split: is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows:
-
-@advanced_1388_code
-<fileName>
-
-@advanced_1389_li
- (first block, is always created)
-
-@advanced_1390_code
-<fileName>.1.part
-
-@advanced_1391_li
- (second block)
-
-@advanced_1392_p
- More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However, this can be changed if required, by specifying the block size in the file name. The file name format is split:<x>:<fileName>, where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db. An example database URL for this case is jdbc:h2:split:20:~/test.
-
-@advanced_1393_h2
-Database Upgrade
-
-@advanced_1394_p
- In version 1.2, H2 introduced a new file store implementation which is incompatible with the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connection to an older database will result in a conversion process.
-
-@advanced_1395_p
- The conversion itself is done internally via 'script to' and 'runscript from'. After the conversion process, the files will be renamed from
-
-@advanced_1396_code
-dbName.data.db
-
-@advanced_1397_li
- to dbName.data.db.backup
-
-@advanced_1398_code
-dbName.index.db
-
-@advanced_1399_li
- to dbName.index.db.backup
-
-@advanced_1400_p
- by default. Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via
-
-@advanced_1401_code
-org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean)
-
-@advanced_1402_code
-org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean)
-
-@advanced_1403_p
- prior to opening a database connection.
-
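-@advanced_1403a_p
- A sketch of such a customization (the chosen values are examples):
-
-@advanced_1403b_code
-// customize the upgrade behavior before the first connection is opened
-org.h2.upgrade.DbUpgrade.setDeleteOldDb(true);
-org.h2.upgrade.DbUpgrade.setScriptInTmpDir(true);
-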
-@advanced_1404_p
- Since version 1.2.140 it is possible to let the old H2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). If the database should automatically connect using the old version if a database with the old format exists (without upgrade), and use the new version otherwise, then append ;NO_UPGRADE=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so using this setting is not supported when upgrading.
-
-@advanced_1405_h2
-Java Objects Serialization
-
-@advanced_1406_p
- Java object serialization is enabled by default for columns of type OTHER, using standard Java serialization/deserialization semantics.
-
-@advanced_1407_p
- To disable this feature, set the system property h2.serializeJavaObject=false (default: true).
-
-@advanced_1408_p
- Serialization and deserialization of Java objects is customizable both at the system level and at the database level, by providing a JavaObjectSerializer implementation:
-
-@advanced_1409_li
- At the system level, set the system property h2.javaObjectSerializer to the fully qualified name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize Java objects being stored in columns of type OTHER. Example: h2.javaObjectSerializer=com.acme.SerializerClassName.
-
-@advanced_1410_li
- At the database level, execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName', or append ;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName' to the database URL: jdbc:h2:~/test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'.
-
-@advanced_1411_p
- Please note that this SQL statement can only be executed before any tables are defined.
-
-@advanced_1412_h2
-Limits and Limitations
-
-@advanced_1413_p
- This database has the following known limitations:
-
-@advanced_1414_li
-Database file size limit: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit includes CLOB and BLOB data.
-
-@advanced_1415_li
-The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is a limitation of the file system. The database provides a workaround for this problem: use the file name prefix split:. In that case, files are split into files of 1 GB by default. An example database URL is: jdbc:h2:split:~/test.
-
-@advanced_1416_li
-The maximum number of rows per table is 2^64.
-
-@advanced_1417_li
-The maximum number of open transactions is 65535.
-
-@advanced_1418_li
-Main memory requirements: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size.
-
-@advanced_1419_li
-Limit on the complexity of SQL statements. Statements of the following form will result in a stack overflow exception:
-
-@advanced_1420_li
-There is no limit for the following entities, except the memory and storage capacity: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement.
-
-@advanced_1421_li
-Querying from the metadata tables is slow if there are many tables (thousands).
-
-@advanced_1422_li
-For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database.
-
-@advanced_1423_h2
-Glossary and Links
-
-@advanced_1424_th
-Term
-
-@advanced_1425_th
-Description
-
-@advanced_1426_td
-AES-128
-
-@advanced_1427_td
-A block encryption algorithm. See also: Wikipedia: AES
-
-@advanced_1428_td
-Birthday Paradox
-
-@advanced_1429_td
-Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also: Wikipedia: Birthday Paradox
-
-@advanced_1430_td
-Digest
-
-@advanced_1431_td
-Protocol to protect a password (but not to protect data). See also: RFC 2617: HTTP Digest Access Authentication
-
-@advanced_1432_td
-GCJ
-
-@advanced_1433_td
-Compiler for Java. See also: GNU Compiler for Java, and NativeJ (commercial)
-
-@advanced_1434_td
-HTTPS
-
-@advanced_1435_td
-A protocol to provide security to HTTP connections. See also: RFC 2818: HTTP Over TLS
-
-@advanced_1436_td
-Modes of Operation
-
-@advanced_1437_a
-Wikipedia: Block cipher modes of operation
-
-@advanced_1438_td
-Salt
-
-@advanced_1439_td
-Random number to increase the security of passwords. See also: Wikipedia: Key derivation function
-
-@advanced_1440_td
-SHA-256
-
-@advanced_1441_td
-A cryptographic one-way hash function. See also: Wikipedia: SHA hash functions
-
-@advanced_1442_td
-SQL Injection
-
-@advanced_1443_td
-A security vulnerability where an application builds SQL statements with embedded user input. See also: Wikipedia: SQL Injection
-
-@advanced_1444_td
-Watermark Attack
-
-@advanced_1445_td
-Security problem of certain encryption programs where the existence of certain data can be proven without decrypting it. For more information, search the internet for 'watermark attack cryptoloop'
-
-@advanced_1446_td
-SSL/TLS
-
-@advanced_1447_td
-Secure Sockets Layer / Transport Layer Security. See also: Java Secure Socket Extension (JSSE)
-
-@architecture_1000_h1
-Architecture
-
-@architecture_1001_a
- Introduction
-
-@architecture_1002_a
- Top-down overview
-
-@architecture_1003_a
- JDBC driver
-
-@architecture_1004_a
- Connection/session management
-
-@architecture_1005_a
- Command execution and planning
-
-@architecture_1006_a
- Table/index/constraints
-
-@architecture_1007_a
- Undo log, redo log, and transactions layer
-
-@architecture_1008_a
- B-tree engine and page-based storage allocation
-
-@architecture_1009_a
- Filesystem abstraction
-
-@architecture_1010_h2
-Introduction
-
-@architecture_1011_p
- H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store.
-
-@architecture_1012_p
- As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine.
-
-@architecture_1013_h2
-Top-down Overview
-
-@architecture_1014_p
- Working from the top down, the layers look like this:
-
-@architecture_1015_li
-JDBC driver.
-
-@architecture_1016_li
-Connection/session management.
-
-@architecture_1017_li
-SQL Parser.
-
-@architecture_1018_li
-Command execution and planning.
-
-@architecture_1019_li
-Table/Index/Constraints.
-
-@architecture_1020_li
-Undo log, redo log, and transactions layer.
-
-@architecture_1021_li
-B-tree engine and page-based storage allocation.
-
-@architecture_1022_li
-Filesystem abstraction.
-
-@architecture_1023_h2
-JDBC Driver
-
-@architecture_1024_p
- The JDBC driver implementation lives in org.h2.jdbc and org.h2.jdbcx.
-
-@architecture_1025_h2
-Connection/session management
-
-@architecture_1026_p
- The primary classes of interest are:
-
-@architecture_1027_th
-Package
-
-@architecture_1028_th
-Description
-
-@architecture_1029_td
-org.h2.engine.Database
-
-@architecture_1030_td
-the root/global class
-
-@architecture_1031_td
-org.h2.engine.SessionInterface
-
-@architecture_1032_td
-abstracts over the differences between embedded and remote sessions
-
-@architecture_1033_td
-org.h2.engine.Session
-
-@architecture_1034_td
-local/embedded session
-
-@architecture_1035_td
-org.h2.engine.SessionRemote
-
-@architecture_1036_td
-remote session
-
-@architecture_1037_h2
-Parser
-
-@architecture_1038_p
- The parser lives in org.h2.command.Parser
. It uses a straightforward recursive-descent design.
-
-@architecture_1039_p
- See the Wikipedia page on recursive-descent parsers.
-
-@architecture_1040_h2
-Command execution and planning
-
-@architecture_1041_p
- Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. The primary packages of interest are:
-
-@architecture_1042_th
-Package
-
-@architecture_1043_th
-Description
-
-@architecture_1044_td
-org.h2.command.ddl
-
-@architecture_1045_td
-Commands that modify schema data structures
-
-@architecture_1046_td
-org.h2.command.dml
-
-@architecture_1047_td
-Commands that modify data
-
-@architecture_1048_h2
-Table/Index/Constraints
-
-@architecture_1049_p
- One thing to note here is that indexes are simply stored as special kinds of tables.
-
-@architecture_1050_p
- The primary packages of interest are:
-
-@architecture_1051_th
-Package
-
-@architecture_1052_th
-Description
-
-@architecture_1053_td
-org.h2.table
-
-@architecture_1054_td
-Implementations of different kinds of tables
-
-@architecture_1055_td
-org.h2.index
-
-@architecture_1056_td
-Implementations of different kinds of indices
-
-@architecture_1057_h2
-Undo log, redo log, and transactions layer
-
-@architecture_1058_p
- We have a transaction log, which is shared among all sessions. See also http://en.wikipedia.org/wiki/Transaction_log and http://h2database.com/html/grammar.html#set_log
-
-@architecture_1059_p
- We also have an undo log, which is per session, to undo an operation (an update that fails, for example) and to roll back a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses its own "list of operations" (usually in-memory).
-
-@architecture_1060_p
- With the MVStore, this is no longer needed (just the transaction log).
-
-@architecture_1061_h2
-B-tree engine and page-based storage allocation
-
-@architecture_1062_p
- The primary package of interest is org.h2.store
.
-
-@architecture_1063_p
- This implements a storage mechanism which allocates pages of storage (typically 2 KB in size) and also implements a B-tree over those pages to allow fast retrieval and update.
-
-@architecture_1064_h2
-Filesystem abstraction
-
-@architecture_1065_p
- The primary class of interest is org.h2.store.FileStore
.
-
-@architecture_1066_p
- This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same.
-
-@build_1000_h1
-Build
-
-@build_1001_a
- Portability
-
-@build_1002_a
- Environment
-
-@build_1003_a
- Building the Software
-
-@build_1004_a
- Build Targets
-
-@build_1005_a
- Using Maven 2
-
-@build_1006_a
- Using Eclipse
-
-@build_1007_a
- Translating
-
-@build_1008_a
- Providing Patches
-
-@build_1009_a
- Reporting Problems or Requests
-
-@build_1010_a
- Automated Build
-
-@build_1011_a
- Generating Railroad Diagrams
-
-@build_1012_h2
-Portability
-
-@build_1013_p
- This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ.
-
-@build_1014_h2
-Environment
-
-@build_1015_p
- To run this database, a Java Runtime Environment (JRE) version 1.6 or higher is required.
-
-@build_1016_p
- To create the database executables, the following software stack was used. However, to use this database, it is not required to install this software.
-
-@build_1017_li
-Mac OS X and Windows
-
-@build_1018_a
-Sun JDK Version 1.6 and 1.7
-
-@build_1019_a
-Eclipse
-
-@build_1020_li
-Eclipse Plugins: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage
-
-@build_1021_a
-Emma Java Code Coverage
-
-@build_1022_a
-Mozilla Firefox
-
-@build_1023_a
-OpenOffice
-
-@build_1024_a
-NSIS
-
-@build_1025_li
- (Nullsoft Scriptable Install System)
-
-@build_1026_a
-Maven
-
-@build_1027_h2
-Building the Software
-
-@build_1028_p
- You need to install a JDK, for example the Sun JDK version 1.6 or 1.7. Ensure that the Java binary directory is included in the PATH environment variable, and that the environment variable JAVA_HOME points to your Java installation. On the command line, go to the directory h2 and execute the following command:
-
-@build_1029_p
- For Linux and OS X, use ./build.sh instead of build.
-
-@build_1030_p
- You will get a list of targets. If you want to build the jar file, execute (Windows):
-
-@build_1031_p
- To run the build tool in shell mode, use the command line option - as in ./build.sh -.
-
-@build_1032_h3
-Switching the Source Code
-
-@build_1033_p
- The source code uses Java 1.6 features. To switch the source code to the installed version of Java, run:
-
-@build_1034_h2
-Build Targets
-
-@build_1035_p
- The build system can generate smaller jar files as well. The following targets are currently supported:
-
-@build_1036_code
-jarClient
-
-@build_1037_li
- creates the file h2client.jar. This only contains the JDBC client.
-
-@build_1038_code
-jarSmall
-
-@build_1039_li
- creates the file h2small.jar. This only contains the embedded database. Debug information is disabled.
-
-@build_1040_code
-jarJaqu
-
-@build_1041_li
- creates the file h2jaqu.jar. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu.
-
-@build_1042_code
-javadocImpl
-
-@build_1043_li
- creates the Javadocs of the implementation.
-
-@build_1044_p
- To create the file h2client.jar, go to the directory h2 and execute the following command:
-
-@build_1045_h3
-Using Lucene 2 / 3
-
-@build_1046_p
- Both Apache Lucene 2 and Lucene 3 are supported. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. To use a different version of Lucene when compiling, it needs to be specified as follows:
-
-@build_1047_h2
-Using Maven 2
-
-@build_1048_h3
-Using a Central Repository
-
-@build_1049_p
- You can include the database in your Maven 2 project as a dependency. Example:
-
-@build_1050_p
- New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there.
-
-@build_1051_h3
-Maven Plugin to Start and Stop the TCP Server
-
-@build_1052_p
- A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. To start the H2 server, use:
-
-@build_1053_p
- To stop the H2 server, use:
-
-@build_1054_h3
-Using Snapshot Version
-
-@build_1055_p
- To build a h2-*-SNAPSHOT.jar file and upload it to the local Maven 2 repository, execute the following command:
-
-@build_1056_p
- Afterwards, you can include the database in your Maven 2 project as a dependency:
-
-@build_1057_h2
-Using Eclipse
-
-@build_1058_p
- To create an Eclipse project for H2, use the following steps:
-
-@build_1059_li
-Install Subversion and Eclipse.
-
-@build_1060_li
-Get the H2 source code from the Subversion repository:
-
-@build_1061_code
-svn checkout http://h2database.googlecode.com/svn/trunk h2database-read-only
-
-@build_1062_li
-Download all dependencies (Windows):
-
-@build_1063_code
-build.bat download
-
-@build_1064_li
-In Eclipse, create a new Java project from existing source code: File, New, Project, Java Project, Create project from existing source.
-
-@build_1065_li
-Select the h2 folder, click Next and Finish.
-
-@build_1066_li
-To resolve com.sun.javadoc import statements, you may need to manually add the file <java.home>/../lib/tools.jar to the build path.
-
-@build_1067_h2
-Translating
-
-@build_1068_p
- The translation of this software is split into the following parts:
-
-@build_1069_li
-H2 Console: src/main/org/h2/server/web/res/_text_*.prop
-
-@build_1070_li
-Error messages: src/main/org/h2/res/_messages_*.prop
-
-@build_1071_p
- To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop file to the Google Group. The web site is currently translated using Google.
-
-@build_1072_h2
-Providing Patches
-
-@build_1073_p
- If you would like to provide patches, please consider the following guidelines to simplify merging them:
-
-@build_1074_li
-Only use Java 6 features (do not use Java 7) (see Environment).
-
-@build_1075_li
-Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml.
-
-@build_1076_li
-A template of the Eclipse settings is in src/installer/eclipse.settings/*. If you want to use them, you need to copy them to the .settings directory. The formatting options (eclipseCodeStyle) are also included.
-
-@build_1077_li
-Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java. For SQL level tests, see src/test/org/h2/test/test.in.txt or testSimple.in.txt.
-
-@build_1078_li
-The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above), or use the build target coverage.
-
-@build_1079_li
-Verify that you did not break other features: run the test cases by executing build test.
-
-@build_1080_li
-Provide end user documentation if required (src/docsrc/html/*).
-
-@build_1081_li
-Document grammar changes in src/docsrc/help/help.csv.
-
-@build_1082_li
-Provide a change log entry (src/docsrc/html/changelog.html).
-
-@build_1083_li
-Verify the spelling using build spellcheck. If required, add the new words to src/tools/org/h2/build/doc/dictionary.txt.
-
-@build_1084_li
-Run src/installer/buildRelease to find and fix formatting errors.
-
-@build_1085_li
-Verify the formatting using build docs and build javadoc.
-
-@build_1086_li
-Submit patches as .patch files (compressed if big). To create a patch using Eclipse, use Team / Create Patch.
-
-@build_1087_p
- For legal reasons, patches need to be public in the form of an email to the group, or in the form of an issue report or attachment. Significant contributions need to include the following statement:
-
-@build_1088_p
- "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http://h2database.com/html/license.html)."
-
-@build_1089_h2
-Reporting Problems or Requests
-
-@build_1090_p
- Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request:
-
-@build_1091_li
-For bug reports, please provide a short, self-contained, correct (compilable) example of the problem.
-
-@build_1092_li
-Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch.
-
-@build_1093_li
-Before posting problems, check the FAQ and do a Google search.
-
-@build_1094_li
-When you get an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s).
-
-@build_1095_li
-When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but such that the problem can still be reproduced. As a template, use: HelloWorld.java. Methods that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method.
-
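As an illustration only (not part of the original guidelines; the table and statements below are placeholders), such a self-contained test case might look like this:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class HelloWorld {
        public static void main(String[] args) throws Exception {
            // uses an in-memory database so no files are left behind
            Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
            Statement stat = conn.createStatement();
            stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
            stat.execute("INSERT INTO TEST VALUES(1, 'Hello')");
            // ... the statements that reproduce the problem go here ...
            conn.close();
        }
    }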
-@build_1096_li
-For large attachments, use a public temporary storage such as Rapidshare.
-
-@build_1097_li
-Google Group versus issue tracking: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). Please note that only a few people monitor the issue tracking system.
-
-@build_1098_li
-For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX:+HeapDumpOnOutOfMemoryError (to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT).
-
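As a sketch (the jar name yourApp.jar is a placeholder), the JVM can be started with this option as follows; on an OutOfMemoryError it then writes a heap dump file (java_pid<pid>.hprof) that MAT can open:

    java -XX:+HeapDumpOnOutOfMemoryError -jar yourApp.jar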
-@build_1099_li
-It may take a few days to get an answer. Please do not double post.
-
-@build_1100_h2
-Automated Build
-
-@build_1101_p
- The build process is automated and runs regularly. It includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild. The latest results are available here:
-
-@build_1102_a
-Test Output
-
-@build_1103_a
-Code Coverage Summary
-
-@build_1104_a
-Code Coverage Details (download, 1.3 MB)
-
-@build_1105_a
-Build Newsfeed
-
-@build_1106_a
-Latest Jar File (download, 1 MB)
-
-@build_1107_h2
-Generating Railroad Diagrams
-
-@build_1108_p
- The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows:
-
-@build_1109_li
-The BNF parser (org.h2.bnf.Bnf) reads and parses the BNF from the file help.csv.
-
-@build_1110_li
-The page parser (org.h2.server.web.PageParser) reads the template HTML file and fills in the diagrams.
-
-@build_1111_li
-The rail images (one straight, four junctions, two turns) are generated using a simple Java application.
-
-@build_1112_p
- To generate railroad diagrams for other grammars, see the package org.h2.jcr. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification.
-
-@changelog_1000_h1
-Change Log
-
-@changelog_1001_h2
-Next Version (unreleased)
-
-@changelog_1002_li
--
-
-@changelog_1003_h2
-Version 1.4.187 Beta (2015-04-10)
-
-@changelog_1004_li
-MVStore: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads.
-
-@changelog_1005_li
-Results with CLOB or BLOB data are no longer reused.
-
-@changelog_1006_li
-References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a long time.
-
-@changelog_1007_li
-MVStore: when committing a session that removed LOB values, changes were flushed unnecessarily.
-
-@changelog_1008_li
-Issue 610: possible integer overflow in WriteBuffer.grow().
-
-@changelog_1009_li
-Issue 609: the spatial index did not support NULL (ClassCastException).
-
-@changelog_1010_li
-MVStore: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database.
-
-@changelog_1011_li
-MVStore: updates that affected many rows were slow in some cases if there was a secondary index.
-
-@changelog_1012_li
-Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS".
-
-@changelog_1013_li
-Issue 603: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]".
-
-@changelog_1014_li
-When using the MVStore, running a SQL script generated by the Recover tool from a PageStore file failed with a strange error message (NullPointerException); now a clear error message is shown.
-
-@changelog_1015_li
-Issue 605: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init.
-
-@changelog_1016_li
-Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example: "select * from a as x, b as x".
-
-@changelog_1017_li
-The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema.
-
-@changelog_1018_li
-Issue 599: the condition "in(x, y)" could not be used in the select list when using "group by".
-
-@changelog_1019_li
-The LIRS cache could grow larger than the allocated memory.
-
-@changelog_1020_li
-A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene.
-
-@changelog_1021_li
-MVStore: use RandomAccessFile file system if the file name starts with "file:".
-
-@changelog_1022_li
-Allow DATEADD to take a long value for count when manipulating milliseconds.
-
-@changelog_1023_li
-When using MV_STORE=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be.
-
-@changelog_1024_li
-Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD=TRUE could throw an exception.
-
-@changelog_1025_li
-Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs.
-
-@changelog_1026_li
-Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles.
-
-@changelog_1027_li
-Fix bug in "jdbc:h2:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB.
-
-@changelog_1028_h2
-Version 1.4.186 Beta (2015-03-02)
-
-@changelog_1029_li
-The Servlet API 3.0.1 is now used, instead of 2.4.
-
-@changelog_1030_li
-MVStore: old chunks no longer removed in append-only mode.
-
-@changelog_1031_li
-MVStore: the cache for page references could grow far too big, resulting in out of memory in some cases.
-
-@changelog_1032_li
-MVStore: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily.
-
-@changelog_1033_li
-MVStore: the maximum cache size was artificially limited to 2 GB (due to an integer overflow).
-
-@changelog_1034_li
-MVStore / TransactionStore: concurrent updates could result in a "Too many open transactions" exception.
-
-@changelog_1035_li
-StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name.
-
-@changelog_1036_li
-MVStore: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit).
-
-@changelog_1037_li
-The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, fewer references are needed.
-
-@changelog_1038_li
-Tables without columns didn't work. (The use case for such tables is testing.)
-
-@changelog_1039_li
-The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration.
-
-@changelog_1040_li
-Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file.
-
-@changelog_1041_li
-In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example: select * from dual join(select x from dual) on 1=1
-
-@changelog_1042_li
-Issue 598: parser fails on timestamp "24:00:00.1234" - prevent the creation of out-of-range time values.
-
-@changelog_1043_li
-Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz.
-
-@changelog_1044_li
-Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred).
-
-@changelog_1045_li
-PostgreSQL compatibility: generate_series (as an alias for system_range). Patch by litailang.
-
-@changelog_1046_li
-Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel.
-
-@changelog_1047_h2
-Version 1.4.185 Beta (2015-01-16)
-
-@changelog_1048_li
-In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example: select 0 as x from system_range(1, 2) d group by d.x;
-
-@changelog_1049_li
-New connection setting "REUSE_SPACE" (default: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This makes it possible to roll back to a previous state of the database by truncating the database file.
-
-@changelog_1050_li
-Issue 587: MVStore: concurrent compaction and store operations could result in an IllegalStateException.
-
-@changelog_1051_li
-Issue 594: Profiler.copyInThread does not work properly.
-
-@changelog_1052_li
-Script tool: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage).
-
-@changelog_1053_li
-Script tool: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen.
-
-@changelog_1054_li
-Fix bug in PageStore#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov.
-
-@changelog_1055_li
-Issue 552: Implement BIT_AND and BIT_OR aggregate functions.
-
-@changelog_1056_h2
-Version 1.4.184 Beta (2014-12-19)
-
-@changelog_1057_li
-In version 1.4.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. This bug was introduced by supporting "rownum" in views and derived tables.
-
-@changelog_1058_li
-MVStore: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison.
-
-@changelog_1059_li
-Reading from a StreamStore now throws an IOException if the underlying data doesn't exist.
-
-@changelog_1060_li
-MVStore: if there is an exception while saving, the store is now immediately closed in all cases.
-
-@changelog_1061_li
-MVStore: the dump tool could go into an endless loop for some files.
-
-@changelog_1062_li
-MVStore: recovery for a database with many CLOB or BLOB entries is now much faster.
-
-@changelog_1063_li
-Group by with a quoted select column name alias didn't work. Example: select 1 "a" from dual group by "a"
-
-@changelog_1064_li
-Auto-server mode: the host name is now stored in the .lock.db file.
-
-@changelog_1065_h2
-Version 1.4.183 Beta (2014-12-13)
-
-@changelog_1066_li
-MVStore: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data.
-
-@changelog_1067_li
-The built-in functions "power" and "radians" now always return a double.
-
-@changelog_1068_li
-Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id = 1
-
-@changelog_1069_li
-MVStore: the Recover tool can now deal with more types of corruption in the file.
-
-@changelog_1070_li
-MVStore: the TransactionStore now first needs to be initialized before it can be used.
-
-@changelog_1071_li
-Views and derived tables with equality and range conditions on the same columns did not work properly. Example: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x = 1
-
-@changelog_1072_li
-The PAGE_SIZE setting in the database URL is now also used for the MVStore.
-
-@changelog_1073_li
-MVStore: the default page split size for persistent stores is now 4096 (previously it was 16 KB). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version).
-
-@changelog_1074_li
-With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work.
-
-@changelog_1075_li
-MVStore: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwritten, even if the reference counting algorithm does not work properly.
-
-@changelog_1076_li
-In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work.
-
-@changelog_1077_li
-In the multi-threaded mode, database metadata operations sometimes did not work if the schema was changed at the same time (for example, if tables were dropped).
-
-@changelog_1078_li
-Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode).
-
-@changelog_1079_li
-The MVStoreTool could throw an IllegalArgumentException.
-
-@changelog_1080_li
-Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem.
-
-@changelog_1081_li
-H2 Console: the built-in web server did not work properly if an unknown file was requested.
-
-@changelog_1082_li
-MVStore: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately.
-
-@changelog_1083_li
-MVStore: support for concurrent reads and writes is now enabled by default.
-
-@changelog_1084_li
-Server mode: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot.
-
-@changelog_1085_li
-H2 Console and server mode: SSL is now disabled and TLS is used to protect against the POODLE SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man-in-the-middle attacks.
-
-@changelog_1086_li
-MVStore: the R-tree did not correctly measure the memory usage.
-
-@changelog_1087_li
-MVStore: compacting a store with an R-tree did not always work.
-
-@changelog_1088_li
-Issue 581: When running in LOCK_MODE=0, JdbcDatabaseMetaData#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false
-
-@changelog_1089_li
-Fix bug which could generate deadlocks when multiple connections accessed the same table.
-
-@changelog_1090_li
-Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command
-
-@changelog_1091_li
-Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations
-
-@changelog_1092_li
-Fix "USE schema" command for MySQL compatibility, patch by mfulton
-
-@changelog_1093_li
-Parse and ignore the ROW_FORMAT=DYNAMIC MySQL syntax, patch by mfulton
-
-@changelog_1094_h2
-Version 1.4.182 Beta (2014-10-17)
-
-@changelog_1095_li
-MVStore: improved error messages and logging; improved behavior if there is an error when serializing objects.
-
-@changelog_1096_li
-OSGi: the MVStore packages are now exported.
-
-@changelog_1097_li
-With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table.
-
-@changelog_1098_li
-When using the multi-threaded option, the exception "Unexpected code path" could be thrown, especially if the option "analyze_auto" was set to a low value.
-
-@changelog_1099_li
-In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed.
-
-@changelog_1100_li
-DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available.
-
-@changelog_1101_li
-Issue 584: the error message for a wrong sequence definition was wrong.
-
-@changelog_1102_li
-CSV tool: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator.
-
-@changelog_1103_li
-Descending indexes on MVStore tables did not work properly.
-
-@changelog_1104_li
-Issue 579: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore.
-
-@changelog_1105_li
-Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x.
-
-@changelog_1106_li
-The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns.
-
-@changelog_1107_li
-Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes.
-
-@changelog_1108_li
-Issue 572: MySQL compatibility for "order by" in update statements.
-
-@changelog_1109_li
-The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time'}", or "{ts 'timestamp'}", or "{d 'date'}", then both the client and the server need to be upgraded to version 1.4.181 or later.
-
-@changelog_1110_h2
-Version 1.4.181 Beta (2014-08-06)
-
-@changelog_1111_li
-Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch!
-
-@changelog_1112_li
-Writing to the trace file is now faster, especially with the debug level.
-
-@changelog_1113_li
-The database option "defrag_always=true" did not work with the MVStore.
-
-@changelog_1114_li
-The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later.
-
-@changelog_1115_li
-File system abstraction: support replacing existing files using move (currently not for Windows).
-
-@changelog_1116_li
-The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental.
-
-@changelog_1117_li
-The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress; feedback is welcome!
-
-@changelog_1118_li
-Change default value of PAGE_SIZE from 2048 to 4096 to more closely match the block size of most file systems (PageStore only; the MVStore already used 4096).
-
-@changelog_1119_li
-Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out-of-the-box experience for people with more powerful machines.
-
-@changelog_1120_li
-Handle tabs like 4 spaces in web console, patch by Martin Grajcar.
-
-@changelog_1121_li
-Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1.
-
-@changelog_1122_h2
-Version 1.4.180 Beta (2014-07-13)
-
-@changelog_1123_li
-MVStore: the store is now compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress.
-
-@changelog_1124_li
-Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database.
-
-@changelog_1125_li
-MVStore: an IndexOutOfBoundsException could sometimes occur in MVMap.openVersion when concurrently accessing the store.
-
-@changelog_1126_li
-The LIRS cache now re-sizes the internal hash map if needed.
-
-@changelog_1127_li
-Optionally persist session history in the H2 console. (patch from Martin Grajcar)
-
-@changelog_1128_li
-Add client-info property to get the number of servers currently in the cluster and which servers are available. (patch from Nikolaj Fogh)
-
-@changelog_1129_li
-Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth).
-
-@changelog_1130_li
-Issue 567: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation.
-
-@changelog_1131_h2
-Version 1.4.179 Beta (2014-06-23)
-
-@changelog_1132_li
-The license was changed to MPL 2.0 (from 1.0) and EPL 1.0.
-
-@changelog_1133_li
-Issue 565: MVStore: concurrently adding LOB objects (with MULTI_THREADED option) resulted in a NullPointerException.
-
-@changelog_1134_li
-MVStore: reduced dependencies to other H2 classes.
-
-@changelog_1135_li
-There was a way to prevent a database from being re-opened, by creating a column constraint that references a table with a higher id, for example with "check" constraints that contain queries. This is now detected, and creating the table is prohibited. In future versions of H2, most likely creating references to other tables will no longer be supported because of such problems.
-
-@changelog_1136_li
-MVStore: descending indexes with "nulls first" did not work as expected (null was ordered last).
-
-@changelog_1137_li
-Large result sets now always create temporary tables instead of temporary files.
-
-@changelog_1138_li
-When using the PageStore, opening a database failed in some cases with a NullPointerException if temporary tables were used (explicitly, or implicitly when using large result sets).
-
-@changelog_1139_li
-If a database file in the PageStore file format exists, this file and this mode are now used, even if the database URL does not contain "MV_STORE=FALSE". If an MVStore file exists, it is used.
-
-@changelog_1140_li
-Databases created with version 1.3.175 and earlier that contained foreign keys in combination with multi-column indexes could not be opened in some cases. This was due to a bugfix in version 1.3.176: Referential integrity constraints sometimes used the wrong index.
-
-@changelog_1141_li
-MVStore: the ObjectDataType comparison method was incorrect if one key was Serializable and the other was of a common class.
-
-@changelog_1142_li
-Recursive queries with many result rows (more than the setting "max_memory_rows") did not work correctly.
-
-@changelog_1143_li
-The license has changed to MPL 2.0 + EPL 1.0.
-
-@changelog_1144_li
-MVStore: temporary tables from result sets could survive re-opening a database, which could result in a ClassCastException.
-
-@changelog_1145_li
-Issue 566: MVStore: unique indexes that were created later on did not work correctly if there were over 5000 rows in the table. Existing databases need to be re-created (at least the broken indexes need to be rebuilt).
-
-@changelog_1146_li
-MVStore: creating secondary indexes on large tables resulted in missing rows in the index.
-
-@changelog_1147_li
-Metadata: the password of linked tables is now only visible for admin users.
-
-@changelog_1148_li
-For Windows, database URLs of the form "jdbc:h2:/test" were considered relative and did not work unless the system property "h2.implicitRelativePath" was used.
-
-@changelog_1149_li
-Windows: using a base directory of "C:/" and similar did not work as expected.
-
-@changelog_1150_li
-Follow JDBC specification on Procedures MetaData, use P0 as return type of procedure.
-
-@changelog_1151_li
-Issue 531: IDENTITY ignored for added column.
-
-@changelog_1152_li
-FileSystem: improve exception throwing compatibility with JDK
-
-@changelog_1153_li
-Spatial Index: adjust costs so we do not use the spatial index if the query does not contain an intersects operator.
-
-@changelog_1154_li
-Fix multi-threaded deadlock when using a View that includes a TableFunction.
-
-@changelog_1155_li
-Fix bug in dividing very-small BigDecimal numbers.
-
-@changelog_1156_h2
-Version 1.4.178 Beta (2014-05-02)
-
-@changelog_1157_li
-Issue 559: Make dependency on org.osgi.service.jdbc optional.
-
-@changelog_1158_li
-Improve error message when the user specifies an unsupported combination of database settings.
-
-@changelog_1159_li
-MVStore: in the multi-threaded mode, NullPointerException and other exceptions could occur.
-
-@changelog_1160_li
-MVStore: some database files could not be compacted due to a bug in the bookkeeping of the fill rate. Also, database files were compacted quite slowly. This has been improved, but more changes in this area are expected.
-
-@changelog_1161_li
-MVStore: support for volatile maps (that don't store changes).
-
-@changelog_1162_li
-MVStore mode: in-memory databases now also use the MVStore.
-
-@changelog_1163_li
-In server mode, appending ";autocommit=false" to the database URL was working, but the return value of Connection.getAutoCommit() was wrong.
-
-@changelog_1164_li
-Issue 561: OSGi: the import package declaration of org.h2 excluded version 1.4.
-
-@changelog_1165_li
-Issue 558: with the MVStore, a NullPointerException could occur when using LOBs at session commit (LobStorageMap.removeLob).
-
-@changelog_1166_li
-Remove the "h2.MAX_MEMORY_ROWS_DISTINCT" system property to reduce confusion. We already have the MAX_MEMORY_ROWS setting which does a very similar thing, and is better documented.
-
-@changelog_1167_li
-Issue 554: Web Console in an IFrame was not fully supported.
-
-@changelog_1168_h2
-Version 1.4.177 Beta (2014-04-12)
-
-@changelog_1169_li
-By default, the MV_STORE option is enabled, so it is using the new MVStore storage. The MVCC setting is by default set to the same values as the MV_STORE setting, so it is also enabled by default. For testing, both settings can be disabled by appending ";MV_STORE=FALSE" and/or ";MVCC=FALSE" to the database URL.
-
-@changelog_1170_li
-The file locking method 'serialized' is no longer supported. This mode might return in a future version; however, this is not clear right now. A new implementation and new tests would be needed.
-
-@changelog_1171_li
-Enable the new storage format for dates (system property "h2.storeLocalTime"). For the MVStore mode, this is always enabled; with version 1.4 it is also enabled in the PageStore mode.
-
-@changelog_1172_li
-Implicit relative paths are disabled (system property "h2.implicitRelativePath"), so that the database URL jdbc:h2:test now needs to be written as jdbc:h2:./test.
-
-@changelog_1173_li
-"select ... fetch first 1 row only" is supported with the regular mode. This was disabled so far because "fetch" and "offset" are now keywords. See also Mode.supportOffsetFetch.
-
-@changelog_1174_li
-Byte arrays are now sorted in unsigned mode (x'99' is larger than x'09'). (System property "h2.sortBinaryUnsigned", Mode.binaryUnsigned, setting "binary_collation").
-
-@changelog_1175_li
-Csv.getInstance will be removed in future versions of 1.4. Use the public constructor instead.
-
-@changelog_1176_li
-Remove support for the limited old-style outer join syntax using "(+)". Use "outer join" instead. System property "h2.oldStyleOuterJoin".
-
-@changelog_1177_li
-Support the data type "DATETIME2" as an alias for "DATETIME", for MS SQL Server compatibility.
-
-@changelog_1178_li
-Add Oracle-compatible TRANSLATE function, patch by Eric Chatellier.
-
-@changelog_1179_h2
-Version 1.3.176 (2014-04-05)
-
-@changelog_1180_li
-The file locking method 'serialized' is no longer documented, as it will not be available in version 1.4.
-
-@changelog_1181_li
-The static method Csv.getInstance() was removed. Use the public constructor instead.
-
-@changelog_1182_li
-The default user name for the Script, RunScript, Shell, and CreateCluster tools is no longer "sa" but an empty string.
-
-@changelog_1183_li
-The stack trace of the exception "The object is already closed" is no longer logged by default.
-
-@changelog_1184_li
-If a value of a result set was itself a result set, the result could only be read once.
-
-@changelog_1185_li
-Column constraints are also visible in views (patch from Nicolas Fortin for H2GIS).
-
-@changelog_1186_li
-Granting an additional right to a role that already had a right for that table was not working.
-
-@changelog_1187_li
-Spatial index: a few bugs have been fixed (using spatial constraints in views, transferring geometry objects over TCP/IP, the returned geometry object is copied when needed).
-
-@changelog_1188_li
-Issue 551: the datatype documentation was incorrect (found by Bernd Eckenfels).
-
-@changelog_1189_li
-Issue 368: ON DUPLICATE KEY UPDATE did not work for multi-row inserts. Test case from Angus Macdonald.
-
-@changelog_1190_li
-OSGi: the package javax.tools is now imported (as an optional import).
-
-@changelog_1191_li
-H2 Console: auto-complete is now disabled by default, but there is a hot-key (Ctrl+Space).
-
-@changelog_1192_li
-H2 Console: auto-complete did not work with multi-line statements.
-
-@changelog_1193_li
-CLOB and BLOB data was not immediately removed after a rollback.
-
-@changelog_1194_li
-There is a new Aggregate API that supports the internal H2 data types (GEOMETRY for example). Thanks a lot to Nicolas Fortin for the patch!
-
-@changelog_1195_li
-Referential integrity constraints sometimes used the wrong index, such that updating a row in the referenced table incorrectly failed with a constraint violation.
-
-@changelog_1196_li
-The Polish translation was completed and corrected by Wojtek Jurczyk. Thanks a lot!
-
-@changelog_1197_li
-Issue 545: Unnecessary duplicate code was removed.
-
-@changelog_1198_li
-The profiler tool can now process files with full thread dumps.
-
-@changelog_1199_li
-MVStore: the file format was changed slightly.
-
-@changelog_1200_li
-MVStore mode: the CLOB and BLOB storage was re-implemented and is now much faster than with the PageStore (which is still the default storage).
-
-@changelog_1201_li
-MVStore mode: creating indexes is now much faster (in many cases faster than with the default PageStore).
-
-@changelog_1202_li
-Various bugs in the MVStore storage have been fixed, including a bug in the R-tree implementation. The database could get corrupt if there were transient IO exceptions while storing.
-
-@changelog_1203_li
-The method org.h2.expression.Function.getCost could throw a NullPointerException.
-
-@changelog_1204_li
-Storing LOBs in separate files (outside of the main database file) is no longer supported for new databases.
-
-@changelog_1205_li
-Lucene 2 is no longer supported.
-
-@changelog_1206_li
-Fix bug in calculating default MIN and MAX values for SEQUENCE.
-
-@changelog_1207_li
-Fix bug in performing IN queries with multiple values when IGNORECASE=TRUE
-
-@changelog_1208_li
-Add entry-point to org.h2.tools.Shell so it can be called from inside an application. Patch by Thomas Gillet.
-
-@changelog_1209_li
-Fix bug that prevented the PgServer from being stopped and started multiple times.
-
-@changelog_1210_li
-Support some more DDL syntax for MySQL, patch from Peter Jentsch.
-
-@changelog_1211_li
-Issue 548: TO_CHAR does not format MM and DD correctly when the month or day of the month is 1 digit, patch from "the.tucc"
-
-@changelog_1212_li
-Fix bug in varargs support in ALIASes, patch from Nicolas Fortin
-
-@cheatSheet_1000_h1
-H2 Database Engine Cheat Sheet
-
-@cheatSheet_1001_h2
-Using H2
-
-@cheatSheet_1002_a
-H2
-
-@cheatSheet_1003_li
- is open source, free to use and distribute.
-
-@cheatSheet_1004_a
-Download
-
-@cheatSheet_1005_li
-: jar, installer (Windows), zip.
-
-@cheatSheet_1006_li
-To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar, h2.bat, or h2.sh.
-
-@cheatSheet_1007_a
-A new database is automatically created
-
-@cheatSheet_1008_a
-by default
-
-@cheatSheet_1009_li
-.
-
-@cheatSheet_1010_a
-Closing the last connection closes the database
-
-@cheatSheet_1011_li
-.
-
-@cheatSheet_1012_h2
-Documentation
-
-@cheatSheet_1013_p
- Reference: SQL grammar, functions, data types, tools, API
-
-@cheatSheet_1014_a
-Features
-
-@cheatSheet_1015_p
-: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions
-
-@cheatSheet_1016_a
-Database URLs
-
-@cheatSheet_1017_a
-Embedded
-
-@cheatSheet_1018_code
-jdbc:h2:~/test
-
-@cheatSheet_1019_p
- 'test' in the user home directory
-
-@cheatSheet_1020_code
-jdbc:h2:/data/test
-
-@cheatSheet_1021_p
- 'test' in the directory /data
-
-@cheatSheet_1022_code
-jdbc:h2:test
-
-@cheatSheet_1023_p
- in the current(!) working directory
-
-@cheatSheet_1024_a
-In-Memory
-
-@cheatSheet_1025_code
-jdbc:h2:mem:test
-
-@cheatSheet_1026_p
- multiple connections in one process
-
-@cheatSheet_1027_code
-jdbc:h2:mem:
-
-@cheatSheet_1028_p
- unnamed private; one connection
-
-@cheatSheet_1029_a
-Server Mode
-
-@cheatSheet_1030_code
-jdbc:h2:tcp://localhost/~/test
-
-@cheatSheet_1031_p
- user home dir
-
-@cheatSheet_1032_code
-jdbc:h2:tcp://localhost//data/test
-
-@cheatSheet_1033_p
- absolute dir
-
-@cheatSheet_1034_a
-Server start
-
-@cheatSheet_1035_p
-:java -cp *.jar org.h2.tools.Server
-
-@cheatSheet_1036_a
-Settings
-
-@cheatSheet_1037_code
-jdbc:h2:..;MODE=MySQL
-
-@cheatSheet_1038_a
-compatibility (or HSQLDB,...)
-
-@cheatSheet_1039_code
-jdbc:h2:..;TRACE_LEVEL_FILE=3
-
-@cheatSheet_1040_a
-log to *.trace.db
-
-@cheatSheet_1041_a
-Using the JDBC API
-
-@cheatSheet_1042_a
-Connection Pool
-
-@cheatSheet_1043_a
-Maven 2
-
-@cheatSheet_1044_a
-Hibernate
-
-@cheatSheet_1045_p
- hibernate.cfg.xml (or use the HSQLDialect):
-
-@cheatSheet_1046_a
-TopLink and Glassfish
-
-@cheatSheet_1047_p
- Datasource class: org.h2.jdbcx.JdbcDataSource
-
-@cheatSheet_1048_code
-oracle.toplink.essentials.platform.
-
-@cheatSheet_1049_code
-database.H2Platform
-
-@download_1000_h1
-Downloads
-
-@download_1001_h3
-Version 1.4.187 (2015-04-10), Beta
-
-@download_1002_a
-Windows Installer
-
-@download_1003_a
-Platform-Independent Zip
-
-@download_1004_h3
-Version 1.3.176 (2014-04-05), Last Stable
-
-@download_1005_a
-Windows Installer
-
-@download_1006_a
-Platform-Independent Zip
-
-@download_1007_h3
-Download Mirror and Older Versions
-
-@download_1008_a
-Platform-Independent Zip
-
-@download_1009_h3
-Jar File
-
-@download_1010_a
-Maven.org
-
-@download_1011_a
-Sourceforge.net
-
-@download_1012_a
-Latest Automated Build (not released)
-
-@download_1013_h3
-Maven (Binary, Javadoc, and Source)
-
-@download_1014_a
-Binary
-
-@download_1015_a
-Javadoc
-
-@download_1016_a
-Sources
-
-@download_1017_h3
-Database Upgrade Helper File
-
-@download_1018_a
-Upgrade database from 1.1 to the current version
-
-@download_1019_h3
-Subversion Source Repository
-
-@download_1020_a
-Google Code
-
-@download_1021_p
- For details about changes, see the Change Log.
-
-@download_1022_h3
-News and Project Information
-
-@download_1023_a
-Atom Feed
-
-@download_1024_a
-RSS Feed
-
-@download_1025_a
-DOAP File
-
-@download_1026_p
- (what is this)
-
-@faq_1000_h1
-Frequently Asked Questions
-
-@faq_1001_a
- I Have a Problem or Feature Request
-
-@faq_1002_a
- Are there Known Bugs? When is the Next Release?
-
-@faq_1003_a
- Is this Database Engine Open Source?
-
-@faq_1004_a
- Is Commercial Support Available?
-
-@faq_1005_a
- How to Create a New Database?
-
-@faq_1006_a
- How to Connect to a Database?
-
-@faq_1007_a
- Where are the Database Files Stored?
-
-@faq_1008_a
- What is the Size Limit (Maximum Size) of a Database?
-
-@faq_1009_a
- Is it Reliable?
-
-@faq_1010_a
- Why is Opening my Database Slow?
-
-@faq_1011_a
- My Query is Slow
-
-@faq_1012_a
- H2 is Very Slow
-
-@faq_1013_a
- Column Names are Incorrect?
-
-@faq_1014_a
- Float is Double?
-
-@faq_1015_a
- Is the GCJ Version Stable? Faster?
-
-@faq_1016_a
- How to Translate this Project?
-
-@faq_1017_a
- How to Contribute to this Project?
-
-@faq_1018_h3
-I Have a Problem or Feature Request
-
-@faq_1019_p
- Please read the support checklist.
-
-@faq_1020_h3
-Are there Known Bugs? When is the Next Release?
-
-@faq_1021_p
- Usually, bugs get fixed as they are found. There is a release every few weeks. Here is the list of known and confirmed issues:
-
-@faq_1022_li
-When opening a database file in a timezone that has different daylight saving rules: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as within the USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. This problem does not occur when using the system property "h2.storeLocalTime" (however such database files are not compatible with older versions of H2).
-
-@faq_1023_li
-Apache Harmony: there seems to be a bug in Harmony that affects H2. See HARMONY-6505.
-
-@faq_1024_li
-Tomcat and Glassfish 3 set most static fields (final or non-final) to null when unloading a web application. This can cause a NullPointerException in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar file in a shared lib directory (common/lib).
-
-@faq_1025_li
-Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3.
-
-@faq_1026_li
-When using Install4j before 4.1.4 on Linux and enabling pack200, the h2*.jar becomes corrupted by the install process, causing application failure. A workaround is to add an empty file h2*.jar.nopack next to the h2*.jar file. This problem is solved in Install4j 4.1.4.
-
-@faq_1027_p
- For a complete list, see Open Issues.
-
-@faq_1028_h3
-Is this Database Engine Open Source?
-
-@faq_1029_p
- Yes. It is free to use and distribute, and the source code is included. See also under license.
-
-@faq_1030_h3
-Is Commercial Support Available?
-
-@faq_1031_p
- Yes, commercial support is available, see Commercial Support.
-
-@faq_1032_h3
-How to Create a New Database?
-
-@faq_1033_p
- By default, a new database is automatically created if it does not yet exist. See Creating New Databases.
-
-@faq_1034_h3
-How to Connect to a Database?
-
-@faq_1035_p
- The database driver is org.h2.Driver, and the database URL starts with jdbc:h2:. To connect to a database using JDBC, use the following code:
-
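A minimal sketch of such code (the user name "sa" with an empty password matches the H2 defaults; the exact snippet in the original documentation may differ slightly):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class Connect {
        public static void main(String[] args) throws Exception {
            // opens (and creates, if needed) the database 'test' in the user home directory
            Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
            // ... use the connection ...
            conn.close();
        }
    }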
-@faq_1036_h3
-Where are the Database Files Stored?
-
-@faq_1037_p
- When using database URLs like jdbc:h2:~/test, the database is stored in the user directory. For Windows, this is usually C:\Documents and Settings\<userName> or C:\Users\<userName>. If the base directory is not set (as in jdbc:h2:test), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc:h2:file:data/sample, the database is stored in the directory data (relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). Example: jdbc:h2:file:C:/data/test
-
-@faq_1038_h3
-What is the Size Limit (Maximum Size) of a Database?
-
-@faq_1039_p
- See Limits and Limitations.
-
-@faq_1040_h3
-Is it Reliable?
-
-@faq_1041_p
- That is not easy to say. It is still a relatively new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous; they are only supported for situations where performance is more important than reliability. Those dangerous features are:
-
-@faq_1042_li
-Disabling the transaction log or FileDescriptor.sync() using LOG=0 or LOG=1.
-
-@faq_1043_li
-Using the transaction isolation level READ_UNCOMMITTED (LOCK_MODE 0) while at the same time using multiple connections.
-
-@faq_1044_li
-Disabling database file protection (setting FILE_LOCK to NO in the database URL).
-
-@faq_1045_li
-Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE.
-
-@faq_1046_p
- In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a database.
-
-@faq_1047_p
- This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are:
-
-@faq_1048_li
-Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7
-
-@faq_1049_li
-The features AUTO_SERVER and AUTO_RECONNECT.
-
-@faq_1050_li
-Cluster mode, 2-phase commit, savepoints.
-
-@faq_1051_li
-24/7 operation.
-
-@faq_1052_li
-Fulltext search.
-
-@faq_1053_li
-Operations on LOBs over 2 GB.
-
-@faq_1054_li
-The optimizer may not always select the best plan.
-
-@faq_1055_li
-Using the ICU4J collator.
-
-@faq_1056_p
- Areas considered experimental are:
-
-@faq_1057_li
-The PostgreSQL server
-
-@faq_1058_li
-Clustering (there are cases where transaction isolation can be broken due to timing issues, for example one session overtaking another session).
-
-@faq_1059_li
-Multi-threading within the engine using SET MULTI_THREADED=1.
-
-@faq_1060_li
-Compatibility modes for other databases (only some features are implemented).
-
-@faq_1061_li
-The soft reference cache (CACHE_TYPE=SOFT_LRU). It might not improve performance, and out of memory issues have been reported.
-
-@faq_1062_p
- Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations.
-
-@faq_1063_h3
-Why is Opening my Database Slow?
-
-@faq_1064_p
- To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group.
-
-@faq_1065_p
- Other possible reasons are: the database is very big (many GB), or contains linked tables that are slow to open.
-
-@faq_1066_h3
-My Query is Slow
-
-@faq_1067_p
- Slow SELECT (or DELETE, UPDATE, MERGE) statements can have multiple reasons. Follow this checklist (a short sketch follows the list):
-
-@faq_1068_li
-Run ANALYZE (see documentation for details).
-
-@faq_1069_li
-Run the query with EXPLAIN and check if indexes are used (see documentation for details).
-
-@faq_1070_li
-If required, create additional indexes and try again using ANALYZE and EXPLAIN.
-
-@faq_1071_li
-If that doesn't help, please report the problem.
-
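A minimal sketch of the first two steps (the connection and the query are placeholders; in H2, EXPLAIN returns the plan as a single-column result set):

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class CheckPlan {
        // prints the execution plan of a (placeholder) query
        static void explain(Connection conn) throws Exception {
            Statement stat = conn.createStatement();
            stat.execute("ANALYZE"); // refresh the selectivity statistics first
            ResultSet rs = stat.executeQuery("EXPLAIN SELECT * FROM TEST WHERE ID = 1");
            if (rs.next()) {
                System.out.println(rs.getString(1)); // the plan shows which indexes are used
            }
        }
    }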
-@faq_1072_h3
-H2 is Very Slow
-
-@faq_1073_p
- By default, H2 closes the database when the last connection is closed. If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning.
-
-@faq_1074_h3
-Column Names are Incorrect?
-
-@faq_1075_p
- For the query SELECT ID AS X FROM TEST the method ResultSetMetaData.getColumnName() returns ID, but I expect it to return X. What's wrong?
-
-@faq_1076_p
- This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other databases don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME=TRUE to the database URL.
-
-@faq_1077_p
- This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names.
-
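A short sketch of the difference (assuming a table TEST with an ID column; conn is an open connection):

    import java.sql.Connection;
    import java.sql.ResultSetMetaData;
    import java.sql.Statement;

    public class LabelVsName {
        static void show(Connection conn) throws Exception {
            Statement stat = conn.createStatement();
            ResultSetMetaData meta =
                stat.executeQuery("SELECT ID AS X FROM TEST").getMetaData();
            System.out.println(meta.getColumnName(1));  // "ID" - the underlying column
            System.out.println(meta.getColumnLabel(1)); // "X" - the alias
        }
    }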
-@faq_1078_h3
-Float is Double?
-
-@faq_1079_p
- For a table defined as CREATE TABLE TEST(X FLOAT) the method ResultSet.getObject() returns a java.lang.Double, but I expect it to return a java.lang.Float. What's wrong?
-
-@faq_1080_p
- This is not a bug. According to the JDBC specification, the JDBC data type FLOAT is equivalent to DOUBLE, and both are mapped to java.lang.Double. See also Mapping SQL and Java Types - 8.3.10 FLOAT.
-
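A short sketch illustrating the mapping (the table and value are placeholders):

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class FloatIsDouble {
        static void show(Connection conn) throws Exception {
            Statement stat = conn.createStatement();
            stat.execute("CREATE TABLE TEST(X FLOAT)");
            stat.execute("INSERT INTO TEST VALUES(1.5)");
            ResultSet rs = stat.executeQuery("SELECT X FROM TEST");
            rs.next();
            System.out.println(rs.getObject(1).getClass()); // class java.lang.Double
        }
    }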
-@faq_1081_h3
-Is the GCJ Version Stable? Faster?
-
-@faq_1082_p
- The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without an error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM.
-
-@faq_1083_h3
-How to Translate this Project?
-
-@faq_1084_p
- For more information, see Build/Translating.
-
-@faq_1085_h3
-How to Contribute to this Project?
-
-@faq_1086_p
- There are various ways to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest starting with very small features that are easy to implement. Remember to provide test cases as well.
-
-@features_1000_h1
-Features
-
-@features_1001_a
- Feature List
-
-@features_1002_a
- Comparison to Other Database Engines
-
-@features_1003_a
- H2 in Use
-
-@features_1004_a
- Connection Modes
-
-@features_1005_a
- Database URL Overview
-
-@features_1006_a
- Connecting to an Embedded (Local) Database
-
-@features_1007_a
- In-Memory Databases
-
-@features_1008_a
- Database Files Encryption
-
-@features_1009_a
- Database File Locking
-
-@features_1010_a
- Opening a Database Only if it Already Exists
-
-@features_1011_a
- Closing a Database
-
-@features_1012_a
- Ignore Unknown Settings
-
-@features_1013_a
- Changing Other Settings when Opening a Connection
-
-@features_1014_a
- Custom File Access Mode
-
-@features_1015_a
- Multiple Connections
-
-@features_1016_a
- Database File Layout
-
-@features_1017_a
- Logging and Recovery
-
-@features_1018_a
- Compatibility
-
-@features_1019_a
- Auto-Reconnect
-
-@features_1020_a
- Automatic Mixed Mode
-
-@features_1021_a
- Page Size
-
-@features_1022_a
- Using the Trace Options
-
-@features_1023_a
- Using Other Logging APIs
-
-@features_1024_a
- Read Only Databases
-
-@features_1025_a
- Read Only Databases in Zip or Jar File
-
-@features_1026_a
- Computed Columns / Function Based Index
-
-@features_1027_a
- Multi-Dimensional Indexes
-
-@features_1028_a
- User-Defined Functions and Stored Procedures
-
-@features_1029_a
- Pluggable or User-Defined Tables
-
-@features_1030_a
- Triggers
-
-@features_1031_a
- Compacting a Database
-
-@features_1032_a
- Cache Settings
-
-@features_1033_h2
-Feature List
-
-@features_1034_h3
-Main Features
-
-@features_1035_li
-Very fast database engine
-
-@features_1036_li
-Open source
-
-@features_1037_li
-Written in Java
-
-@features_1038_li
-Supports standard SQL, JDBC API
-
-@features_1039_li
-Embedded and Server mode, Clustering support
-
-@features_1040_li
-Strong security features
-
-@features_1041_li
-The PostgreSQL ODBC driver can be used
-
-@features_1042_li
-Multi version concurrency
-
-@features_1043_h3
-Additional Features
-
-@features_1044_li
-Disk based or in-memory databases and tables, read-only database support, temporary tables
-
-@features_1045_li
-Transaction support (read committed), 2-phase-commit
-
-@features_1046_li
-Multiple connections, table level locking
-
-@features_1047_li
-Cost based optimizer, using a genetic algorithm for complex queries, zero-administration
-
-@features_1048_li
-Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set
-
-@features_1049_li
-Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL
-
-@features_1050_h3
-SQL Support
-
-@features_1051_li
-Support for multiple schemas, information schema
-
-@features_1052_li
-Referential integrity / foreign key constraints with cascade, check constraints
-
-@features_1053_li
-Inner and outer joins, subqueries, read only views and inline views
-
-@features_1054_li
-Triggers and Java functions / stored procedures
-
-@features_1055_li
-Many built-in functions, including XML and lossless data compression
-
-@features_1056_li
-Wide range of data types including large objects (BLOB/CLOB) and arrays
-
-@features_1057_li
-Sequence and autoincrement columns, computed columns (can be used for function based indexes)
-
-@features_1058_code
-ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP
-
-@features_1059_li
-Collation support, including support for the ICU4J library
-
-@features_1060_li
-Support for users and roles
-
-@features_1061_li
-Compatibility modes for IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL.
-
-@features_1062_h3
-Security Features
-
-@features_1063_li
-Includes a solution for the SQL injection problem
-
-@features_1064_li
-User password authentication uses SHA-256 and salt
-
-@features_1065_li
-For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; however, this applies only to the TCP server, not to the H2 Console; it also doesn't apply if you set the password in the database URL)
-
-@features_1066_li
-All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm
-
-@features_1067_li
-The remote JDBC driver supports TCP/IP connections over TLS
-
-@features_1068_li
-The built-in web server supports connections over TLS
-
-@features_1069_li
-Passwords can be sent to the database using char arrays instead of Strings
-
-@features_1070_h3
-Other Features and Tools
-
-@features_1071_li
-Small footprint (smaller than 1.5 MB), low memory requirements
-
-@features_1072_li
-Multiple index types (b-tree, tree, hash)
-
-@features_1073_li
-Support for multi-dimensional indexes
-
-@features_1074_li
-CSV (comma separated values) file support
-
-@features_1075_li
-Support for linked tables, and a built-in virtual 'range' table
-
-@features_1076_li
-Supports the EXPLAIN PLAN statement; sophisticated trace options
-
-@features_1077_li
-Database closing can be delayed or disabled to improve the performance
-
-@features_1078_li
-Web-based Console application (translated to many languages) with autocomplete
-
-@features_1079_li
-The database can generate SQL script files
-
-@features_1080_li
-Contains a recovery tool that can dump the contents of the database
-
-@features_1081_li
-Support for variables (for example to calculate running totals)
-
-@features_1082_li
-Automatic re-compilation of prepared statements
-
-@features_1083_li
-Uses a small number of database files
-
-@features_1084_li
-Uses a checksum for each record and log entry for data integrity
-
-@features_1085_li
-Well tested (high code coverage, randomized stress tests)
-
-@features_1086_h2
-Comparison to Other Database Engines
-
-@features_1087_p
- This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0.
-
-@features_1088_th
-Feature
-
-@features_1089_th
-H2
-
-@features_1090_th
-Derby
-
-@features_1091_th
-HSQLDB
-
-@features_1092_th
-MySQL
-
-@features_1093_th
-PostgreSQL
-
-@features_1094_td
-Pure Java
-
-@features_1095_td
-Yes
-
-@features_1096_td
-Yes
-
-@features_1097_td
-Yes
-
-@features_1098_td
-No
-
-@features_1099_td
-No
-
-@features_1100_td
-Embedded Mode (Java)
-
-@features_1101_td
-Yes
-
-@features_1102_td
-Yes
-
-@features_1103_td
-Yes
-
-@features_1104_td
-No
-
-@features_1105_td
-No
-
-@features_1106_td
-In-Memory Mode
-
-@features_1107_td
-Yes
-
-@features_1108_td
-Yes
-
-@features_1109_td
-Yes
-
-@features_1110_td
-No
-
-@features_1111_td
-No
-
-@features_1112_td
-Explain Plan
-
-@features_1113_td
-Yes
-
-@features_1114_td
-Yes *12
-
-@features_1115_td
-Yes
-
-@features_1116_td
-Yes
-
-@features_1117_td
-Yes
-
-@features_1118_td
-Built-in Clustering / Replication
-
-@features_1119_td
-Yes
-
-@features_1120_td
-Yes
-
-@features_1121_td
-No
-
-@features_1122_td
-Yes
-
-@features_1123_td
-Yes
-
-@features_1124_td
-Encrypted Database
-
-@features_1125_td
-Yes
-
-@features_1126_td
-Yes *10
-
-@features_1127_td
-Yes *10
-
-@features_1128_td
-No
-
-@features_1129_td
-No
-
-@features_1130_td
-Linked Tables
-
-@features_1131_td
-Yes
-
-@features_1132_td
-No
-
-@features_1133_td
-Partially *1
-
-@features_1134_td
-Partially *2
-
-@features_1135_td
-No
-
-@features_1136_td
-ODBC Driver
-
-@features_1137_td
-Yes
-
-@features_1138_td
-No
-
-@features_1139_td
-No
-
-@features_1140_td
-Yes
-
-@features_1141_td
-Yes
-
-@features_1142_td
-Fulltext Search
-
-@features_1143_td
-Yes
-
-@features_1144_td
-Yes
-
-@features_1145_td
-No
-
-@features_1146_td
-Yes
-
-@features_1147_td
-Yes
-
-@features_1148_td
-Domains (User-Defined Types)
-
-@features_1149_td
-Yes
-
-@features_1150_td
-No
-
-@features_1151_td
-Yes
-
-@features_1152_td
-Yes
-
-@features_1153_td
-Yes
-
-@features_1154_td
-Files per Database
-
-@features_1155_td
-Few
-
-@features_1156_td
-Many
-
-@features_1157_td
-Few
-
-@features_1158_td
-Many
-
-@features_1159_td
-Many
-
-@features_1160_td
-Row Level Locking
-
-@features_1161_td
-Yes *9
-
-@features_1162_td
-Yes
-
-@features_1163_td
-Yes *9
-
-@features_1164_td
-Yes
-
-@features_1165_td
-Yes
-
-@features_1166_td
-Multi Version Concurrency
-
-@features_1167_td
-Yes
-
-@features_1168_td
-No
-
-@features_1169_td
-Yes
-
-@features_1170_td
-Yes
-
-@features_1171_td
-Yes
-
-@features_1172_td
-Multi-Threaded Statement Processing
-
-@features_1173_td
-No *11
-
-@features_1174_td
-Yes
-
-@features_1175_td
-Yes
-
-@features_1176_td
-Yes
-
-@features_1177_td
-Yes
-
-@features_1178_td
-Role Based Security
-
-@features_1179_td
-Yes
-
-@features_1180_td
-Yes *3
-
-@features_1181_td
-Yes
-
-@features_1182_td
-Yes
-
-@features_1183_td
-Yes
-
-@features_1184_td
-Updatable Result Sets
-
-@features_1185_td
-Yes
-
-@features_1186_td
-Yes *7
-
-@features_1187_td
-Yes
-
-@features_1188_td
-Yes
-
-@features_1189_td
-Yes
-
-@features_1190_td
-Sequences
-
-@features_1191_td
-Yes
-
-@features_1192_td
-Yes
-
-@features_1193_td
-Yes
-
-@features_1194_td
-No
-
-@features_1195_td
-Yes
-
-@features_1196_td
-Limit and Offset
-
-@features_1197_td
-Yes
-
-@features_1198_td
-Yes *13
-
-@features_1199_td
-Yes
-
-@features_1200_td
-Yes
-
-@features_1201_td
-Yes
-
-@features_1202_td
-Window Functions
-
-@features_1203_td
-No *15
-
-@features_1204_td
-No *15
-
-@features_1205_td
-No
-
-@features_1206_td
-No
-
-@features_1207_td
-Yes
-
-@features_1208_td
-Temporary Tables
-
-@features_1209_td
-Yes
-
-@features_1210_td
-Yes *4
-
-@features_1211_td
-Yes
-
-@features_1212_td
-Yes
-
-@features_1213_td
-Yes
-
-@features_1214_td
-Information Schema
-
-@features_1215_td
-Yes
-
-@features_1216_td
-No *8
-
-@features_1217_td
-Yes
-
-@features_1218_td
-Yes
-
-@features_1219_td
-Yes
-
-@features_1220_td
-Computed Columns
-
-@features_1221_td
-Yes
-
-@features_1222_td
-Yes
-
-@features_1223_td
-Yes
-
-@features_1224_td
-No
-
-@features_1225_td
-Yes *6
-
-@features_1226_td
-Case Insensitive Columns
-
-@features_1227_td
-Yes
-
-@features_1228_td
-Yes *14
-
-@features_1229_td
-Yes
-
-@features_1230_td
-Yes
-
-@features_1231_td
-Yes *6
-
-@features_1232_td
-Custom Aggregate Functions
-
-@features_1233_td
-Yes
-
-@features_1234_td
-No
-
-@features_1235_td
-Yes
-
-@features_1236_td
-Yes
-
-@features_1237_td
-Yes
-
-@features_1238_td
-CLOB/BLOB Compression
-
-@features_1239_td
-Yes
-
-@features_1240_td
-No
-
-@features_1241_td
-No
-
-@features_1242_td
-No
-
-@features_1243_td
-Yes
-
-@features_1244_td
-Footprint (jar/dll size)
-
-@features_1245_td
-~1.5 MB *5
-
-@features_1246_td
-~3 MB
-
-@features_1247_td
-~1.5 MB
-
-@features_1248_td
-~4 MB
-
-@features_1249_td
-~6 MB
-
-@features_1250_p
- *1 HSQLDB supports text tables.
-
-@features_1251_p
- *2 MySQL supports linked MySQL tables under the name 'federated tables'.
-
-@features_1252_p
- *3 Derby supports role-based security and password checking as an option.
-
-@features_1253_p
- *4 Derby only supports global temporary tables.
-
-@features_1254_p
- *5 The default H2 jar file contains debug information; jar files for other databases do not.
-
-@features_1255_p
- *6 PostgreSQL supports functional indexes.
-
-@features_1256_p
- *7 Derby only supports updatable result sets if the query is not sorted.
-
-@features_1257_p
- *8 Derby doesn't support standard-compliant information schema tables.
-
-@features_1258_p
- *9 When using MVCC (multi version concurrency).
-
-@features_1259_p
- *10 Derby and HSQLDB don't hide data patterns well.
-
-@features_1260_p
- *11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC.
-
-@features_1261_p
- *12 Derby doesn't support the EXPLAIN statement, but it supports runtime statistics and retrieving statement execution plans.
-
-@features_1262_p
- *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..], however it supports FETCH FIRST .. ROW[S] ONLY.
-
-@features_1263_p
- *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER().
-
-@features_1264_h3
-DaffodilDb and One$Db
-
-@features_1265_p
- It looks like the development of this database has stopped. The last release was February 2006.
-
-@features_1266_h3
-McKoi
-
-@features_1267_p
- It looks like the development of this database has stopped. The last release was August 2004.
-
-@features_1268_h2
-H2 in Use
-
-@features_1269_p
- For a list of applications that work with or use H2, see: Links.
-
-@features_1270_h2
-Connection Modes
-
-@features_1271_p
- The following connection modes are supported:
-
-@features_1272_li
-Embedded mode (local connections using JDBC)
-
-@features_1273_li
-Server mode (remote connections using JDBC or ODBC over TCP/IP)
-
-@features_1274_li
-Mixed mode (local and remote connections at the same time)
-
-@features_1275_h3
-Embedded Mode
-
-@features_1276_p
- In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of databases open concurrently, or on the number of open connections.
-
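- A minimal sketch of this from Java (the database name test and the user sa are illustrative, matching the URL examples below):
-
- import java.sql.Connection;
- import java.sql.DriverManager;
-
- // opens (and creates, if necessary) the database ~/test in embedded mode
- Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
- // ... use the connection ...
- conn.close();
-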
-@features_1277_h3
-Server Mode
-
-@features_1278_p
- When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode.
-
-@features_1279_p
- The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of databases open concurrently per server, or on the number of open connections.
-
-@features_1280_h3
-Mixed Mode
-
-@features_1281_p
- The mixed mode is a combination of the embedded and the server mode. The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower.
-
-@features_1282_p
- The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's a local or remote connection) can do so using the exact same database URL.
-
-@features_1283_h2
-Database URL Overview
-
-@features_1284_p
- This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive.
-
-@features_1285_th
-Topic
-
-@features_1286_th
-URL Format and Examples
-
-@features_1287_a
-Embedded (local) connection
-
-@features_1288_td
- jdbc:h2:[file:][<path>]<databaseName>
-
-@features_1289_td
- jdbc:h2:~/test
-
-@features_1290_td
- jdbc:h2:file:/data/sample
-
-@features_1291_td
- jdbc:h2:file:C:/data/sample (Windows only)
-
-@features_1292_a
-In-memory (private)
-
-@features_1293_td
-jdbc:h2:mem:
-
-@features_1294_a
-In-memory (named)
-
-@features_1295_td
- jdbc:h2:mem:<databaseName>
-
-@features_1296_td
- jdbc:h2:mem:test_mem
-
-@features_1297_a
-Server mode (remote connections)
-
-@features_1298_a
- using TCP/IP
-
-@features_1299_td
- jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName>
-
-@features_1300_td
- jdbc:h2:tcp://localhost/~/test
-
-@features_1301_td
- jdbc:h2:tcp://dbserv:8084/~/sample
-
-@features_1302_td
- jdbc:h2:tcp://localhost/mem:test
-
-@features_1303_a
-Server mode (remote connections)
-
-@features_1304_a
- using TLS
-
-@features_1305_td
- jdbc:h2:ssl://<server>[:<port>]/<databaseName>
-
-@features_1306_td
- jdbc:h2:ssl://localhost:8085/~/sample
-
-@features_1307_a
-Using encrypted files
-
-@features_1308_td
- jdbc:h2:<url>;CIPHER=AES
-
-@features_1309_td
- jdbc:h2:ssl://localhost/~/test;CIPHER=AES
-
-@features_1310_td
- jdbc:h2:file:~/secure;CIPHER=AES
-
-@features_1311_a
-File locking methods
-
-@features_1312_td
- jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO}
-
-@features_1313_td
- jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET
-
-@features_1314_a
-Only open if it already exists
-
-@features_1315_td
- jdbc:h2:<url>;IFEXISTS=TRUE
-
-@features_1316_td
- jdbc:h2:file:~/sample;IFEXISTS=TRUE
-
-@features_1317_a
-Don't close the database when the VM exits
-
-@features_1318_td
- jdbc:h2:<url>;DB_CLOSE_ON_EXIT=FALSE
-
-@features_1319_a
-Execute SQL on connection
-
-@features_1320_td
- jdbc:h2:<url>;INIT=RUNSCRIPT FROM '~/create.sql'
-
-@features_1321_td
- jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM '~/populate.sql'
-
-@features_1322_a
-User name and/or password
-
-@features_1323_td
- jdbc:h2:<url>[;USER=<username>][;PASSWORD=<value>]
-
-@features_1324_td
- jdbc:h2:file:~/sample;USER=sa;PASSWORD=123
-
-@features_1325_a
-Debug trace settings
-
-@features_1326_td
- jdbc:h2:<url>;TRACE_LEVEL_FILE=<level 0..3>
-
-@features_1327_td
- jdbc:h2:file:~/sample;TRACE_LEVEL_FILE=3
-
-@features_1328_a
-Ignore unknown settings
-
-@features_1329_td
- jdbc:h2:<url>;IGNORE_UNKNOWN_SETTINGS=TRUE
-
-@features_1330_a
-Custom file access mode
-
-@features_1331_td
- jdbc:h2:<url>;ACCESS_MODE_DATA=rws
-
-@features_1332_a
-Database in a zip file
-
-@features_1333_td
- jdbc:h2:zip:<zipFileName>!/<databaseName>
-
-@features_1334_td
- jdbc:h2:zip:~/db.zip!/test
-
-@features_1335_a
-Compatibility mode
-
-@features_1336_td
- jdbc:h2:<url>;MODE=<databaseType>
-
-@features_1337_td
- jdbc:h2:~/test;MODE=MYSQL
-
-@features_1338_a
-Auto-reconnect
-
-@features_1339_td
- jdbc:h2:<url>;AUTO_RECONNECT=TRUE
-
-@features_1340_td
- jdbc:h2:tcp://localhost/~/test;AUTO_RECONNECT=TRUE
-
-@features_1341_a
-Automatic mixed mode
-
-@features_1342_td
- jdbc:h2:<url>;AUTO_SERVER=TRUE
-
-@features_1343_td
- jdbc:h2:~/test;AUTO_SERVER=TRUE
-
-@features_1344_a
-Page size
-
-@features_1345_td
- jdbc:h2:<url>;PAGE_SIZE=512
-
-@features_1346_a
-Changing other settings
-
-@features_1347_td
- jdbc:h2:<url>;<setting>=<value>[;<setting>=<value>...]
-
-@features_1348_td
- jdbc:h2:file:~/sample;TRACE_LEVEL_SYSTEM_OUT=3
-
-@features_1349_h2
-Connecting to an Embedded (Local) Database
-
-@features_1350_p
- The database URL for connecting to a local database is jdbc:h2:[file:][<path>]<databaseName>. The prefix file: is optional. If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depends on the operating system, however it is recommended to use lowercase letters only. The database name must be at least three characters long (a limitation of File.createTempFile). The database name must not contain a semicolon. To point to the user home directory, use ~/, as in: jdbc:h2:~/test.
-
-@features_1351_h2
-In-Memory Databases
-
-@features_1352_p
- For certain use cases (for example: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted.
-
-@features_1353_p
- In some cases, only one connection to an in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc:h2:mem:. Opening two connections within the same virtual machine means opening two different (private) databases.
-
-@features_1354_p
- Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example: jdbc:h2:mem:db1. Accessing the same database using this URL only works within the same virtual machine and class loader environment.
-
-@features_1355_p
- To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process in which the in-memory database was created. The other processes then need to access the database over TCP/IP or TLS, using a database URL such as: jdbc:h2:tcp://localhost/mem:db1.
-
-@features_1356_p
- By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY=-1 to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc:h2:mem:test;DB_CLOSE_DELAY=-1.
-
-@features_1357_h2
-Database Files Encryption
-
-@features_1358_p
- The database files can be encrypted. The encryption algorithm AES is supported. To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database.
-
-@features_1359_h3
-Creating a New Database with File Encryption
-
-@features_1360_p
- By default, a new database is automatically created if it does not exist yet. To create a new encrypted database, connect to it as if it already existed.
-
-@features_1361_h3
-Connecting to an Encrypted Database
-
-@features_1362_p
- The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example of how to connect to a password-encrypted database:
-
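- A sketch of what this can look like from JDBC (the passwords filepwd and userpwd are placeholders; note the single space between them in the password field):
-
- import java.sql.Connection;
- import java.sql.DriverManager;
-
- // the file password comes first, then a space, then the user password
- Connection conn = DriverManager.getConnection(
-     "jdbc:h2:~/test;CIPHER=AES", "sa", "filepwd userpwd");
-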
-@features_1363_h3
-Encrypting or Decrypting a Database
-
-@features_1364_p
- To encrypt an existing database, use the ChangeFileEncryption tool. This tool can also decrypt an encrypted database, or change the file encryption key. The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test in the user home directory with the file password filepwd and the encryption algorithm AES:
-
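- A sketch of the corresponding call, assuming the tool's usual command line options (-dir, -db, -cipher, -encrypt); the same arguments can be passed when invoking the class from a shell:
-
- // encrypt the database 'test' in the user home directory with file password 'filepwd'
- org.h2.tools.ChangeFileEncryption.main(
-     "-dir", "~", "-db", "test", "-cipher", "AES", "-encrypt", "filepwd");
-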
-@features_1365_h2
-Database File Locking
-
-@features_1366_p
- Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database terminates, this lock file is deleted.
-
-@features_1367_p
- The following file locking methods are implemented:
-
-@features_1368_li
-The default method is FILE and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second.
-
-@features_1369_li
-The second method is SOCKET and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer.
-
-@features_1370_li
-The third method is FS. It uses native file locking via FileChannel.lock.
-
-@features_1371_li
-It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption.
-
-@features_1372_p
- To open the database with a different file locking method, use the parameter FILE_LOCK. The following code opens the database with the 'socket' locking method:
-
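- For example, as a Java URL string (the database name test is illustrative):
-
- String url = "jdbc:h2:~/test;FILE_LOCK=SOCKET";
-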
-@features_1373_p
- For more information about the algorithms, see Advanced / File Locking Protocols.
-
-@features_1374_h2
-Opening a Database Only if it Already Exists
-
-@features_1375_p
- By default, when an application calls DriverManager.getConnection(url, ...) and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow opening existing databases. To do this, add ;IFEXISTS=TRUE to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. The complete URL may look like this:
-
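- For example (the same URL as in the overview above):
-
- jdbc:h2:file:~/sample;IFEXISTS=TRUE
-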
-@features_1376_h2
-Closing a Database
-
-@features_1377_h3
-Delayed Database Closing
-
-@features_1378_p
- Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed:
-
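- For example:
-
- SET DB_CLOSE_DELAY 10
-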
-@features_1379_p
- The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL: jdbc:h2:~/test;DB_CLOSE_DELAY=10.
-
-@features_1380_h3
-Don't Close a Database when the VM Exits
-
-@features_1381_p
- By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). The database URL to disable database closing on exit is:
-
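- For example (the same setting as in the URL overview above):
-
- jdbc:h2:~/test;DB_CLOSE_ON_EXIT=FALSE
-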
-@features_1382_h2
-Execute SQL on Connection
-
-@features_1383_p
- Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below.
-
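- For example, as a Java string (a sketch; the script names are illustrative, and the doubled backslash escapes the semicolon delimiter):
-
- String url = "jdbc:h2:file:~/sample;" +
-     "INIT=RUNSCRIPT FROM '~/create.sql'\\;RUNSCRIPT FROM '~/populate.sql'";
-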
-@features_1384_p
- Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required:
-
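- The same URL as it would be entered in a GUI (single backslash, as in the URL overview above):
-
- jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM '~/populate.sql'
-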
-@features_1385_p
- Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead.
-
-@features_1386_h2
-Ignore Unknown Settings
-
-@features_1387_p
- Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS and IGNOREDRIVERPRIVILEGES are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignore such parameters by adding ;IGNORE_UNKNOWN_SETTINGS=TRUE to the database URL.
-
-@features_1388_h2
-Changing Other Settings when Opening a Connection
-
-@features_1389_p
- In addition to the settings already described, other database settings can be passed in the database URL. Adding ;setting=value at the end of a database URL is the same as executing the statement SET setting value just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc.
-
-@features_1390_h2
-Custom File Access Mode
-
-@features_1391_p
- Usually, the database opens the database file with the access mode rw, meaning read-write (except for read only databases, where the mode r is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA=r. Also supported are rws and rwd. This setting must be specified in the database URL:
-
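- For example (the same setting as in the URL overview above):
-
- jdbc:h2:~/test;ACCESS_MODE_DATA=rws
-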
-@features_1392_p
- For more information see Durability Problems. On many operating systems the access mode rws does not guarantee that the data is written to the disk.
-
-@features_1393_h2
-Multiple Connections
-
-@features_1394_h3
-Opening Multiple Databases at the Same Time
-
-@features_1395_p
- An application can open multiple databases at the same time, including multiple connections to the same database. The number of open databases is only limited by the memory available.
-
-@features_1396_h3
-Multiple Connections to the Same Database: Client/Server
-
-@features_1397_p
- If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security).
-
-@features_1398_h3
-Multithreading Support
-
-@features_1399_p
- This database is multithreading-safe. That means, if an application is multi-threaded, it does not need to worry about synchronizing access to the database. Internally, most requests to the same database are synchronized. That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait.
-
-@features_1400_p
- An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this.
-
-@features_1401_h3
-Locking, Lock-Timeout, Deadlocks
-
-@features_1402_p
- Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement.
-
-@features_1403_p
- If a connection wants to read from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown.
-
-@features_1404_p
- Usually, SELECT statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE. The statements COMMIT and ROLLBACK release all open locks. The commands SAVEPOINT and ROLLBACK TO SAVEPOINT don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. The following statements generate locks:
-
-@features_1405_th
-Type of Lock
-
-@features_1406_th
-SQL Statement
-
-@features_1407_td
-Read
-
-@features_1408_td
-SELECT * FROM TEST;
-
-@features_1409_td
- CALL SELECT MAX(ID) FROM TEST;
-
-@features_1410_td
- SCRIPT;
-
-@features_1411_td
-Write
-
-@features_1412_td
-SELECT * FROM TEST WHERE 1=0 FOR UPDATE;
-
-@features_1413_td
-Write
-
-@features_1414_td
-INSERT INTO TEST VALUES(1, 'Hello');
-
-@features_1415_td
- INSERT INTO TEST SELECT * FROM TEST;
-
-@features_1416_td
- UPDATE TEST SET NAME='Hi';
-
-@features_1417_td
- DELETE FROM TEST;
-
-@features_1418_td
-Write
-
-@features_1419_td
-ALTER TABLE TEST ...;
-
-@features_1420_td
- CREATE INDEX ... ON TEST ...;
-
-@features_1421_td
- DROP INDEX ...;
-
-@features_1422_p
- The number of milliseconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>. The default lock timeout is persistent.
-
-@features_1423_h3
-Avoiding Deadlocks
-
-@features_1424_p
- To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved by explicitly locking tables using SELECT ... FOR UPDATE.
-
-@features_1425_h2
-Database File Layout
-
-@features_1426_p
- The following files are created for persistent databases:
-
-@features_1427_th
-File Name
-
-@features_1428_th
-Description
-
-@features_1429_th
-Number of Files
-
-@features_1430_td
- test.h2.db
-
-@features_1431_td
- Database file.
-
-@features_1432_td
- Contains the transaction log, indexes, and data for all tables.
-
-@features_1433_td
- Format: <database>.h2.db
-
-@features_1434_td
- 1 per database
-
-@features_1435_td
- test.lock.db
-
-@features_1436_td
- Database lock file.
-
-@features_1437_td
- Automatically (re-)created while the database is in use.
-
-@features_1438_td
- Format: <database>.lock.db
-
-@features_1439_td
- 1 per database (only if in use)
-
-@features_1440_td
- test.trace.db
-
-@features_1441_td
- Trace file (if the trace option is enabled).
-
-@features_1442_td
- Contains trace information.
-
-@features_1443_td
- Format: <database>.trace.db
-
-@features_1444_td
- Renamed to <database>.trace.db.old if it is too big.
-
-@features_1445_td
- 0 or 1 per database
-
-@features_1446_td
- test.lobs.db/*
-
-@features_1447_td
- Directory containing one file for each
-
-@features_1448_td
- BLOB or CLOB value larger than a certain size.
-
-@features_1449_td
- Format: <id>.t<tableId>.lob.db
-
-@features_1450_td
- 1 per large object
-
-@features_1451_td
- test.123.temp.db
-
-@features_1452_td
- Temporary file.
-
-@features_1453_td
- Contains a temporary blob or a large result set.
-
-@features_1454_td
- Format: <database>.<id>.temp.db
-
-@features_1455_td
- 1 per object
-
-@features_1456_h3
-Moving and Renaming Database Files
-
-@features_1457_p
- Database name and location are not stored inside the database files.
-
-@features_1458_p
- While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged).
-
-@features_1459_p
- As there is no platform specific data in the files, they can be moved to other operating systems without problems.
-
-@features_1460_h3
-Backup
-
-@features_1461_p
- When the database is closed, it is possible to back up the database files.
-
-@features_1462_p
- To back up data while the database is running, the SQL commands SCRIPT and BACKUP can be used.
-
-@features_1463_h2
-Logging and Recovery
-
-@features_1464_p
- Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically.
-
-@features_1465_h2
-Compatibility
-
-@features_1466_p
- All database engines behave a little bit differently. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible with other databases. There are still a few differences, however:
-
-@features_1467_p
- In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE=TRUE to the database URL (example: jdbc:h2:~/test;IGNORECASE=TRUE).
-
-@features_1468_h3
-Compatibility Modes
-
-@features_1469_p
- For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases is implemented in this way. Here is the list of currently supported modes and the differences to the regular mode:
-
-@features_1470_h3
-DB2 Compatibility Mode
-
-@features_1471_p
- To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2 or the SQL statement SET MODE DB2.
-
-@features_1472_li
-For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null.
-
-@features_1473_li
-Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY] as an alternative for LIMIT .. OFFSET.
-
-@features_1474_li
-Concatenating NULL with another value results in the other value.
-
-@features_1475_li
-Support the pseudo-table SYSIBM.SYSDUMMY1.
-
-@features_1476_h3
-Derby Compatibility Mode
-
-@features_1477_p
- To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby or the SQL statement SET MODE Derby.
-
-@features_1478_li
-For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null.
-
-@features_1479_li
-For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed.
-
-@features_1480_li
-Concatenating NULL with another value results in the other value.
-
-@features_1481_li
-Support the pseudo-table SYSIBM.SYSDUMMY1.
-
-@features_1482_h3
-HSQLDB Compatibility Mode
-
-@features_1483_p
- To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB or the SQL statement SET MODE HSQLDB.
-
-@features_1484_li
-For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null.
-
-@features_1485_li
-When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required.
-
-@features_1486_li
-For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed.
-
-@features_1487_li
-Text can be concatenated using '+'.
-
-@features_1488_h3
-MS SQL Server Compatibility Mode
-
-@features_1489_p
- To use the MS SQL Server mode, use the database URL jdbc:h2:~/test;MODE=MSSQLServer or the SQL statement SET MODE MSSQLServer.
-
-@features_1490_li
-For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null.
-
-@features_1491_li
-Identifiers may be quoted using square brackets as in [Test].
-
-@features_1492_li
-For unique indexes, NULL is distinct. That means only one row with NULL in one of the columns is allowed.
-
-@features_1493_li
-Concatenating NULL with another value results in the other value.
-
-@features_1494_li
-Text can be concatenated using '+'.
-
-@features_1495_h3
-MySQL Compatibility Mode
-
-@features_1496_p
- To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL or the SQL statement SET MODE MySQL.
-
-@features_1497_li
-When inserting data, if a column is defined to be NOT NULL and NULL is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown.
-
-@features_1498_li
-Creating indexes in the CREATE TABLE statement is allowed using INDEX(..) or KEY(..). Example: create table test(id int primary key, name varchar(255), key idx_name(name));
-
-@features_1499_li
-Meta data calls return identifiers in lower case.
-
-@features_1500_li
-When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded.
-
-@features_1501_li
-Concatenating NULL with another value results in the other value.
-
-@features_1502_p
- Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE. This affects comparison using =, LIKE, REGEXP.
-
-@features_1503_h3
-Oracle Compatibility Mode
-
-@features_1504_p
- To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle or the SQL statement SET MODE Oracle.
-
-@features_1505_li
-For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null.
-
-@features_1506_li
-When using unique indexes, multiple rows with NULL in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise.
-
-@features_1507_li
-Concatenating NULL with another value results in the other value.
-
-@features_1508_li
-Empty strings are treated like NULL values.
-
-@features_1509_h3
-PostgreSQL Compatibility Mode
-
-@features_1510_p
- To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL or the SQL statement SET MODE PostgreSQL.
-
-@features_1511_li
-For aliased columns, ResultSetMetaData.getColumnName() returns the alias name and getTableName() returns null.
-
-@features_1512_li
-When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded.
-
-@features_1513_li
-The system columns CTID and OID are supported.
-
-@features_1514_li
-LOG(x) is base 10 in this mode.
-
-@features_1515_h2
-Auto-Reconnect
-
-@features_1516_p
- The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT=TRUE to the database URL.
-
-@features_1517_p
- Re-connecting will open a new session. After an automatic re-connect, variables and local temporary table definitions (excluding data) are re-created. The contents of the system table INFORMATION_SCHEMA.SESSION_STATE contain all client side state that is re-created.
-
-@features_1518_p
- If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1 or SET EXCLUSIVE 2), then this connection will try to re-connect until the exclusive mode ends.
-
-@features_1519_h2
-Automatic Mixed Mode
-
-@features_1520_p
- Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER=TRUE to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL:
-
-@features_1521_p
- Use the same URL for all connections to this database. Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db, that's why in-memory databases can't be supported.
-
-@features_1522_p
- The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, the client reads the .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically).
-
-@features_1523_p
- All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc:h2:tcp:// or ssl://) are not supported. This mode is not supported for in-memory databases.
-
-@features_1524_p
- Here is an example of how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process).
-
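- A sketch of the two applications (the path /data/test and the credentials are illustrative):
-
- // Application 1: opens the database in embedded mode and starts the auto server
- Connection conn1 = DriverManager.getConnection(
-     "jdbc:h2:/data/test;AUTO_SERVER=TRUE", "sa", "");
-
- // Application 2: the exact same URL; connects through the server started by application 1
- Connection conn2 = DriverManager.getConnection(
-     "jdbc:h2:/data/test;AUTO_SERVER=TRUE", "sa", "");
-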
-@features_1525_p
- When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT=9090.
-
-@features_1526_h2
-Page Size
-
-@features_1527_p
- The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE= when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created.
-
-@features_1528_h2
-Using the Trace Options
-
-@features_1529_p
- To find problems in an application, it is sometimes good to see what database operations were executed. This database offers the following trace features:
-
-@features_1530_li
-Trace to System.out and/or to a file
-
-@features_1531_li
-Support for trace levels OFF, ERROR, INFO, DEBUG
-
-@features_1532_li
-The maximum size of the trace file can be set
-
-@features_1533_li
-It is possible to generate Java source code from the trace file
-
-@features_1534_li
-Trace can be enabled at runtime by manually creating a file
-
-@features_1535_h3
-Trace Options
-
-@features_1536_p
- The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out (TRACE_LEVEL_SYSTEM_OUT) tracing, and one for file tracing (TRACE_LEVEL_FILE). The trace levels are 0 for OFF, 1 for ERROR (the default), 2 for INFO, and 3 for DEBUG. A database URL with both levels set to DEBUG is:
-
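- For example:
-
- jdbc:h2:~/test;TRACE_LEVEL_FILE=3;TRACE_LEVEL_SYSTEM_OUT=3
-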
-@features_1537_p
- The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level (for System.out tracing) or SET TRACE_LEVEL_FILE level (for file tracing). Example:
-
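- For example, to switch System.out tracing to DEBUG at runtime:
-
- SET TRACE_LEVEL_SYSTEM_OUT 3
-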
-@features_1538_h3
-Setting the Maximum Size of the Trace File
-
-@features_1539_p
- When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB; if the trace file exceeds this limit, it is renamed to .old and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb. Example:
-
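- For example, to limit the trace file to about 1 MB:
-
- SET TRACE_MAX_FILE_SIZE 1
-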
-@features_1540_h3
-Java Code Generation
-
-@features_1541_p
- When setting the trace level to INFO or DEBUG, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this:
-
-@features_1542_p
- To filter the Java source code, use the ConvertTraceFile tool as follows:
-
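- A sketch of the call, assuming the tool's usual -traceFile and -javaClass options (Test matching the class name mentioned below):
-
- org.h2.tools.ConvertTraceFile.main(
-     "-traceFile", "~/test.trace.db", "-javaClass", "Test");
-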
-@features_1543_p
- The generated file Test.java will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split in multiple methods. The password is not listed in the trace file and therefore not included in the source code.
-
-@features_1544_h2
-Using Other Logging APIs
-
-@features_1545_p
- By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database supports SLF4J.
-
-@features_1546_a
-SLF4J
-
-@features_1547_p
- is a simple facade for various logging APIs and allows plugging in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log.
-
-@features_1548_p
- To enable SLF4J, set the file trace level to 4 in the database URL:
-
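- For example:
-
- jdbc:h2:~/test;TRACE_LEVEL_FILE=4
-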
-@features_1549_p
- Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4 when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database. If it does not work, check the file <database>.trace.db for error messages.
-
-@features_1550_h2
-Read Only Databases
-
-@features_1551_p
- If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT and CALL statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether the database is read-only: by calling Connection.isReadOnly() or by executing the SQL statement CALL READONLY().
-
-@features_1552_p
- Using the Custom Access Mode r, the database can also be opened in read-only mode, even if the database file is not read-only.
-
-@features_1553_h2
-Read Only Databases in Zip or Jar File
-
-@features_1554_p
- To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG. If you are using a database named test, an easy way to create a zip file is using the Backup tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO can not be used.
-
-@features_1555_p
- When the zip file is created, you can open the database in the zip file using the following database URL:
-
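- For example (using the zip file name from the URL overview above):
-
- jdbc:h2:zip:~/db.zip!/test
-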
-@features_1556_p
- Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. The same indexes are used as when using a regular database.
-
-@features_1557_p
- If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip.
-
-@features_1558_h3
-Opening a Corrupted Database
-
-@features_1559_p
- If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. The exceptions are logged, but opening the database will continue.
-
-@features_1560_h2
-Computed Columns / Function Based Index
-
-@features_1561_p
- A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time:
-
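- A sketch of such a table (the names are illustrative; the computed column follows H2's NAME TYPE AS expression form and is re-evaluated on every insert and update):
-
- CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR,
-     LAST_MOD TIMESTAMP AS CURRENT_TIMESTAMP());
-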
-@features_1562_p
- Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column:
-
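- For example (a sketch; the table, column, and index names are illustrative):
-
- CREATE TABLE ADDRESS(ID INT PRIMARY KEY, NAME VARCHAR,
-     UPPER_NAME VARCHAR AS UPPER(NAME));
- CREATE INDEX IDX_UPPER_NAME ON ADDRESS(UPPER_NAME);
-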
-@features_1563_p
- When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table:
-
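- Continuing the sketch above:
-
- INSERT INTO ADDRESS(ID, NAME) VALUES(1, 'Miller');
- -- the index on the computed column can be used for this condition
- SELECT * FROM ADDRESS WHERE UPPER_NAME = 'MILLER';
-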
-@features_1564_h2
-Multi-Dimensional Indexes
-
-@features_1565_p
- A tool is provided to execute efficient multi-dimensional (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve.
-
-@features_1566_p
- Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column).
-
-@features_1567_p
- The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and number of dimensions, the improvement is usually higher than factor 5. The tool generates a SQL query from a specified multi-dimensional range. The method used is not database dependent, and the tool can easily be ported to other databases. For an example of how to use the tool, please have a look at the sample code provided in TestMultiDimension.java.
-
-@features_1568_h2
-User-Defined Functions and Stored Procedures
-
-@features_1569_p
- In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema.
-
-@features_1570_h3
-Referencing a Compiled Method
-
-@features_1571_p
- When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class:
-
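- A sketch of such a class (the package and class name acme.Function are placeholders):
-
- package acme;
- import java.math.BigInteger;
-
- public class Function {
-     // only public static methods of a public class can be registered
-     public static boolean isPrime(int value) {
-         return new BigInteger(String.valueOf(value)).isProbablePrime(100);
-     }
- }
-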
-@features_1572_p
- The Java function must be registered in the database by calling CREATE ALIAS ... FOR:
-
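- A matching registration, using the placeholder class from the sketch above:
-
- CREATE ALIAS IS_PRIME FOR "acme.Function.isPrime";
- CALL IS_PRIME(13);
-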
-@features_1573_p
- For a complete sample application, see src/test/org/h2/samples/Function.java.
-
-@features_1574_h3
-Declaring Functions as Source Code
-
-@features_1575_p
- When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main) if the tools.jar is in the classpath. If not, javac is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example:
-
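- For example, a function alias declared as source code could look like this (a sketch, consistent with the nextPrime name referenced below; BigInteger comes from java.math, which is imported by default):
-
- CREATE ALIAS NEXT_PRIME AS $$
- String nextPrime(String value) {
-     return new BigInteger(value).nextProbablePrime().toString();
- }
- $$;
-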
-@features_1576_p
- By default, the three packages java.util, java.math, java.sql are imported. The method name (nextPrime in the example above) is ignored. Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE:
-
-@features_1577_p
- The following template is used to create a complete Java class:
-
-@features_1578_h3
-Method Overloading
-
-@features_1579_p
- Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code.
-
-@features_1580_h3
-Function Data Type Mapping
-
-@features_1581_p
- Functions that accept non-nullable parameters such as int will not be called if one of those parameters is NULL. Instead, the result of the function is NULL. If the function should be called if a parameter is NULL, you need to use java.lang.Integer instead.
-
-@features_1582_p
- SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases: java.lang.Object is mapped to OTHER (a serialized object). Therefore, java.lang.Object can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]: arrays of any class are mapped to ARRAY. Objects of type org.h2.value.Value (the internal value class) are passed through without conversion.
-
-@features_1583_h3
-Functions That Require a Connection
-
-@features_1584_p
- If the first parameter of a Java function is a java.sql.Connection, then the connection to the database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (can not be) specified.
-
-@features_1585_h3
-Functions Throwing an Exception
-
-@features_1586_p
- If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLExceptions are directly re-thrown to the calling application; all other exceptions are first converted to a SQLException.
-
-@features_1587_h3
-Functions Returning a Result Set
-
-@features_1588_p
- Functions may return a result set. Such a function can be called with the CALL statement:
-
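- For example (a sketch; SHOW_USERS is illustrative, and acme.Function.getUsers refers to the kind of method sketched in the next section):
-
- CREATE ALIAS SHOW_USERS FOR "acme.Function.getUsers";
- CALL SHOW_USERS();
-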
-@features_1589_h3
-Using SimpleResultSet
-
-@features_1590_p
- A function can create a result set using the SimpleResultSet tool:
-
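- A sketch of such a function (class, column, and row values are illustrative):
-
- import java.sql.ResultSet;
- import java.sql.Types;
- import org.h2.tools.SimpleResultSet;
-
- public class Function {
-     public static ResultSet getUsers() {
-         SimpleResultSet rs = new SimpleResultSet();
-         rs.addColumn("ID", Types.INTEGER, 10, 0);
-         rs.addColumn("NAME", Types.VARCHAR, 255, 0);
-         rs.addRow(0, "Hello");
-         rs.addRow(1, "World");
-         return rs;
-     }
- }
-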
-@features_1591_h3
-Using a Function as a Table
-
-@features_1592_p
- A function that returns a result set can be used like a table. However, in this case the function is called at least twice: first while parsing the statement to collect the column names (with parameters set to null where not known at compile time). And then, while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc:columnlist:connection. Otherwise, the URL of the connection is jdbc:default:connection.
-
-@features_1593_h2
-Pluggable or User-Defined Tables
-
-@features_1594_p
- For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines.
-
-@features_1595_p
- In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine interface, e.g. something like this:
-
-@features_1596_p
- and then create the table from SQL like this:
-
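- For example (a sketch; com.acme.MyTableEngine is a placeholder for your implementation class):
-
- CREATE TABLE TEST(ID INT, NAME VARCHAR)
-     ENGINE "com.acme.MyTableEngine";
-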
-@features_1597_p
- It is also possible to pass in parameters to the table engine, like so:
-
-@features_1598_p
- In that case, the parameters are passed down in the tableEngineParams field of the CreateTableData object.
-
-@features_1599_h2
-Triggers
-
-@features_1600_p
- This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java. A Java trigger must implement the interface org.h2.api.Trigger. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server).
-
-@features_1601_p
- The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database:
-
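- For example (a sketch; the trigger and table names are illustrative, and acme.MyTrigger stands for a class implementing org.h2.api.Trigger):
-
- CREATE TRIGGER TRIG_INS AFTER INSERT ON TEST
-     FOR EACH ROW CALL "acme.MyTrigger";
-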
-@features_1602_p
- The trigger can be used to veto a change by throwing a SQLException.
-
-@features_1603_p
- As an alternative to implementing the Trigger interface, an application can extend the abstract class org.h2.tools.TriggerAdapter. This allows using the ResultSet interface within trigger implementations. In this case, only the fire method needs to be implemented:
-
-@features_1604_h2
-Compacting a Database
-
-@features_1605_p
- Empty space in the database file is re-used automatically. When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However re-creating the database may further reduce the database size because this will re-build the indexes. Here is a sample function to do this:
-
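- A sketch of such a function, using the Script, DeleteDbFiles and RunScript tools via their command line entry points (the option names are the tools' usual ones; the script file name is illustrative):
-
- public static void compact(String dir, String dbName,
-         String user, String password) throws java.sql.SQLException {
-     String url = "jdbc:h2:" + dir + "/" + dbName;
-     String file = "data/test.sql";
-     // write the whole database to a SQL script
-     org.h2.tools.Script.main("-url", url, "-user", user,
-         "-password", password, "-script", file);
-     // remove the old database files
-     org.h2.tools.DeleteDbFiles.execute(dir, dbName, true);
-     // re-create the database (and its indexes) from the script
-     org.h2.tools.RunScript.main("-url", url, "-user", user,
-         "-password", password, "-script", file);
- }
-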
-@features_1606_p
- See also the sample application org.h2.samples.Compact. The commands SCRIPT / RUNSCRIPT can be used as well to create a backup of a database and re-build the database from the script.
-
-@features_1607_h2
-Cache Settings
-
-@features_1608_p
- The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE. This setting can be set in the database connection URL (jdbc:h2:~/test;CACHE_SIZE=131072), or it can be changed at runtime using SET CACHE_SIZE size. The size of the cache, as represented by CACHE_SIZE, is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the current used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE'.
-
-@features_1609_p
- An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first.
-
-@features_1610_p
- Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_. Example: jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU. The cache might not actually improve performance. If you plan to use it, please run your own test cases first.
-
-@features_1611_p
- To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS. The number of pages read / written is listed.
-
-@fragments_1000_div
- ▲
-
-@fragments_1001_label
-Search:
-
-@fragments_1002_label
-Highlight keyword(s)
-
-@fragments_1003_a
-Home
-
-@fragments_1004_a
-Download
-
-@fragments_1005_a
-Cheat Sheet
-
-@fragments_1006_b
-Documentation
-
-@fragments_1007_a
-Quickstart
-
-@fragments_1008_a
-Installation
-
-@fragments_1009_a
-Tutorial
-
-@fragments_1010_a
-Features
-
-@fragments_1011_a
-Performance
-
-@fragments_1012_a
-Advanced
-
-@fragments_1013_b
-Reference
-
-@fragments_1014_a
-SQL Grammar
-
-@fragments_1015_a
-Functions
-
-@fragments_1016_a
-Data Types
-
-@fragments_1017_a
-Javadoc
-
-@fragments_1018_a
-PDF (1 MB)
-
-@fragments_1019_b
-Support
-
-@fragments_1020_a
-FAQ
-
-@fragments_1021_a
-Error Analyzer
-
-@fragments_1022_a
-Google Group (English)
-
-@fragments_1023_a
-Google Group (Japanese)
-
-@fragments_1024_a
-Google Group (Chinese)
-
-@fragments_1025_b
-Appendix
-
-@fragments_1026_a
-History & Roadmap
-
-@fragments_1027_a
-License
-
-@fragments_1028_a
-Build
-
-@fragments_1029_a
-Links
-
-@fragments_1030_a
-JaQu
-
-@fragments_1031_a
-MVStore
-
-@fragments_1032_a
-Architecture
-
-@fragments_1033_td
-
-
-@frame_1000_h1
-H2 Database Engine
-
-@frame_1001_p
- Welcome to H2, the free SQL database. The main features of H2 are:
-
-@frame_1002_li
-It is free to use for everybody, source code is included
-
-@frame_1003_li
-Written in Java, but also available as native executable
-
-@frame_1004_li
-JDBC and (partial) ODBC API
-
-@frame_1005_li
-Embedded and client/server modes
-
-@frame_1006_li
-Clustering is supported
-
-@frame_1007_li
-A web client is included
-
-@frame_1008_h2
-No Javascript
-
-@frame_1009_p
- If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript.
-
-@frame_1010_p
- Please enable Javascript, or go ahead without it: H2 Database Engine
-
-@history_1000_h1
-History and Roadmap
-
-@history_1001_a
- Change Log
-
-@history_1002_a
- Roadmap
-
-@history_1003_a
- History of this Database Engine
-
-@history_1004_a
- Why Java
-
-@history_1005_a
- Supporters
-
-@history_1006_h2
-Change Log
-
-@history_1007_p
- The up-to-date change log is available at http://www.h2database.com/html/changelog.html
-
-@history_1008_h2
-Roadmap
-
-@history_1009_p
- The current roadmap is available at http://www.h2database.com/html/roadmap.html
-
-@history_1010_h2
-History of this Database Engine
-
-@history_1011_p
- The development of H2 was started in May 2004, but it was first published on December 14th 2005. The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continue working on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2, however H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch.
-
-@history_1012_h2
-Why Java
-
-@history_1013_p
- The main reasons to use a Java database are:
-
-@history_1014_li
-Very simple to integrate in Java applications
-
-@history_1015_li
-Support for many different platforms
-
-@history_1016_li
-More secure than native applications (no buffer overflows)
-
-@history_1017_li
-User defined functions (or triggers) run very fast
-
-@history_1018_li
-Unicode support
-
-@history_1019_p
- Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management.
-
-@history_1020_p
- Developing Java code is faster than developing C or C++ code. When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing.
-
-@history_1021_p
- Java is future-proof: a lot of companies support Java. Java is now open source.
-
-@history_1022_p
- To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features.
-
-@history_1023_h2
-Supporters
-
-@history_1024_p
- Many thanks to those who reported bugs, gave valuable feedback, spread the word, and translated this project. Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page).
-
-@history_1025_a
-xso; xBase Software Ontwikkeling, Netherlands
-
-@history_1026_a
-Cognitect, USA
-
-@history_1027_a
-Code 42 Software, Inc., Minneapolis
-
-@history_1028_li
-Martin Wildam, Austria
-
-@history_1029_a
-Code Lutin, France
-
-@history_1030_a
-NetSuxxess GmbH, Germany
-
-@history_1031_a
-Poker Copilot, Steve McLeod, Germany
-
-@history_1032_a
-SkyCash, Poland
-
-@history_1033_a
-Lumber-mill, Inc., Japan
-
-@history_1034_a
-StockMarketEye, USA
-
-@history_1035_a
-Eckenfelder GmbH & Co.KG, Germany
-
-@history_1036_li
-Anthony Goubard, Netherlands
-
-@history_1037_li
-Richard Hickey, USA
-
-@history_1038_li
-Alessio Jacopo D'Adamo, Italy
-
-@history_1039_li
-Ashwin Jayaprakash, USA
-
-@history_1040_li
-Donald Bleyl, USA
-
-@history_1041_li
-Frank Berger, Germany
-
-@history_1042_li
-Florent Ramiere, France
-
-@history_1043_li
-Jun Iyama, Japan
-
-@history_1044_li
-Antonio Casqueiro, Portugal
-
-@history_1045_li
-Oliver Computing LLC, USA
-
-@history_1046_li
-Harpal Grover Consulting Inc., USA
-
-@history_1047_li
-Elisabetta Berlini, Italy
-
-@history_1048_li
-William Gilbert, USA
-
-@history_1049_li
-Antonio Dieguez Rojas, Chile
-
-@history_1050_a
-Ontology Works, USA
-
-@history_1051_li
-Pete Haidinyak, USA
-
-@history_1052_li
-William Osmond, USA
-
-@history_1053_li
-Joachim Ansorg, Germany
-
-@history_1054_li
-Oliver Soerensen, Germany
-
-@history_1055_li
-Christos Vasilakis, Greece
-
-@history_1056_li
-Fyodor Kupolov, Denmark
-
-@history_1057_li
-Jakob Jenkov, Denmark
-
-@history_1058_li
-Stéphane Chartrand, Switzerland
-
-@history_1059_li
-Glenn Kidd, USA
-
-@history_1060_li
-Gustav Trede, Sweden
-
-@history_1061_li
-Joonas Pulakka, Finland
-
-@history_1062_li
-Bjorn Darri Sigurdsson, Iceland
-
-@history_1063_li
-Iyama Jun, Japan
-
-@history_1064_li
-Gray Watson, USA
-
-@history_1065_li
-Erik Dick, Germany
-
-@history_1066_li
-Pengxiang Shao, China
-
-@history_1067_li
-Bilingual Marketing Group, USA
-
-@history_1068_li
-Philippe Marschall, Switzerland
-
-@history_1069_li
-Knut Staring, Norway
-
-@history_1070_li
-Theis Borg, Denmark
-
-@history_1071_li
-Mark De Mendonca Duske, USA
-
-@history_1072_li
-Joel A. Garringer, USA
-
-@history_1073_li
-Olivier Chafik, France
-
-@history_1074_li
-Rene Schwietzke, Germany
-
-@history_1075_li
-Jalpesh Patadia, USA
-
-@history_1076_li
-Takanori Kawashima, Japan
-
-@history_1077_li
-Terrence JC Huang, China
-
-@history_1078_a
-JiaDong Huang, Australia
-
-@history_1079_li
-Laurent van Roy, Belgium
-
-@history_1080_li
-Qian Chen, China
-
-@history_1081_li
-Clinton Hyde, USA
-
-@history_1082_li
-Kritchai Phromros, Thailand
-
-@history_1083_li
-Alan Thompson, USA
-
-@history_1084_li
-Ladislav Jech, Czech Republic
-
-@history_1085_li
-Dimitrijs Fedotovs, Latvia
-
-@history_1086_li
-Richard Manley-Reeve, United Kingdom
-
-@installation_1000_h1
-Installation
-
-@installation_1001_a
- Requirements
-
-@installation_1002_a
- Supported Platforms
-
-@installation_1003_a
- Installing the Software
-
-@installation_1004_a
- Directory Structure
-
-@installation_1005_h2
-Requirements
-
-@installation_1006_p
- To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much.
-
-@installation_1007_h3
-Database Engine
-
-@installation_1008_li
-Windows XP or Vista, Mac OS X, or Linux
-
-@installation_1009_li
-Sun Java 6 or newer
-
-@installation_1010_li
-Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB)
-
-@installation_1011_h3
-H2 Console
-
-@installation_1012_li
-Mozilla Firefox
-
-@installation_1013_h2
-Supported Platforms
-
-@installation_1014_p
- As this database is written in Java, it can run on many different platforms. It is tested with Java 6 and 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 6, but it also works on many other operating systems and with other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported.
-
-@installation_1015_h2
-Installing the Software
-
-@installation_1016_p
- To install the software, run the installer or unzip it to a directory of your choice.
-
-@installation_1017_h2
-Directory Structure
-
-@installation_1018_p
- After installing, you should get the following directory structure:
-
-@installation_1019_th
-Directory
-
-@installation_1020_th
-Contents
-
-@installation_1021_td
-bin
-
-@installation_1022_td
-JAR and batch files
-
-@installation_1023_td
-docs
-
-@installation_1024_td
-Documentation
-
-@installation_1025_td
-docs/html
-
-@installation_1026_td
-HTML pages
-
-@installation_1027_td
-docs/javadoc
-
-@installation_1028_td
-Javadoc files
-
-@installation_1029_td
-ext
-
-@installation_1030_td
-External dependencies (downloaded when building)
-
-@installation_1031_td
-service
-
-@installation_1032_td
-Tools to run the database as a Windows Service
-
-@installation_1033_td
-src
-
-@installation_1034_td
-Source files
-
-@installation_1035_td
-src/docsrc
-
-@installation_1036_td
-Documentation sources
-
-@installation_1037_td
-src/installer
-
-@installation_1038_td
-Installer, shell, and release build script
-
-@installation_1039_td
-src/main
-
-@installation_1040_td
-Database engine source code
-
-@installation_1041_td
-src/test
-
-@installation_1042_td
-Test source code
-
-@installation_1043_td
-src/tools
-
-@installation_1044_td
-Tools and database adapters source code
-
-@jaqu_1000_h1
-JaQu
-
-@jaqu_1001_a
- What is JaQu
-
-@jaqu_1002_a
- Differences to Other Data Access Tools
-
-@jaqu_1003_a
- Current State
-
-@jaqu_1004_a
- Building the JaQu Library
-
-@jaqu_1005_a
- Requirements
-
-@jaqu_1006_a
- Example Code
-
-@jaqu_1007_a
- Configuration
-
-@jaqu_1008_a
- Natural Syntax
-
-@jaqu_1009_a
- Other Ideas
-
-@jaqu_1010_a
- Similar Projects
-
-@jaqu_1011_h2
-What is JaQu
-
-@jaqu_1012_p
- Note: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql.
-
-@jaqu_1013_p
- JaQu stands for Java Query and allows you to access databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code:
-
-@jaqu_1014_p
- stands for the SQL statement:
-
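-As a sketch (the Product class, its unitsInStock field, and an already-open Db connection named db are illustrative assumptions, not part of this text):
-
-    Product p = new Product();
-    List<Product> soldOutProducts =
-        db.from(p).where(p.unitsInStock).is(0).select();
-
-    // roughly corresponds to:
-    // SELECT * FROM PRODUCTS P WHERE P.UNITS_IN_STOCK = 0
-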
-@jaqu_1015_h2
-Differences to Other Data Access Tools
-
-@jaqu_1016_p
- Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection.
-
-@jaqu_1017_p
- JaQu is meant as a replacement for JDBC and SQL, not so much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all of their features. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead, the configuration (if required at all) is done in pure Java, within the application.
-
-@jaqu_1018_p
- JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings).
-
-@jaqu_1019_h3
-Restrictions
-
-@jaqu_1020_p
- Primitive types (e.g. boolean, int, long, double) are not supported. Use java.lang.Boolean, Integer, Long, Double instead.
-
-@jaqu_1021_h3
-Why in Java?
-
-@jaqu_1022_p
- Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated: you would need to split the application and database code, and write adapter / wrapper code.
-
-@jaqu_1023_h2
-Current State
-
-@jaqu_1024_p
- Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file; however, the source code is included in H2, under:
-
-@jaqu_1025_code
-src/test/org/h2/test/jaqu/*
-
-@jaqu_1026_li
- (samples and tests)
-
-@jaqu_1027_code
-src/tools/org/h2/jaqu/*
-
-@jaqu_1028_li
- (framework)
-
-@jaqu_1029_h2
-Building the JaQu Library
-
-@jaqu_1030_p
- To create the JaQu jar file, run: build jarJaqu. This will create the file bin/h2jaqu.jar.
-
-@jaqu_1031_h2
-Requirements
-
-@jaqu_1032_p
- JaQu requires Java 6. Annotations are not needed. Currently, JaQu is only tested with the H2 database engine; however, in theory it should work with any database that supports the JDBC API.
-
-@jaqu_1033_h2
-Example Code
-
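-A minimal sketch of end-to-end usage, assuming a Product class that implements org.h2.jaqu.Table and offers a static getList() helper with sample data (all of these names are illustrative):
-
-    import java.util.List;
-    import org.h2.jaqu.Db;
-
-    public class Test {
-        public static void main(String... args) {
-            // open the database (an in-memory H2 database here)
-            Db db = Db.open("jdbc:h2:mem:", "sa", "sa");
-            db.insertAll(Product.getList());
-            Product p = new Product();
-            // find all products that are sold out
-            List<Product> soldOut =
-                db.from(p).where(p.unitsInStock).is(0).select();
-            db.close();
-        }
-    }
-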
-@jaqu_1034_h2
-Configuration
-
-@jaqu_1035_p
- JaQu does not require any configuration when using the default field-to-column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define in the data class. Example:
-
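-A sketch of such a data class, assuming the static helper methods of org.h2.jaqu.Define (the class, field, and table names are illustrative):
-
-    import static org.h2.jaqu.Define.*;
-    import org.h2.jaqu.Table;
-
-    public class Product implements Table {
-        public Integer productId;
-        public String productName;
-
-        // called once, when the class is used for the first time
-        public void define() {
-            tableName("AnnotatedProduct"); // map the class to another table
-            primaryKey(productId);         // single-column primary key
-            index(productName);            // secondary index
-        }
-    }
-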
-@jaqu_1036_p
- The method define() contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself.
-
-@jaqu_1037_h2
-Natural Syntax
-
-@jaqu_1038_p
-The plan is to support a more natural (pure Java) syntax in conditions. To do that, the condition class is decompiled to a SQL condition. A proof-of-concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is:
-
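-A sketch of the planned form, assuming a table alias co, query parameters x and name, and a Filter callback interface (all names here are illustrative):
-
-    long count = db.from(co).
-        where(new Filter() { public boolean where() {
-            return co.id == x
-                && co.name.equals(name)
-                && co.value == new BigDecimal("1");
-        } }).selectCount();
-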
-@jaqu_1039_h2
-Other Ideas
-
-@jaqu_1040_p
- This project has just been started, and nothing is fixed yet. Some ideas are:
-
-@jaqu_1041_li
-Support queries on collections (instead of using a database).
-
-@jaqu_1042_li
-Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA).
-
-@jaqu_1043_li
-Internally use a JPA implementation (for example Hibernate) instead of SQL directly.
-
-@jaqu_1044_li
-Use PreparedStatements and cache them.
-
-@jaqu_1045_h2
-Similar Projects
-
-@jaqu_1046_a
-iciql (a friendly fork of JaQu)
-
-@jaqu_1047_a
-Cement Framework
-
-@jaqu_1048_a
-Dreamsource ORM
-
-@jaqu_1049_a
-Empire-db
-
-@jaqu_1050_a
-JEQUEL: Java Embedded QUEry Language
-
-@jaqu_1051_a
-Joist
-
-@jaqu_1052_a
-jOOQ
-
-@jaqu_1053_a
-JoSQL
-
-@jaqu_1054_a
-LIQUidFORM
-
-@jaqu_1055_a
-Quaere (Alias implementation)
-
-@jaqu_1056_a
-Quaere
-
-@jaqu_1057_a
-Querydsl
-
-@jaqu_1058_a
-Squill
-
-@license_1000_h1
-License
-
-@license_1001_a
- Summary and License FAQ
-
-@license_1002_a
- Mozilla Public License Version 2.0
-
-@license_1003_a
- Eclipse Public License - Version 1.0
-
-@license_1004_a
- Export Control Classification Number (ECCN)
-
-@license_1005_h2
-Summary and License FAQ
-
-@license_1006_p
- H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL.
-
-@license_1007_li
-You can use H2 for free.
-
-@license_1008_li
-You can integrate it into your applications (including in commercial applications) and distribute it.
-
-@license_1009_li
-Files containing only your code are not covered by this license (it is 'commercial friendly').
-
-@license_1010_li
-Modifications to the H2 source code must be published.
-
-@license_1011_li
-You don't need to provide the source code of H2 if you did not modify anything.
-
-@license_1012_li
-If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below.
-
-@license_1013_p
- However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http://www.bungisoft.com.
-
-@license_1014_p
- About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code.
-
-@license_1015_p
- If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the text below. You may also include a copy of the complete license.
-
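-The wording is along these lines (a sketch; when in doubt, copy the exact text from the H2 web site):
-
-    This software contains unmodified binary redistributions for the
-    H2 database engine (http://www.h2database.com/), which is dual
-    licensed and available under the MPL 2.0 (Mozilla Public License)
-    or under the EPL 1.0 (Eclipse Public License). An original copy of
-    the license agreement can be found at:
-    http://www.h2database.com/html/license.html
-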
-@license_1016_h2
-Mozilla Public License Version 2.0
-
-@license_1017_h3
-1. Definitions
-
-@license_1018_p
-1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.
-
-@license_1019_p
-1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution.
-
-@license_1020_p
-1.3. "Contribution" means Covered Software of a particular Contributor.
-
-@license_1021_p
-1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.
-
-@license_1022_p
-1.5. "Incompatible With Secondary Licenses" means
-
-@license_1023_p
-a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or
-
-@license_1024_p
-b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.
-
-@license_1025_p
-1.6. "Executable Form" means any form of the work other than Source Code Form.
-
-@license_1026_p
-1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.
-
-@license_1027_p
-1.8. "License" means this document.
-
-@license_1028_p
-1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.
-
-@license_1029_p
-1.10. "Modifications" means any of the following:
-
-@license_1030_p
-a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or
-
-@license_1031_p
-b. any new file in Source Code Form that contains any Covered Software.
-
-@license_1032_p
-1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.
-
-@license_1033_p
-1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.
-
-@license_1034_p
-1.13. "Source Code Form" means the form of the work preferred for making modifications.
-
-@license_1035_p
-1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-@license_1036_h3
-2. License Grants and Conditions
-
-@license_1037_h4
-2.1. Grants
-
-@license_1038_p
-Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-@license_1039_p
-under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and
-
-@license_1040_p
-under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.
-
-@license_1041_h4
-2.2. Effective Date
-
-@license_1042_p
-The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.
-
-@license_1043_h4
-2.3. Limitations on Grant Scope
-
-@license_1044_p
-The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor:
-
-@license_1045_p
-for any code that a Contributor has removed from Covered Software; or
-
-@license_1046_p
-for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or
-
-@license_1047_p
-under Patent Claims infringed by Covered Software in the absence of its Contributions.
-
-@license_1048_p
-This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).
-
-@license_1049_h4
-2.4. Subsequent Licenses
-
-@license_1050_p
-No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).
-
-@license_1051_h4
-2.5. Representation
-
-@license_1052_p
-Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.
-
-@license_1053_h4
-2.6. Fair Use
-
-@license_1054_p
-This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.
-
-@license_1055_h4
-2.7. Conditions
-
-@license_1056_p
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.
-
-@license_1057_h3
-3. Responsibilities
-
-@license_1058_h4
-3.1. Distribution of Source Form
-
-@license_1059_p
-All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form.
-
-@license_1060_h4
-3.2. Distribution of Executable Form
-
-@license_1061_p
-If You distribute Covered Software in Executable Form then:
-
-@license_1062_p
-such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and
-
-@license_1063_p
-You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License.
-
-@license_1064_h4
-3.3. Distribution of a Larger Work
-
-@license_1065_p
-You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).
-
-@license_1066_h4
-3.4. Notices
-
-@license_1067_p
-You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.
-
-@license_1068_h4
-3.5. Application of Additional Terms
-
-@license_1069_p
-You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.
-
-@license_1070_h3
-4. Inability to Comply Due to Statute or Regulation
-
-@license_1071_p
-If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.
-
-@license_1072_h3
-5. Termination
-
-@license_1073_p
-5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.
-
-@license_1074_p
-5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.
-
-@license_1075_p
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.
-
-@license_1076_h3
-6. Disclaimer of Warranty
-
-@license_1077_p
-Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.
-
-@license_1078_h3
-7. Limitation of Liability
-
-@license_1079_p
-Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.
-
-@license_1080_h3
-8. Litigation
-
-@license_1081_p
-Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims.
-
-@license_1082_h3
-9. Miscellaneous
-
-@license_1083_p
-This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.
-
-@license_1084_h3
-10. Versions of the License
-
-@license_1085_h4
-10.1. New Versions
-
-@license_1086_p
-Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.
-
-@license_1087_h4
-10.2. Effect of New Versions
-
-@license_1088_p
-You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.
-
-@license_1089_h4
-10.3. Modified Versions
-
-@license_1090_p
-If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).
-
-@license_1091_h4
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
-
-@license_1092_p
-If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.
-
-@license_1093_h3
-Exhibit A - Source Code Form License Notice
-
-@license_1094_p
-If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.
-
-@license_1095_p
-You may add additional accurate notices of copyright ownership.
-
-@license_1096_h3
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-@license_1097_h2
-Eclipse Public License - Version 1.0
-
-@license_1098_p
- THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
-@license_1099_h3
-1. DEFINITIONS
-
-@license_1100_p
- "Contribution" means:
-
-@license_1101_p
- a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
-
-@license_1102_p
- b) in the case of each subsequent Contributor:
-
-@license_1103_p
- i) changes to the Program, and
-
-@license_1104_p
- ii) additions to the Program;
-
-@license_1105_p
- where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
-
-@license_1106_p
- "Contributor" means any person or entity that distributes the Program.
-
-@license_1107_p
- "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
-
-@license_1108_p
- "Program" means the Contributions distributed in accordance with this Agreement.
-
-@license_1109_p
- "Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
-
-@license_1110_h3
-2. GRANT OF RIGHTS
-
-@license_1111_p
- a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
-
-@license_1112_p
- b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
-
-@license_1113_p
- c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
-
-@license_1114_p
- d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
-
-@license_1115_h3
-3. REQUIREMENTS
-
-@license_1116_p
- A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
-
-@license_1117_p
- a) it complies with the terms and conditions of this Agreement; and
-
-@license_1118_p
- b) its license agreement:
-
-@license_1119_p
- i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
-
-@license_1120_p
- ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
-
-@license_1121_p
- iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
-
-@license_1122_p
- iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
-
-@license_1123_p
- When the Program is made available in source code form:
-
-@license_1124_p
- a) it must be made available under this Agreement; and
-
-@license_1125_p
- b) a copy of this Agreement must be included with each copy of the Program.
-
-@license_1126_p
- Contributors may not remove or alter any copyright notices contained within the Program.
-
-@license_1127_p
- Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
-
-@license_1128_h3
-4. COMMERCIAL DISTRIBUTION
-
-@license_1129_p
- Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
-
-@license_1130_p
- For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
-
-@license_1131_h3
-5. NO WARRANTY
-
-@license_1132_p
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
-
-@license_1133_h3
-6. DISCLAIMER OF LIABILITY
-
-@license_1134_p
- EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-@license_1135_h3
-7. GENERAL
-
-@license_1136_p
- If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
-
-@license_1137_p
- If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
-
-@license_1138_p
- All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
-
-@license_1139_p
- Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
-
-@license_1140_p
- This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
-
-@license_1141_h2
-Export Control Classification Number (ECCN)
-
-@license_1142_p
- As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page.
-
-@links_1000_h1
-Links
-
-@links_1001_p
- If you want to add a link, please send it to the support email address or post it to the group.
-
-@links_1002_a
- Commercial Support
-
-@links_1003_a
- Quotes
-
-@links_1004_a
- Books
-
-@links_1005_a
- Extensions
-
-@links_1006_a
- Blog Articles, Videos
-
-@links_1007_a
- Database Frontends / Tools
-
-@links_1008_a
- Products and Projects
-
-@links_1009_h2
-Commercial Support
-
-@links_1010_a
-Commercial support for H2 is available
-
-@links_1011_p
- from Steve McLeod (steve dot mcleod at gmail dot com). Please note he is not one of the main developers of H2. He describes himself as follows:
-
-@links_1012_li
-I'm a long time user of H2, routinely working with H2 databases several gigabytes in size.
-
-@links_1013_li
-I'm the creator of popular commercial desktop software that uses H2.
-
-@links_1014_li
-I'm a certified Java developer (SCJP).
-
-@links_1015_li
-I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany.
-
-@links_1016_li
-I'm based in Germany, and willing to travel within Europe. I can work remotely with teams in the USA and other locations.
-
-@links_1017_h2
-Quotes
-
-@links_1018_a
- Quote
-
-@links_1019_p
-: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... "
-
-@links_1020_h2
-Books
-
-@links_1021_a
- Seam In Action
-
-@links_1022_h2
-Extensions
-
-@links_1023_a
- Grails H2 Database Plugin
-
-@links_1024_a
- h2osgi: OSGi for the H2 Database
-
-@links_1025_a
- H2Sharp: ADO.NET interface for the H2 database engine
-
-@links_1026_a
- A spatial extension of the H2 database.
-
-@links_1027_h2
-Blog Articles, Videos
-
-@links_1028_a
- Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2
-
-@links_1029_a
- Analyzing CSVs with H2 in under 10 minutes (2009-12-07)
-
-@links_1030_a
- Efficient sorting and iteration on large databases (2009-06-15)
-
-@links_1031_a
- Porting Flexive to the H2 Database (2008-12-05)
-
-@links_1032_a
- H2 Database with GlassFish (2008-11-24)
-
-@links_1033_a
- H2 Database - Performance Tracing (2008-04-30)
-
-@links_1034_a
- Open Source Databases Comparison (2007-09-11)
-
-@links_1035_a
- The Codist: The Open Source Frameworks I Use (2007-07-23)
-
-@links_1036_a
- The Codist: SQL Injections: How Not To Get Stuck (2007-05-08)
-
-@links_1037_a
- David Coldrick's Weblog: New Version of H2 Database Released (2007-01-06)
-
-@links_1038_a
- The Codist: Write Your Own Database, Again (2006-11-13)
-
-@links_1039_h2
-Project Pages
-
-@links_1040_a
- Ohloh
-
-@links_1041_a
- Freshmeat Project Page
-
-@links_1042_a
- Wikipedia
-
-@links_1043_a
- Java Source Net
-
-@links_1044_a
- Linux Package Manager
-
-@links_1045_h2
-Database Frontends / Tools
-
-@links_1046_a
- Dataflyer
-
-@links_1047_p
- A tool to browse databases and export data.
-
-@links_1048_a
- DB Solo
-
-@links_1049_p
- SQL query tool.
-
-@links_1050_a
- DbVisualizer
-
-@links_1051_p
- Database tool.
-
-@links_1052_a
- Execute Query
-
-@links_1053_p
- Database utility written in Java.
-
-@links_1054_a
- Flyway
-
-@links_1055_p
- The agile database migration framework for Java.
-
-@links_1056_a
- [fleXive]
-
-@links_1057_p
- JavaEE 5 open source framework for the development of complex and evolving (web-)applications.
-
-@links_1058_a
- JDBC Console
-
-@links_1059_p
- This small webapp gives you the ability to execute SQL against data sources bound in the container's JNDI. Based on the H2 Console.
-
-@links_1060_a
- HenPlus
-
-@links_1061_p
- HenPlus is a SQL shell written in Java.
-
-@links_1062_a
- JDBC lint
-
-@links_1063_p
- Helps write correct and efficient code when using the JDBC API.
-
-@links_1064_a
- OpenOffice
-
-@links_1065_p
- Base is OpenOffice.org's database application. It provides access to relational data sources.
-
-@links_1066_a
- RazorSQL
-
-@links_1067_p
- An SQL query tool, database browser, SQL editor, and database administration tool.
-
-@links_1068_a
- SQL Developer
-
-@links_1069_p
- Universal Database Frontend.
-
-@links_1070_a
- SQL Workbench/J
-
-@links_1071_p
- Free DBMS-independent SQL tool.
-
-@links_1072_a
- SQuirreL SQL Client
-
-@links_1073_p
- Graphical tool to view the structure of a database, browse the data, issue SQL commands etc.
-
-@links_1074_a
- SQuirreL DB Copy Plugin
-
-@links_1075_p
- Tool to copy data from one database to another.
-
-@links_1076_h2
-Products and Projects
-
-@links_1077_a
- AccuProcess
-
-@links_1078_p
- Visual business process modeling and simulation software for business users.
-
-@links_1079_a
- Adeptia BPM
-
-@links_1080_p
- A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows.
-
-@links_1081_a
- Adeptia Integration
-
-@links_1082_p
- Process-centric, services-based application integration suite.
-
-@links_1083_a
- Aejaks
-
-@links_1084_p
- A server-side scripting environment to build AJAX enabled web applications.
-
-@links_1085_a
- Axiom Stack
-
-@links_1086_p
- A web framework that lets you write dynamic web applications with Zen-like simplicity.
-
-@links_1087_a
- Apache Cayenne
-
-@links_1088_p
- Open source persistence framework providing object-relational mapping (ORM) and remoting services.
-
-@links_1089_a
- Apache Jackrabbit
-
-@links_1090_p
- Open source implementation of the Java Content Repository API (JCR).
-
-@links_1091_a
- Apache OpenJPA
-
-@links_1092_p
- Open source implementation of the Java Persistence API (JPA).
-
-@links_1093_a
- AppFuse
-
-@links_1094_p
- Helps building web applications.
-
-@links_1095_a
- BGBlitz
-
-@links_1096_p
- The Swiss army knife of Backgammon.
-
-@links_1097_a
- Bonita
-
-@links_1098_p
- Open source workflow solution for handling long-running, user-oriented processes, providing out-of-the-box workflow and business process management features.
-
-@links_1099_a
- Bookmarks Portlet
-
-@links_1100_p
- JSR 168 compliant bookmarks management portlet application.
-
-@links_1101_a
- Claros inTouch
-
-@links_1102_p
- Ajax communication suite with mail, addresses, notes, IM, and RSS reader.
-
-@links_1103_a
- CrashPlan PRO Server
-
-@links_1104_p
- Easy and cross platform backup solution for business and service providers.
-
-@links_1105_a
- DataNucleus
-
-@links_1106_p
- Java persistent objects.
-
-@links_1107_a
- DbUnit
-
-@links_1108_p
- A JUnit extension (also usable with Ant) targeted for database-driven projects.
-
-@links_1109_a
- DiffKit
-
-@links_1110_p
- DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text.
-
-@links_1111_a
- Dinamica Framework
-
-@links_1112_p
- Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets).
-
-@links_1113_a
- District Health Information Software 2 (DHIS)
-
-@links_1114_p
- The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities.
-
-@links_1115_a
- Ebean ORM Persistence Layer
-
-@links_1116_p
- Open source Java Object Relational Mapping tool.
-
-@links_1117_a
- Eclipse CDO
-
-@links_1118_p
- The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution.
-
-@links_1119_a
- Fabric3
-
-@links_1120_p
- Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org).
-
-@links_1121_a
- FIT4Data
-
-@links_1122_p
- A testing framework for data management applications built on the Java implementation of FIT.
-
-@links_1123_a
- Flux
-
-@links_1124_p
- Java job scheduler, file transfer, workflow, and BPM.
-
-@links_1125_a
- GeoServer
-
-@links_1126_p
- GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing.
-
-@links_1127_a
- GBIF Integrated Publishing Toolkit (IPT)
-
-@links_1128_p
- The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, taxon checklists and general resource metadata.
-
-@links_1129_a
- GNU Gluco Control
-
-@links_1130_p
- Helps you to manage your diabetes.
-
-@links_1131_a
- Golden T Studios
-
-@links_1132_p
- Fun-to-play games with a simple interface.
-
-@links_1133_a
- GridGain
-
-@links_1134_p
- GridGain is an easy-to-use cloud application platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure.
-
-@links_1135_a
- Group Session
-
-@links_1136_p
- Open source web groupware.
-
-@links_1137_a
- HA-JDBC
-
-@links_1138_p
- High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver.
-
-@links_1139_a
- Hibernate
-
-@links_1140_p
- Relational persistence for idiomatic Java (O-R mapping tool).
-
-@links_1141_a
- Hibicius
-
-@links_1142_p
- Online Banking Client for the HBCI protocol.
-
-@links_1143_a
- ImageMapper
-
-@links_1144_p
- ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user-friendly interface.
-
-@links_1145_a
- JAMWiki
-
-@links_1146_p
- Java-based Wiki engine.
-
-@links_1147_a
- Jaspa
-
-@links_1148_p
- Java Spatial. Jaspa potentially brings around 200 spatial functions.
-
-@links_1149_a
- Java Simon
-
-@links_1150_p
- Simple Monitoring API.
-
-@links_1151_a
- JBoss jBPM
-
-@links_1152_p
- A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration.
-
-@links_1153_a
- JBoss Jopr
-
-@links_1154_p
- An enterprise management solution for JBoss middleware projects and other application technologies.
-
-@links_1155_a
- JGeocoder
-
-@links_1156_p
- Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location.
-
-@links_1157_a
- JGrass
-
-@links_1158_p
- Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig.
-
-@links_1159_a
- Jena
-
-@links_1160_p
- Java framework for building Semantic Web applications.
-
-@links_1161_a
- JMatter
-
-@links_1162_p
- Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern.
-
-@links_1163_a
- jOOQ (Java Object Oriented Querying)
-
-@links_1164_p
- jOOQ is a fluent API for typesafe SQL query construction and execution.
-
-@links_1165_a
- Liftweb
-
-@links_1166_p
- A Scala-based, secure, developer friendly web framework.
-
-@links_1167_a
- LiquiBase
-
-@links_1168_p
- A tool to manage database changes and refactorings.
-
-@links_1169_a
- Luntbuild
-
-@links_1170_p
- Build automation and management tool.
-
-@links_1171_a
- localdb
-
-@links_1172_p
- A tool that locates the full file path of the folder containing the database files.
-
-@links_1173_a
- Magnolia
-
-@links_1174_p
- Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays.
-
-@links_1175_a
- MiniConnectionPoolManager
-
-@links_1176_p
- A lightweight standalone JDBC connection pool manager.
-
-@links_1177_a
- Mr. Persister
-
-@links_1178_p
- Simple, small and fast object relational mapping.
-
-@links_1179_a
- Myna Application Server
-
-@links_1180_p
- Java web app that provides dynamic web content and Java libraries access from JavaScript.
-
-@links_1181_a
- MyTunesRss
-
-@links_1182_p
- MyTunesRSS lets you listen to your music wherever you are.
-
-@links_1183_a
- NCGC CurveFit
-
-@links_1184_p
- From: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds, with the potential to handle a million or more. It utilizes an embedded H2 database to enable flexible query/retrieval of all data, including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop applications that handle a few dose response curves at a time. The few commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user, require a license to Oracle, and lack both advanced query/retrieval and the ability to handle chemical structures.
-
-@links_1185_a
- Nuxeo
-
-@links_1186_p
- Standards-based, open source platform for building ECM applications.
-
-@links_1187_a
- nWire
-
-@links_1188_p
- Eclipse plug-in which expedites Java development. Its main purpose is to help developers find code quicker and easily understand how it relates to the rest of the application, and thus understand the application structure.
-
-@links_1189_a
- Ontology Works
-
-@links_1190_p
- This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise.
-
-@links_1191_a
- Ontoprise OntoBroker
-
-@links_1192_p
- SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations: OWL, RDF, RDFS, SPARQL, and F-Logic.
-
-@links_1193_a
- Open Anzo
-
-@links_1194_p
- Semantic Application Server.
-
-@links_1195_a
- OpenGroove
-
-@links_1196_p
- OpenGroove is a groupware program that allows users to synchronize data.
-
-@links_1197_a
- OpenSocial Development Environment (OSDE)
-
-@links_1198_p
- Development tool for OpenSocial application.
-
-@links_1199_a
- Orion
-
-@links_1200_p
- J2EE Application Server.
-
-@links_1201_a
- P5H2
-
-@links_1202_p
- A library for the Processing programming language and environment.
-
-@links_1203_a
- Phase-6
-
-@links_1204_p
- A computer based learning software.
-
-@links_1205_a
- Pickle
-
-@links_1206_p
- Pickle is a Java library containing classes for persistence, concurrency, and logging.
-
-@links_1207_a
- Piman
-
-@links_1208_p
- Water treatment projects data management.
-
-@links_1209_a
- PolePosition
-
-@links_1210_p
- Open source database benchmark.
-
-@links_1211_a
- Poormans
-
-@links_1212_p
-Very basic CMS running as an SWT application and generating static HTML pages.
-
-@links_1213_a
- Railo
-
-@links_1214_p
- Railo is an alternative engine for the Cold Fusion Markup Language that compiles CFML code into Java bytecode and executes it on a servlet engine.
-
-@links_1215_a
- Razuna
-
-@links_1216_p
- Open source Digital Asset Management System with integrated Web Content Management.
-
-@links_1217_a
- RIFE
-
-@links_1218_p
- A full-stack web application framework with tools and APIs to implement most common web features.
-
-@links_1219_a
- Sava
-
-@links_1220_p
- Open-source web-based content management system.
-
-@links_1221_a
- Scriptella
-
-@links_1222_p
- ETL (Extract-Transform-Load) and script execution tool.
-
-@links_1223_a
- Sesar
-
-@links_1224_p
- Dependency Injection Container with Aspect Oriented Programming.
-
-@links_1225_a
- SemmleCode
-
-@links_1226_p
- Eclipse plugin to help you improve software quality.
-
-@links_1227_a
- SeQuaLite
-
-@links_1228_p
- A free, light-weight, java data access framework.
-
-@links_1229_a
- ShapeLogic
-
-@links_1230_p
- Toolkit for declarative programming, image processing and computer vision.
-
-@links_1231_a
- Shellbook
-
-@links_1232_p
- Desktop publishing application.
-
-@links_1233_a
- Signsoft intelliBO
-
-@links_1234_p
- Persistence middleware supporting the JDO specification.
-
-@links_1235_a
- SimpleORM
-
-@links_1236_p
- Simple Java Object Relational Mapping.
-
-@links_1237_a
- SymmetricDS
-
-@links_1238_p
- Web-enabled, database independent, data synchronization/replication software.
-
-@links_1239_a
- SmartFoxServer
-
-@links_1240_p
- Platform for developing multiuser applications and games with Macromedia Flash.
-
-@links_1241_a
- Social Bookmarks Friend Finder
-
-@links_1242_p
- A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com).
-
-@links_1243_a
- sormula
-
-@links_1244_p
- Simple object relational mapping.
-
-@links_1245_a
- Springfuse
-
-@links_1246_p
- Code generation For Spring, Spring MVC & Hibernate.
-
-@links_1247_a
- SQLOrm
-
-@links_1248_p
- Java Object Relation Mapping.
-
-@links_1249_a
- StelsCSV and StelsXML
-
-@links_1250_p
- StelsCSV is a CSV JDBC type 4 driver that allows performing SQL queries and other JDBC operations on text files. StelsXML is an XML JDBC type 4 driver that allows performing SQL queries and other JDBC operations on XML files. Both use H2 as the SQL engine.
-
-@links_1251_a
- StorYBook
-
-@links_1252_p
- A summary-based tool for novelists and script writers. It helps to keep an overview of the various threads of a story.
-
-@links_1253_a
- StreamCruncher
-
-@links_1254_p
- Event (stream) processing kernel.
-
-@links_1255_a
- SUSE Manager, part of Linux Enterprise Server 11
-
-@links_1256_p
- The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies.
-
-@links_1257_a
- Tune Backup
-
-@links_1258_p
- Easy-to-use backup solution for your iTunes library.
-
-@links_1259_a
- weblica
-
-@links_1260_p
- Desktop CMS.
-
-@links_1261_a
- Web of Web
-
-@links_1262_p
- Collaborative and realtime interactive media platform for the web.
-
-@links_1263_a
- Werkzeugkasten
-
-@links_1264_p
- Minimum Java Toolset.
-
-@links_1265_a
- VPDA
-
-@links_1266_p
- View Providers Driven Applications (VPDA) is a Java based application framework for building applications composed of server components (view providers).
-
-@links_1267_a
- Volunteer database
-
-@links_1268_p
- A database front end to register volunteers, partnership and donation for a Non Profit organization.
-
-@mainWeb_1000_h1
-H2 Database Engine
-
-@mainWeb_1001_p
- Welcome to H2, the Java SQL database. The main features of H2 are:
-
-@mainWeb_1002_li
-Very fast, open source, JDBC API
-
-@mainWeb_1003_li
-Embedded and server modes; in-memory databases
-
-@mainWeb_1004_li
-Browser based Console application
-
-@mainWeb_1005_li
-Small footprint: around 1.5 MB jar file size
-
-@mainWeb_1006_h2
-Download
-
-@mainWeb_1007_td
- Version 1.4.187 (2015-04-10), Beta
-
-@mainWeb_1008_a
-Windows Installer (5 MB)
-
-@mainWeb_1009_a
-All Platforms (zip, 8 MB)
-
-@mainWeb_1010_a
-All Downloads
-
-@mainWeb_1011_td
-
-
-@mainWeb_1012_h2
-Support
-
-@mainWeb_1013_a
-Stack Overflow (tag H2)
-
-@mainWeb_1014_a
-Google Group English
-
-@mainWeb_1015_p
-, Japanese
-
-@mainWeb_1016_p
- For non-technical issues, use:
-
-@mainWeb_1017_h2
-Features
-
-@mainWeb_1018_th
-H2
-
-@mainWeb_1019_a
-Derby
-
-@mainWeb_1020_a
-HSQLDB
-
-@mainWeb_1021_a
-MySQL
-
-@mainWeb_1022_a
-PostgreSQL
-
-@mainWeb_1023_td
-Pure Java
-
-@mainWeb_1024_td
-Yes
-
-@mainWeb_1025_td
-Yes
-
-@mainWeb_1026_td
-Yes
-
-@mainWeb_1027_td
-No
-
-@mainWeb_1028_td
-No
-
-@mainWeb_1029_td
-Memory Mode
-
-@mainWeb_1030_td
-Yes
-
-@mainWeb_1031_td
-Yes
-
-@mainWeb_1032_td
-Yes
-
-@mainWeb_1033_td
-No
-
-@mainWeb_1034_td
-No
-
-@mainWeb_1035_td
-Encrypted Database
-
-@mainWeb_1036_td
-Yes
-
-@mainWeb_1037_td
-Yes
-
-@mainWeb_1038_td
-Yes
-
-@mainWeb_1039_td
-No
-
-@mainWeb_1040_td
-No
-
-@mainWeb_1041_td
-ODBC Driver
-
-@mainWeb_1042_td
-Yes
-
-@mainWeb_1043_td
-No
-
-@mainWeb_1044_td
-No
-
-@mainWeb_1045_td
-Yes
-
-@mainWeb_1046_td
-Yes
-
-@mainWeb_1047_td
-Fulltext Search
-
-@mainWeb_1048_td
-Yes
-
-@mainWeb_1049_td
-No
-
-@mainWeb_1050_td
-No
-
-@mainWeb_1051_td
-Yes
-
-@mainWeb_1052_td
-Yes
-
-@mainWeb_1053_td
-Multi Version Concurrency
-
-@mainWeb_1054_td
-Yes
-
-@mainWeb_1055_td
-No
-
-@mainWeb_1056_td
-Yes
-
-@mainWeb_1057_td
-Yes
-
-@mainWeb_1058_td
-Yes
-
-@mainWeb_1059_td
-Footprint (jar/dll size)
-
-@mainWeb_1060_td
-~1 MB
-
-@mainWeb_1061_td
-~2 MB
-
-@mainWeb_1062_td
-~1 MB
-
-@mainWeb_1063_td
-~4 MB
-
-@mainWeb_1064_td
-~6 MB
-
-@mainWeb_1065_p
- See also the detailed comparison.
-
-@mainWeb_1066_h2
-News
-
-@mainWeb_1067_b
-Newsfeeds:
-
-@mainWeb_1068_a
-Full text (Atom)
-
-@mainWeb_1069_p
- or Header only (RSS).
-
-@mainWeb_1070_b
-Email Newsletter:
-
-@mainWeb_1071_p
- Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context.
-
-@mainWeb_1072_td
-
-
-@mainWeb_1073_h2
-Contribute
-
-@mainWeb_1074_p
- You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter:
-
-@main_1000_h1
-H2 Database Engine
-
-@main_1001_p
- Welcome to H2, the free Java SQL database engine.
-
-@main_1002_a
-Quickstart
-
-@main_1003_p
- Get a fast overview.
-
-@main_1004_a
-Tutorial
-
-@main_1005_p
- Go through the samples.
-
-@main_1006_a
-Features
-
-@main_1007_p
- See what this database can do and how to use these features.
-
-@mvstore_1000_h1
-MVStore
-
-@mvstore_1001_a
- Overview
-
-@mvstore_1002_a
- Example Code
-
-@mvstore_1003_a
- Store Builder
-
-@mvstore_1004_a
- R-Tree
-
-@mvstore_1005_a
- Features
-
-@mvstore_1006_a
-- Maps
-
-@mvstore_1007_a
-- Versions
-
-@mvstore_1008_a
-- Transactions
-
-@mvstore_1009_a
-- In-Memory Performance and Usage
-
-@mvstore_1010_a
-- Pluggable Data Types
-
-@mvstore_1011_a
-- BLOB Support
-
-@mvstore_1012_a
-- R-Tree and Pluggable Map Implementations
-
-@mvstore_1013_a
-- Concurrent Operations and Caching
-
-@mvstore_1014_a
-- Log Structured Storage
-
-@mvstore_1015_a
-- Off-Heap and Pluggable Storage
-
-@mvstore_1016_a
-- File System Abstraction, File Locking and Online Backup
-
-@mvstore_1017_a
-- Encrypted Files
-
-@mvstore_1018_a
-- Tools
-
-@mvstore_1019_a
-- Exception Handling
-
-@mvstore_1020_a
-- Storage Engine for H2
-
-@mvstore_1021_a
- File Format
-
-@mvstore_1022_a
- Similar Projects and Differences to Other Storage Engines
-
-@mvstore_1023_a
- Current State
-
-@mvstore_1024_a
- Requirements
-
-@mvstore_1025_h2
-Overview
-
-@mvstore_1026_p
- The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL.
-
-@mvstore_1027_li
-MVStore stands for "multi-version store".
-
-@mvstore_1028_li
-Each store contains a number of maps that can be accessed using the java.util.Map interface.
-
-@mvstore_1029_li
-Both file-based persistence and in-memory operation are supported.
-
-@mvstore_1030_li
-It is intended to be fast, simple to use, and small.
-
-@mvstore_1031_li
-Concurrent read and write operations are supported.
-
-@mvstore_1032_li
-Transactions are supported (including concurrent transactions and 2-phase commit).
-
-@mvstore_1033_li
-The tool is very modular. It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files.
-
-@mvstore_1034_h2
-Example Code
-
-@mvstore_1035_p
- The following sample code shows how to use the tool:
-
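- A minimal sketch of such usage, with class and method names from the org.h2.mvstore package (fileName is assumed to be a String path, or null for in-memory operation):
-
-    import org.h2.mvstore.MVMap;
-    import org.h2.mvstore.MVStore;
-
-    // open the store (in-memory if fileName is null)
-    MVStore s = MVStore.open(fileName);
-    // create/get the map named "data"
-    MVMap<Integer, String> map = s.openMap("data");
-    // add and read some data
-    map.put(1, "Hello World");
-    System.out.println(map.get(1));
-    // close the store (this persists changes)
-    s.close();
-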
-@mvstore_1036_h2
-Store Builder
-
-@mvstore_1037_p
- The MVStore.Builder provides a fluent interface to build a store if configuration options are needed. Example usage:
-
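- A sketch of such builder usage (fileName, encryptionKey, compress and open are methods of MVStore.Builder; the values are placeholders):
-
-    import org.h2.mvstore.MVStore;
-
-    // build a file-based store with encryption and compression
-    MVStore s = new MVStore.Builder().
-            fileName(fileName).
-            encryptionKey("007".toCharArray()).
-            compress().
-            open();
-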
-@mvstore_1038_p
- The list of available options is:
-
-@mvstore_1039_li
-autoCommitBufferSize: the size of the write buffer.
-
-@mvstore_1040_li
-autoCommitDisabled: to disable auto-commit.
-
-@mvstore_1041_li
-backgroundExceptionHandler: a handler for exceptions that could occur while writing in the background.
-
-@mvstore_1042_li
-cacheSize: the cache size in MB.
-
-@mvstore_1043_li
-compress: compress the data when storing using a fast algorithm (LZF).
-
-@mvstore_1044_li
-compressHigh: compress the data when storing using a slower algorithm (Deflate).
-
-@mvstore_1045_li
-encryptionKey: the key for file encryption.
-
-@mvstore_1046_li
-fileName: the name of the file, for file based stores.
-
-@mvstore_1047_li
-fileStore: the storage implementation to use.
-
-@mvstore_1048_li
-pageSplitSize: the point where pages are split.
-
-@mvstore_1049_li
-readOnly: open the file in read-only mode.
-
-@mvstore_1050_h2
-R-Tree
-
-@mvstore_1051_p
- The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows:
-
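- A sketch of R-tree usage; MVRTreeMap and SpatialKey are in the org.h2.mvstore.rtree package, and the coordinates are placeholders:
-
-    import java.util.Iterator;
-    import org.h2.mvstore.MVStore;
-    import org.h2.mvstore.rtree.MVRTreeMap;
-    import org.h2.mvstore.rtree.SpatialKey;
-
-    // create an in-memory store and open an R-tree map
-    MVStore s = MVStore.open(null);
-    MVRTreeMap<String> r = s.openMap("data",
-            new MVRTreeMap.Builder<String>());
-    // add two keys; the first argument is a unique key id,
-    // followed by min x, max x, min y, max y
-    r.add(new SpatialKey(0, -3f, -2f, 2f, 3f), "left");
-    r.add(new SpatialKey(1, 3f, 4f, 4f, 5f), "right");
-    // iterate over the keys that intersect the given bounds
-    Iterator<SpatialKey> it =
-            r.findIntersectingKeys(new SpatialKey(0, 0f, 9f, 3f, 6f));
-    while (it.hasNext()) {
-        SpatialKey k = it.next();
-        System.out.println(k + ": " + r.get(k));
-    }
-    s.close();
-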
-@mvstore_1052_p
- The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3). The minimum number of dimensions is 1, the maximum is 32.
-
-@mvstore_1053_h2
-Features
-
-@mvstore_1054_h3
-Maps
-
-@mvstore_1055_p
- Each store contains a set of named maps. A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterating over some or all keys, and so on.
-
-@mvstore_1056_p
- Also supported, and very uncommon for maps, is fast index lookup: the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree.
-
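- For example, assuming an already opened MVMap<Integer, String> named map, index lookup uses the getKeyIndex and getKey methods of MVMap:
-
-    // index of a key within the sorted map (negative if absent)
-    long index = map.getKeyIndex(150);
-    // key at a given index (here: the first key of the map)
-    Integer first = map.getKey(0);
-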
-@mvstore_1057_p
- In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key).
-
-@mvstore_1058_h3
-Versions
-
-@mvstore_1059_p
- A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported.
-
-@mvstore_1060_p
- The following sample code shows how to create a store, open a map, add some data, and access the current and an old version:
-
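- A sketch of such version handling; getCurrentVersion, commit and openVersion are methods of the MVStore / MVMap API of this version:
-
-    import org.h2.mvstore.MVMap;
-    import org.h2.mvstore.MVStore;
-
-    MVStore s = MVStore.open(null);
-    MVMap<Integer, String> map = s.openMap("data");
-    map.put(1, "Hello");
-    map.put(2, "World");
-    // remember the current version; after the commit,
-    // this version becomes a read-only snapshot
-    long oldVersion = s.getCurrentVersion();
-    s.commit();
-    // more changes, in the new version
-    map.put(1, "Hi");
-    map.remove(2);
-    // access the old snapshot concurrently with new changes
-    MVMap<Integer, String> oldMap = map.openVersion(oldVersion);
-    System.out.println(oldMap.get(1)); // Hello
-    System.out.println(oldMap.get(2)); // World
-    System.out.println(map.get(1));    // Hi
-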
-@mvstore_1061_h3
-Transactions
-
-@mvstore_1062_p
- To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions).
-
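- As a rough sketch only: the package of TransactionStore has changed between H2 versions, and the exact method names below (init, begin, openMap, commit) are an assumption based on its typical usage:
-
-    // s is an open MVStore; the types come with TransactionStore
-    TransactionStore ts = new TransactionStore(s);
-    ts.init();
-    Transaction tx = ts.begin();
-    TransactionMap<Integer, String> txMap = tx.openMap("data");
-    txMap.put(1, "Hello");
-    tx.commit(); // or tx.rollback()
-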
-@mvstore_1063_p
- Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log.
-
-@mvstore_1064_h3
-In-Memory Performance and Usage
-
-@mvstore_1065_p
- Performance of in-memory operations is about 50% slower than java.util.TreeMap.
-
-@mvstore_1066_p
- The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory.
-
-@mvstore_1067_p
- If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted.
-
-@mvstore_1068_p
- As in all map implementations, keys need to be immutable; that means changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled).
-
-@mvstore_1069_h3
-Pluggable Data Types
-
-@mvstore_1070_p
- Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average.
-
-@mvstore_1071_p
- Parameterized data types are supported (for example one could build a string data type that limits the length).
-
-@mvstore_1072_p
- The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages.
-
-@mvstore_1073_h3
-BLOB Support
-
-@mvstore_1074_p
- There is a mechanism that stores large binary objects by splitting them into smaller blocks. This makes it possible to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface.
-
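- This mechanism is exposed as the StreamStore class (org.h2.mvstore.StreamStore); a sketch of its use, assuming an open MVStore s and a block map of type MVMap<Long, byte[]>:
-
-    import java.io.ByteArrayInputStream;
-    import java.io.InputStream;
-    import org.h2.mvstore.MVMap;
-    import org.h2.mvstore.StreamStore;
-
-    // the StreamStore splits streams into blocks kept in a regular map
-    MVMap<Long, byte[]> blocks = s.openMap("blobBlocks");
-    StreamStore store = new StreamStore(blocks);
-    // put(...) declares IOException; returns an id for later retrieval
-    byte[] id = store.put(new ByteArrayInputStream(new byte[1024 * 1024]));
-    InputStream in = store.get(id); // streaming / random-access read
-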
-@mvstore_1075_h3
-R-Tree and Pluggable Map Implementations
-
-@mvstore_1076_p
- The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a map that supports concurrent write operations, and a multi-version R-tree map implementation for spatial operations.
-
-@mvstore_1077_h3
-Concurrent Operations and Caching
-
-@mvstore_1078_p
- Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot.
-
-@mvstore_1079_p
- Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations.
-
-@mvstore_1080_p
- For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). The plan is to add such a mechanism later when needed.
-
-@mvstore_1081_h3
-Log Structured Storage
-
-@mvstore_1082_p
- Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, up to a block size of 2 MB, and then does not increase further.) By default, changes are automatically written when more than a certain number of pages are modified, and once every second in a background thread, even if only a little data was changed. Changes can also be written explicitly by calling commit().
-
-@mvstore_1083_p
- When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks).
-
-@mvstore_1084_p
- There are usually two write operations per chunk: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default).
-
-@mvstore_1085_p
- Old data is kept for at least 45 seconds (configurable), so that no explicit sync operations are required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data.
-
-@mvstore_1086_p
- Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs fewer disk operations per change, as data is only written once instead of two or three times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates).
-
-@mvstore_1087_h3
-Off-Heap and Pluggable Storage
-
-@mvstore_1088_p
- Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file.
-
-@mvstore_1089_p
- An off-heap storage implementation is available. This storage keeps the data in off-heap memory, meaning outside of the regular garbage collected heap. This makes it possible to use very large in-memory stores without having to increase the JVM heap, which would significantly increase Java garbage collection pauses. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that the allocation cost is low. To use the off-heap storage, call:
-
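- A sketch, using the OffHeapStore class from the org.h2.mvstore package:
-
-    import org.h2.mvstore.MVStore;
-    import org.h2.mvstore.OffHeapStore;
-
-    // keep all chunks in off-heap memory instead of a file
-    OffHeapStore offHeap = new OffHeapStore();
-    MVStore s = new MVStore.Builder().
-            fileStore(offHeap).
-            open();
-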
-@mvstore_1090_h3
-File System Abstraction, File Locking and Online Backup
-
-@mvstore_1091_p
- The file system is pluggable. The same file system abstraction is used as in H2. The file can be encrypted using an encrypting file system wrapper. Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API.
-
-@mvstore_1092_p
- Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used.
-
-@mvstore_1093_p
- The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse first needs to be disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content and the clear text content can be backed up.
-
-@mvstore_1094_h3
-Encrypted Files
-
-@mvstore_1095_p
- File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows:
-
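- A sketch: the password is passed to the store builder as a char array (encryptionKey is the MVStore.Builder method listed above; the password is a placeholder):
-
-    MVStore s = new MVStore.Builder().
-            fileName(fileName).
-            encryptionKey("007".toCharArray()).
-            open();
-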
-@mvstore_1096_p
- The following algorithms and settings are used:
-
-@mvstore_1097_li
-The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory.
-
-@mvstore_1098_li
-The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm.
-
-@mvstore_1099_li
-The length of the salt is 64 bits, so that an attacker cannot use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator.
-
-@mvstore_1100_li
-To speed up opening an encrypted store on Android, the number of PBKDF2 iterations is 10. The higher the value, the better the protection against brute-force password cracking attacks, but the slower opening a file becomes.
-
-@mvstore_1101_li
-The file itself is encrypted using the standardized disk encryption mode XTS-AES. Only a little more than one AES-128 round per block is needed.
-
-@mvstore_1102_h3
-Tools
-
-@mvstore_1103_p
- There is a tool, the MVStoreTool, to dump the contents of a file.
-
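- For example (dump is a static method of org.h2.mvstore.MVStoreTool; the file name is a placeholder):
-
-    // write a human-readable dump of the store file to System.out
-    MVStoreTool.dump("test.mv.db");
-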
-@mvstore_1104_h3
-Exception Handling
-
-@mvstore_1105_p
- This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur:
-
-@mvstore_1106_code
-IllegalStateException
-
-@mvstore_1107_li
- if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases.
-
-@mvstore_1108_code
-IllegalArgumentException
-
-@mvstore_1109_li
- if a method was called with an illegal argument.
-
-@mvstore_1110_code
-UnsupportedOperationException
-
-@mvstore_1111_li
- if a method was called that is not supported, for example trying to modify a read-only map.
-
-@mvstore_1112_code
-ConcurrentModificationException
-
-@mvstore_1113_li
- if a map is modified concurrently.
-
-@mvstore_1114_h3
-Storage Engine for H2
-
-@mvstore_1115_p
- For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE=TRUE to the database URL. Even though the MVStore can be used with the default table level locking, the MVCC mode is enabled by default when using the MVStore.
-
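- For example, for an older version, the JDBC URL might look as follows (the database path ~/test is a placeholder):
-
-    import java.sql.Connection;
-    import java.sql.DriverManager;
-
-    Connection conn = DriverManager.getConnection(
-            "jdbc:h2:~/test;MV_STORE=TRUE", "sa", "");
-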
-@mvstore_1116_h2
-File Format
-
-@mvstore_1117_p
- The data is stored in one file. The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version.
-
-@mvstore_1118_p
- Each chunk contains a number of B-tree pages. As an example, the following code:
-
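- A sketch of code of this kind, consistent with the chunk layout described below (s is an open MVStore; the key ranges are illustrative):
-
-    MVMap<Integer, String> map = s.openMap("data");
-    // first version: insert 400 entries
-    for (int i = 0; i < 400; i++) {
-        map.put(i, "Hello");
-    }
-    s.commit();
-    // second version: update the first 100 entries
-    for (int i = 0; i < 100; i++) {
-        map.put(i, "Hi");
-    }
-    s.commit();
-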
-@mvstore_1119_p
- will result in the following two chunks (excluding metadata):
-
-@mvstore_1120_b
-Chunk 1:
-
-@mvstore_1121_p
- - Page 1: (root) node with 2 entries pointing to page 2 and 3
-
-@mvstore_1122_p
- - Page 2: leaf with 140 entries (keys 0 - 139)
-
-@mvstore_1123_p
- - Page 3: leaf with 260 entries (keys 140 - 399)
-
-@mvstore_1124_b
-Chunk 2:
-
-@mvstore_1125_p
- - Page 4: (root) node with 2 entries pointing to page 3 and 5
-
-@mvstore_1126_p
- - Page 5: leaf with 140 entries (keys 0 - 139)
-
-@mvstore_1127_p
- That means each chunk contains the changes of one version: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks.
-
-@mvstore_1128_h3
-File Header
-
-@mvstore_1129_p
- There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data:
-
-@mvstore_1130_p
- The data is stored in the form of key-value pairs. Each value is stored as a hexadecimal number. The entries are:
-
-@mvstore_1131_li
-H: The entry "H:2" stands for the H2 database.
-
-@mvstore_1132_li
-block: The block number where one of the newest chunks starts (but not necessarily the newest).
-
-@mvstore_1133_li
-blockSize: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks.
-
-@mvstore_1134_li
-chunk: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't.
-
-@mvstore_1135_li
-created: The number of milliseconds since 1970 when the file was created.
-
-@mvstore_1136_li
-format: The file format number. Currently 1.
-
-@mvstore_1137_li
-version: The version number of the chunk.
-
-@mvstore_1138_li
-fletcher: The Fletcher-32 checksum of the header.
-
-@mvstore_1139_p
- When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (see below for details), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file.
-
-@mvstore_1140_h3
-Chunk Format
-
-@mvstore_1141_p
- There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk.
-
-@mvstore_1142_p
- The footer makes it possible to verify that the chunk is completely written (a chunk is written as one write operation), and to find the start position of the very last chunk in the file. The chunk header and footer contain the following data:
-
-@mvstore_1143_p
- The fields of the chunk header and footer are:
-
-@mvstore_1144_li
-chunk: The chunk id.
-
-@mvstore_1145_li
-block: The first block of the chunk (multiply by the block size to get the position in the file).
-
-@mvstore_1146_li
-len: The size of the chunk in number of blocks.
-
-@mvstore_1147_li
-map: The id of the newest map; incremented when a new map is created.
-
-@mvstore_1148_li
-max: The sum of all maximum page sizes (see page format).
-
-@mvstore_1149_li
-next: The predicted start block of the next chunk.
-
-@mvstore_1150_li
-pages: The number of pages in the chunk.
-
-@mvstore_1151_li
-root: The position of the metadata root page (see page format).
-
-@mvstore_1152_li
-time: The time the chunk was written, in milliseconds after the file was created.
-
-@mvstore_1153_li
-version: The version this chunk represents.
-
-@mvstore_1154_li
-fletcher: The checksum of the footer.
-
-@mvstore_1155_p
- Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first.
-
-@mvstore_1156_p
- How the newest chunk is located when opening a store: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file) are read. From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), that chunk's header and footer are read as well. If it turns out to be a newer valid chunk, this is repeated until the newest chunk is found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. When the next chunk is written and the prediction turns out to be incorrect, the file header is updated as well. In any case, the file header is updated if the next chain gets longer than 20 hops.
-
-@mvstore_1157_h3
-Page Format
-
-@mvstore_1158_p
- Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is:
-
-@mvstore_1159_li
-length (int): Length of the page in bytes.
-
-@mvstore_1160_li
-checksum (short): Checksum (chunk id xor offset within the chunk xor page length).
-
-@mvstore_1161_li
-mapId (variable size int): The id of the map this page belongs to.
-
-@mvstore_1162_li
-len (variable size int): The number of keys in the page.
-
-@mvstore_1163_li
-type (byte): The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm).
-
-@mvstore_1164_li
-children (array of long; internal nodes only): The position of the children.
-
-@mvstore_1165_li
-childCounts (array of variable size long; internal nodes only): The total number of entries for the given child page.
-
-@mvstore_1166_li
-keys (byte array): All keys, stored depending on the data type.
-
-@mvstore_1167_li
-values (byte array; leaf pages only): All values, stored depending on the data type.
-
-@mvstore_1168_p
- Even though this is not required by the file format, pages are stored in the following order: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk.
-
-@mvstore_1169_p
- Pointers to pages are stored as a long, using a special format: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2: 64, 3: 96, 4: 128, 5: 192, and so on up to 31, which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This makes it possible to estimate the amount of free space within a block, in addition to the number of free pages.
-
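- As a sketch of this bit layout (the shift amounts follow from the 26 / 32 / 5 / 1 bit split above; the helper names are illustrative, not the H2 API):
-
-    // compose a page pointer: 26-bit chunk id, 32-bit offset,
-    // 5-bit length code, 1-bit page type
-    static long pagePos(int chunkId, int offset, int lengthCode, int type) {
-        return ((long) chunkId << 38)
-                | ((long) offset << 6)
-                | ((long) lengthCode << 1)
-                | type;
-    }
-    static int chunkId(long pos)    { return (int) (pos >>> 38); }
-    static int offset(long pos)     { return (int) (pos >>> 6); }
-    static int lengthCode(long pos) { return (int) ((pos >>> 1) & 31); }
-    static int pageType(long pos)   { return (int) (pos & 1); }
-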
-@mvstore_1170_p
- The total number of entries in child pages is kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree.
-
-@mvstore_1171_p
- Data compression: The data after the page type are optionally compressed using the LZF algorithm.
-
-@mvstore_1172_h3
-Metadata Map
-
-@mvstore_1173_p
- In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries:
-
-@mvstore_1174_li
-chunk.1: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length.
-
-@mvstore_1175_li
-map.1: The metadata of map 1. The entries are: name, createVersion, and type.
-
-@mvstore_1176_li
-name.data: The map id of the map named "data". The value is "1".
-
-@mvstore_1177_li
-root.1: The root position of map 1.
-
-@mvstore_1178_li
-setting.storeVersion: The store version (a user defined value).
-
-@mvstore_1179_h2
-Similar Projects and Differences to Other Storage Engines
-
-@mvstore_1180_p
- Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in Java and Android applications.
-
-@mvstore_1181_p
- The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal.
-
-@mvstore_1182_p
- Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android.
-
-@mvstore_1183_p
- The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses a log structured storage. The MVStore does not have a record size limit.
-
-@mvstore_1184_h2
-Current State
-
-@mvstore_1185_p
- The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay).
-
-@mvstore_1186_h2
-Requirements
-
-@mvstore_1187_p
- The MVStore is included in the latest H2 jar file.
-
-@mvstore_1188_p
- There are no special requirements to use it. The MVStore should run on any JVM as well as on Android.
-
-@mvstore_1189_p
- To build just the MVStore (without the database engine), run:
-
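- Presumably the jarMVStore target of the H2 build tool (the target name matches the jar name mentioned below):
-
-    ./build.sh jarMVStore
-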
-@mvstore_1190_p
- This will create the file bin/h2mvstore-1.4.187.jar (about 200 KB).
-
-@performance_1000_h1
-Performance
-
-@performance_1001_a
- Performance Comparison
-
-@performance_1002_a
- PolePosition Benchmark
-
-@performance_1003_a
- Database Performance Tuning
-
-@performance_1004_a
- Using the Built-In Profiler
-
-@performance_1005_a
- Application Profiling
-
-@performance_1006_a
- Database Profiling
-
-@performance_1007_a
- Statement Execution Plans
-
-@performance_1008_a
- How Data is Stored and How Indexes Work
-
-@performance_1009_a
- Fast Database Import
-
-@performance_1010_h2
-Performance Comparison
-
-@performance_1011_p
- In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced.
-
-@performance_1012_h3
-Embedded
-
-@performance_1013_th
-Test Case
-
-@performance_1014_th
-Unit
-
-@performance_1015_th
-H2
-
-@performance_1016_th
-HSQLDB
-
-@performance_1017_th
-Derby
-
-@performance_1018_td
-Simple: Init
-
-@performance_1019_td
-ms
-
-@performance_1020_td
-1019
-
-@performance_1021_td
-1907
-
-@performance_1022_td
-8280
-
-@performance_1023_td
-Simple: Query (random)
-
-@performance_1024_td
-ms
-
-@performance_1025_td
-1304
-
-@performance_1026_td
-873
-
-@performance_1027_td
-1912
-
-@performance_1028_td
-Simple: Query (sequential)
-
-@performance_1029_td
-ms
-
-@performance_1030_td
-835
-
-@performance_1031_td
-1839
-
-@performance_1032_td
-5415
-
-@performance_1033_td
-Simple: Update (sequential)
-
-@performance_1034_td
-ms
-
-@performance_1035_td
-961
-
-@performance_1036_td
-2333
-
-@performance_1037_td
-21759
-
-@performance_1038_td
-Simple: Delete (sequential)
-
-@performance_1039_td
-ms
-
-@performance_1040_td
-950
-
-@performance_1041_td
-1922
-
-@performance_1042_td
-32016
-
-@performance_1043_td
-Simple: Memory Usage
-
-@performance_1044_td
-MB
-
-@performance_1045_td
-21
-
-@performance_1046_td
-10
-
-@performance_1047_td
-8
-
-@performance_1048_td
-BenchA: Init
-
-@performance_1049_td
-ms
-
-@performance_1050_td
-919
-
-@performance_1051_td
-2133
-
-@performance_1052_td
-7528
-
-@performance_1053_td
-BenchA: Transactions
-
-@performance_1054_td
-ms
-
-@performance_1055_td
-1219
-
-@performance_1056_td
-2297
-
-@performance_1057_td
-8541
-
-@performance_1058_td
-BenchA: Memory Usage
-
-@performance_1059_td
-MB
-
-@performance_1060_td
-12
-
-@performance_1061_td
-15
-
-@performance_1062_td
-7
-
-@performance_1063_td
-BenchB: Init
-
-@performance_1064_td
-ms
-
-@performance_1065_td
-905
-
-@performance_1066_td
-1993
-
-@performance_1067_td
-8049
-
-@performance_1068_td
-BenchB: Transactions
-
-@performance_1069_td
-ms
-
-@performance_1070_td
-1091
-
-@performance_1071_td
-583
-
-@performance_1072_td
-1165
-
-@performance_1073_td
-BenchB: Memory Usage
-
-@performance_1074_td
-MB
-
-@performance_1075_td
-17
-
-@performance_1076_td
-11
-
-@performance_1077_td
-8
-
-@performance_1078_td
-BenchC: Init
-
-@performance_1079_td
-ms
-
-@performance_1080_td
-2491
-
-@performance_1081_td
-4003
-
-@performance_1082_td
-8064
-
-@performance_1083_td
-BenchC: Transactions
-
-@performance_1084_td
-ms
-
-@performance_1085_td
-1979
-
-@performance_1086_td
-803
-
-@performance_1087_td
-2840
-
-@performance_1088_td
-BenchC: Memory Usage
-
-@performance_1089_td
-MB
-
-@performance_1090_td
-19
-
-@performance_1091_td
-22
-
-@performance_1092_td
-9
-
-@performance_1093_td
-Executed statements
-
-@performance_1094_td
-#
-
-@performance_1095_td
-1930995
-
-@performance_1096_td
-1930995
-
-@performance_1097_td
-1930995
-
-@performance_1098_td
-Total time
-
-@performance_1099_td
-ms
-
-@performance_1100_td
-13673
-
-@performance_1101_td
-20686
-
-@performance_1102_td
-105569
-
-@performance_1103_td
-Statements per second
-
-@performance_1104_td
-#
-
-@performance_1105_td
-141226
-
-@performance_1106_td
-93347
-
-@performance_1107_td
-18291
-
-@performance_1108_h3
-Client-Server
-
-@performance_1109_th
-Test Case
-
-@performance_1110_th
-Unit
-
-@performance_1111_th
-H2 (Server)
-
-@performance_1112_th
-HSQLDB
-
-@performance_1113_th
-Derby
-
-@performance_1114_th
-PostgreSQL
-
-@performance_1115_th
-MySQL
-
-@performance_1116_td
-Simple: Init
-
-@performance_1117_td
-ms
-
-@performance_1118_td
-16338
-
-@performance_1119_td
-17198
-
-@performance_1120_td
-27860
-
-@performance_1121_td
-30156
-
-@performance_1122_td
-29409
-
-@performance_1123_td
-Simple: Query (random)
-
-@performance_1124_td
-ms
-
-@performance_1125_td
-3399
-
-@performance_1126_td
-2582
-
-@performance_1127_td
-6190
-
-@performance_1128_td
-3315
-
-@performance_1129_td
-3342
-
-@performance_1130_td
-Simple: Query (sequential)
-
-@performance_1131_td
-ms
-
-@performance_1132_td
-21841
-
-@performance_1133_td
-18699
-
-@performance_1134_td
-42347
-
-@performance_1135_td
-30774
-
-@performance_1136_td
-32611
-
-@performance_1137_td
-Simple: Update (sequential)
-
-@performance_1138_td
-ms
-
-@performance_1139_td
-6913
-
-@performance_1140_td
-7745
-
-@performance_1141_td
-28576
-
-@performance_1142_td
-32698
-
-@performance_1143_td
-11350
-
-@performance_1144_td
-Simple: Delete (sequential)
-
-@performance_1145_td
-ms
-
-@performance_1146_td
-8051
-
-@performance_1147_td
-9751
-
-@performance_1148_td
-42202
-
-@performance_1149_td
-44480
-
-@performance_1150_td
-16555
-
-@performance_1151_td
-Simple: Memory Usage
-
-@performance_1152_td
-MB
-
-@performance_1153_td
-22
-
-@performance_1154_td
-11
-
-@performance_1155_td
-9
-
-@performance_1156_td
-0
-
-@performance_1157_td
-1
-
-@performance_1158_td
-BenchA: Init
-
-@performance_1159_td
-ms
-
-@performance_1160_td
-12996
-
-@performance_1161_td
-14720
-
-@performance_1162_td
-24722
-
-@performance_1163_td
-26375
-
-@performance_1164_td
-26060
-
-@performance_1165_td
-BenchA: Transactions
-
-@performance_1166_td
-ms
-
-@performance_1167_td
-10134
-
-@performance_1168_td
-10250
-
-@performance_1169_td
-18452
-
-@performance_1170_td
-21453
-
-@performance_1171_td
-15877
-
-@performance_1172_td
-BenchA: Memory Usage
-
-@performance_1173_td
-MB
-
-@performance_1174_td
-13
-
-@performance_1175_td
-15
-
-@performance_1176_td
-9
-
-@performance_1177_td
-0
-
-@performance_1178_td
-1
-
-@performance_1179_td
-BenchB: Init
-
-@performance_1180_td
-ms
-
-@performance_1181_td
-15264
-
-@performance_1182_td
-16889
-
-@performance_1183_td
-28546
-
-@performance_1184_td
-31610
-
-@performance_1185_td
-29747
-
-@performance_1186_td
-BenchB: Transactions
-
-@performance_1187_td
-ms
-
-@performance_1188_td
-3017
-
-@performance_1189_td
-3376
-
-@performance_1190_td
-1842
-
-@performance_1191_td
-2771
-
-@performance_1192_td
-1433
-
-@performance_1193_td
-BenchB: Memory Usage
-
-@performance_1194_td
-MB
-
-@performance_1195_td
-17
-
-@performance_1196_td
-12
-
-@performance_1197_td
-11
-
-@performance_1198_td
-1
-
-@performance_1199_td
-1
-
-@performance_1200_td
-BenchC: Init
-
-@performance_1201_td
-ms
-
-@performance_1202_td
-14020
-
-@performance_1203_td
-10407
-
-@performance_1204_td
-17655
-
-@performance_1205_td
-19520
-
-@performance_1206_td
-17532
-
-@performance_1207_td
-BenchC: Transactions
-
-@performance_1208_td
-ms
-
-@performance_1209_td
-5076
-
-@performance_1210_td
-3160
-
-@performance_1211_td
-6411
-
-@performance_1212_td
-6063
-
-@performance_1213_td
-4530
-
-@performance_1214_td
-BenchC: Memory Usage
-
-@performance_1215_td
-MB
-
-@performance_1216_td
-19
-
-@performance_1217_td
-21
-
-@performance_1218_td
-11
-
-@performance_1219_td
-1
-
-@performance_1220_td
-1
-
-@performance_1221_td
-Executed statements
-
-@performance_1222_td
-#
-
-@performance_1223_td
-1930995
-
-@performance_1224_td
-1930995
-
-@performance_1225_td
-1930995
-
-@performance_1226_td
-1930995
-
-@performance_1227_td
-1930995
-
-@performance_1228_td
-Total time
-
-@performance_1229_td
-ms
-
-@performance_1230_td
-117049
-
-@performance_1231_td
-114777
-
-@performance_1232_td
-244803
-
-@performance_1233_td
-249215
-
-@performance_1234_td
-188446
-
-@performance_1235_td
-Statements per second
-
-@performance_1236_td
-#
-
-@performance_1237_td
-16497
-
-@performance_1238_td
-16823
-
-@performance_1239_td
-7887
-
-@performance_1240_td
-7748
-
-@performance_1241_td
-10246
-
-@performance_1242_h3
-Benchmark Results and Comments
-
-@performance_1243_h4
-H2
-
-@performance_1244_p
- Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is that there is no limit on the result set size.
-
-@performance_1245_h4
-HSQLDB
-
-@performance_1246_p
- Version 2.3.2 was used for the test. Cached tables are used in this test (hsqldb.default_table_type=cached), and the write delay is 1 second (SET WRITE_DELAY 1).
-
-@performance_1247_h4
-Derby
-
-@performance_1248_p
- Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false), but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync() on each checkpoint. Derby supports a testing mode (system property derby.system.durability=test) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode.
-
-@performance_1249_h4
-PostgreSQL
-
-@performance_1250_p
- Version 9.1.5 was used for the test. The following options were changed in postgresql.conf: fsync = off, commit_delay = 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
-
-@performance_1251_h4
-MySQL
-
-@performance_1252_p
- Version 5.1.65-log was used for the test. MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Unfortunately, this setting is not listed in the configuration wizard, and it is always overwritten when using the wizard. You need to change this setting manually in the my.ini / my.cnf file, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
-
-@performance_1253_h4
-Firebird
-
-@performance_1254_p
- Firebird 1.5 (default installation) was tested, but the results are currently not published. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance is welcome.
-
-@performance_1255_h4
-Why Oracle / MS SQL Server / DB2 are Not Listed
-
-@performance_1256_p
- The license of these databases does not allow publishing benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions.
-
-@performance_1257_h3
-About this Benchmark
-
-@performance_1258_h4
-How to Run
-
-@performance_1259_p
- This test was run as follows:
-
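- Presumably via the benchmark target of the H2 build tool (the target name is an assumption):
-
-    ./build.sh benchmark
-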
-@performance_1260_h4
-Separate Process per Database
-
-@performance_1261_p
- For each database, a new process is started, to ensure the previous test does not impact the current test.
-
-@performance_1262_h4
-Number of Connections
-
-@performance_1263_p
- This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection.
-
-@performance_1264_h4
-Real-World Tests
-
-@performance_1265_p
- Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also: www.tpc.org). BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded.
-
-@performance_1266_h4
-Comparing Embedded with Server Databases
-
-@performance_1267_p
- This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However, MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested.
-
-@performance_1268_h4
-Test Platform
-
-@performance_1269_p
- This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6.
-
-@performance_1270_h4
-Multiple Runs
-
-@performance_1271_p
- When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured.
-
-@performance_1272_h4
-Memory Usage
-
-@performance_1273_p
- It is not enough to measure the time taken; the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases.
-
-@performance_1274_h4
-Delayed Operations
-
-@performance_1275_p
- Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially).
-
-@performance_1276_h4
-Transaction Commit / Durability
-
-@performance_1277_p
- Durability means a transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to think about this effect. Many databases suggest 'batching' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However many applications need 'short' transactions at runtime (a commit after each update). This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used.
-
-@performance_1278_h4
-Using Prepared Statements
-
-@performance_1279_p
- Wherever possible, the test cases use prepared statements.
-
-@performance_1280_h4
-Currently Not Tested: Startup Time
-
-@performance_1281_p
- The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed.
-
-@performance_1282_h2
-PolePosition Benchmark
-
-@performance_1283_p
- PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. This test has not been run for some time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4).
-
-@performance_1284_th
-Test Case
-
-@performance_1285_th
-Unit
-
-@performance_1286_th
-H2
-
-@performance_1287_th
-HSQLDB
-
-@performance_1288_th
-MySQL
-
-@performance_1289_td
-Melbourne write
-
-@performance_1290_td
-ms
-
-@performance_1291_td
-369
-
-@performance_1292_td
-249
-
-@performance_1293_td
-2022
-
-@performance_1294_td
-Melbourne read
-
-@performance_1295_td
-ms
-
-@performance_1296_td
-47
-
-@performance_1297_td
-49
-
-@performance_1298_td
-93
-
-@performance_1299_td
-Melbourne read_hot
-
-@performance_1300_td
-ms
-
-@performance_1301_td
-24
-
-@performance_1302_td
-43
-
-@performance_1303_td
-95
-
-@performance_1304_td
-Melbourne delete
-
-@performance_1305_td
-ms
-
-@performance_1306_td
-147
-
-@performance_1307_td
-133
-
-@performance_1308_td
-176
-
-@performance_1309_td
-Sepang write
-
-@performance_1310_td
-ms
-
-@performance_1311_td
-965
-
-@performance_1312_td
-1201
-
-@performance_1313_td
-3213
-
-@performance_1314_td
-Sepang read
-
-@performance_1315_td
-ms
-
-@performance_1316_td
-765
-
-@performance_1317_td
-948
-
-@performance_1318_td
-3455
-
-@performance_1319_td
-Sepang read_hot
-
-@performance_1320_td
-ms
-
-@performance_1321_td
-789
-
-@performance_1322_td
-859
-
-@performance_1323_td
-3563
-
-@performance_1324_td
-Sepang delete
-
-@performance_1325_td
-ms
-
-@performance_1326_td
-1384
-
-@performance_1327_td
-1596
-
-@performance_1328_td
-6214
-
-@performance_1329_td
-Bahrain write
-
-@performance_1330_td
-ms
-
-@performance_1331_td
-1186
-
-@performance_1332_td
-1387
-
-@performance_1333_td
-6904
-
-@performance_1334_td
-Bahrain query_indexed_string
-
-@performance_1335_td
-ms
-
-@performance_1336_td
-336
-
-@performance_1337_td
-170
-
-@performance_1338_td
-693
-
-@performance_1339_td
-Bahrain query_string
-
-@performance_1340_td
-ms
-
-@performance_1341_td
-18064
-
-@performance_1342_td
-39703
-
-@performance_1343_td
-41243
-
-@performance_1344_td
-Bahrain query_indexed_int
-
-@performance_1345_td
-ms
-
-@performance_1346_td
-104
-
-@performance_1347_td
-134
-
-@performance_1348_td
-678
-
-@performance_1349_td
-Bahrain update
-
-@performance_1350_td
-ms
-
-@performance_1351_td
-191
-
-@performance_1352_td
-87
-
-@performance_1353_td
-159
-
-@performance_1354_td
-Bahrain delete
-
-@performance_1355_td
-ms
-
-@performance_1356_td
-1215
-
-@performance_1357_td
-729
-
-@performance_1358_td
-6812
-
-@performance_1359_td
-Imola retrieve
-
-@performance_1360_td
-ms
-
-@performance_1361_td
-198
-
-@performance_1362_td
-194
-
-@performance_1363_td
-4036
-
-@performance_1364_td
-Barcelona write
-
-@performance_1365_td
-ms
-
-@performance_1366_td
-413
-
-@performance_1367_td
-832
-
-@performance_1368_td
-3191
-
-@performance_1369_td
-Barcelona read
-
-@performance_1370_td
-ms
-
-@performance_1371_td
-119
-
-@performance_1372_td
-160
-
-@performance_1373_td
-1177
-
-@performance_1374_td
-Barcelona query
-
-@performance_1375_td
-ms
-
-@performance_1376_td
-20
-
-@performance_1377_td
-5169
-
-@performance_1378_td
-101
-
-@performance_1379_td
-Barcelona delete
-
-@performance_1380_td
-ms
-
-@performance_1381_td
-388
-
-@performance_1382_td
-319
-
-@performance_1383_td
-3287
-
-@performance_1384_td
-Total
-
-@performance_1385_td
-ms
-
-@performance_1386_td
-26724
-
-@performance_1387_td
-53962
-
-@performance_1388_td
-87112
-
-@performance_1389_p
- There are a few problems with the PolePosition test:
-
-@performance_1390_li
- HSQLDB uses in-memory tables by default while H2 uses persistent tables. The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar
with a newer version (for example hsqldb-1.8.0.7.jar
), and then use the setting hsqldb.connecturl=jdbc:hsqldb:file:data/hsqldb/dbbench2;hsqldb.default_table_type=cached;sql.enforce_size=true
in the file Jdbc.properties
.
-
-@performance_1391_li
-HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). To change that, use the database URL jdbc:h2:file:data/h2/dbbench;DB_CLOSE_DELAY=-1
-
-@performance_1392_li
-The amount of cache memory is quite important, especially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account.
-
-@performance_1393_h2
-Database Performance Tuning
-
-@performance_1394_h3
-Keep Connections Open or Use a Connection Pool
-
-@performance_1395_p
- If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection
is especially slow if the database is closed. By default the database is closed if the last connection is closed.
-
-@performance_1396_p
- If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database.
-
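-For example, delayed closing can be enabled in the database URL; the value is the number of seconds to keep the database open after the last connection is closed (-1 keeps it open until the VM exits):
-
-jdbc:h2:~/test;DB_CLOSE_DELAY=10
-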
-@performance_1397_h3
-Use a Modern JVM
-
-@performance_1398_p
- Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server
command-line option improves performance at the cost of a slight increase in start-up time.
-
-@performance_1399_h3
-Virus Scanners
-
-@performance_1400_p
- Some virus scanners scan files every time they are accessed. It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs; that means even if somebody stored a virus in a database file, it would be harmless (if the virus does not run, it cannot spread). Some virus scanners allow you to exclude files by suffix. Ensure files ending with .db
are not scanned.
-
-@performance_1401_h3
-Using the Trace Options
-
-@performance_1402_p
- If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. For more information, see Using the Trace Options.
-
-@performance_1403_h3
-Index Usage
-
-@performance_1404_p
- This database uses indexes to improve the performance of SELECT, UPDATE, DELETE
. If a column is used in the WHERE
clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all columns, or the leading columns, of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX
statement.
-
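-A minimal example (table and index names are illustrative):
-
-CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255));
-CREATE INDEX IDX_TEST_NAME ON TEST(NAME);
-SELECT * FROM TEST WHERE NAME = 'Joe'; -- can use IDX_TEST_NAME
-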
-@performance_1405_h3
-How Data is Stored Internally
-
-@performance_1406_p
- For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT
, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table".
-
-@performance_1407_p
- H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long
. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT
is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT
is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB
columns, which are stored externally).
-
-@performance_1408_p
- For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple columns, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree.
-
-@performance_1409_h3
-Optimizer
-
-@performance_1410_p
- This database uses a cost based optimizer. For simple queries and queries of medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables are added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated.
-
-@performance_1411_h3
-Expression Optimization
-
-@performance_1412_p
- After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE
clause is always false, then the table is not accessed at all.
-
-@performance_1413_h3
-COUNT(*) Optimization
-
-@performance_1414_p
- If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE
clause is used, which means it only works for queries of the form SELECT COUNT(*) FROM table
.
-
-@performance_1415_h3
-Updating Optimizer Statistics / Column Selectivity
-
-@performance_1416_p
- When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME='A' AND T2.ID=T1.ID
, two indexes can be used: in this case the index on NAME for T1 and the index on ID for T2.
-
-@performance_1417_p
- If a table has multiple indexes, sometimes more than one index could be used. Example: if there is a table TEST(ID, NAME, FIRSTNAME)
and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME='A' AND FIRSTNAME='B'
, the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names.
-
-@performance_1418_p
- The SQL statement ANALYZE
can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer.
-
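-For example (SAMPLE_SIZE limits how many rows per table are scanned when estimating the selectivity):
-
-ANALYZE SAMPLE_SIZE 10000;
-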
-@performance_1419_h3
-In-Memory (Hash) Indexes
-
-@performance_1420_p
- Using in-memory indexes, especially in-memory hash indexes, can speed up queries and data manipulation.
-
-@performance_1421_p
-In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE
. In many cases, the rows themselves will also be kept in memory. Please note this may cause memory problems for large tables.
-
-@performance_1422_p
- In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only support direct lookup (WHERE ID = ?
) but not range scan (WHERE ID < ?
). To use hash indexes, use HASH as in: CREATE UNIQUE HASH INDEX
and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...)
.
-
-@performance_1423_h3
-Use Prepared Statements
-
-@performance_1424_p
- If possible, use prepared statements with parameters.
-
-@performance_1425_h3
-Prepared Statements and IN(...)
-
-@performance_1426_p
- Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example:
-
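-A sketch of this technique using the built-in TABLE function, given an open java.sql.Connection conn and a table TEST with an ID column (names are illustrative); the same prepared statement can then be reused for any number of values:
-
-PreparedStatement prep = conn.prepareStatement(
-    "SELECT * FROM TABLE(X INT=?) T INNER JOIN TEST ON T.X=TEST.ID");
-// the array contains the values that would otherwise go into the IN(...) list
-prep.setObject(1, new Object[] { "1", "2" });
-ResultSet rs = prep.executeQuery();
-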
-@performance_1427_h3
-Optimization Examples
-
-@performance_1428_p
- See src/test/org/h2/samples/optimizations.sql
for a few examples of queries that benefit from special optimizations built into the database.
-
-@performance_1429_h3
-Cache Size and Type
-
-@performance_1430_p
- By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings.
-
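-For example, the cache size (in KB) can be set in the database URL:
-
-jdbc:h2:~/test;CACHE_SIZE=65536
-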
-@performance_1431_h3
-Data Types
-
-@performance_1432_p
- Each data type has different storage and performance characteristics:
-
-@performance_1433_li
-The DECIMAL/NUMERIC
type is slower and requires more storage than the REAL
and DOUBLE
types.
-
-@performance_1434_li
-Text types are slower to read, write, and compare than numeric types and generally require more storage.
-
-@performance_1435_li
-See Large Objects for information on BINARY
vs. BLOB
and VARCHAR
vs. CLOB
performance.
-
-@performance_1436_li
-Parsing and formatting takes longer for the TIME
, DATE
, and TIMESTAMP
types than the numeric types.
-
-@performance_1437_code
-SMALLINT/TINYINT/BOOLEAN
-
-@performance_1438_li
- are not significantly smaller or faster to work with than INTEGER
in most modes.
-
-@performance_1439_h3
-Sorted Insert Optimization
-
-@performance_1440_p
- To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED
before the SELECT
statement:
-
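-A minimal sketch (table and column names are illustrative; SYSTEM_RANGE generates the keys already in sorted order):
-
-CREATE TABLE TEST(ID INT PRIMARY KEY, DATA VARCHAR);
-INSERT INTO TEST SORTED SELECT X, SPACE(100) FROM SYSTEM_RANGE(1, 100);
-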
-@performance_1441_h2
-Using the Built-In Profiler
-
-@performance_1442_p
- A very simple Java profiler is built-in. To use it, use the following template:
-
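-A sketch of the template (org.h2.util.Profiler repeatedly samples the stack traces of all threads; getTop returns the most common ones):
-
-Profiler prof = new Profiler();
-prof.startCollecting();
-// .... some long running code to profile ....
-prof.stopCollecting();
-System.out.println(prof.getTop(3));
-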
-@performance_1443_h2
-Application Profiling
-
-@performance_1444_h3
-Analyze First
-
-@performance_1445_p
- Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because it is usually not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis()
. But this does not work for complex applications with many modules, nor for memory problems.
-
-@performance_1446_p
- A simple way to profile an application is to use the built-in profiling tool of Java. Example:
-
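-For example, using the HPROF agent of older JVMs (the main class name is illustrative):
-
-java -Xrunhprof:cpu=samples,depth=16 com.acme.test.SimpleTest
-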
-@performance_1447_p
- Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l
to get the process id, and then run jstack <pid>
or kill -QUIT <pid>
(Linux) or press Ctrl+Break (Windows).
-
-@performance_1448_p
- A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. Example:
-
-@performance_1449_p
- The profiler is built into the H2 Console tool, to help analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which help to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds.
-
-@performance_1450_h2
-Database Profiling
-
-@performance_1451_p
- The ConvertTraceFile
tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof
. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2
). The easiest way to set the trace level is to append the setting to the database URL, for example: jdbc:h2:~/test;TRACE_LEVEL_FILE=2
or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2
. As an example, execute the following script using the H2 Console:
-
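-A minimal script for this (the table name is illustrative):
-
-SET TRACE_LEVEL_FILE 2;
-DROP TABLE IF EXISTS TEST;
-CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255));
-INSERT INTO TEST SELECT X, 'Hello ' || X FROM SYSTEM_RANGE(1, 1000);
-SELECT COUNT(*) FROM TEST WHERE NAME LIKE 'Hello%';
-SET TRACE_LEVEL_FILE 0;
-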
-@performance_1452_p
- After running the test case, convert the .trace.db
file using the ConvertTraceFile
tool. The trace file is located in the same directory as the database file.
-
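-For example (assuming the database is ~/test and the H2 jar is in the current directory):
-
-java -cp h2*.jar org.h2.tools.ConvertTraceFile -traceFile "~/test.trace.db" -script "test.sql"
-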
-@performance_1453_p
- The generated file test.sql
will contain the SQL statements as well as the following profiling data (results vary):
-
-@performance_1454_h2
-Statement Execution Plans
-
-@performance_1455_p
- The SQL statement EXPLAIN
displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN
: SELECT, UPDATE, DELETE, MERGE, INSERT
. The following query shows that the database uses the primary key index to search for rows:
-
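-For example, assuming the TEST table from the earlier examples:
-
-EXPLAIN SELECT * FROM TEST WHERE ID = 1;
-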
-@performance_1456_p
- For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE
(using the primary key). For each row, it will additionally check that the value of the column AMOUNT
is larger than zero, and for those rows the database will search in the table CUSTOMER
(using the primary key). The query plan contains some redundancy, so that it remains a valid statement.
-
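-A sketch of such a query (table and column names follow the description above):
-
-EXPLAIN SELECT * FROM INVOICE I, CUSTOMER C WHERE I.AMOUNT > 0 AND C.ID = I.CUSTOMER_ID;
-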
-@performance_1457_h3
-Displaying the Scan Count
-
-@performance_1458_code
-EXPLAIN ANALYZE
-
-@performance_1459_p
- additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN
which only prepares it. The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table. Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan
means this query doesn't use an index.
-
-@performance_1460_p
- The cache will prevent the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. Large CLOB and BLOB values are an exception; they are not stored in the table.
-
-@performance_1461_h3
-Special Optimizations
-
-@performance_1462_p
- For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY
is used.
-
-@performance_1463_p
- For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST
, the query plan includes the line /* direct lookup */
if the data can be read from an index.
-
-@performance_1464_p
- For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE
, the query plan includes the line /* distinct */
if there is a non-unique or multi-column index on this column, and if this column has a low selectivity.
-
-@performance_1465_p
- For queries of the form SELECT * FROM TEST ORDER BY ID
, the query plan includes the line /* index sorted */
to indicate there is no separate sorting required.
-
-@performance_1466_p
- For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID
, the query plan includes the line /* group sorted */
to indicate there is no separate sorting required.
-
-@performance_1467_h2
-How Data is Stored and How Indexes Work
-
-@performance_1468_p
- Internally, each row in a table is identified by a unique number, the row id. The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT
or BIGINT
, then the value of this column is the row id, otherwise the database generates the row id automatically. There is a (non-standard) way to access the row id: using the _ROWID_
pseudo-column:
-
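-A sketch (the table name ADDRESS and the data match the example below):
-
-CREATE TABLE ADDRESS(FIRST_NAME VARCHAR, NAME VARCHAR, CITY VARCHAR, PHONE VARCHAR);
-INSERT INTO ADDRESS VALUES('John', 'Miller', 'Berne', '123 456 789');
-INSERT INTO ADDRESS VALUES('Philip', 'Jones', 'Berne', '123 012 345');
-SELECT _ROWID_, * FROM ADDRESS;
-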
-@performance_1469_p
- The data is stored in the database as follows:
-
-@performance_1470_th
-_ROWID_
-
-@performance_1471_th
-FIRST_NAME
-
-@performance_1472_th
-NAME
-
-@performance_1473_th
-CITY
-
-@performance_1474_th
-PHONE
-
-@performance_1475_td
-1
-
-@performance_1476_td
-John
-
-@performance_1477_td
-Miller
-
-@performance_1478_td
-Berne
-
-@performance_1479_td
-123 456 789
-
-@performance_1480_td
-2
-
-@performance_1481_td
-Philip
-
-@performance_1482_td
-Jones
-
-@performance_1483_td
-Berne
-
-@performance_1484_td
-123 012 345
-
-@performance_1485_p
- Access by row id is fast because the data is sorted by this key. Please note the row id is not available until after the row has been added (that means it cannot be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT
:
-
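-For example, a condition on a column that has no index results in a table scan (using the ADDRESS table from above):
-
-EXPLAIN SELECT * FROM ADDRESS WHERE PHONE = '123 567 789';
-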
-@performance_1486_h3
-Indexes
-
-@performance_1487_p
- Internally, an index is basically just a table that contains the indexed column(s), plus the row id:
-
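-A sketch of such an index on the ADDRESS table (the index name is illustrative):
-
-CREATE INDEX IDX_ADDRESS ON ADDRESS(CITY, NAME, FIRST_NAME);
-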
-@performance_1488_p
- In the index, the data is sorted by the indexed columns. So this index contains the following data:
-
-@performance_1489_th
-CITY
-
-@performance_1490_th
-NAME
-
-@performance_1491_th
-FIRST_NAME
-
-@performance_1492_th
-_ROWID_
-
-@performance_1493_td
-Berne
-
-@performance_1494_td
-Jones
-
-@performance_1495_td
-Philip
-
-@performance_1496_td
-2
-
-@performance_1497_td
-Berne
-
-@performance_1498_td
-Miller
-
-@performance_1499_td
-John
-
-@performance_1500_td
-1
-
-@performance_1501_p
- When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) allows you to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city, is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. However, if only the first name is known, then this index is not used:
-
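-For example, the following query cannot use the index above, because CITY, the first column of the index, is unknown:
-
-EXPLAIN SELECT * FROM ADDRESS WHERE FIRST_NAME = 'John';
-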
-@performance_1502_p
- If your application often queries the table for a phone number, then it makes sense to create an additional index on it:
-
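-For example (the index name is illustrative):
-
-CREATE INDEX IDX_ADDRESS_PHONE ON ADDRESS(PHONE);
-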
-@performance_1503_p
- This index contains the phone number, and the row id:
-
-@performance_1504_th
-PHONE
-
-@performance_1505_th
-_ROWID_
-
-@performance_1506_td
-123 012 345
-
-@performance_1507_td
-2
-
-@performance_1508_td
-123 456 789
-
-@performance_1509_td
-1
-
-@performance_1510_h3
-Using Multiple Indexes
-
-@performance_1511_p
- Within a query, only one index per logical table is used. Using the condition PHONE = '123 567 789' OR CITY = 'Berne'
would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION
. In this case, each individual query uses a different index:
-
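-A sketch of the rewrite, using the ADDRESS table and the indexes from the examples above:
-
-SELECT * FROM ADDRESS WHERE PHONE = '123 567 789'
-UNION
-SELECT * FROM ADDRESS WHERE CITY = 'Berne';
-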
-@performance_1512_h2
-Fast Database Import
-
-@performance_1513_p
- To speed up large imports, consider using the following options temporarily:
-
-@performance_1514_code
-SET LOG 0
-
-@performance_1515_li
- (disabling the transaction log)
-
-@performance_1516_code
-SET CACHE_SIZE
-
-@performance_1517_li
- (a large cache is faster)
-
-@performance_1518_code
-SET LOCK_MODE 0
-
-@performance_1519_li
- (disable locking)
-
-@performance_1520_code
-SET UNDO_LOG 0
-
-@performance_1521_li
- (disable the session undo log)
-
-@performance_1522_p
- These options can be set in the database URL: jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0
. Most of those options are not recommended for regular use, which means you need to reset them after use.
-
-@performance_1523_p
- If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ...
is faster than CREATE TABLE(...); INSERT INTO ... SELECT ...
.
-
-@quickstart_1000_h1
-Quickstart
-
-@quickstart_1001_a
- Embedding H2 in an Application
-
-@quickstart_1002_a
- The H2 Console Application
-
-@quickstart_1003_h2
-Embedding H2 in an Application
-
-@quickstart_1004_p
- This database can be used in embedded mode, or in server mode. To use it in embedded mode, you need to do the following (a minimal connection sketch follows this list):
-
-@quickstart_1005_li
-Add the h2*.jar
to the classpath (H2 does not have any dependencies)
-
-@quickstart_1006_li
-Use the JDBC driver class: org.h2.Driver
-
-@quickstart_1007_li
-The database URL jdbc:h2:~/test
opens the database test
in your user home directory
-
-@quickstart_1008_li
-A new database is automatically created
-
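-A minimal connection sketch (the class name is illustrative; 'sa' with an empty password is a common choice for the first connection, which creates the user):
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-
-public class HelloH2 {
-    public static void main(String[] args) throws Exception {
-        // opens (and creates, if needed) the database 'test' in the user home directory
-        Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
-        conn.close();
-    }
-}
-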
-@quickstart_1009_h2
-The H2 Console Application
-
-@quickstart_1010_p
- The Console lets you access a SQL database using a browser interface.
-
-@quickstart_1011_p
- If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial.
-
-@quickstart_1012_h3
-Step-by-Step
-
-@quickstart_1013_h4
-Installation
-
-@quickstart_1014_p
- Install the software using the Windows Installer (if you have not done so yet).
-
-@quickstart_1015_h4
-Start the Console
-
-@quickstart_1016_p
- Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]:
-
-@quickstart_1017_p
- A new console window appears:
-
-@quickstart_1018_p
- Also, a new browser page should open with the URL http://localhost:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time.
-
-@quickstart_1019_h4
-Login
-
-@quickstart_1020_p
- Select [Generic H2] and click [Connect]:
-
-@quickstart_1021_p
- You are now logged in.
-
-@quickstart_1022_h4
-Sample
-
-@quickstart_1023_p
- Click on the [Sample SQL Script]:
-
-@quickstart_1024_p
- The SQL commands appear in the command area.
-
-@quickstart_1025_h4
-Execute
-
-@quickstart_1026_p
- Click [Run]
-
-@quickstart_1027_p
- On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script.
-
-@quickstart_1028_h4
-Disconnect
-
-@quickstart_1029_p
- Click on [Disconnect]:
-
-@quickstart_1030_p
- to close the connection.
-
-@quickstart_1031_h4
-End
-
-@quickstart_1032_p
- Close the console window. For more information, see the Tutorial.
-
-@roadmap_1000_h1
-Roadmap
-
-@roadmap_1001_p
- New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches.
-
-@roadmap_1002_h2
-Version 1.5.x: Planned Changes
-
-@roadmap_1003_li
-Replace file password hash with file encryption key; validate encryption key when connecting.
-
-@roadmap_1004_li
-Remove "set binary collation" feature.
-
-@roadmap_1005_li
-Remove the encryption algorithm XTEA.
-
-@roadmap_1006_li
-Disallow referencing other tables in a table (via constraints for example).
-
-@roadmap_1007_li
-Remove PageStore features like compress_lob.
-
-@roadmap_1008_h2
-Version 1.4.x: Planned Changes
-
-@roadmap_1009_li
-Change license to MPL 2.0.
-
-@roadmap_1010_li
-Automatic migration from 1.3 databases to 1.4.
-
-@roadmap_1011_li
-Option to disable the file name suffix somehow (issue 447).
-
-@roadmap_1012_h2
-Priority 1
-
-@roadmap_1013_li
-Bugfixes.
-
-@roadmap_1014_li
-More tests with MULTI_THREADED=1 (and MULTI_THREADED with MVCC): Online backup (using the 'backup' statement).
-
-@roadmap_1015_li
-Server side cursors.
-
-@roadmap_1016_h2
-Priority 2
-
-@roadmap_1017_li
-Support hints for the optimizer (which index to use, enforce the join order).
-
-@roadmap_1018_li
-Full outer joins.
-
-@roadmap_1019_li
-Access rights: remember the owner of an object. Create, alter and drop privileges. COMMENT: allow owner of object to change it. Issue 208: Access rights for schemas.
-
-@roadmap_1020_li
-Test multi-threaded in-memory db access.
-
-@roadmap_1021_li
-MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes.
-
-@roadmap_1022_li
-Support GRANT SELECT, UPDATE ON [schemaName.] *.
-
-@roadmap_1023_li
-Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL.
-
-@roadmap_1024_li
-Clustering: support mixed clustering mode (one embedded, others in server mode).
-
-@roadmap_1025_li
-Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3).
-
-@roadmap_1026_li
-Window functions: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4;
-
-@roadmap_1027_li
-PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables.
-
-@roadmap_1028_li
-Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211.
-
-@roadmap_1029_li
-Test very large databases and LOBs (up to 256 GB).
-
-@roadmap_1030_li
-Store all temp files in the temp directory.
-
-@roadmap_1031_li
-Don't use temp files, especially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs.
-
-@roadmap_1032_li
-Make DDL (Data Definition) operations transactional.
-
-@roadmap_1033_li
-Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED).
-
-@roadmap_1034_li
-Groovy Stored Procedures: http://groovy.codehaus.org/GSQL
-
-@roadmap_1035_li
-Add a migration guide (list differences between databases).
-
-@roadmap_1036_li
-Optimization: automatic index creation suggestion using the trace file?
-
-@roadmap_1037_li
-Fulltext search Lucene: analyzer configuration, mergeFactor.
-
-@roadmap_1038_li
-Compression performance: don't allocate buffers, compress / expand in to out buffer.
-
-@roadmap_1039_li
-Rebuild index functionality to shrink index size and improve performance.
-
-@roadmap_1040_li
-Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA).
-
-@roadmap_1041_li
-Test performance again with SQL Server, Oracle, DB2.
-
-@roadmap_1042_li
-Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification.
-
-@roadmap_1043_li
-Write more tests and documentation for MVCC (Multi Version Concurrency Control).
-
-@roadmap_1044_li
-Find a tool to view large text files (larger than 100 MB), with find, page up and down (like less), truncate before / after.
-
-@roadmap_1045_li
-Implement, test, document XAConnection and so on.
-
-@roadmap_1046_li
-Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption).
-
-@roadmap_1047_li
-CHECK: find out what makes CHECK=TRUE slow, move to CHECK2.
-
-@roadmap_1048_li
-Drop with invalidate views (so that source code is not lost). Check what other databases do exactly.
-
-@roadmap_1049_li
-Index usage for (ID, NAME)=(1, 'Hi'); document.
-
-@roadmap_1050_li
-Set a connection read only (Connection.setReadOnly) or using a connection parameter.
-
-@roadmap_1051_li
-Access rights: finer grained access control (grant access for specific functions).
-
-@roadmap_1052_li
-ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]).
-
-@roadmap_1053_li
-Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP).
-
-@roadmap_1054_li
-Web server classloader: override findResource / getResourceFrom.
-
-@roadmap_1055_li
-Cost for embedded temporary view is calculated wrong, if result is constant.
-
-@roadmap_1056_li
-Count index range query (count(*) where id between 10 and 20).
-
-@roadmap_1057_li
-Performance: update in-place.
-
-@roadmap_1058_li
-Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log).
-
-@roadmap_1059_li
-Database file name suffix: a way to use no or a different suffix (for example using a slash).
-
-@roadmap_1060_li
-Eclipse plugin.
-
-@roadmap_1061_li
-Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification".
-
-@roadmap_1062_li
-Fulltext search (native): reader / tokenizer / filter.
-
-@roadmap_1063_li
-Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files.
-
-@roadmap_1064_li
-iReport to support H2.
-
-@roadmap_1065_li
-Include SMTP (mail) client (alert on cluster failure, low disk space,...).
-
-@roadmap_1066_li
-Option for SCRIPT to only process one or a set of schemas or tables, and append to a file.
-
-@roadmap_1067_li
-JSON parser and functions.
-
-@roadmap_1068_li
-Copy database: tool with config GUI and batch mode, extensible (example: compare).
-
-@roadmap_1069_li
-Document, implement tool for long running transactions using user-defined compensation statements.
-
-@roadmap_1070_li
-Support SET TABLE DUAL READONLY.
-
-@roadmap_1071_li
-GCJ: what is the state now?
-
-@roadmap_1072_li
-Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html
-
-@roadmap_1073_li
-Optimization: simpler log compression.
-
-@roadmap_1074_li
-Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif
-
-@roadmap_1075_li
-Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN.
-
-@roadmap_1076_li
-Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R).
-
-@roadmap_1077_li
-Custom class loader to reload functions on demand.
-
-@roadmap_1078_li
-Test http://mysql-je.sourceforge.net/
-
-@roadmap_1079_li
-H2 Console: the webclient could support more features like phpMyAdmin.
-
-@roadmap_1080_li
-Support Oracle functions: TO_DATE, TO_NUMBER.
-
-@roadmap_1081_li
-Work on the Java to C converter.
-
-@roadmap_1082_li
-The HELP information schema can be directly exposed in the Console.
-
-@roadmap_1083_li
-Maybe use the 0x1234 notation for binary fields, see MS SQL Server.
-
-@roadmap_1084_li
-Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html
-
-@roadmap_1085_li
-SQL Server 2005, Oracle: support COUNT(*) OVER(). See http://www.orafusion.com/art_anlytc.htm
-
-@roadmap_1086_li
-SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip
-
-@roadmap_1087_li
-Version column (number/sequence and timestamp based).
-
-@roadmap_1088_li
-Optimize getGeneratedKey: send last identity after each execute (server).
-
-@roadmap_1089_li
-Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID).
-
-@roadmap_1090_li
-Max memory rows / max undo log size: use block count / row size not row count.
-
-@roadmap_1091_li
-Implement point-in-time recovery.
-
-@roadmap_1092_li
-Support PL/SQL (programming language / control flow statements).
-
-@roadmap_1093_li
-LIKE: improved version for larger texts (currently using naive search).
-
-@roadmap_1094_li
-Throw an exception when the application calls getInt on a Long (optional).
-
-@roadmap_1095_li
-Default date format for input and output (local date constants).
-
-@roadmap_1096_li
-Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery).
-
-@roadmap_1097_li
-File system that writes to two file systems (replication, replicating file system).
-
-@roadmap_1098_li
-Standalone tool to get relevant system properties and add it to the trace output.
-
-@roadmap_1099_li
-Support 'call proc(1=value)' (PostgreSQL, Oracle).
-
-@roadmap_1100_li
-Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?).
-
-@roadmap_1101_li
-Console: autocomplete Ctrl+Space inserts template.
-
-@roadmap_1102_li
-Option to encrypt .trace.db file.
-
-@roadmap_1103_li
-Auto-Update feature for database, .jar file.
-
-@roadmap_1104_li
-ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp.
-
-@roadmap_1105_li
-Partial indexing (see PostgreSQL).
-
-@roadmap_1106_li
-Add GUI to build a custom version (embedded, fulltext,...) using build flags.
-
-@roadmap_1107_li
-http://rubyforge.org/projects/hypersonic/
-
-@roadmap_1108_li
-Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app).
-
-@roadmap_1109_li
-Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility).
-
-@roadmap_1110_li
-Backup tool should work with other databases as well.
-
-@roadmap_1111_li
-Console: -ifExists doesn't work for the console. Add a flag to disable other dbs.
-
-@roadmap_1112_li
-Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess).
-
-@roadmap_1113_li
-Java static code analysis: http://pmd.sourceforge.net/
-
-@roadmap_1114_li
-Java static code analysis: http://www.eclipse.org/tptp/
-
-@roadmap_1115_li
-Compatibility for CREATE SCHEMA AUTHORIZATION.
-
-@roadmap_1116_li
-Implement Clob / Blob truncate and the remaining functionality.
-
-@roadmap_1117_li
-Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ...
-
-@roadmap_1118_li
-File locking: writing a system property to detect concurrent access from the same VM (different classloaders).
-
-@roadmap_1119_li
-Pure SQL triggers (example: update parent table if the child table is changed).
-
-@roadmap_1120_li
-Add H2 to Gem (Ruby install system).
-
-@roadmap_1121_li
-Support linked JCR tables.
-
-@roadmap_1122_li
-Native fulltext search: min word length; store word positions.
-
-@roadmap_1123_li
-Add an option to the SCRIPT command to generate only portable / standard SQL.
-
-@roadmap_1124_li
-Updatable views: create 'instead of' triggers automatically if possible (simple cases first).
-
-@roadmap_1125_li
-Improve create index performance.
-
-@roadmap_1126_li
-Compact databases without having to close the database (vacuum).
-
-@roadmap_1127_li
-Implement more JDBC 4.0 features.
-
-@roadmap_1128_li
-Support TRANSFORM / PIVOT as in MS Access.
-
-@roadmap_1129_li
-SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...).
-
-@roadmap_1130_li
-Support updatable views with join on primary keys (to extend a table).
-
-@roadmap_1131_li
-Public interface for functions (not public static).
-
-@roadmap_1132_li
-Support reading the transaction log.
-
-@roadmap_1133_li
-Feature matrix as in i-net software.
-
-@roadmap_1134_li
-Updatable result set on table without primary key or unique index.
-
-@roadmap_1135_li
-Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221.
-
-@roadmap_1136_li
-Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString')
-
-@roadmap_1137_li
-Support data type INTERVAL
-
-@roadmap_1138_li
-Support nested transactions (possibly using savepoints internally).
-
-@roadmap_1139_li
-Add a benchmark for bigger databases, and one for many users.
-
-@roadmap_1140_li
-Compression in the result set over TCP/IP.
-
-@roadmap_1141_li
-Support curtimestamp (like curtime, curdate).
-
-@roadmap_1142_li
-Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options.
-
-@roadmap_1143_li
-Release locks (shared or exclusive) on demand
-
-@roadmap_1144_li
-Support OUTER UNION
-
-@roadmap_1145_li
-Support parameterized views (similar to CSVREAD, but using just SQL for the definition)
-
-@roadmap_1146_li
-A way (JDBC driver) to map an URL (jdbc:h2map:c1) to a connection object
-
-@roadmap_1147_li
-Support dynamic linked schema (automatically adding/updating/removing tables)
-
-@roadmap_1148_li
-Clustering: adding a node should be very fast and without interrupting clients (very short lock)
-
-@roadmap_1149_li
-Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific
-
-@roadmap_1150_li
-Run benchmarks with Android, Java 7, java -server
-
-@roadmap_1151_li
-Optimizations: faster hash function for strings.
-
-@roadmap_1152_li
-DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality
-
-@roadmap_1153_li
-Benchmark: add a graph to show how databases scale (performance/database size)
-
-@roadmap_1154_li
-Implement a SQLData interface to map your data over to a custom object
-
-@roadmap_1155_li
-In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true)
-
-@roadmap_1156_li
-Support multiple directories (on different hard drives) for the same database
-
-@roadmap_1157_li
-Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response
-
-@roadmap_1158_li
-Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server)
-
-@roadmap_1159_li
-Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML
-
-@roadmap_1160_li
-Support triggers with a string property or option: SpringTrigger, OSGITrigger
-
-@roadmap_1161_li
-MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id;
-
-@roadmap_1162_li
-Ability to resize the cache array when resizing the cache
-
-@roadmap_1163_li
-Time based cache writing (one second after writing the log)
-
-@roadmap_1164_li
-Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185
-
-@roadmap_1165_li
-Index usage for REGEXP LIKE.
-
-@roadmap_1166_li
-Compatibility: add a role DBA (like ADMIN).
-
-@roadmap_1167_li
-Better support multiple processors for in-memory databases.
-
-@roadmap_1168_li
-Support N'text'
-
-@roadmap_1169_li
-Support compatibility for jdbc:hsqldb:res:
-
-@roadmap_1170_li
-HSQLDB compatibility: automatically convert to the next 'higher' data type. Example: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB: long; PostgreSQL: integer out of range)
-
-@roadmap_1171_li
-Provide a Java SQL builder with standard and H2 syntax
-
-@roadmap_1172_li
-Trace: write OS, file system, JVM,... when opening the database
-
-@roadmap_1173_li
-Support indexes for views (probably requires materialized views)
-
-@roadmap_1174_li
-Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters
-
-@roadmap_1175_li
-Server: use one listener (detect if the request comes from a PG or TCP client)
-
-@roadmap_1176_li
-Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200
-
-@roadmap_1177_li
-Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html
-
-@roadmap_1178_li
-DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates.
-
-@roadmap_1179_li
-Support a special trigger on all tables to allow building a transaction log reader.
-
-@roadmap_1180_li
-File system with a background writer thread; test if this is faster
-
-@roadmap_1181_li
-Better document the source code (high level documentation).
-
-@roadmap_1182_li
-Support select * from dual a left join dual b on b.x=(select max(x) from dual)
-
-@roadmap_1183_li
-Optimization: don't lock when the database is read-only
-
-@roadmap_1184_li
-Issue 146: Support merge join.
-
-@roadmap_1185_li
-Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download
-
-@roadmap_1186_li
-Cluster: hot deploy (adding a node at runtime).
-
-@roadmap_1187_li
-Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts.
-
-@roadmap_1188_li
-Oracle: support DECODE method (convert to CASE WHEN).
-
-@roadmap_1189_li
-Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping
-
-@roadmap_1190_li
-Improve documentation of access rights.
-
-@roadmap_1191_li
-Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation().
-
-@roadmap_1192_li
-Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others).
-
-@roadmap_1193_li
-Remember the user defined data type (domain) of a column.
-
-@roadmap_1194_li
-MVCC: support multi-threaded kernel with multi-version concurrency.
-
-@roadmap_1195_li
-Auto-server: add option to define the port range or list.
-
-@roadmap_1196_li
-Support Jackcess (MS Access databases)
-
-@roadmap_1197_li
-Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World')
-
-@roadmap_1198_li
-Improve time to open large databases (see mail 'init time for distributed setup')
-
-@roadmap_1199_li
-Move Maven 2 repository from hsql.sf.net to h2database.sf.net
-
-@roadmap_1200_li
-Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...)
-
-@roadmap_1201_li
-Optimize A=? OR B=? to UNION if the cost is lower.
-
-@roadmap_1202_li
-Javadoc: document design patterns used
-
-@roadmap_1203_li
-Support custom collators, for example for natural sort (for text that contains numbers).
-
-@roadmap_1204_li
-Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt)
-
-@roadmap_1205_li
-Convert SQL-injection-2.txt to html document, include SQLInjection.java sample
-
-@roadmap_1206_li
-Support OUT parameters in user-defined procedures.
-
-@roadmap_1207_li
-Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp
-
-@roadmap_1208_li
-HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC
-
-@roadmap_1209_li
-Translation: use ?? in help.csv
-
-@roadmap_1210_li
-Translated .pdf
-
-@roadmap_1211_li
-Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file
-
-@roadmap_1212_li
-Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT.
-
-@roadmap_1213_li
-RECOVER=2 to backup the database, run recovery, open the database
-
-@roadmap_1214_li
-Recovery should work with encrypted databases
-
-@roadmap_1215_li
-Corruption: new error code, add help
-
-@roadmap_1216_li
-Space reuse: after init, scan all storages and free those that don't belong to a live database object
-
-@roadmap_1217_li
-Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects)
-
-@roadmap_1218_li
-Support NOCACHE table option (Oracle).
-
-@roadmap_1219_li
-Support table partitioning.
-
-@roadmap_1220_li
-Add regular javadocs (using the default doclet, but another css) to the homepage.
-
-@roadmap_1221_li
-The database should be kept open for a longer time when using the server mode.
-
-@roadmap_1222_li
-Javadocs: for each tool, add a copy & paste sample in the class level.
-
-@roadmap_1223_li
-Javadocs: add @author tags.
-
-@roadmap_1224_li
-Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start();
-
-@roadmap_1225_li
-MySQL compatibility: real SQL statement for DESCRIBE TEST
-
-@roadmap_1226_li
-Use a default delay of 1 second before closing a database.
-
-@roadmap_1227_li
-Write (log) to system table before adding to internal data structures.
-
-@roadmap_1228_li
-Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup).
-
-@roadmap_1229_li
-Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case).
-
-@roadmap_1230_li
-MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem).
-
-@roadmap_1231_li
-Oracle compatibility: support NLS_DATE_FORMAT.
-
-@roadmap_1232_li
-Support for Thread.interrupt to cancel running statements.
-
-@roadmap_1233_li
-Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process).
-
-@roadmap_1234_li
-H2 Console: support CLOB/BLOB download using a link.
-
-@roadmap_1235_li
-Support flashback queries as in Oracle.
-
-@roadmap_1236_li
-Import / Export of fixed width text files.
-
-@roadmap_1237_li
-HSQLDB compatibility: automatic data type for SUM if the value is too big (by default use the same type as the data).
-
-@roadmap_1238_li
-Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn
-
-@roadmap_1239_li
-Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns).
-
-@roadmap_1240_li
-H2 Console: in-place autocomplete.
-
-@roadmap_1241_li
-Support large databases: split database files to multiple directories / disks (similar to tablespaces).
-
-@roadmap_1242_li
-H2 Console: support configuration option for fixed width (monospace) font.
-
-@roadmap_1243_li
-Native fulltext search: support analyzers (specially for Chinese, Japanese).
-
-@roadmap_1244_li
-Automatically compact databases from time to time (as a background process).
-
-@roadmap_1245_li
-Test Eclipse DTP.
-
-@roadmap_1246_li
-H2 Console: autocomplete: keep the previous setting
-
-@roadmap_1247_li
-executeBatch: option to stop at the first failed statement.
-
-@roadmap_1248_li
-Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5
-
-@roadmap_1249_li
-Support Oracle ROWID (unique identifier for each row).
-
-@roadmap_1250_li
-MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c);
-
-@roadmap_1251_li
-Server mode: improve performance for batch updates.
-
-@roadmap_1252_li
-Applets: support read-only databases in a zip file (accessed as a resource).
-
-@roadmap_1253_li
-Long running queries / errors / trace system table.
-
-@roadmap_1254_li
-H2 Console should support JaQu directly.
-
-@roadmap_1255_li
-Better document FTL_SEARCH, FTL_SEARCH_DATA.
-
-@roadmap_1256_li
-Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL.
-
-@roadmap_1257_li
-Index creation using deterministic functions.
-
-@roadmap_1258_li
-ANALYZE: for unique indexes that allow null, count the number of null.
-
-@roadmap_1259_li
-MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html
-
-@roadmap_1260_li
-AUTO_SERVER: support changing IP addresses (disable a network while the database is open).
-
-@roadmap_1261_li
-Avoid using java.util.Calendar internally because it's slow, complicated, and buggy.
-
-@roadmap_1262_li
-Support TRUNCATE .. CASCADE like PostgreSQL.
-
-@roadmap_1263_li
-Fulltext search: lazy result generation using SimpleRowSource.
-
-@roadmap_1264_li
-Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello').
-
-@roadmap_1265_li
-MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73.
-
-@roadmap_1266_li
-MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2
-
-@roadmap_1267_li
-Docs: add a one line description for each functions and SQL statements at the top (in the link section).
-
-@roadmap_1268_li
-Javadoc search: weight for titles should be higher ('random' should list Functions as the best match).
-
-@roadmap_1269_li
-Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes.
-
-@roadmap_1270_li
-Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete.
-
-@roadmap_1271_li
-MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL)
-
-@roadmap_1272_li
-Support a data type "timestamp with timezone" using java.util.Calendar.
-
-@roadmap_1273_li
-Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62
-
-@roadmap_1274_li
-Add database creation date and time to the database.
-
-@roadmap_1275_li
-Support ASSERTION.
-
-@roadmap_1276_li
-MySQL compatibility: support comparing 1='a'
-
-@roadmap_1277_li
-Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html
-
-@roadmap_1278_li
-PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver.
-
-@roadmap_1279_li
-RunScript should be able to read from system in (or quiet mode for Shell).
-
-@roadmap_1280_li
-Natural join: support select x from dual natural join dual.
-
-@roadmap_1281_li
-Support using system properties in database URLs (may be a security problem).
-
-@roadmap_1282_li
-Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b
-
-@roadmap_1283_li
-Use the Java service provider mechanism to register file systems and function libraries.
-
-@roadmap_1284_li
-MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL).
-
-@roadmap_1285_li
-Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)).
-
-@roadmap_1286_li
-Optimization for EXISTS: convert to inner join or IN(..) if possible.
-
-@roadmap_1287_li
-Functions: support hashcode(value); cryptographic and fast
-
-@roadmap_1288_li
-Serialized file lock: support long running queries.
-
-@roadmap_1289_li
-Network: use 127.0.0.1 if other addresses don't work.
-
-@roadmap_1290_li
-Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication.
-
-@roadmap_1291_li
-Support reading JCR data: one table per node type; query table; cache option
-
-@roadmap_1292_li
-OSGi: create a sample application, test, document.
-
-@roadmap_1293_li
-help.csv: use complete examples for functions; run as test case.
-
-@roadmap_1294_li
-Functions to calculate the memory and disk space usage of a table, a row, or a value.
-
-@roadmap_1295_li
-Re-implement PooledConnection; use a lightweight connection object.
-
-@roadmap_1296_li
-Doclet: convert tests in javadocs to a java class.
-
-@roadmap_1297_li
-Doclet: format fields like methods, but support sorting by name and value.
-
-@roadmap_1298_li
-Doclet: shrink the html files.
-
-@roadmap_1299_li
-MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56
-
-@roadmap_1300_li
-Allow to scan index backwards starting with a value (to better support ORDER BY DESC).
-
-@roadmap_1301_li
-Java Service Wrapper: try http://yajsw.sourceforge.net/
-
-@roadmap_1302_li
-Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE.
-
-@roadmap_1303_li
-MySQL compatibility: support ALTER TABLE .. MODIFY COLUMN.
-
-@roadmap_1304_li
-Use a lazy and auto-close input stream (open resource when reading, close on eof).
-
-@roadmap_1305_li
-Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true).
-
-@roadmap_1306_li
-Improve SQL documentation, see http://www.w3schools.com/sql/
-
-@roadmap_1307_li
-MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL.
-
-@roadmap_1308_li
-MS SQL Server compatibility: support DATEPART syntax.
-
-@roadmap_1309_li
-Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83
-
-@roadmap_1310_li
-Support INTERVAL data type (see Oracle and others).
-
-@roadmap_1311_li
-Combine Server and Console tool (only keep Server).
-
-@roadmap_1312_li
-Store the Lucene index in the database itself.
-
-@roadmap_1313_li
-Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29
-
-@roadmap_1314_li
-Oracle compatibility: support DECODE(x, ...).
-
-@roadmap_1315_li
-MVCC: compare concurrent update behavior with PostgreSQL and Oracle.
-
-@roadmap_1316_li
-HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface).
-
-@roadmap_1317_li
-HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0)
-
-@roadmap_1318_li
-Support comma as the decimal separator in the CSV tool.
-
-@roadmap_1319_li
-Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz
-
-@roadmap_1320_li
-Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation.
-
-@roadmap_1321_li
-CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache.
-
-@roadmap_1322_li
-Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601
-
-@roadmap_1323_li
-PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG.
-
-@roadmap_1324_li
-Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html
-
-@roadmap_1325_li
-IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence.
-
-@roadmap_1326_li
-Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer).
-
-@roadmap_1327_li
-Oracle compatibility: support CREATE SYNONYM table FOR schema.table.
-
-@roadmap_1328_li
-FTP: document the server, including -ftpTask option to execute / kill remote processes
-
-@roadmap_1329_li
-FTP: problems with multithreading?
-
-@roadmap_1330_li
-FTP: implement SFTP / FTPS
-
-@roadmap_1331_li
-FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file).
-
-@roadmap_1332_li
-More secure default configuration if remote access is enabled.
-
-@roadmap_1333_li
-Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future').
-
-@roadmap_1334_li
-Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE.
-
-@roadmap_1335_li
-Issue 107: Prefer using the ORDER BY index if LIMIT is used.
-
-@roadmap_1336_li
-An index on (id, name) should be used for a query: select * from t where s=? order by i
-
-@roadmap_1337_li
-Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL.
-
-@roadmap_1338_li
-Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true).
-
-@roadmap_1339_li
-Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2).
-
-@roadmap_1340_li
-Fast alter table add column.
-
-@roadmap_1341_li
-Improve concurrency for in-memory database operations.
-
-@roadmap_1342_li
-Issue 122: Support for connection aliases for remote tcp connections.
-
-@roadmap_1343_li
-Fast scrambling (strong encryption doesn't help if the password is included in the application).
-
-@roadmap_1344_li
-H2 Console: support -webPassword to require a password to access preferences or shutdown.
-
-@roadmap_1345_li
-Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number.
-
-@roadmap_1346_li
-Issue 127: Support activation/deactivation of triggers
-
-@roadmap_1347_li
-Issue 130: Custom log event listeners
-
-@roadmap_1348_li
-Issue 131: IBM DB2 compatibility: sysibm.sysdummy1
-
-@roadmap_1349_li
-Issue 132: Use Java enum trigger type.
-
-@roadmap_1350_li
-Issue 134: IBM DB2 compatibility: session global variables.
-
-@roadmap_1351_li
-Cluster: support load balance with values for each server / auto detect.
-
-@roadmap_1352_li
-FTL_SET_OPTION(keyString, valueString) with key stopWords at first.
-
-@roadmap_1353_li
-Pluggable access control mechanism.
-
-@roadmap_1354_li
-Fulltext search (Lucene): support streaming CLOB data.
-
-@roadmap_1355_li
-Document/example how to create and read an encrypted script file.
-
-@roadmap_1356_li
-Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins).
-
-@roadmap_1357_li
-Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible.
-
-@roadmap_1358_li
-Support a way to create or read compressed encrypted script files using an API.
-
-@roadmap_1359_li
-Scripting language support (Javascript).
-
-@roadmap_1360_li
-The network client should better detect if the server is not an H2 server and fail early.
-
-@roadmap_1361_li
-H2 Console: support CLOB/BLOB upload.
-
-@roadmap_1362_li
-Database file lock: detect hibernate / standby / very slow threads (compare system time).
-
-@roadmap_1363_li
-Automatic detection of redundant indexes.
-
-@roadmap_1364_li
-Maybe reject join without "on" (except natural join).
-
-@roadmap_1365_li
-Implement GiST (Generalized Search Tree for Secondary Storage).
-
-@roadmap_1366_li
-Function to read a number of bytes/characters from a BLOB or CLOB.
-
-@roadmap_1367_li
-Issue 156: Support SELECT ? UNION SELECT ?.
-
-@roadmap_1368_li
-Automatic mixed mode: support a port range list (to avoid firewall problems).
-
-@roadmap_1369_li
-Support the pseudo column rowid, oid, _rowid_.
-
-@roadmap_1370_li
-H2 Console / large result sets: stream early instead of keeping a whole result in memory
-
-@roadmap_1371_li
-Support TRUNCATE for linked tables.
-
-@roadmap_1372_li
-UNION: evaluate INTERSECT before UNION (like most other databases except Oracle).
-
-@roadmap_1373_li
-Delay creating the information schema, and share metadata columns.
-
-@roadmap_1374_li
-TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks.
-
-@roadmap_1375_li
-Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user).
-
-@roadmap_1376_li
-Support CREATE DATABASE LINK (a custom JDBC driver is already supported).
-
-@roadmap_1377_li
-Support large GROUP BY operations. Issue 216.
-
-@roadmap_1378_li
-Issue 163: Allow to create foreign keys on metadata types.
-
-@roadmap_1379_li
-Logback: write a native DBAppender.
-
-@roadmap_1380_li
-Cache size: don't use more cache than what is available.
-
-@roadmap_1381_li
-Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread.
-
-@roadmap_1382_li
-Tree index: Instead of an AVL tree, use a general balanced tree or a scapegoat tree.
-
-@roadmap_1383_li
-User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database.
-
-@roadmap_1384_li
-Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL.
-
-@roadmap_1385_li
-Optimizer: WHERE X=? AND Y IN(?), it always uses the index on Y. Should be cost based.
-
-@roadmap_1386_li
-Common Table Expression (CTE) / recursive queries: support parameters. Issue 314.
-
-@roadmap_1387_li
-Oracle compatibility: support INSERT ALL.
-
-@roadmap_1388_li
-Issue 178: Optimizer: index usage when both ascending and descending indexes are available.
-
-@roadmap_1389_li
-Issue 179: Related subqueries in HAVING clause.
-
-@roadmap_1390_li
-IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero.
-
-@roadmap_1391_li
-Creating primary key: always create a constraint.
-
-@roadmap_1392_li
-Maybe use a different page layout: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system.
-
-@roadmap_1393_li
-Indexes of temporary tables are currently kept in-memory. Is this how it should be?
-
-@roadmap_1394_li
-The Shell tool should support the same built-in commands as the H2 Console.
-
-@roadmap_1395_li
-Maybe use PhantomReference instead of finalize.
-
-@roadmap_1396_li
-Database file name suffix: should only have one dot by default. Example: .h2db
-
-@roadmap_1397_li
-Issue 196: Function based indexes
-
-@roadmap_1398_li
-ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName.
-
-@roadmap_1399_li
-Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java
-
-@roadmap_1400_li
-ROWNUM: Oracle compatibility when used within a subquery. Issue 198.
-
-@roadmap_1401_li
-Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way.
-
-@roadmap_1402_li
-ODBC: encrypted databases are not supported because the ;CIPHER= cannot be set.
-
-@roadmap_1403_li
-Support CLOB and BLOB update, especially conn.createBlob().setBinaryStream(1);
-
-@roadmap_1404_li
-Optimizer: index usage when both ascending and descending indexes are available. Issue 178.
-
-@roadmap_1405_li
-Issue 306: Support schema specific domains.
-
-@roadmap_1406_li
-Triggers: support user defined execution order. Oracle: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby: triggers are fired in the order in which they were created.
-
-@roadmap_1407_li
-PostgreSQL compatibility: combine "users" and "roles". See: http://www.postgresql.org/docs/8.1/interactive/user-manag.html
-
-@roadmap_1408_li
-Improve documentation of system properties: only list the property names, default values, and description.
-
-@roadmap_1409_li
-Support running totals / cumulative sum using SUM(..) OVER(..).
-
-@roadmap_1410_li
-Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize)
-
-@roadmap_1411_li
-Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others).
-
-@roadmap_1412_li
-Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219.
-
-@roadmap_1413_li
-Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217.
-
-@roadmap_1414_li
-Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218.
-
-@roadmap_1415_li
-Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220.
-
-@roadmap_1416_li
-Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222.
-
-@roadmap_1417_li
-Log long running transactions (similar to long running statements).
-
-@roadmap_1418_li
-Parameter data type is data type of other operand. Issue 205.
-
-@roadmap_1419_li
-Some combinations of nested join with right outer join are not supported.
-
-@roadmap_1420_li
-DatabaseEventListener.openConnection(id) and closeConnection(id).
-
-@roadmap_1421_li
-Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent logging in with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API.
-
-@roadmap_1422_li
-Compatibility for data type CHAR (Derby, HSQLDB). Issue 212.
-
-@roadmap_1423_li
-Compatibility with MySQL TIMESTAMPDIFF. Issue 209.
-
-@roadmap_1424_li
-Optimizer: use a histogram of the data, especially for non-normal distributions.
-
-@roadmap_1425_li
-Trigger: allow declaring as source code (like functions).
-
-@roadmap_1426_li
-User defined aggregate: allow declaring as source code (like functions).
-
-@roadmap_1427_li
-The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable.
-
-@roadmap_1428_li
-MySQL + PostgreSQL compatibility: support string literal escape with \n.
-
-@roadmap_1429_li
-PostgreSQL compatibility: support string literal escape with double \\.
-
-@roadmap_1430_li
-Document the TCP server "management_db". Maybe include the IP address of the client.
-
-@roadmap_1431_li
-Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main
-
-@roadmap_1432_li
-If a database object was not found in the current schema, but one with the same name exists in another schema, include that in the error message.
-
-@roadmap_1433_li
-Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?)
-
-@roadmap_1434_li
-Issue 302: Support optimizing queries with both inner and outer joins, as in: select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables".
-
-@roadmap_1435_li
-JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool).
-
-@roadmap_1436_li
-Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...;
-
-@roadmap_1437_li
-nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example).
-
-@roadmap_1438_li
-Column as parameter of function table. Issue 228.
-
-@roadmap_1439_li
-Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, disable autocommit for all connections.
-
-@roadmap_1440_li
-Compatibility with MS Access: support "&" to concatenate text.
-
-@roadmap_1441_li
-The BACKUP statement should not synchronize on the database, and therefore should not block other users.
-
-@roadmap_1442_li
-Document the database file format.
-
-@roadmap_1443_li
-Support reading LOBs.
-
-@roadmap_1444_li
-Require appending DANGEROUS=TRUE when using certain dangerous settings such as LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,...
-
-@roadmap_1445_li
-Support UDT (user defined types) similar to how Apache Derby supports it: check constraint, allow to use it in Java functions as parameters (return values already seem to work).
-
-@roadmap_1446_li
-Encrypted file system (use ciphertext stealing so the file length doesn't change; 4 KB header per file, optional compatibility with current encrypted database files).
-
-@roadmap_1447_li
-Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes.
-
-@roadmap_1448_li
-GROUP BY queries should use a temporary table if there are too many rows.
-
-@roadmap_1449_li
-BLOB: support random access when reading.
-
-@roadmap_1450_li
-CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form).
-
-@roadmap_1451_li
-Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...).
-
-@roadmap_1452_li
-Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...).
-
-@roadmap_1453_li
-Compatibility with MySQL: support non-strict mode (sql_mode = ""), where data that is too large for the column is truncated or set to the default value.
-
-@roadmap_1454_li
-The full condition should be sent to the linked table, not just the indexed condition. Example: TestLinkedTableFullCondition
-
-@roadmap_1455_li
-Compatibility with IBM DB2: CREATE PROCEDURE.
-
-@roadmap_1456_li
-Compatibility with IBM DB2: SQL cursors.
-
-@roadmap_1457_li
-Single-column primary key values are always stored explicitly. This is not required.
-
-@roadmap_1458_li
-Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8).
-
-@roadmap_1459_li
-CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true.
-
-@roadmap_1460_li
-Optimization for large lists for column IN(1, 2, 3, 4,...) - currently a list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated).
-
-@roadmap_1461_li
-Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]).
-
-@roadmap_1462_li
-PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']]
-
-@roadmap_1463_li
-PostgreSQL compatibility: UPDATE with FROM.
-
-@roadmap_1464_li
-Issue 297: Oracle compatibility for "at time zone".
-
-@roadmap_1465_li
-IBM DB2 compatibility: IDENTITY_VAL_LOCAL().
-
-@roadmap_1466_li
-Support SQL/XML.
-
-@roadmap_1467_li
-Support concurrent opening of databases.
-
-@roadmap_1468_li
-Improved error message and diagnostics in case of network configuration problems.
-
-@roadmap_1469_li
-TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases).
-
-@roadmap_1470_li
-Adding a primary key should make the columns 'not null' unless there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby).
-
-@roadmap_1471_li
-ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported).
-
-@roadmap_1472_li
-MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html
-
-@roadmap_1473_li
-The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/
-
-@roadmap_1474_li
-Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)".
-
-@roadmap_1475_li
-MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id.
-
-@roadmap_1476_li
-Issue 283: Improve performance of H2 on Android.
-
-@roadmap_1477_li
-Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s).
-
-@roadmap_1478_li
-Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d
-
-@roadmap_1479_li
-PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID).
-
-@roadmap_1480_li
-MS SQL Server compatibility: support @@ROWCOUNT.
-
-@roadmap_1481_li
-PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x).
-
-@roadmap_1482_li
-Issue 311: Serialized lock mode: executeQuery of write operations fails.
-
-@roadmap_1483_li
-PostgreSQL compatibility: support PgAdmin III (especially the function current_setting).
-
-@roadmap_1484_li
-MySQL compatibility: support TIMESTAMPADD.
-
-@roadmap_1485_li
-Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-
-@roadmap_1486_li
-Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-
-@roadmap_1487_li
-Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase).
-
-@roadmap_1488_li
-TRANSACTION_ID() for in-memory databases.
-
-@roadmap_1489_li
-TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL).
-
-@roadmap_1490_li
-Support [INNER | OUTER] JOIN USING(column [,...]).
-
-@roadmap_1491_li
-Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle)
-
-@roadmap_1492_li
-GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby).
-
-@roadmap_1493_li
-Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped.
-
-@roadmap_1494_li
-Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1.
-
-@roadmap_1495_li
-PHP support: H2 should support PDO, or test with PostgreSQL PDO.
-
-@roadmap_1496_li
-Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query.
-
-@roadmap_1497_li
-Cluster: allow using auto-increment and identity columns by ensuring they are executed in lock-step.
-
-@roadmap_1498_li
-MySQL compatibility: index names only need to be unique for the given table.
-
-@roadmap_1499_li
-Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases.
-
-@roadmap_1500_li
-Oracle compatibility: support MEDIAN aggregate function.
-
-@roadmap_1501_li
-Issue 348: Oracle compatibility: division should return a decimal result.
-
-@roadmap_1502_li
-Read rows on demand: instead of reading the whole row, only read up to the column that is requested. Keep a pointer to the data area and the column id that is already read.
-
-@roadmap_1503_li
-Long running transactions: log session id when detected.
-
-@roadmap_1504_li
-Optimization: "select id from test" should use the index on id even without "order by".
-
-@roadmap_1505_li
-Issue 362: LIMIT support for UPDATE statements (MySQL compatibility).
-
-@roadmap_1506_li
-Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ...
-
-@roadmap_1507_li
-Use Java 6 SQLException subclasses.
-
-@roadmap_1508_li
-Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR
-
-@roadmap_1509_li
-Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,..
-
-@roadmap_1510_li
-Support index-only when doing selects (i.e. without needing to load the actual table data)
-
-@roadmap_1511_h2
-Not Planned
-
-@roadmap_1512_li
-HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
-
-@roadmap_1513_li
-String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively.
-
-@roadmap_1514_li
-In prepared statements, identifier names (table names and so on) cannot be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements.
-
-@sourceError_1000_h1
-Error Analyzer
-
-@sourceError_1001_a
-Home
-
-@sourceError_1002_a
-Input
-
-@sourceError_1003_h2
- Details Source Code
-
-@sourceError_1004_p
-Paste the error message and stack trace below and click on 'Details' or 'Source Code':
-
-@sourceError_1005_b
-Error Code:
-
-@sourceError_1006_b
-Product Version:
-
-@sourceError_1007_b
-Message:
-
-@sourceError_1008_b
-More Information:
-
-@sourceError_1009_b
-Stack Trace:
-
-@sourceError_1010_b
-Source File:
-
-@sourceError_1011_p
- Inline
-
-@tutorial_1000_h1
-Tutorial
-
-@tutorial_1001_a
- Starting and Using the H2 Console
-
-@tutorial_1002_a
- Special H2 Console Syntax
-
-@tutorial_1003_a
- Settings of the H2 Console
-
-@tutorial_1004_a
- Connecting to a Database using JDBC
-
-@tutorial_1005_a
- Creating New Databases
-
-@tutorial_1006_a
- Using the Server
-
-@tutorial_1007_a
- Using Hibernate
-
-@tutorial_1008_a
- Using TopLink and Glassfish
-
-@tutorial_1009_a
- Using EclipseLink
-
-@tutorial_1010_a
- Using Apache ActiveMQ
-
-@tutorial_1011_a
- Using H2 within NetBeans
-
-@tutorial_1012_a
- Using H2 with jOOQ
-
-@tutorial_1013_a
- Using Databases in Web Applications
-
-@tutorial_1014_a
- Android
-
-@tutorial_1015_a
- CSV (Comma Separated Values) Support
-
-@tutorial_1016_a
- Upgrade, Backup, and Restore
-
-@tutorial_1017_a
- Command Line Tools
-
-@tutorial_1018_a
- The Shell Tool
-
-@tutorial_1019_a
- Using OpenOffice Base
-
-@tutorial_1020_a
- Java Web Start / JNLP
-
-@tutorial_1021_a
- Using a Connection Pool
-
-@tutorial_1022_a
- Fulltext Search
-
-@tutorial_1023_a
- User-Defined Variables
-
-@tutorial_1024_a
- Date and Time
-
-@tutorial_1025_a
- Using Spring
-
-@tutorial_1026_a
- OSGi
-
-@tutorial_1027_a
- Java Management Extension (JMX)
-
-@tutorial_1028_h2
-Starting and Using the H2 Console
-
-@tutorial_1029_p
- The H2 Console application lets you access a database using a browser. This can be an H2 database, or another database that supports the JDBC API.
-
-@tutorial_1030_p
- This is a client/server application, so both a server and a client (a browser) are required to run it.
-
-@tutorial_1031_p
- Depending on your platform and environment, there are multiple ways to start the H2 Console:
-
-@tutorial_1032_th
-OS
-
-@tutorial_1033_th
-Start
-
-@tutorial_1034_td
-Windows
-
-@tutorial_1035_td
- Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]
-
-@tutorial_1036_td
- An icon will be added to the system tray:
-
-@tutorial_1037_td
- If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http://localhost:8082.
-
-@tutorial_1038_td
-Windows
-
-@tutorial_1039_td
- Open a file browser, navigate to h2/bin, and double click on h2.bat.
-
-@tutorial_1040_td
- A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL: http://localhost:8082).
-
-@tutorial_1041_td
-Any
-
-@tutorial_1042_td
- Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java.
-
-@tutorial_1043_td
-Any
-
-@tutorial_1044_td
- Open a console window, navigate to the directory h2/bin, and type:
-
-@tutorial_1045_h3
-Firewall
-
-@tutorial_1046_p
- If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer do you need to allow remote connections in the firewall.
-
-@tutorial_1047_p
- It has been reported that when using Kaspersky 7.0 with its firewall enabled, the H2 Console is very slow when connecting over the IP address. A workaround is to connect using 'localhost'.
-
-@tutorial_1048_p
- A small firewall is already built into the server: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'.
-
-@tutorial_1049_h3
-Testing Java
-
-@tutorial_1050_p
- To find out which version of Java is installed, open a command prompt and type:
-
-@tutorial_1051_p
- If you get an error message, you may need to add the Java binary directory to the path environment variable.
-
-@tutorial_1052_h3
-Error Message 'Port may be in use'
-
-@tutorial_1053_p
- You can only start one instance of the H2 Console, otherwise you will get the following error message: "The Web server could not be started. Possible cause: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections.
-
-@tutorial_1054_h3
-Using another Port
-
-@tutorial_1055_p
- If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort.
-
-@tutorial_1056_p
- If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used.
-
-@tutorial_1057_h3
-Connecting to the Server using a Browser
-
-@tutorial_1058_p
- If the server started successfully, you can connect to it using a web browser. JavaScript needs to be enabled. If you started the server on the same computer as the browser, open the URL http://localhost:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example: http://192.168.0.2:8082. If you enabled TLS on the server side, the URL needs to start with https://.
-
-@tutorial_1059_h3
-Multiple Concurrent Sessions
-
-@tutorial_1060_p
- Multiple concurrent browser sessions are supported. As the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application.
-
-@tutorial_1061_h3
-Login
-
-@tutorial_1062_p
- At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. When you are done, click [Connect].
-
-@tutorial_1063_p
- You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console).
-
-@tutorial_1064_h3
-Error Messages
-
-@tutorial_1065_p
- Error messages are shown in red. You can show/hide the stack trace of the exception by clicking on the message.
-
-@tutorial_1066_h3
-Adding Database Drivers
-
-@tutorial_1067_p
- To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the HSQLDB JDBC driver C:\Programs\hsqldb\lib\hsqldb.jar, set the environment variable H2DRIVERS to C:\Programs\hsqldb\lib\hsqldb.jar.
-
-@tutorial_1068_p
- Multiple drivers can be set; entries need to be separated by ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted.
-
-@tutorial_1069_h3
-Using the H2 Console
-
-@tutorial_1070_p
- The H2 Console application has three main panels: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command.
-
-@tutorial_1071_h3
-Inserting Table Names or Column Names
-
-@tutorial_1072_p
- To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. For example, if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded.
-
-@tutorial_1073_h3
-Disconnecting and Stopping the Application
-
-@tutorial_1074_p
- To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions.
-
-@tutorial_1075_p
- To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window.
-
-@tutorial_1076_h2
-Special H2 Console Syntax
-
-@tutorial_1077_p
- The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command.
-
-@tutorial_1078_th
-Command(s)
-
-@tutorial_1079_th
-Description
-
-@tutorial_1080_td
- @autocommit_true;
-
-@tutorial_1081_td
- @autocommit_false;
-
-@tutorial_1082_td
- Enable or disable autocommit.
-
-@tutorial_1083_td
- @cancel;
-
-@tutorial_1084_td
- Cancel the currently running statement.
-
-@tutorial_1085_td
- @columns null null TEST;
-
-@tutorial_1086_td
- @index_info null null TEST;
-
-@tutorial_1087_td
- @tables;
-
-@tutorial_1088_td
- @tables null null TEST;
-
-@tutorial_1089_td
- Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. Missing parameters at the end of the line are set to null. The complete list of metadata commands is: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns
-
-@tutorial_1090_td
- @edit select * from test;
-
-@tutorial_1091_td
- Use an updatable result set.
-
-@tutorial_1092_td
- @generated insert into test() values();
-
-@tutorial_1093_td
- Show the result of Statement.getGeneratedKeys().
-
-@tutorial_1094_td
- @history;
-
-@tutorial_1095_td
- List the command history.
-
-@tutorial_1096_td
- @info;
-
-@tutorial_1097_td
- Display the result of various Connection and DatabaseMetaData methods.
-
-@tutorial_1098_td
- @list select * from test;
-
-@tutorial_1099_td
- Show the result set in list format (each column on its own line, with row numbers).
-
-@tutorial_1100_td
- @loop 1000 select ?, ?/*rnd*/;
-
-@tutorial_1101_td
- @loop 1000 @statement select ?;
-
-@tutorial_1102_td
- Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed.
-
-@tutorial_1103_td
- @maxrows 20;
-
-@tutorial_1104_td
- Set the maximum number of rows to display.
-
-@tutorial_1105_td
- @memory;
-
-@tutorial_1106_td
- Show the used and free memory. This will call System.gc().
-
-@tutorial_1107_td
- @meta select 1;
-
-@tutorial_1108_td
- List the ResultSetMetaData after running the query.
-
-@tutorial_1109_td
- @parameter_meta select ?;
-
-@tutorial_1110_td
- Show the result of the PreparedStatement.getParameterMetaData() call. The statement is not executed.
-
-@tutorial_1111_td
- @prof_start;
-
-@tutorial_1112_td
- call hash('SHA256', '', 1000000);
-
-@tutorial_1113_td
- @prof_stop;
-
-@tutorial_1114_td
- Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3).
-
-@tutorial_1115_td
- @prof_start;
-
-@tutorial_1116_td
- @sleep 10;
-
-@tutorial_1117_td
- @prof_stop;
-
-@tutorial_1118_td
- Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process).
-
-@tutorial_1119_td
- @transaction_isolation;
-
-@tutorial_1120_td
- @transaction_isolation 2;
-
-@tutorial_1121_td
- Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level.
-
-@tutorial_1122_h2
-Settings of the H2 Console
-
-@tutorial_1123_p
- The settings of the H2 Console are stored in a configuration file called .h2.server.properties in your user home directory. For Windows installations, the user home directory is usually C:\Documents and Settings\[username] or C:\Users\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are:
-
-@tutorial_1124_code
-webAllowOthers
-
-@tutorial_1125_li
-: allow other computers to connect.
-
-@tutorial_1126_code
-webPort
-
-@tutorial_1127_li
-: the port of the H2 Console
-
-@tutorial_1128_code
-webSSL
-
-@tutorial_1129_li
-: use encrypted TLS (HTTPS) connections.
-
-@tutorial_1130_p
- In addition to those settings, the properties of the most recently used connections are listed in the form <number>=<name>|<driver>|<url>|<user> using the escape character \. Example: 1=Generic H2 (Embedded)|org.h2.Driver|jdbc\:h2\:~/test|sa
-
-@tutorial_1131_h2
-Connecting to a Database using JDBC
-
-@tutorial_1132_p
- To connect to a database, a Java application first needs to load the database driver, and then get a connection. A simple way to do that is using the following code:
-
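- A minimal sketch (the URL jdbc:h2:~/test, user sa, and the empty password are placeholders):
-
-    import java.sql.Connection;
-    import java.sql.DriverManager;
-
-    public class Connect {
-        public static void main(String[] args) throws Exception {
-            Class.forName("org.h2.Driver");
-            Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
-            // ... use the connection ...
-            conn.close();
-        }
-    }
-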
-@tutorial_1133_p
- This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc:h2: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are.
-
-@tutorial_1134_h2
-Creating New Databases
-
-@tutorial_1135_p
- By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database.
-
-@tutorial_1136_p
- Auto-creating new databases can be disabled, see Opening a Database Only if it Already Exists.
-
-@tutorial_1137_h2
-Using the Server
-
-@tutorial_1138_p
- H2 currently supports three servers: a web server (for the H2 Console), a TCP server (for client/server connections) and a PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects.
-
-@tutorial_1139_h3
-Starting the Server Tool from Command Line
-
-@tutorial_1140_p
- To start the Server tool from the command line with the default settings, run:
-
-@tutorial_1141_p
- This will start the tool with the default options. To get the list of options and default values, run:
-
-@tutorial_1142_p
- There are options available to use other ports, and to start only some of the servers.
-
-@tutorial_1143_h3
-Connecting to the TCP Server
-
-@tutorial_1144_p
- To remotely connect to a database using the TCP server, use the following driver and database URL:
-
-@tutorial_1145_li
-JDBC driver class: org.h2.Driver
-
-@tutorial_1146_li
-Database URL: jdbc:h2:tcp://localhost/~/test
-
-@tutorial_1147_p
- For details about the database URL, see also Features. Please note that you can't connect with a web browser to this URL. You can only connect using an H2 client (over JDBC).
-
-@tutorial_1148_h3
-Starting the TCP Server within an Application
-
-@tutorial_1149_p
- Servers can also be started and stopped from within an application. Sample code:
-
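- A minimal sketch using the org.h2.tools.Server API (the port 9092 is a placeholder):
-
-    import org.h2.tools.Server;
-
-    // start a TCP server; clients can then connect using jdbc:h2:tcp://localhost:9092/~/test
-    Server server = Server.createTcpServer("-tcpPort", "9092").start();
-    // ...
-    server.stop();
-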
-@tutorial_1150_h3
-Stopping a TCP Server from Another Process
-
-@tutorial_1151_p
- The TCP server can be stopped from another process. To stop the server from the command line, run:
-
-@tutorial_1152_p
- To stop the server from a user application, use the following code:
-
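- A sketch of what this might look like (URL and password are placeholders; the password must match the -tcpPassword used when starting the server):
-
-    import org.h2.tools.Server;
-
-    // false, false: don't force the shutdown, and stop only this server
-    Server.shutdownTcpServer("tcp://localhost:9092", "password", false, false);
-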
-@tutorial_1153_p
- This function will only stop the TCP server. If other servers were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server).
-
-@tutorial_1154_h2
-Using Hibernate
-
-@tutorial_1155_p
- This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename the patched file to H2Dialect.java and include it in your application, or upgrade to a version of Hibernate where this is fixed.
-
-@tutorial_1156_p
- When using Hibernate, try to use the H2Dialect if possible. When using the H2Dialect, compatibility modes such as MODE=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases.
-
-@tutorial_1157_h2
-Using TopLink and Glassfish
-
-@tutorial_1158_p
- To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource.
-
-@tutorial_1159_p
- The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml:
-
-@tutorial_1160_p
- In old versions of Glassfish, the property name is toplink.platform.class.name.
-
-@tutorial_1161_p
- To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib.
-
-@tutorial_1162_h2
-Using EclipseLink
-
-@tutorial_1163_p
- To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many cases. See also H2Platform.
-
-@tutorial_1164_h2
-Using Apache ActiveMQ
-
-@tutorial_1165_p
- When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, in the ActiveMQ configuration element <jdbcPersistenceAdapter>, set the property databaseLocker="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false.
-
-@tutorial_1166_h2
-Using H2 within NetBeans
-
-@tutorial_1167_p
- The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE.
-
-@tutorial_1168_p
- There is a known issue when using the NetBeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one.
-
-@tutorial_1169_h2
-Using H2 with jOOQ
-
-@tutorial_1170_p
- jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema:
-
-@tutorial_1171_p
- then run the jOOQ code generator on the command line using this command:
-
-@tutorial_1172_p
- ...where codegen.xml is on the classpath and contains this information:
-
-@tutorial_1173_p
- Using the generated source, you can query the database as follows:
-
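- A hedged sketch, assuming jOOQ 3.x and a generated table class BOOK (the generated package and table name are hypothetical):
-
-    import org.jooq.DSLContext;
-    import org.jooq.SQLDialect;
-    import org.jooq.impl.DSL;
-    import static org.jooq.example.generated.Tables.BOOK; // hypothetical generated class
-
-    // conn is an open java.sql.Connection to the H2 database
-    DSLContext ctx = DSL.using(conn, SQLDialect.H2);
-    System.out.println(ctx.select().from(BOOK).fetch());
-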
-@tutorial_1174_p
- See more details on the jOOQ Homepage and in the jOOQ Tutorial.
-
-@tutorial_1175_h2
-Using Databases in Web Applications
-
-@tutorial_1176_p
- There are multiple ways to access a database from within web applications. Here are some examples if you use Tomcat or JBoss.
-
-@tutorial_1177_h3
-Embedded Mode
-
-@tutorial_1178_p
- The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) use just one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, one approach is to use one connection per Session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed).
-
-@tutorial_1179_h3
-Server Mode
-
-@tutorial_1180_p
- The server mode is similar, but it allows you to run the server in another process.
-
-@tutorial_1181_h3
-Using a Servlet Listener to Start and Stop a Database
-
-@tutorial_1182_p
- Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section):
-
-@tutorial_1183_p
- For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, user name sa, and password sa. If you want to use this connection within your servlet, you can access it as follows:
-
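- A sketch of such access from a servlet (the attribute name "connection" follows what DbStarter.java uses by default; verify against your version):
-
-    import java.sql.Connection;
-
-    // inside a servlet method:
-    Connection conn = (Connection) getServletContext().getAttribute("connection");
-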
-@tutorial_1184_code
-DbStarter
-
-@tutorial_1185_p
- can also start the TCP server, however this is disabled by default. To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags:
-
-@tutorial_1186_p
- When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically.
-
-@tutorial_1187_h3
-Using the H2 Console Servlet
-
-@tutorial_1188_p
- The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the h2*.jar file in your application, and add the following configuration to your web.xml:
-
-@tutorial_1189_p
- For details, see also src/tools/WEB-INF/web.xml.
-
-@tutorial_1190_p
- To create a web application with just the H2 Console, run the following command:
-
-@tutorial_1191_h2
-Android
-
-@tutorial_1192_p
- You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but everything seems to work as expected, and performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. Fulltext search was not yet tested, however the native fulltext search should work.
-
-@tutorial_1193_p
- Reasons to use H2 instead of SQLite are:
-
-@tutorial_1194_li
-Full Unicode support including UPPER() and LOWER().
-
-@tutorial_1195_li
-Streaming API for BLOB and CLOB data.
-
-@tutorial_1196_li
-Fulltext search.
-
-@tutorial_1197_li
-Multiple connections.
-
-@tutorial_1198_li
-User defined functions and triggers.
-
-@tutorial_1199_li
-Database file encryption.
-
-@tutorial_1200_li
-Reading and writing CSV files (this feature can be used outside the database as well).
-
-@tutorial_1201_li
-Referential integrity and check constraints.
-
-@tutorial_1202_li
-Better data type and SQL support.
-
-@tutorial_1203_li
-In-memory databases, read-only databases, linked tables.
-
-@tutorial_1204_li
-Better compatibility with other databases, which simplifies porting applications.
-
-@tutorial_1205_li
-Possibly better performance (so far for read operations).
-
-@tutorial_1206_li
-Server mode (accessing a database on a different machine over TCP/IP).
-
-@tutorial_1207_p
- Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows).
-
-@tutorial_1208_p
- The database files need to be stored in a place that is accessible for the application. Example:
-
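- A sketch with a placeholder path (the package name com.example.myapp and file location are hypothetical; use a directory your application may write to):
-
-    String url = "jdbc:h2:/data/data/com.example.myapp/files/mydb";
-    Connection conn = DriverManager.getConnection(url, "sa", "");
-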
-@tutorial_1209_p
- Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android.
-
-@tutorial_1210_h2
-CSV (Comma Separated Values) Support
-
-@tutorial_1211_p
- The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool.
-
-@tutorial_1212_h3
-Reading a CSV File from Within a Database
-
-@tutorial_1213_p
- A CSV file can be read using the function CSVREAD. Example:
-
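- For example (test.csv is a placeholder file name; stat is an open java.sql.Statement):
-
-    ResultSet rs = stat.executeQuery("SELECT * FROM CSVREAD('test.csv')");
-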
-@tutorial_1214_p
- Please note that for performance reasons, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table.
-
-@tutorial_1215_h3
-Importing Data from a CSV File
-
-@tutorial_1216_p
- A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT.
-
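- A sketch of both variants (table and file names are placeholders; stat is an open java.sql.Statement):
-
-    // create the table directly from the file, with explicit column types
-    stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)) AS SELECT * FROM CSVREAD('test.csv')");
-    // or import into an existing table
-    stat.execute("INSERT INTO TEST SELECT * FROM CSVREAD('test.csv')");
-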
-@tutorial_1217_h3
-Writing a CSV File from Within a Database
-
-@tutorial_1218_p
- The built-in function CSVWRITE can be used to create a CSV file from a query. Example:
-
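- For example (the file name and query are placeholders; stat is an open java.sql.Statement):
-
-    stat.execute("CALL CSVWRITE('test.csv', 'SELECT * FROM TEST')");
-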
-@tutorial_1219_h3
-Writing a CSV File from a Java Application
-
-@tutorial_1220_p
- The Csv tool can be used in a Java application even when not using a database at all. Example:
-
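- A minimal sketch using org.h2.tools.Csv and org.h2.tools.SimpleResultSet (column layout and file name are placeholders; older H2 versions may expose the Csv methods slightly differently):
-
-    import java.sql.Types;
-    import org.h2.tools.Csv;
-    import org.h2.tools.SimpleResultSet;
-
-    SimpleResultSet rs = new SimpleResultSet();
-    rs.addColumn("NAME", Types.VARCHAR, 255, 0);
-    rs.addRow("Hello");
-    rs.addRow("World");
-    new Csv().write("test.csv", rs, null); // null: default character set
-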
-@tutorial_1221_h3
-Reading a CSV File from a Java Application
-
-@tutorial_1222_p
- It is possible to read a CSV file without opening a database. Example:
-
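- A minimal sketch (test.csv is a placeholder; null column names mean they are read from the first line of the file):
-
-    import java.sql.ResultSet;
-    import org.h2.tools.Csv;
-
-    ResultSet rs = new Csv().read("test.csv", null, null);
-    while (rs.next()) {
-        System.out.println(rs.getString(1));
-    }
-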
-@tutorial_1223_h2
-Upgrade, Backup, and Restore
-
-@tutorial_1224_h3
-Database Upgrade
-
-@tutorial_1225_p
- The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine.
-
-@tutorial_1226_h3
-Backup using the Script Tool
-
-@tutorial_1227_p
- The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is run as follows:
-
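- Invoked from Java rather than the command line, a sketch might look like this (URL, user, and file name are placeholders; the flags shown are the commonly documented ones):
-
-    import org.h2.tools.Script;
-
-    Script.main("-url", "jdbc:h2:~/test", "-user", "sa",
-            "-script", "backup.zip", "-options", "compression", "zip");
-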
-@tutorial_1228_p
- It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built-in FTP server could be used to retrieve the file from the server.
-
-@tutorial_1229_h3
-Restore from a Script
-
-@tutorial_1230_p
- To restore a database from a SQL script file, you can use the RunScript tool:
-
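- The Java equivalent might look like this (URL, user, and file name are placeholders):
-
-    import org.h2.tools.RunScript;
-
-    RunScript.main("-url", "jdbc:h2:~/test", "-user", "sa", "-script", "backup.sql");
-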
-@tutorial_1231_p
- For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built-in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the referenced script files need to be available on the server side.
-
-@tutorial_1232_h3
-Online Backup
-
-@tutorial_1233_p
- The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable.
-
-@tutorial_1234_p
- The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply.
-
-@tutorial_1235_p
- The Backup tool (org.h2.tools.Backup) cannot be used to create an online backup; the database must not be in use while running this program.
-
-@tutorial_1236_p
- Creating a backup by copying the database files while the database is running is not supported, except if the file system supports creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order.
-
-@tutorial_1237_h2
-Command Line Tools
-
-@tutorial_1238_p
- This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example:
-
-@tutorial_1239_p
- The command line tools are:
-
-@tutorial_1240_code
-Backup
-
-@tutorial_1241_li
- creates a backup of a database.
-
-@tutorial_1242_code
-ChangeFileEncryption
-
-@tutorial_1243_li
- allows changing the file encryption password or algorithm of a database.
-
-@tutorial_1244_code
-Console
-
-@tutorial_1245_li
- starts the browser based H2 Console.
-
-@tutorial_1246_code
-ConvertTraceFile
-
-@tutorial_1247_li
- converts a .trace.db file to a Java application and SQL script.
-
-@tutorial_1248_code
-CreateCluster
-
-@tutorial_1249_li
- creates a cluster from a standalone database.
-
-@tutorial_1250_code
-DeleteDbFiles
-
-@tutorial_1251_li
- deletes all files belonging to a database.
-
-@tutorial_1252_code
-Recover
-
-@tutorial_1253_li
- helps recovering a corrupted database.
-
-@tutorial_1254_code
-Restore
-
-@tutorial_1255_li
- restores a backup of a database.
-
-@tutorial_1256_code
-RunScript
-
-@tutorial_1257_li
- runs a SQL script against a database.
-
-@tutorial_1258_code
-Script
-
-@tutorial_1259_li
- allows converting a database to a SQL script for backup or migration.
-
-@tutorial_1260_code
-Server
-
-@tutorial_1261_li
- is used in the server mode to start an H2 server.
-
-@tutorial_1262_code
-Shell
-
-@tutorial_1263_li
- is a command line database tool.
-
-@tutorial_1264_p
- The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation.
-
-@tutorial_1265_h2
-The Shell Tool
-
-@tutorial_1266_p
- The Shell tool is a simple interactive command line tool. To start it, type:
-
-@tutorial_1267_p
- You will be asked for a database URL, JDBC driver, user name, and password. The connection settings can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows entering multi-line statements:
-
-@tutorial_1268_p
- By default, results are printed as a table. For results with many columns, consider using the list mode:
-
-@tutorial_1269_h2
-Using OpenOffice Base
-
-@tutorial_1270_p
- OpenOffice.org Base supports database access over the JDBC API. To connect to an H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to an H2 database are:
-
-@tutorial_1271_li
-Start OpenOffice Writer, go to [Tools], [Options]
-
-@tutorial_1272_li
-Make sure you have selected a Java runtime environment in OpenOffice.org / Java
-
-@tutorial_1273_li
-Click [Class Path...], [Add Archive...]
-
-@tutorial_1274_li
-Select your h2 jar file (location is up to you, could be wherever you choose)
-
-@tutorial_1275_li
-Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter)
-
-@tutorial_1276_li
-Start OpenOffice Base
-
-@tutorial_1277_li
-Connect to an existing database; select [JDBC]; [Next]
-
-@tutorial_1278_li
-Example datasource URL: jdbc:h2:~/test
-
-@tutorial_1279_li
-JDBC driver class: org.h2.Driver
-
-@tutorial_1280_p
- Now you can access the database stored in the current user's home directory.
-
-@tutorial_1281_p
- To use H2 in NeoOffice (OpenOffice without X11):
-
-@tutorial_1282_li
-In NeoOffice, go to [NeoOffice], [Preferences]
-
-@tutorial_1283_li
-Look for the page under [NeoOffice], [Java]
-
-@tutorial_1284_li
-Click [Class Path], [Add Archive...]
-
-@tutorial_1285_li
-Select your h2 jar file (location is up to you, could be wherever you choose)
-
-@tutorial_1286_li
-Click [OK] (as much as needed), restart NeoOffice.
-
-@tutorial_1287_p
- Now, when creating a new database using the "Database Wizard":
-
-@tutorial_1288_li
-Click [File], [New], [Database].
-
-@tutorial_1289_li
-Select [Connect to existing database] and then select [JDBC]. Click [Next].
-
-@tutorial_1290_li
-Example datasource URL: jdbc:h2:~/test
-
-@tutorial_1291_li
-JDBC driver class: org.h2.Driver
-
-@tutorial_1292_p
- Another solution to use H2 in NeoOffice is:
-
-@tutorial_1293_li
-Package the h2 jar within an extension package
-
-@tutorial_1294_li
-Install it as a Java extension in NeoOffice
-
-@tutorial_1295_p
- This can be done by creating it using the NetBeans OpenOffice plugin. See also Extensions Development.
-
-@tutorial_1296_h2
-Java Web Start / JNLP
-
-@tutorial_1297_p
- When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur: java.security.AccessControlException: access denied (java.io.FilePermission ... read). Example permission tags:
-
-@tutorial_1298_h2
-Using a Connection Pool
-
-@tutorial_1299_p
- For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as fast to get a connection from the built-in connection pool as to get one using DriverManager.getConnection(). The built-in connection pool is used as follows:
-
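- A minimal sketch (URL and credentials are placeholders):
-
-    import java.sql.Connection;
-    import org.h2.jdbcx.JdbcConnectionPool;
-
-    JdbcConnectionPool cp = JdbcConnectionPool.create("jdbc:h2:~/test", "sa", "sa");
-    Connection conn = cp.getConnection();
-    // ... use the connection ...
-    conn.close();  // returns the connection to the pool
-    cp.dispose();  // when the application stops
-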
-@tutorial_1300_h2
-Fulltext Search
-
-@tutorial_1301_p
- H2 includes two fulltext search implementations. One uses Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database.
-
-@tutorial_1302_h3
-Using the Native Fulltext Search
-
-@tutorial_1303_p
- To initialize, call:
-
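- For example (stat is an open java.sql.Statement):
-
-    stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT FOR \"org.h2.fulltext.FullText.init\"");
-    stat.execute("CALL FT_INIT()");
-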
-@tutorial_1304_p
- You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using:
-
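- For example (schema, table, and column list are placeholders; stat is an open java.sql.Statement):
-
-    // NULL indexes all columns; pass a comma separated list to restrict
-    stat.execute("CALL FT_CREATE_INDEX('PUBLIC', 'TEST', NULL)");
-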
-@tutorial_1305_p
- PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional; in this case all columns are indexed. The index is updated in real time. To search the index, use the following query:
-
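- For example (the search text is a placeholder; 0, 0 mean no limit and no offset; stat is an open java.sql.Statement):
-
-    ResultSet rs = stat.executeQuery("SELECT * FROM FT_SEARCH('Hello', 0, 0)");
-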
-@tutorial_1306_p
- This will produce a result set that contains the query needed to retrieve the data:
-
-@tutorial_1307_p
- To drop an index on a table:
-
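- For example (schema and table name are placeholders; stat is an open java.sql.Statement):
-
-    stat.execute("CALL FT_DROP_INDEX('PUBLIC', 'TEST')");
-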
-@tutorial_1308_p
- To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0). The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0];
-
-@tutorial_1309_p
- You can also call the index from within a Java application:
-
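- A sketch (conn is an open java.sql.Connection; 0, 0 mean no limit and no offset):
-
-    import java.sql.ResultSet;
-    import org.h2.fulltext.FullText;
-
-    ResultSet rs = FullText.search(conn, "Hello", 0, 0);
-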
-@tutorial_1310_h3
-Using the Lucene Fulltext Search
-
-@tutorial_1311_p
- To use the Lucene full text search, you need the Lucene library in the classpath. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call:
-
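- For example (stat is an open java.sql.Statement):
-
-    stat.execute("CREATE ALIAS IF NOT EXISTS FTL_INIT FOR \"org.h2.fulltext.FullTextLucene.init\"");
-    stat.execute("CALL FTL_INIT()");
-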
-@tutorial_1312_p
- You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using:
-
-@tutorial_1313_p
- PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional; in this case all columns are indexed. The index is updated in real time. To search the index, use the following query:
-
-@tutorial_1314_p
- This will produce a result set that contains the query needed to retrieve the data:
-
-@tutorial_1315_p
- To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database):
-
-@tutorial_1316_p
- To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0). The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0];
-
-@tutorial_1317_p
- You can also call the index from within a Java application:
-
-@tutorial_1318_p
- The Lucene fulltext search supports searching in specific columns only. Column names must be uppercase (except if the original columns are double quoted). For column names starting with an underscore (_), another underscore needs to be added. Example:
-
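- A sketch (NAME is a hypothetical column; the column:text syntax is passed to the Lucene query parser; stat is an open java.sql.Statement):
-
-    ResultSet rs = stat.executeQuery("SELECT * FROM FTL_SEARCH('NAME:Hello', 0, 0)");
-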
-@tutorial_1319_p
- The Lucene fulltext search implementation is not synchronized internally. If you update the database and query the fulltext search concurrently (directly using the Java API of H2 or Lucene itself), you need to ensure operations are properly synchronized. If this is not the case, you may get exceptions such as org.apache.lucene.store.AlreadyClosedException: this IndexReader is closed.
-
-@tutorial_1320_h2
-User-Defined Variables
-
-@tutorial_1321_p
- This database supports user-defined variables. Variables start with @
and can be used wherever expressions or parameters are allowed. Variables are not persisted and are session scoped, which means they are only visible within the session in which they are defined. A value is usually assigned using the SET command:
-
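-For example:
-
-    SET @USER = 'Joe';
-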
-@tutorial_1322_p
- The value can also be changed using the SET() method. This is useful in queries:
-
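-A sketch computing a running product over a generated sequence:
-
-    SET @TOTAL = NULL;
-    SELECT X, SET(@TOTAL, IFNULL(@TOTAL, 1.) * X) F FROM SYSTEM_RANGE(1, 50);
-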
-@tutorial_1323_p
- Variables that are not set evaluate to NULL
. The data type of a user-defined variable is the data type of the value assigned to it, which means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable.
-
-@tutorial_1324_h2
-Date and Time
-
-@tutorial_1325_p
- Date, time and timestamp values support ISO 8601 formatting, including time zone:
-
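-For example:
-
-    CALL TIMESTAMP '2008-01-01 12:00:00+01:00';
-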
-@tutorial_1326_p
- If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. Please note that changing the time zone after the H2 driver is loaded is not supported.
-
-@tutorial_1327_h2
-Using Spring
-
-@tutorial_1328_h3
-Using the TCP Server
-
-@tutorial_1329_p
- Use the following configuration to start and stop the H2 TCP server using the Spring Framework:
-
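-A sketch of such a bean definition (the port and server options are illustrative):
-
-    <bean id="org.h2.tools.Server" class="org.h2.tools.Server"
-            factory-method="createTcpServer"
-            init-method="start" destroy-method="stop">
-        <constructor-arg value="-tcp,-tcpAllowOthers,-tcpPort,8043" />
-    </bean>
-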
-@tutorial_1330_p
- The destroy-method
will help prevent exceptions on hot-redeployment or when restarting the server.
-
-@tutorial_1331_h3
-Error Code Incompatibility
-
-@tutorial_1332_p
- There is an incompatibility between the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This causes the JdbcTemplate to not detect a duplicate key condition, so a DataIntegrityViolationException
is thrown instead of DuplicateKeyException
. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath:
-
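-A sketch of such a file (the bean overrides the error codes Spring uses for H2; 23001 and 23505 are H2's duplicate key error codes, and the other code lists are omitted here):
-
-    <beans>
-        <bean id="H2" name="H2"
-                class="org.springframework.jdbc.support.SQLErrorCodes">
-            <property name="duplicateKeyCodes">
-                <value>23001,23505</value>
-            </property>
-        </bean>
-    </beans>
-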
-@tutorial_1333_h2
-OSGi
-
-@tutorial_1334_p
- The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties: OSGI_JDBC_DRIVER_CLASS=org.h2.Driver
and OSGI_JDBC_DRIVER_NAME=H2
. The OSGI_JDBC_DRIVER_VERSION
property reflects the version of the driver as is.
-
-@tutorial_1335_p
- The following standard configuration properties are supported: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER
. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL.
-
-@tutorial_1336_h2
-Java Management Extension (JMX)
-
-@tutorial_1337_p
- Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX=TRUE
to the database URL when opening the database. Various tools support JMX; one such tool is jconsole
. When opening the jconsole
, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans
section. Under org.h2
you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character).
-
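-For example:
-
-    jdbc:h2:~/test;JMX=TRUE
-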
-@tutorial_1338_p
- The following attributes and operations are supported:
-
-@tutorial_1339_code
-CacheSize
-
-@tutorial_1340_li
-: the cache size currently in use in KB.
-
-@tutorial_1341_code
-CacheSizeMax
-
-@tutorial_1342_li
- (read/write): the maximum cache size in KB.
-
-@tutorial_1343_code
-Exclusive
-
-@tutorial_1344_li
-: whether this database is open in exclusive mode or not.
-
-@tutorial_1345_code
-FileReadCount
-
-@tutorial_1346_li
-: the number of file read operations since the database was opened.
-
-@tutorial_1347_code
-FileSize
-
-@tutorial_1348_li
-: the file size in KB.
-
-@tutorial_1349_code
-FileWriteCount
-
-@tutorial_1350_li
-: the number of file write operations since the database was opened.
-
-@tutorial_1351_code
-FileWriteCountTotal
-
-@tutorial_1352_li
-: the number of file write operations since the database was created.
-
-@tutorial_1353_code
-LogMode
-
-@tutorial_1354_li
- (read/write): the current transaction log mode. See SET LOG
for details.
-
-@tutorial_1355_code
-Mode
-
-@tutorial_1356_li
-: the compatibility mode (REGULAR
if no compatibility mode is used).
-
-@tutorial_1357_code
-MultiThreaded
-
-@tutorial_1358_li
-: true if multi-threaded is enabled.
-
-@tutorial_1359_code
-Mvcc
-
-@tutorial_1360_li
-: true if MVCC
is enabled.
-
-@tutorial_1361_code
-ReadOnly
-
-@tutorial_1362_li
-: true if the database is read-only.
-
-@tutorial_1363_code
-TraceLevel
-
-@tutorial_1364_li
- (read/write): the file trace level.
-
-@tutorial_1365_code
-Version
-
-@tutorial_1366_li
-: the database version in use.
-
-@tutorial_1367_code
-listSettings
-
-@tutorial_1368_li
-: list the database settings.
-
-@tutorial_1369_code
-listSessions
-
-@tutorial_1370_li
-: list the open sessions, including currently executing statement (if any) and locked tables (if any).
-
-@tutorial_1371_p
- To enable JMX, you may need to set the system properties com.sun.management.jmxremote
and com.sun.management.jmxremote.port
as required by the JVM.
-
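-A sketch of such a command line (the port number is illustrative; the JVM may require further jmxremote properties, see its documentation):
-
-    java -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=9999 -cp h2*.jar org.h2.tools.Server
-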
diff --git a/h2/src/docsrc/text/_docs_ja.utf8.txt b/h2/src/docsrc/text/_docs_ja.utf8.txt
deleted file mode 100644
index 0de2d4df6a..0000000000
--- a/h2/src/docsrc/text/_docs_ja.utf8.txt
+++ /dev/null
@@ -1,11985 +0,0 @@
-@advanced_1000_h1
-#Advanced
-
-@advanced_1001_a
-# Result Sets
-
-@advanced_1002_a
-# Large Objects
-
-@advanced_1003_a
-# Linked Tables
-
-@advanced_1004_a
-# Spatial Features
-
-@advanced_1005_a
-# Recursive Queries
-
-@advanced_1006_a
-# Updatable Views
-
-@advanced_1007_a
-# Transaction Isolation
-
-@advanced_1008_a
-# Multi-Version Concurrency Control (MVCC)
-
-@advanced_1009_a
-# Clustering / High Availability
-
-@advanced_1010_a
-# Two Phase Commit
-
-@advanced_1011_a
-# Compatibility
-
-@advanced_1012_a
-# Standards Compliance
-
-@advanced_1013_a
-# Run as Windows Service
-
-@advanced_1014_a
-# ODBC Driver
-
-@advanced_1015_a
-# Using H2 in Microsoft .NET
-
-@advanced_1016_a
-# ACID
-
-@advanced_1017_a
-# Durability Problems
-
-@advanced_1018_a
-# Using the Recover Tool
-
-@advanced_1019_a
-# File Locking Protocols
-
-@advanced_1020_a
-# Using Passwords
-
-@advanced_1021_a
-# Password Hash
-
-@advanced_1022_a
-# Protection against SQL Injection
-
-@advanced_1023_a
-# Protection against Remote Access
-
-@advanced_1024_a
-# Restricting Class Loading and Usage
-
-@advanced_1025_a
-# Security Protocols
-
-@advanced_1026_a
-# TLS Connections
-
-@advanced_1027_a
-# Universally Unique Identifiers (UUID)
-
-@advanced_1028_a
-# Settings Read from System Properties
-
-@advanced_1029_a
-# Setting the Server Bind Address
-
-@advanced_1030_a
-# Pluggable File System
-
-@advanced_1031_a
-# Split File System
-
-@advanced_1032_a
-# Database Upgrade
-
-@advanced_1033_a
-# Java Objects Serialization
-
-@advanced_1034_a
-# Limits and Limitations
-
-@advanced_1035_a
-# Glossary and Links
-
-@advanced_1036_h2
-Result Sets
-
-@advanced_1037_h3
-#Statements that Return a Result Set
-
-@advanced_1038_p
-# The following statements return a result set: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP
. All other statements return an update count.
-
-@advanced_1039_h3
-Limiting the Number of Rows
-
-@advanced_1040_p
-# Before the result is returned to the application, all rows are read by the database. Server side cursors are currently not supported. If the application only needs the first few rows, the result set size should be limited to improve performance. This can be done using LIMIT
in a query (example: SELECT * FROM TEST LIMIT 100
), or by using Statement.setMaxRows(max)
.
-
-@advanced_1041_h3
-Large Result Sets and External Sorting
-
-@advanced_1042_p
-# For large result sets, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS
. If ORDER BY
is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together.
-
-@advanced_1043_h2
-Large Objects
-
-@advanced_1044_h3
-Storing and Reading Large Objects
-
-@advanced_1045_p
-# If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory; streams are used instead. To store a BLOB, use PreparedStatement.setBinaryStream
. To store a CLOB, use PreparedStatement.setCharacterStream
. To read a BLOB, use ResultSet.getBinaryStream
, and to read a CLOB, use ResultSet.getCharacterStream
. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side.
-
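-A minimal sketch of storing a BLOB using a stream (the table TEST and the file name are illustrative; conn is an open connection):
-
-    PreparedStatement prep = conn.prepareStatement(
-        "INSERT INTO TEST(ID, DATA) VALUES(?, ?)");
-    prep.setInt(1, 1);
-    prep.setBinaryStream(2, new FileInputStream("data.bin"), -1);
-    prep.execute();
-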
-@advanced_1046_h3
-#When to use CLOB/BLOB
-
-@advanced_1047_p
-# By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in place; the threshold can be set using MAX_LENGTH_INPLACE_LOB. Even so, there is an overhead to using CLOB/BLOB, so BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column.
-
-@advanced_1048_h3
-#Large Object Compression
-
-@advanced_1049_p
-# The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS=TRUE
to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster.
-
-@advanced_1050_h2
-Linked Tables
-
-@advanced_1051_p
-# This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE
statement:
-
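-For example (matching the PostgreSQL example discussed below):
-
-    CREATE LINKED TABLE LINK('org.postgresql.Driver',
-        'jdbc:postgresql:test', 'sa', 'sa', 'TEST');
-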
-@advanced_1052_p
-# You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID=1
, then the following query is run against the PostgreSQL database: SELECT * FROM TEST WHERE ID=?
. The same happens for insert and update statements. Only simple statements are executed against the target database, which means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible.
-
-@advanced_1053_p
-# To view the statements that are executed against the target table, set the trace level to 3.
-
-@advanced_1054_p
-# If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections=false
.
-
-@advanced_1055_p
-# The statement CREATE LINKED TABLE supports an optional schema name parameter.
-
-@advanced_1056_p
-# The following are not supported because they may result in a deadlock: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead).
-
-@advanced_1057_p
-# Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns need to be cast to a supported type.
-
-@advanced_1058_h2
-#Updatable Views
-
-@advanced_1059_p
-# By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows:
-
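-A sketch of such a trigger (the view name and the trigger class are illustrative):
-
-    CREATE TRIGGER TRIG_TEST_VIEW INSTEAD OF INSERT, UPDATE, DELETE
-        ON TEST_VIEW FOR EACH ROW CALL "com.acme.TestViewTrigger";
-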
-@advanced_1060_p
-# Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView
.
-
-@advanced_1061_h2
-Transaction Isolation
-
-@advanced_1062_p
-# Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details.
-
-@advanced_1063_p
-# Transaction isolation is provided for all data manipulation language (DML) statements.
-
-@advanced_1064_p
-# Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect).
-
-@advanced_1065_p
-# This database supports the following transaction isolation levels:
-
-@advanced_1066_b
-Read Committed
-
-@advanced_1067_li
-# This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level.
-
-@advanced_1068_li
-# To enable, execute the SQL statement SET LOCK_MODE 3
-
-@advanced_1069_li
-# or append ;LOCK_MODE=3
to the database URL: jdbc:h2:~/test;LOCK_MODE=3
-
-@advanced_1070_b
-Serializable
-
-@advanced_1071_li
-# Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1
-
-@advanced_1072_li
-# or append ;LOCK_MODE=1
to the database URL: jdbc:h2:~/test;LOCK_MODE=1
-
-@advanced_1073_b
-Read Uncommitted
-
-@advanced_1074_li
-# This level means that transaction isolation is disabled.
-
-@advanced_1075_li
-# To enable, execute the SQL statement SET LOCK_MODE 0
-
-@advanced_1076_li
-# or append ;LOCK_MODE=0
to the database URL: jdbc:h2:~/test;LOCK_MODE=0
-
-@advanced_1077_p
-# When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited.
-
-@advanced_1078_b
-Dirty Reads
-
-@advanced_1079_li
-# Means a connection can read uncommitted changes made by another connection.
-
-@advanced_1080_li
-# Possible with: read uncommitted
-
-@advanced_1081_b
-Non-Repeatable Reads
-
-@advanced_1082_li
-# A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result.
-
-@advanced_1083_li
-# Possible with: read uncommitted, read committed
-
-@advanced_1084_b
-Phantom Reads
-
-@advanced_1085_li
-# A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row.
-
-@advanced_1086_li
-# Possible with: read uncommitted, read committed
-
-@advanced_1087_h3
-Table Level Locking
-
-@advanced_1088_p
-# The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. Other connections may also hold a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connections must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random.
-
-@advanced_1089_h3
-Lock Timeout
-
-@advanced_1090_p
-# If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection.
-
-@advanced_1091_h2
-#Multi-Version Concurrency Control (MVCC)
-
-@advanced_1092_p
-# The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE
. Connections only 'see' committed data and their own changes. That means if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed does the new value become visible to other connections (read committed). If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires.
-
-@advanced_1093_p
-# To use the MVCC feature, append ;MVCC=TRUE
to the database URL:
-
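-For example:
-
-    jdbc:h2:~/test;MVCC=TRUE
-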
-@advanced_1094_p
-# The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open.
-
-@advanced_1095_p
-# If MVCC is enabled, changing the lock mode (LOCK_MODE
) has no effect.
-
-@advanced_1096_div
-# The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED=TRUE
; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO
has no effect.
-
-@advanced_1096_h2
-Clustering / High Availability
-
-@advanced_1097_p
-# This database supports a simple clustering / high availability mechanism. The architecture is: two database servers run on two different computers, and on both computers is a copy of the same database. If both servers run, each database operation is executed on both computers. If one server fails (power, hardware or network failure), the other server can still continue to work. From this point on, the operations will be executed only on one server until the other server is back up.
-
-@advanced_1098_p
-# Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster
tool without stopping the remaining server. Applications that are still connected are automatically disconnected; however, when appending ;AUTO_RECONNECT=TRUE
, they will recover from that.
-
-@advanced_1099_p
-# To initialize the cluster, use the following steps:
-
-@advanced_1100_li
-#Create a database
-
-@advanced_1101_li
-#Use the CreateCluster
tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data.
-
-@advanced_1102_li
-#Start two servers (one for each copy of the database)
-
-@advanced_1103_li
-#You are now ready to connect to the databases with the client application(s)
-
-@advanced_1104_h3
-Using the CreateCluster Tool
-
-@advanced_1105_p
-# To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers.
-
-@advanced_1106_li
-#Create two directories: server1, server2
. Each directory will simulate a directory on a computer.
-
-@advanced_1107_li
-#Start a TCP server pointing to the first directory. You can do this using the command line (this command and the ones for the following steps are sketched after this list):
-
-@advanced_1108_li
-#Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. You can do this using the command line:
-
-@advanced_1109_li
-#Use the CreateCluster
tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line:
-
-@advanced_1110_li
-#You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc:h2:tcp://localhost:9101,localhost:9102/~/test
-
-@advanced_1111_li
-#If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible.
-
-@advanced_1112_li
-#To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster
tool.
-
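-A sketch of the commands for the steps above (ports and directory names are illustrative):
-
-    java org.h2.tools.Server -tcpPort 9101 -baseDir server1
-    java org.h2.tools.Server -tcpPort 9102 -baseDir server2
-    java org.h2.tools.CreateCluster -urlSource jdbc:h2:tcp://localhost:9101/~/test -urlTarget jdbc:h2:tcp://localhost:9102/~/test -user sa -serverList localhost:9101,localhost:9102
-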
-@advanced_1113_h3
-#Detect Which Cluster Instances are Running
-
-@advanced_1114_p
-# To find out which cluster nodes are currently running, execute the following SQL statement:
-
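-For example:
-
-    SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME='CLUSTER';
-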
-@advanced_1115_p
-# If the result is ''
(two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quotes. Example: 'server1:9191,server2:9191'
.
-
-@advanced_1116_p
-# It is also possible to get the list of servers by using Connection.getClientInfo().
-
-@advanced_1117_p
-# The property list returned from getClientInfo()
contains a numServers
property that returns the number of servers that are in the connection list. To get the actual servers, getClientInfo()
also has properties server0
..serverX
, where serverX is the number of servers minus 1.
-
-@advanced_1118_p
-# Example: to get the second server in the connection list, use getClientInfo('server1')
. Note: The serverX
property only returns IP addresses and ports and not hostnames.
-
-@advanced_1119_h3
-Clustering Algorithm and Limitations
-
-@advanced_1120_p
-# Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. There is currently no load balancing, in order to avoid problems with transactions. The following functions may yield different results on different cluster nodes and must be executed with care: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND()
[when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE
). However, they can be used in read-only statements and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be manually requested and then used to insert data (using two statements).
-
-@advanced_1121_p
-# When using the cluster mode, result sets are read fully into memory by the client, so that there is no problem if the server that executed the query dies. Result sets must fit in memory on the client side.
-
-@advanced_1122_p
-# The SQL statement SET AUTOCOMMIT FALSE
is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false)
needs to be called.
-
-@advanced_1123_p
-# It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might lead to different results, for example when conditionally incrementing a value in a row.
-
-@advanced_1124_h2
-Two Phase Commit
-
-@advanced_1125_p
-# The two phase commit protocol is supported. Two-phase commit works as follows (a minimal SQL walkthrough is sketched after this list):
-
-@advanced_1126_li
-#Autocommit needs to be switched off
-
-@advanced_1127_li
-#A transaction is started, for example by inserting a row
-
-@advanced_1128_li
-#The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName
-
-@advanced_1129_li
-#The transaction can now be committed or rolled back
-
-@advanced_1130_li
-#If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt'
-
-@advanced_1131_li
-#When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT
-
-@advanced_1132_li
-#Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName
or ROLLBACK TRANSACTION transactionName
-
-@advanced_1133_li
-#The database needs to be closed and re-opened to apply the changes
-
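-A minimal SQL walkthrough of these steps (the table and the transaction name XID_TEST are illustrative):
-
-    SET AUTOCOMMIT FALSE;
-    INSERT INTO TEST VALUES(1, 'Hello');
-    PREPARE COMMIT XID_TEST;
-    -- after a failure and re-connect:
-    SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT;
-    COMMIT TRANSACTION XID_TEST;
-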
-@advanced_1134_h2
-Compatibility
-
-@advanced_1135_p
-# This database is (up to a certain point) compatible with other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible.
-
-@advanced_1136_h3
-Transaction Commit when Autocommit is On
-
-@advanced_1137_p
-# At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed.
-
-@advanced_1138_h3
-Keywords / Reserved Words
-
-@advanced_1139_p
-# There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). The list is currently:
-
-@advanced_1140_code
-# CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE
-
-@advanced_1141_p
-# Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP
.
-
-@advanced_1142_h2
-#Standards Compliance
-
-@advanced_1143_p
-# This database tries to be as standards compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date: SQL-92, SQL:1999, and SQL:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible with other databases.
-
-@advanced_1144_h3
-#Supported Character Sets, Character Encoding, and Unicode
-
-@advanced_1145_p
-# H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use.
-
-@advanced_1146_h2
-Run as Windows Service
-
-@advanced_1147_p
-# Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service
.
-
-@advanced_1148_p
-# The service wrapper bundled with H2 is a 32-bit version. On 64-bit versions of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger.
-
-@advanced_1149_p
-# When running the database as a service, absolute paths should be used. Using ~
in the database URL is problematic in this case, because it refers to the home directory of the current user. The service might run as a different user than expected, so the database files could end up in an unexpected place.
-
-@advanced_1150_h3
-Install the Service
-
-@advanced_1151_p
-# The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat
. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear.
-
-@advanced_1152_h3
-Start the Service
-
-@advanced_1153_p
-# You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat
. Please note that the batch file does not print an error message if the service is not installed.
-
-@advanced_1154_h3
-Connect to the H2 Console
-
-@advanced_1155_p
-# After installing and starting the service, you can connect to the H2 Console application using a browser. Double click on 3_start_browser.bat
to do that. The default port (8082) is hard coded in the batch file.
-
-@advanced_1156_h3
-Stop the Service
-
-@advanced_1157_p
-# To stop the service, double click on 4_stop_service.bat
. Please note that the batch file does not print an error message if the service is not installed or started.
-
-@advanced_1158_h3
-Uninstall the Service
-
-@advanced_1159_p
-# To uninstall the service, double click on 5_uninstall_service.bat
. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear.
-
-@advanced_1160_h3
-#Additional JDBC drivers
-
-@advanced_1161_p
-# To use other databases (for example MySQL), the location of the JDBC drivers of those databases needs to be added to the environment variables H2DRIVERS
or CLASSPATH
before installing the service. Multiple drivers can be set; each entry needs to be separated with a ;
(Windows) or :
(other operating systems). Spaces in the path names are supported. The settings must not be quoted.
-
-@advanced_1162_h2
-ODBC Driver
-
-@advanced_1163_p
-# This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications.
-
-@advanced_1164_p
-# To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c:/windows/syswow64/odbcad32.exe
. At this point you set up your DSN just like you would on any other system. See also: Re: ODBC Driver on Windows 64 bit
-
-@advanced_1165_h3
-ODBC Installation
-
-@advanced_1166_p
-# First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*
) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http://www.postgresql.org/ftp/odbc/versions/msi.
-
-@advanced_1167_h3
-Starting the Server
-
-@advanced_1168_p
-# After installing the ODBC driver, start the H2 Server using the command line:
-
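-For example:
-
-    java -cp h2*.jar org.h2.tools.Server
-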
-@advanced_1169_p
-# The PG Server (PG for PostgreSQL protocol) is started as well. By default, databases are stored in the current working directory where the server is started. Use -baseDir
to save databases in another directory, for example the user home directory:
-
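-For example:
-
-    java -cp h2*.jar org.h2.tools.Server -baseDir ~
-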
-@advanced_1170_p
-# The PG server can be started and stopped from within a Java application as follows:
-
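-A minimal sketch using the org.h2.tools.Server API:
-
-    import org.h2.tools.Server;
-    ...
-    Server server = Server.createPgServer("-baseDir", "~");
-    server.start();
-    ...
-    server.stop();
-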
-@advanced_1171_p
-# By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers
when starting the server.
-
-@advanced_1172_p
-# To map an ODBC database name to a different JDBC database name, use the option -key
when starting the server. Please note only one mapping is allowed. The following will map the ODBC database named TEST
to the database URL jdbc:h2:~/data/test;cipher=aes
:
-
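-A sketch of such a command line:
-
-    java org.h2.tools.Server -pg -key TEST "~/data/test;cipher=aes"
-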
-@advanced_1173_h3
-ODBC Configuration
-
-@advanced_1174_p
-# After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe
to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini
file (which may be different from the GUI).
-
-@advanced_1175_th
-Property
-
-@advanced_1176_th
-Example
-
-@advanced_1177_th
-Comment
-
-@advanced_1178_td
-Data Source
-
-@advanced_1179_td
-H2 Test
-
-@advanced_1180_td
-The name of the ODBC Data Source
-
-@advanced_1181_td
-Database
-
-@advanced_1182_td
-#~/test;ifexists=true
-
-@advanced_1183_td
-# The database name. This can include connection settings. By default, the database is stored in the current working directory where the Server is started, except when the -baseDir setting is used. The name must be at least 3 characters.
-
-@advanced_1184_td
-#Servername
-
-@advanced_1185_td
-localhost
-
-@advanced_1186_td
-The server name or IP address
-
-@advanced_1187_td
-By default, remote connections are not allowed.
-
-@advanced_1188_td
-#Username
-
-@advanced_1189_td
-sa
-
-@advanced_1190_td
-The database user name
-
-@advanced_1191_td
-#SSL
-
-@advanced_1192_td
-#false (disabled)
-
-@advanced_1193_td
-At this time, SSL is not supported.
-
-@advanced_1194_td
-Port
-
-@advanced_1195_td
-5435
-
-@advanced_1196_td
-The port where the PG Server is listening
-
-@advanced_1197_td
-Password
-
-@advanced_1198_td
-sa
-
-@advanced_1199_td
-The database password
-
-@advanced_1200_p
-# To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare.
-
-@advanced_1201_p
-# Afterwards, you may use this data source.
-
-@advanced_1202_h3
-PG Protocol Support Limitations
-
-@advanced_1203_p
-# At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements cannot be canceled when using the PG protocol. Also, H2 does not provide index metadata over ODBC.
-
-@advanced_1204_p
-# PostgreSQL ODBC Driver Setup requires a database password; that means it is not possible to connect to H2 databases without a password. This is a limitation of the ODBC driver.
-
-@advanced_1205_h3
-Security Considerations
-
-@advanced_1206_p
-# Currently, the PG Server does not support challenge-response authentication or password encryption. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore the ODBC driver should not be used where security is important.
-
-@advanced_1207_p
-# The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator.
-
-@advanced_1208_h3
-#Using Microsoft Access
-
-@advanced_1209_p
-# When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option: Tools - Options - Edit/Find - ODBC fields.
-
-@advanced_1210_h2
-#Using H2 in Microsoft .NET
-
-@advanced_1211_p
-# The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access an H2 database on .NET using the JDBC API, or using the ADO.NET interface.
-
-@advanced_1212_h3
-#Using the ADO.NET API on .NET
-
-@advanced_1213_p
-# An implementation of the ADO.NET interface is available in the open source project H2Sharp.
-
-@advanced_1214_h3
-#Using the JDBC API on .NET
-
-@advanced_1215_li
-#Install the .NET Framework from Microsoft. Mono has not yet been tested.
-
-@advanced_1216_li
-#Install IKVM.NET.
-
-@advanced_1217_li
-#Copy the h2*.jar
file to ikvm/bin
-
-@advanced_1218_li
-#Run the H2 Console using: ikvm -jar h2*.jar
-
-@advanced_1219_li
-#Convert the H2 Console to an .exe
file using: ikvmc -target:winexe h2*.jar
. You may ignore the warnings.
-
-@advanced_1220_li
-#Create a .dll
file using (change the version accordingly): ikvmc.exe -target:library -version:1.0.69.0 h2*.jar
-
-@advanced_1221_p
-# If you want your C# application to use H2, you need to add the h2.dll
and the IKVM.OpenJDK.ClassLibrary.dll
to your C# solution. Here is some sample code:
-
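-A minimal sketch of such C# code (the query is illustrative):
-
-    using java.sql;
-
-    class Test
-    {
-        static public void Main()
-        {
-            org.h2.Driver.load();
-            Connection conn = DriverManager.getConnection(
-                "jdbc:h2:~/test", "sa", "sa");
-            Statement stat = conn.createStatement();
-            ResultSet rs = stat.executeQuery("SELECT 'Hello World'");
-            while (rs.next())
-            {
-                System.Console.WriteLine(rs.getString(1));
-            }
-        }
-    }
-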
-@advanced_1222_h2
-ACID
-
-@advanced_1223_p
-# In the database world, ACID stands for:
-
-@advanced_1224_li
-#Atomicity: transactions must be atomic, meaning either all tasks are performed or none.
-
-@advanced_1225_li
-#Consistency: all operations must comply with the defined constraints.
-
-@advanced_1226_li
-#Isolation: transactions must be isolated from each other.
-
-@advanced_1227_li
-#Durability: committed transactions will not be lost.
-
-@advanced_1228_h3
-Atomicity
-
-@advanced_1229_p
-# Transactions in this database are always atomic.
-
-@advanced_1230_h3
-Consistency
-
-@advanced_1231_p
-# By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled.
-
-@advanced_1232_h3
-Isolation
-
-@advanced_1233_p
-# For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'.
-
-@advanced_1234_h3
-Durability
-
-@advanced_1235_p
-# This database does not guarantee that all committed transactions survive a power failure. Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode.
-
-@advanced_1236_h2
-Durability Problems
-
-@advanced_1237_p
-# Complete durability means all committed transactions survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test
.
-
-@advanced_1238_h3
-Ways to (Not) Achieve Durability
-
-@advanced_1239_p
-# Making sure that committed transactions are not lost is more complicated than it first seems. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile
supports the modes rws
and rwd
:
-
-@advanced_1240_code
-#rwd
-
-@advanced_1241_li
-#: every update to the file's content is written synchronously to the underlying storage device.
-
-@advanced_1242_code
-#rws
-
-@advanced_1243_li
-#: in addition to rwd
, every update to the metadata is written synchronously.
-
-@advanced_1244_p
-# A test (org.h2.test.poweroff.TestWrite
) with one of those modes achieves around 50 thousand write operations per second. Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. If the hard drive was able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. There is an overhead, so the maximum write rate must be lower than that.
-
-@advanced_1245_p
-# Calling fsync
flushes the buffers. There are two ways to do that in Java:
-
-@advanced_1246_code
-#FileDescriptor.sync()
-
-@advanced_1247_li
-#. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium.
-
-@advanced_1248_code
-#FileChannel.force()
-
-@advanced_1249_li
-#. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it.
-
-@advanced_1250_p
-# By default, MySQL calls fsync
for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync()
or FileChannel.force()
, data is not always persisted to the hard drive, because most hard drives do not obey fsync()
: see Your Hard Drive Lies to You. In Mac OS X, fsync
does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem.
-
-@advanced_1251_p
-# Trying to flush hard drive buffers is hard, and if you do, the performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. Tests show that this cannot be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions.
-
-@advanced_1252_p
-# In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY
and CHECKPOINT SYNC
. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it.
-
-@advanced_1253_h3
-Running the Durability Test
-
-@advanced_1254_p
-# To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff
. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, then creates the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application.
-
-@advanced_1255_h2
-Using the Recover Tool
-
-@advanced_1256_p
-# The Recover
tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). To run the tool, type on the command line:
-
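-For example (run in the directory containing the database files):
-
-    java -cp h2*.jar org.h2.tools.Recover
-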
-@advanced_1257_p
-# For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript
tool or a RUNSCRIPT FROM
SQL statement. The script includes at least one CREATE USER
statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script.
-
-@advanced_1258_p
-# The Recover
tool creates a SQL script from a database file. It also processes the transaction log.
-
-@advanced_1259_p
-# To verify the database can recover at any time, append ;RECOVER_TEST=64
to the database URL in your test environment. This will simulate an application crash after each 64 writes to the database file. A log file named databaseName.h2.db.log
is created that lists the operations. The recovery is tested using an in-memory file system, that means it may require a larger heap setting.
-
-@advanced_1260_h2
-File Locking Protocols
-
-@advanced_1261_p
-# Multiple concurrent connections to the same database are supported; however, a database file can only be open for reading and writing (in embedded mode) by one process at a time. Otherwise, the processes would overwrite each other's data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted.
-
-@advanced_1262_p
-# In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. That means the existence of the lock file is not a safe protocol for file locking. However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are the 'file' method and the 'socket' method.
-
-@advanced_1263_p
-# The file locking protocols (except the file locking method 'FS') have the following limitation: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep.
-
-@advanced_1264_h3
-File Locking Method 'File'
-
-@advanced_1265_p
-# The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is:
-
-@advanced_1266_li
-#If the lock file does not exist, it is created (using the atomic operation File.createNewFile
). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition when one process deletes the lock file just after another one creates it, and a third process creates the file again. This does not occur if there are only two writers.
-
-@advanced_1267_li
-# If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (once every second by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. The watchdog thread runs with high priority so that a change to the lock file does not get through undetected even if the system is very busy. However, the watchdog thread uses very little resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it.
-
-@advanced_1268_li
-# If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, it will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked.
-
-@advanced_1269_p
-# This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. That said, using that many concurrent threads / processes is not the common use case. Generally, an application should throw an error to the user if it cannot open a database, and not try again in a (fast) loop.
-
-@advanced_1270_h3
-File Locking Method 'Socket'
-
-@advanced_1271_p
-# There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK=SOCKET
to the database URL. The algorithm is:
-
-@advanced_1272_li
-#If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. The port and IP address of the process that opened the database is written into the lock file.
-
-@advanced_1273_li
-#If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method.
-
-@advanced_1274_li
-#If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again.
-
-@advanced_1275_p
-# This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is that if the file is stored on a network share, two processes (running on different computers) could still open the same database files if they do not have a direct TCP/IP connection.
-
-@advanced_1276_h3
-#File Locking Method 'FS'
-
-@advanced_1277_p
-# This is the default mode for version 1.4 and newer. This database file locking mechanism uses a native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow locking the same file multiple times within the same virtual machine, and on some systems native file locking is not supported, or files are not unlocked after a power failure.
-
-@advanced_1278_p
-# To enable this feature, append ;FILE_LOCK=FS
to the database URL.
-
-@advanced_1279_p
-# This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected.
-
-@advanced_1280_h2
-Using Passwords
-
-@advanced_1281_h3
-Using Secure Passwords
-
-@advanced_1282_p
-# Remember that weak passwords can be broken regardless of the encryption and security protocols. Don't use passwords that can be found in a dictionary. Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example:
-
-@advanced_1283_code
-#i'sE2rtPiUKtT
-
-@advanced_1284_p
-# from the sentence it's easy to remember this password if you know the trick
.
-
-@advanced_1285_h3
-Passwords: Using Char Arrays instead of Strings
-
-@advanced_1286_p
-# Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system.
-
-@advanced_1287_p
-# It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file.
-
-@advanced_1288_p
-# This database supports using char arrays instead of string to pass user and file passwords. The following code can be used to do that:
-
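-A sketch of such code (reading the password from the console and clearing the array after use):
-
-    String url = "jdbc:h2:~/test";
-    Properties prop = new Properties();
-    prop.setProperty("user", "sa");
-    System.out.print("Password?");
-    char[] password = System.console().readPassword();
-    prop.put("password", password);
-    Connection conn;
-    try {
-        conn = DriverManager.getConnection(url, prop);
-    } finally {
-        Arrays.fill(password, (char) 0);
-    }
-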
-@advanced_1289_p
-# This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField
.
-
-@advanced_1290_h3
-Passing the User Name and/or Password in the URL
-
-@advanced_1291_p
-# Instead of passing the user name as a separate parameter as in Connection conn = DriverManager. getConnection("jdbc:h2:~/test", "sa", "123");
the user name (and/or password) can be supplied in the URL itself: Connection conn = DriverManager. getConnection("jdbc:h2:~/test;USER=sa;PASSWORD=123");
The settings in the URL override the settings passed as a separate parameter.
-
-@advanced_1292_h2
-#Password Hash
-
-@advanced_1293_p
-# Sometimes the database password needs to be stored in a configuration file (for example in the web.xml
file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash.
-
-@advanced_1294_p
-# To connect using the password hash instead of plain text password, append ;PASSWORD_HASH=TRUE
to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool: @password_hash <upperCaseUserName> <password>
. As an example, if the user name is sa
and the password is test
, run the command @password_hash SA test
. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. To calculate the hash of the file password, run: @password_hash file <filePassword>
.
-
-@advanced_1295_h2
-Protection against SQL Injection
-
-@advanced_1296_h3
-What is SQL Injection
-
-@advanced_1297_p
-# This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. Some applications build SQL statements with embedded user input such as:
-
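-For example (pwd holds unfiltered user input; the table name is illustrative):
-
-    String sql = "SELECT * FROM USERS WHERE PASSWORD='" + pwd + "'";
-    ResultSet rs = conn.createStatement().executeQuery(sql);
-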
-@advanced_1298_p
-# If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password: ' OR ''='
. In this case the statement becomes:
-
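-For example:
-
-    SELECT * FROM USERS WHERE PASSWORD='' OR ''='';
-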
-@advanced_1299_p
-# This condition is always true, no matter what password is stored in the database. For more information about SQL Injection, see Glossary and Links.
-
-@advanced_1300_h3
-Disabling Literals
-
-@advanced_1301_p
-# SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement:
-
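-For example:
-
-    PreparedStatement prep = conn.prepareStatement(
-        "SELECT * FROM USERS WHERE PASSWORD=?");
-    prep.setString(1, pwd);
-    ResultSet rs = prep.executeQuery();
-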
-@advanced_1302_p
-# This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement:
-
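-For example:
-
-    SET ALLOW_LITERALS NONE;
-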
-@advanced_1303_p
-# Afterwards, SQL statements with text and number literals are no longer allowed. That means SQL statements of the form WHERE NAME='abc'
or WHERE CustomerId=10
will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically, and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed: SET ALLOW_LITERALS NUMBERS
. To allow all literals, execute SET ALLOW_LITERALS ALL
(this is the default setting). Literals can only be enabled or disabled by an administrator.
-
-@advanced_1304_h3
-Using Constants
-
-@advanced_1305_p
-# Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT
command. Constants can be defined only when literals are enabled, but used even when literals are disabled. To avoid name clashes with column names, constants can be defined in other schemas:
-
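A hedged sketch (schema, constant, and table names are made-up examples; assumes an open Connection conn):

    Statement stat = conn.createStatement();
    // define the constant in its own schema while literals are still enabled
    stat.execute("CREATE SCHEMA CONST");
    stat.execute("CREATE CONSTANT CONST.ACTIVE VALUE 'Active'");
    // the constant stays usable even after SET ALLOW_LITERALS NONE
    ResultSet rs = stat.executeQuery(
            "SELECT * FROM TEST WHERE NAME = CONST.ACTIVE");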
-@advanced_1306_p
-# Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, the source code is easier to understand and change.
-
-@advanced_1307_h3
-Using the ZERO() Function
-
-@advanced_1308_p
-# It is not required to create a constant for the number 0 as there is already a built-in function ZERO()
:
-
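For example (TEST and ID are hypothetical names; assumes an open Connection conn):

    // equivalent to WHERE ID = 0, but without a number literal
    ResultSet rs = conn.createStatement()
            .executeQuery("SELECT * FROM TEST WHERE ID = ZERO()");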
-@advanced_1309_h2
-#Protection against Remote Access
-
-@advanced_1310_p
-# By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers
.
-
-@advanced_1311_p
-# If you enable remote access using -tcpAllowOthers
or -pgAllowOthers
, please also consider using the options -baseDir, -ifExists
, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir
, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords.
-
-@advanced_1312_p
-# If you enable remote access using -webAllowOthers
, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists
don't protect access to the tools section, nor do they prevent remote shutdown of the web server, changes to the preferences or the saved connection settings, or access to other databases accessible from the system.
-
-@advanced_1313_h2
-#Restricting Class Loading and Usage
-
-@advanced_1314_p
-# By default there is no restriction on loading classes and executing Java code for admins. That means an admin may call system functions such as System.setProperty
by executing:
-
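A sketch of the kind of statement meant here (the alias name is an example; assumes an open Connection conn):

    Statement stat = conn.createStatement();
    // expose the static method as a SQL function, then call it
    stat.execute("CREATE ALIAS SET_PROPERTY FOR \"java.lang.System.setProperty\"");
    stat.execute("CALL SET_PROPERTY('abc', '1')");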
-@advanced_1315_p
-# To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses
in the form of a comma separated list of classes or patterns (items ending with *
). By default all classes are allowed. Example:
-
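A hypothetical example (com.acme.* is a made-up pattern; the classpath and main class may differ in your setup):

    java -cp h2.jar -Dh2.allowedClasses=java.lang.Math,com.acme.* org.h2.tools.Server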
-@advanced_1316_p
-# This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console.
-
-@advanced_1317_h2
-Security Protocols
-
-@advanced_1318_p
-# The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts that already know the underlying security primitives.
-
-@advanced_1319_h3
-User Password Encryption
-
-@advanced_1320_p
-# When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication: Basic and Digest Access Authentication' for more information.
-
-@advanced_1321_p
-# When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords.
-
-@advanced_1322_p
-# The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all.
-
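A rough, non-authoritative sketch of the scheme as described (the exact character encoding and concatenation used by the engine are assumptions here, not its actual code):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.security.SecureRandom;

    public class PasswordHashSketch {
        public static void main(String[] args) throws Exception {
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            // step 1: hash of user name + '@' + password (encoding assumed)
            byte[] userHash = md.digest("SA@test".getBytes(StandardCharsets.UTF_8));
            // step 2: 64-bit random salt, generated when the user is created
            byte[] salt = new byte[8];
            new SecureRandom().nextBytes(salt);
            // step 3: hash of (user-password hash + salt); this is what is stored
            md.reset();
            md.update(userHash);
            md.update(salt);
            byte[] stored = md.digest();
            System.out.println("stored hash: " + stored.length + " bytes");
        }
    }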
-@advanced_1323_h3
-File Encryption
-
-@advanced_1324_p
-# The database files can be encrypted using the AES-128 algorithm.
-
-@advanced_1325_p
-# When a user tries to connect to an encrypted database, the combination of file@
and the file password is hashed using SHA-256. This hash value is transmitted to the server.
-
-@advanced_1326_p
-# When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords.
-
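The iteration step, sketched (byte layout and the concat stand-in helper are assumptions for illustration):

    MessageDigest md = MessageDigest.getInstance("SHA-256");
    byte[] hash = concat(filePasswordHash, salt); // combination described above
    for (int i = 0; i < 1024; i++) {
        hash = md.digest(hash); // key stretching: repeated SHA-256
    }
    // the final value is used as the AES key (see the next paragraph)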
-@advanced_1327_p
-# The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. The reason for using a secret IV is to protect against watermark attacks.
-
-@advanced_1328_p
-# Before saving a block of data (each block is 8 bytes long), the following operations are executed: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm.
-
-@advanced_1329_p
-# When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated and combined with the decrypted text using XOR.
-
-@advanced_1330_p
-# Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block.
-
-@advanced_1331_p
-# Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. When he has write access, he can for example replace pieces of files with pieces of older versions and manipulate data like this.
-
-@advanced_1332_p
-# File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode).
-
-@advanced_1333_h3
-#Wrong Password / User Name Delay
-
-@advanced_1334_p
-# To protect against remote brute force password attacks, the delay after each unsuccessful login doubles. Use the system properties h2.delayWrongPasswordMin
and h2.delayWrongPasswordMax
to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies to connection attempts with a wrong password. Normally there is no delay for a user that knows the correct password, with one exception: after using the wrong password, there is a randomly distributed delay of up to the same length as for a wrong password. This protects against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized, which is also required to protect against parallel attacks.
-
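One way to picture the schedule with the default values (a simplification; n is the hypothetical 0-based count of consecutive wrong passwords):

    // 250 ms, 500 ms, 1000 ms, ... capped at 4000 ms
    long delay = Math.min(250L << n, 4000L);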
-@advanced_1335_p
-# There is only one exception message for both a wrong user name and a wrong password, to make it harder to get the list of user names. It is not possible to tell from the stack trace whether the user name or the password was wrong.
-
-@advanced_1336_h3
-HTTPS Connections
-
-@advanced_1337_p
-# The web server supports HTTP and HTTPS connections using SSLServerSocket
. There is a default self-certified certificate to support an easy starting point, but custom certificates are supported as well.
-
-@advanced_1338_h2
-#TLS Connections
-
-@advanced_1339_p
-# Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket
). By default, anonymous TLS is enabled.
-
-@advanced_1340_p
-# To use your own keystore, set the system properties javax.net.ssl.keyStore
and javax.net.ssl.keyStorePassword
before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information.
-
-@advanced_1341_p
-# To disable anonymous TLS, set the system property h2.enableAnonymousTLS
to false.
-
-@advanced_1342_h2
-Universally Unique Identifiers (UUID)
-
-@advanced_1343_p
-# This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function RANDOM_UUID()
. Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values:
-
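The program referred to above is not reproduced in this text; a small stand-in using the standard birthday paradox approximation p = 1 - exp(-n^2 / (2 * 2^122)) yields the values in the table below:

    public class UuidCollisionEstimate {
        public static void main(String[] args) {
            double possible = Math.pow(2, 122); // distinct random UUID values
            for (int exp : new int[] { 36, 41, 46 }) {
                double n = Math.pow(2, exp);
                // birthday paradox approximation: p = 1 - exp(-n^2 / (2 * possible))
                double p = -Math.expm1(-n * n / (2 * possible));
                System.out.printf("2^%d UUIDs -> probability of a duplicate: %.1e%n", exp, p);
            }
        }
    }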
-@advanced_1344_p
-# Some values are:
-
-@advanced_1345_th
-#Number of UUIDs
-
-@advanced_1346_th
-#Probability of Duplicates
-
-@advanced_1347_td
-#2^36=68'719'476'736
-
-@advanced_1348_td
-#0.000'000'000'000'000'4
-
-@advanced_1349_td
-#2^41=2'199'023'255'552
-
-@advanced_1350_td
-#0.000'000'000'000'4
-
-@advanced_1351_td
-#2^46=70'368'744'177'664
-
-@advanced_1352_td
-#0.000'000'000'4
-
-@advanced_1353_p
-# To help non-mathematicians understand what those numbers mean, here is a comparison: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, which means the probability is about 0.000'000'000'06.
-
-@advanced_1354_h2
-#Spatial Features
-
-@advanced_1355_p
-# H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with JTS support, you need to download the JTS 1.13 jar file and place it in the h2 bin directory. Then edit the h2.sh
file as follows:
-
-@advanced_1356_p
-# Here is an example SQL script to create a table with a spatial column and index:
-
-@advanced_1357_p
-# To query the table using geometry envelope intersection, use the operation &&
, as in PostGIS:
-
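A hedged example covering both the script and the query (table, column, and index names are made up; requires the JTS jar on the classpath and an open Connection conn):

    Statement stat = conn.createStatement();
    stat.execute("CREATE TABLE GEO_TABLE(GID SERIAL, THE_GEOM GEOMETRY)");
    stat.execute("INSERT INTO GEO_TABLE(THE_GEOM) VALUES ('POINT(1 1)')");
    stat.execute("CREATE SPATIAL INDEX GEO_IDX ON GEO_TABLE(THE_GEOM)");
    // envelope intersection, as in PostGIS:
    ResultSet rs = stat.executeQuery("SELECT * FROM GEO_TABLE"
            + " WHERE THE_GEOM && 'POLYGON((0 0, 2 0, 2 2, 0 2, 0 0))'");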
-@advanced_1358_p
-# You can verify that the spatial index is used using the "explain plan" feature:
-
-@advanced_1359_p
-# For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory.
-
-@advanced_1360_h2
-#Recursive Queries
-
-@advanced_1361_p
-# H2 has experimental support for recursive queries using so called "common table expressions" (CTE). Examples:
-
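A sketch of a recursive CTE following the rules listed below (assumes an open Connection conn):

    ResultSet rs = conn.createStatement().executeQuery(
            "WITH RECURSIVE T(N) AS ("
            + " SELECT 1"                          // non-recursive first part
            + " UNION ALL"                         // must be UNION ALL
            + " SELECT N + 1 FROM T WHERE N < 10"  // recursion in the second part
            + ") SELECT N FROM T");
    while (rs.next()) {
        System.out.println(rs.getString(1)); // columns are VARCHAR here
    }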
-@advanced_1362_p
-# Limitations: Recursive queries need to be of the type UNION ALL
, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR
, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM
with recursive queries are not supported. Parameters are only supported within the last SELECT
statement (a workaround is to use session variables like @start
within the table expression). The syntax is:
-
-@advanced_1363_h2
-Settings Read from System Properties
-
-@advanced_1364_p
-# Some settings of the database can be set on the command line using -DpropertyName=value
. It is usually not required to change those settings manually. The settings are case sensitive. Example:
-
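For example (the property name is one of the h2.* settings; the value and main class are illustrative):

    java -cp h2.jar -Dh2.serverCachedObjects=256 org.h2.tools.Server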
-@advanced_1365_p
-# The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS
.
-
-@advanced_1366_p
-# For a complete list of settings, see SysProperties.
-
-@advanced_1367_h2
-#Setting the Server Bind Address
-
-@advanced_1368_p
-# Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress
. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported.
-
-@advanced_1369_h2
-#Pluggable File System
-
-@advanced_1370_p
-# This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. The following file systems are included:
-
-@advanced_1371_code
-#zip:
-
-@advanced_1372_li
-# read-only zip-file based file system. Format: zip:/zipFileName!/fileName
.
-
-@advanced_1373_code
-#split:
-
-@advanced_1374_li
-# file system that splits files into 1 GB files (stackable with other file systems).
-
-@advanced_1375_code
-#nio:
-
-@advanced_1376_li
-# file system that uses FileChannel
instead of RandomAccessFile
(faster in some operating systems).
-
-@advanced_1377_code
-#nioMapped:
-
-@advanced_1378_li
-# file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system with a 32-bit JVM. To work around this limitation, combine it with the split file system: split:nioMapped:test
.
-
-@advanced_1379_code
-#memFS:
-
-@advanced_1380_li
-# in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself).
-
-@advanced_1381_code
-#memLZF:
-
-@advanced_1382_li
-# compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself).
-
-@advanced_1383_p
-# As an example, to use the nio
file system, use the following database URL: jdbc:h2:nio:~/test
.
-
-@advanced_1384_p
-# To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase
, and call the method FilePath.register
before using it.
-
-@advanced_1385_p
-# For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example: jar:file:///c:/temp/example.zip!/org/example/nested.csv
. To read a stream from the classpath, use the prefix classpath:
, as in classpath:/org/h2/samples/newsfeed.sql
.
-
-@advanced_1386_h2
-#Split File System
-
-@advanced_1387_p
-# The file system prefix split:
is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows:
-
-@advanced_1388_code
-#<fileName>
-
-@advanced_1389_li
-# (first block, is always created)
-
-@advanced_1390_code
-#<fileName>.1.part
-
-@advanced_1391_li
-# (second block)
-
-@advanced_1392_p
-# More physical files (*.2.part, *.3.part
) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes, which is also called 1 GiB or 1 GB. However, this can be changed if required by specifying the block size in the file name. The file name format is: split:<x>:<fileName>
where the file size per block is 2^x. For 1 MiB block sizes, use x = 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks: split:20:test.h2.db
. An example database URL for this case is jdbc:h2:split:20:~/test
.
-
-@advanced_1393_h2
-Database Upgrade
-
-@advanced_1394_p
-# In version 1.2, H2 introduced a new file store implementation which is incompatible with the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connection to an older database will result in a conversion process.
-
-@advanced_1395_p
-# The conversion itself is done internally via 'script to'
and 'runscript from'
. After the conversion process, the files will be renamed from
-
-@advanced_1396_code
-#dbName.data.db
-
-@advanced_1397_li
-# to dbName.data.db.backup
-
-@advanced_1398_code
-#dbName.index.db
-
-@advanced_1399_li
-# to dbName.index.db.backup
-
-@advanced_1400_p
-# by default. Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via
-
-@advanced_1401_code
-#org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean)
-
-@advanced_1402_code
-#org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean)
-
-@advanced_1403_p
-# prior to opening a database connection.
-
-@advanced_1404_p
-# Since version 1.2.140 it is possible to let the old h2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc:h2v1_1:
(the JDBC driver class is org.h2.upgrade.v1_1.Driver
). To automatically connect using the old version (without upgrading) if a database with the old format exists, and use the new version otherwise, append ;NO_UPGRADE=TRUE
to the database URL. Please note the old driver did not process the system property "h2.baseDir"
correctly, so that using this setting is not supported when upgrading.
-
-@advanced_1405_h2
-#Java Objects Serialization
-
-@advanced_1406_p
-# Java objects serialization is enabled by default for columns of type OTHER
, using standard Java serialization/deserialization semantics.
-
-@advanced_1407_p
-# To disable this feature set the system property h2.serializeJavaObject=false
(default: true).
-
-@advanced_1408_p
-# Serialization and deserialization of java objects is customizable both at the system level and at the database level by providing a JavaObjectSerializer implementation:
-
-@advanced_1409_li
-# At the system level, set the system property h2.javaObjectSerializer
with the fully qualified name of the JavaObjectSerializer
interface implementation. It will be used over the entire JVM session to (de)serialize java objects being stored in columns of type OTHER. Example: h2.javaObjectSerializer=com.acme.SerializerClassName
.
-
-@advanced_1410_li
-# At the database level, execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName'
or append ;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'
to the database URL: jdbc:h2:~/test;JAVA_OBJECT_SERIALIZER='com.acme.SerializerClassName'
.
-
-@advanced_1411_p
-# Please note that this SQL statement can only be executed before any tables are defined.
-
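A minimal sketch of such an implementation, using plain Java serialization (the class name matches the example above; the interface is org.h2.api.JavaObjectSerializer):

    import java.io.*;
    import org.h2.api.JavaObjectSerializer;

    public class SerializerClassName implements JavaObjectSerializer {
        @Override
        public byte[] serialize(Object obj) throws Exception {
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            try (ObjectOutputStream oos = new ObjectOutputStream(out)) {
                oos.writeObject(obj); // standard Java serialization
            }
            return out.toByteArray();
        }
        @Override
        public Object deserialize(byte[] bytes) throws Exception {
            try (ObjectInputStream ois = new ObjectInputStream(
                    new ByteArrayInputStream(bytes))) {
                return ois.readObject();
            }
        }
    }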
-@advanced_1412_h2
-#Limits and Limitations
-
-@advanced_1413_p
-# This database has the following known limitations:
-
-@advanced_1414_li
-#Database file size limit: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit includes CLOB and BLOB data.
-
-@advanced_1415_li
-#The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is a limitation of the file system. The database provides a workaround for this problem: use the file name prefix split:
. In that case files are split into files of 1 GB by default. An example database URL is: jdbc:h2:split:~/test
.
-
-@advanced_1416_li
-#The maximum number of rows per table is 2^64.
-
-@advanced_1417_li
-#The maximum number of open transactions is 65535.
-
-@advanced_1418_li
-#Main memory requirements: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size.
-
-@advanced_1419_li
-#Limit on the complexity of SQL statements. Statements of the following form will result in a stack overflow exception:
-
-@advanced_1420_li
-#There is no limit for the following entities, except the memory and storage capacity: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement.
-
-@advanced_1421_li
-#Querying from the metadata tables is slow if there are many tables (thousands).
-
-@advanced_1422_li
-#For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database.
-
-@advanced_1423_h2
-Glossary and Links
-
-@advanced_1424_th
-Term
-
-@advanced_1425_th
-Description
-
-@advanced_1426_td
-AES-128
-
-@advanced_1427_td
-#A block encryption algorithm. See also: Wikipedia: AES
-
-@advanced_1428_td
-Birthday Paradox
-
-@advanced_1429_td
-#Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also: Wikipedia: Birthday Paradox
-
-@advanced_1430_td
-Digest
-
-@advanced_1431_td
-#Protocol to protect a password (but not to protect data). See also: RFC 2617: HTTP Digest Access Authentication
-
-@advanced_1432_td
-GCJ
-
-@advanced_1433_td
-#Compiler for Java. GNU Compiler for the Java programming language (GCJ) and NativeJ (commercial)
-
-@advanced_1434_td
-HTTPS
-
-@advanced_1435_td
-#A protocol to provide security to HTTP connections. See also: RFC 2818: HTTP Over TLS
-
-@advanced_1436_td
-Modes of Operation
-
-@advanced_1437_a
-#Wikipedia: Block cipher modes of operation
-
-@advanced_1438_td
-Salt
-
-@advanced_1439_td
-#Random number to increase the security of passwords. See also: Wikipedia: Key derivation function
-
-@advanced_1440_td
-SHA-256
-
-@advanced_1441_td
-#A cryptographic one-way hash function. See also: Wikipedia: SHA hash functions
-
-@advanced_1442_td
-SQL Injection
-
-@advanced_1443_td
-#A security vulnerability where an application embeds SQL statements or expressions in user input. See also: Wikipedia: SQL Injection
-
-@advanced_1444_td
-Watermark Attack
-
-@advanced_1445_td
-#Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. For more information, search in the internet for 'watermark attack cryptoloop'
-
-@advanced_1446_td
-SSL/TLS
-
-@advanced_1447_td
-#Secure Sockets Layer / Transport Layer Security. See also: Java Secure Socket Extension (JSSE)
-
-@architecture_1000_h1
-#Architecture
-
-@architecture_1001_a
-# Introduction
-
-@architecture_1002_a
-# Top-down overview
-
-@architecture_1003_a
-# JDBC driver
-
-@architecture_1004_a
-# Connection/session management
-
-@architecture_1005_a
-# Command execution and planning
-
-@architecture_1006_a
-# Table/index/constraints
-
-@architecture_1007_a
-# Undo log, redo log, and transactions layer
-
-@architecture_1008_a
-# B-tree engine and page-based storage allocation
-
-@architecture_1009_a
-# Filesystem abstraction
-
-@architecture_1010_h2
-#Introduction
-
-@architecture_1011_p
-# H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store.
-
-@architecture_1012_p
-# As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine.
-
-@architecture_1013_h2
-#Top-down Overview
-
-@architecture_1014_p
-# Working from the top down, the layers look like this:
-
-@architecture_1015_li
-#JDBC driver.
-
-@architecture_1016_li
-#Connection/session management.
-
-@architecture_1017_li
-#SQL Parser.
-
-@architecture_1018_li
-#Command execution and planning.
-
-@architecture_1019_li
-#Table/Index/Constraints.
-
-@architecture_1020_li
-#Undo log, redo log, and transactions layer.
-
-@architecture_1021_li
-#B-tree engine and page-based storage allocation.
-
-@architecture_1022_li
-#Filesystem abstraction.
-
-@architecture_1023_h2
-#JDBC Driver
-
-@architecture_1024_p
-# The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx
-
-@architecture_1025_h2
-#Connection/session management
-
-@architecture_1026_p
-# The primary classes of interest are:
-
-@architecture_1027_th
-#Package
-
-@architecture_1028_th
-Description
-
-@architecture_1029_td
-#org.h2.engine.Database
-
-@architecture_1030_td
-#the root/global class
-
-@architecture_1031_td
-#org.h2.engine.SessionInterface
-
-@architecture_1032_td
-#abstracts over the differences between embedded and remote sessions
-
-@architecture_1033_td
-#org.h2.engine.Session
-
-@architecture_1034_td
-#local/embedded session
-
-@architecture_1035_td
-#org.h2.engine.SessionRemote
-
-@architecture_1036_td
-#remote session
-
-@architecture_1037_h2
-#Parser
-
-@architecture_1038_p
-# The parser lives in org.h2.command.Parser
. It uses a straightforward recursive-descent design.
-
-@architecture_1039_p
-# See the Wikipedia page on recursive-descent parsers.
-
-@architecture_1040_h2
-#Command execution and planning
-
-@architecture_1041_p
-# Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. The primary packages of interest are:
-
-@architecture_1042_th
-#Package
-
-@architecture_1043_th
-Description
-
-@architecture_1044_td
-#org.h2.command.ddl
-
-@architecture_1045_td
-#Commands that modify schema data structures
-
-@architecture_1046_td
-#org.h2.command.dml
-
-@architecture_1047_td
-#Commands that modify data
-
-@architecture_1048_h2
-#Table/Index/Constraints
-
-@architecture_1049_p
-# One thing to note here is that indexes are simply stored as special kinds of tables.
-
-@architecture_1050_p
-# The primary packages of interest are:
-
-@architecture_1051_th
-#Package
-
-@architecture_1052_th
-Description
-
-@architecture_1053_td
-#org.h2.table
-
-@architecture_1054_td
-#Implementations of different kinds of tables
-
-@architecture_1055_td
-#org.h2.index
-
-@architecture_1056_td
-#Implementations of different kinds of indices
-
-@architecture_1057_h2
-#Undo log, redo log, and transactions layer
-
-@architecture_1058_p
-# We have a transaction log, which is shared among all sessions. See also http://en.wikipedia.org/wiki/Transaction_log http://h2database.com/html/grammar.html#set_log
-
-@architecture_1059_p
-# We also have an undo log, which is per session, to undo an operation (an update that fails, for example) and to roll back a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses its own "list of operations" (usually in-memory).
-
-@architecture_1060_p
-# With the MVStore, this is no longer needed (just the transaction log).
-
-@architecture_1061_h2
-#B-tree engine and page-based storage allocation.
-
-@architecture_1062_p
-# The primary package of interest is org.h2.store
.
-
-@architecture_1063_p
-# This implements a storage mechanism which allocates pages of storage (typically 2 KB in size) and also implements a B-tree over those pages to allow fast retrieval and update.
-
-@architecture_1064_h2
-#Filesystem abstraction.
-
-@architecture_1065_p
-# The primary class of interest is org.h2.store.FileStore
.
-
-@architecture_1066_p
-# This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same.
-
-@build_1000_h1
-Build
-
-@build_1001_a
-# Portability
-
-@build_1002_a
-# Environment
-
-@build_1003_a
-# Building the Software
-
-@build_1004_a
-# Build Targets
-
-@build_1005_a
-# Using Maven 2
-
-@build_1006_a
-# Using Eclipse
-
-@build_1007_a
-# Translating
-
-@build_1008_a
-# Providing Patches
-
-@build_1009_a
-# Reporting Problems or Requests
-
-@build_1010_a
-# Automated Build
-
-@build_1011_a
-# Generating Railroad Diagrams
-
-@build_1012_h2
-Portability
-
-@build_1013_p
-# This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ.
-
-@build_1014_h2
-Environment
-
-@build_1015_p
-# To run this database, a Java Runtime Environment (JRE) version 1.6 or higher is required.
-
-@build_1016_p
-# To create the database executables, the following software stack was used. However, it is not required to install this software to use the database.
-
-@build_1017_li
-#Mac OS X and Windows
-
-@build_1018_a
-#Sun JDK Version 1.6 and 1.7
-
-@build_1019_a
-#Eclipse
-
-@build_1020_li
-#Eclipse Plugins: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage
-
-@build_1021_a
-#Emma Java Code Coverage
-
-@build_1022_a
-#Mozilla Firefox
-
-@build_1023_a
-#OpenOffice
-
-@build_1024_a
-#NSIS
-
-@build_1025_li
-# (Nullsoft Scriptable Install System)
-
-@build_1026_a
-#Maven
-
-@build_1027_h2
-Building the Software
-
-@build_1028_p
-# You need to install a JDK, for example the Sun JDK version 1.6 or 1.7. Ensure that the Java binary directory is included in the PATH
environment variable, and that the environment variable JAVA_HOME
points to your Java installation. On the command line, go to the directory h2
and execute the following command:
-
-@build_1029_p
-# For Linux and OS X, use ./build.sh
instead of build
.
-
-@build_1030_p
-# You will get a list of targets. If you want to build the jar
file, execute (Windows):
-
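That is (on Windows; use ./build.sh on Linux and OS X as noted below):

    build jar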
-@build_1031_p
-# To run the build tool in shell mode, use the command line option -
as in ./build.sh -
.
-
-@build_1032_h3
-#Switching the Source Code
-
-@build_1033_p
-# The source code uses Java 1.6 features. To switch the source code to the installed version of Java, run:
-
-@build_1034_h2
-#Build Targets
-
-@build_1035_p
-# The build system can generate smaller jar files as well. The following targets are currently supported:
-
-@build_1036_code
-#jarClient
-
-@build_1037_li
-# creates the file h2client.jar
. This only contains the JDBC client.
-
-@build_1038_code
-#jarSmall
-
-@build_1039_li
-# creates the file h2small.jar
. This only contains the embedded database. Debug information is disabled.
-
-@build_1040_code
-#jarJaqu
-
-@build_1041_li
-# creates the file h2jaqu.jar
. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu.
-
-@build_1042_code
-#javadocImpl
-
-@build_1043_li
-# creates the Javadocs of the implementation.
-
-@build_1044_p
-# To create the file h2client.jar
, go to the directory h2
and execute the following command:
-
-@build_1045_h3
-#Using Lucene 2 / 3
-
-@build_1046_p
-# Both Apache Lucene 2 and Lucene 3 are supported. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. To use a different version of Lucene when compiling, it needs to be specified as follows:
-
-@build_1047_h2
-Using Maven 2
-
-@build_1048_h3
-Using a Central Repository
-
-@build_1049_p
-# You can include the database in your Maven 2 project as a dependency. Example:
-
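A typical dependency entry (the version shown is just an example; pick a current release):

    <dependency>
        <groupId>com.h2database</groupId>
        <artifactId>h2</artifactId>
        <version>1.4.187</version>
    </dependency>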
-@build_1050_p
-# New versions of this database are first uploaded to http://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however after a new release it may take a few hours before they are available there.
-
-@build_1051_h3
-#Maven Plugin to Start and Stop the TCP Server
-
-@build_1052_p
-# A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. To start the H2 server, use:
-
-@build_1053_p
-# To stop the H2 server, use:
-
-@build_1054_h3
-Using Snapshot Versions
-
-@build_1055_p
-# To build a h2-*-SNAPSHOT.jar
file and upload it to the local Maven 2 repository, execute the following command:
-
-@build_1056_p
-# Afterwards, you can include the database in your Maven 2 project as a dependency:
-
-@build_1057_h2
-#Using Eclipse
-
-@build_1058_p
-# To create an Eclipse project for H2, use the following steps:
-
-@build_1059_li
-#Install Subversion and Eclipse.
-
-@build_1060_li
-#Get the H2 source code from the Subversion repository:
-
-@build_1061_code
-#svn checkout http://h2database.googlecode.com/svn/trunk h2database-read-only
-
-@build_1062_li
-#Download all dependencies (Windows):
-
-@build_1063_code
-#build.bat download
-
-@build_1064_li
-#In Eclipse, create a new Java project from existing source code: File, New, Project, Java Project, Create project from existing source
.
-
-@build_1065_li
-#Select the h2
folder, click Next
and Finish
.
-
-@build_1066_li
-#To resolve com.sun.javadoc
import statements, you may need to manually add the file <java.home>/../lib/tools.jar
to the build path.
-
-@build_1067_h2
-#Translating
-
-@build_1068_p
-# The translation of this software is split into the following parts:
-
-@build_1069_li
-#H2 Console: src/main/org/h2/server/web/res/_text_*.prop
-
-@build_1070_li
-#Error messages: src/main/org/h2/res/_messages_*.prop
-
-@build_1071_p
-# To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop
file to the Google Group. The web site is currently translated using Google.
-
-@build_1072_h2
-#Providing Patches
-
-@build_1073_p
-# If you would like to provide patches, please consider the following guidelines to simplify merging them:
-
-@build_1074_li
-#Only use Java 6 features (do not use Java 7) (see Environment).
-
-@build_1075_li
-#Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml
.
-
-@build_1076_li
-#A template of the Eclipse settings are in src/installer/eclipse.settings/*
. If you want to use them, you need to copy them to the .settings
directory. The formatting options (eclipseCodeStyle
) are also included.
-
-@build_1077_li
-#Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java
. For SQL level tests, see src/test/org/h2/test/test.in.txt
or testSimple.in.txt
.
-
-@build_1078_li
-#The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above), or use the build target coverage
.
-
-@build_1079_li
-#Verify that you did not break other features: run the test cases by executing build test
.
-
-@build_1080_li
-#Provide end user documentation if required (src/docsrc/html/*
).
-
-@build_1081_li
-#Document grammar changes in src/docsrc/help/help.csv
-
-@build_1082_li
-#Provide a change log entry (src/docsrc/html/changelog.html
).
-
-@build_1083_li
-#Verify the spelling using build spellcheck
. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt
.
-
-@build_1084_li
-#Run src/installer/buildRelease
to find and fix formatting errors.
-
-@build_1085_li
-#Verify the formatting using build docs
and build javadoc
.
-
-@build_1086_li
-#Submit patches as .patch
files (compressed if big). To create a patch using Eclipse, use Team / Create Patch.
-
-@build_1087_p
-# For legal reasons, patches need to be public in the form of an email to the group, or in the form of an issue report or attachment. Significant contributions need to include the following statement:
-
-@build_1088_p
-# "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http://h2database.com/html/license.html)."
-
-@build_1089_h2
-#Reporting Problems or Requests
-
-@build_1090_p
-# Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request:
-
-@build_1091_li
-#For bug reports, please provide a short, self contained, correct (compilable), example of the problem.
-
-@build_1092_li
-#Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch.
-
-@build_1093_li
-#Before posting problems, check the FAQ and do a Google search.
-
-@build_1094_li
-#When you get an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s).
-
-@build_1095_li
-#When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but such that the problem can still be reproduced. As a template, use: HelloWorld.java. Methods that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method.
-
-@build_1096_li
-#For large attachments, use a public temporary storage such as Rapidshare.
-
-@build_1097_li
-#Google Group versus issue tracking: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). Please note that only a few people monitor the issue tracking system.
-
-@build_1098_li
-#For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX:+HeapDumpOnOutOfMemoryError
(to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT).
-
-@build_1099_li
-#It may take a few days to get an answer. Please do not double post.
-
-@build_1100_h2
-#Automated Build
-
-@build_1101_p
-# This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword=... uploadBuild
. The last results are available here:
-
-@build_1102_a
-#Test Output
-
-@build_1103_a
-#Code Coverage Summary
-
-@build_1104_a
-#Code Coverage Details (download, 1.3 MB)
-
-@build_1105_a
-#Build Newsfeed
-
-@build_1106_a
-#Latest Jar File (download, 1 MB)
-
-@build_1107_h2
-#Generating Railroad Diagrams
-
-@build_1108_p
-# The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows:
-
-@build_1109_li
-#The BNF parser (org.h2.bnf.Bnf
) reads and parses the BNF from the file help.csv
.
-
-@build_1110_li
-#The page parser (org.h2.server.web.PageParser
) reads the template HTML file and fills in the diagrams.
-
-@build_1111_li
-#The rail images (one straight, four junctions, two turns) are generated using a simple Java application.
-
-@build_1112_p
-# To generate railroad diagrams for other grammars, see the package org.h2.jcr
. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification.
-
-@changelog_1000_h1
-Change Log
-
-@changelog_1001_h2
-#Next Version (unreleased)
-
-@changelog_1002_li
-#-
-
-@changelog_1003_h2
-#Version 1.4.187 Beta (2015-04-10)
-
-@changelog_1004_li
-#MVStore: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads.
-
-@changelog_1005_li
-#Results with CLOB or BLOB data are no longer reused.
-
-@changelog_1006_li
-#References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a longer time.
-
-@changelog_1007_li
-#MVStore: when committing a session that removed LOB values, changes were flushed unnecessarily.
-
-@changelog_1008_li
-#Issue 610: possible integer overflow in WriteBuffer.grow().
-
-@changelog_1009_li
-#Issue 609: the spatial index did not support NULL (ClassCastException).
-
-@changelog_1010_li
-#MVStore: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database.
-
-@changelog_1011_li
-#MVStore: updates that affected many rows were slow in some cases if there was a secondary index.
-
-@changelog_1012_li
-#Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS".
-
-@changelog_1013_li
-#Issue 603: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]".
-
-@changelog_1014_li
-#When using the MVStore, running a SQL script generated by the Recover tool from a PageStore file failed with a strange error message (NullPointerException); now a clear error message is shown.
-
-@changelog_1015_li
-#Issue 605: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init.
-
-@changelog_1016_li
-#Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example: "select * from a as x, b as x".
-
-@changelog_1017_li
-#The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema.
-
-@changelog_1018_li
-#Issue 599: the condition "in(x, y)" could not be used in the select list when using "group by".
-
-@changelog_1019_li
-#The LIRS cache could grow larger than the allocated memory.
-
-@changelog_1020_li
-#A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene.
-
-@changelog_1021_li
-#MVStore: use RandomAccessFile file system if the file name starts with "file:".
-
-@changelog_1022_li
-#Allow DATEADD to take a long value for count when manipulating milliseconds.
-
-@changelog_1023_li
-#When using MV_STORE=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be.
-
-@changelog_1024_li
-#Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD=TRUE could throw an exception.
-
-@changelog_1025_li
-#Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs.
-
-@changelog_1026_li
-#Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles.
-
-@changelog_1027_li
-#Fix bug in "jdbc:h2:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB.
-
-@changelog_1028_h2
-#Version 1.4.186 Beta (2015-03-02)
-
-@changelog_1029_li
-#The Servlet API 3.0.1 is now used, instead of 2.4.
-
-@changelog_1030_li
-#MVStore: old chunks no longer removed in append-only mode.
-
-@changelog_1031_li
-#MVStore: the cache for page references could grow far too big, resulting in out of memory in some cases.
-
-@changelog_1032_li
-#MVStore: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily.
-
-@changelog_1033_li
-#MVStore: the maximum cache size was artificially limited to 2 GB (due to an integer overflow).
-
-@changelog_1034_li
-#MVStore / TransactionStore: concurrent updates could result in a "Too many open transactions" exception.
-
-@changelog_1035_li
-#StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name.
-
-@changelog_1036_li
-#MVStore: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit).
-
-@changelog_1037_li
-#The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, fewer references are needed.
-
-@changelog_1038_li
-#Tables without columns didn't work. (The use case for such tables is testing.)
-
-@changelog_1039_li
-#The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration.
-
-@changelog_1040_li
-#Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file.
-
-@changelog_1041_li
-#In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example: select * from dual join(select x from dual) on 1=1
-
-@changelog_1042_li
-#Issue 598: parser fails on timestamp "24:00:00.1234" - prevent the creation of out-of-range time values.
-
-@changelog_1043_li
-#Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz.
-
-@changelog_1044_li
-#Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred).
-
-@changelog_1045_li
-#PostgreSQL compatibility: generate_series (as an alias for system_range). Patch by litailang.
-
-@changelog_1046_li
-#Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel.
-
-@changelog_1047_h2
-#Version 1.4.185 Beta (2015-01-16)
-
-@changelog_1048_li
-#In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example: select 0 as x from system_range(1, 2) d group by d.x;
-
-@changelog_1049_li
-#New connection setting "REUSE_SPACE" (default: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This makes it possible to roll back to a previous state of the database by truncating the database file.
-
-@changelog_1050_li
-#Issue 587: MVStore: concurrent compaction and store operations could result in an IllegalStateException.
-
-@changelog_1051_li
-#Issue 594: Profiler.copyInThread does not work properly.
-
-@changelog_1052_li
-#Script tool: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage).
-
-@changelog_1053_li
-#Script tool: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen.
-
-@changelog_1054_li
-#Fix bug in PageStore#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov.
-
-@changelog_1055_li
-#Issue 552: Implement BIT_AND and BIT_OR aggregate functions.
-
-@changelog_1056_h2
-#Version 1.4.184 Beta (2014-12-19)
-
-@changelog_1057_li
-#In version 1.3.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. This bug was introduced by supporting "rownum" in views and derived tables.
-
-@changelog_1058_li
-#MVStore: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison.
-
-@changelog_1059_li
-#Reading from a StreamStore now throws an IOException if the underlying data doesn't exist.
-
-@changelog_1060_li
-#MVStore: if there is an exception while saving, the store is now in all cases immediately closed.
-
-@changelog_1061_li
-#MVStore: the dump tool could go into an endless loop for some files.
-
-@changelog_1062_li
-#MVStore: recovery for a database with many CLOB or BLOB entries is now much faster.
-
-@changelog_1063_li
-#Group by with a quoted select column name alias didn't work. Example: select 1 "a" from dual group by "a"
-
-@changelog_1064_li
-#Auto-server mode: the host name is now stored in the .lock.db file.
-
-@changelog_1065_h2
-#Version 1.4.183 Beta (2014-12-13)
-
-@changelog_1066_li
-#MVStore: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data.
-
-@changelog_1067_li
-#The built-in functions "power" and "radians" now always return a double.
-
-@changelog_1068_li
-#Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id = 1
-
-@changelog_1069_li
-#MVStore: the Recover tool can now deal with more types of corruption in the file.
-
-@changelog_1070_li
-#MVStore: the TransactionStore now first needs to be initialized before it can be used.
-
-@changelog_1071_li
-#Views and derived tables with equality and range conditions on the same columns did not work properly. Example: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x = 1
-
-@changelog_1072_li
-#The database URL setting PAGE_SIZE setting is now also used for the MVStore.
-
-@changelog_1073_li
-#MVStore: the default page split size for persistent stores is now 4096 (previously 16 KB). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version).
-
-@changelog_1074_li
-#With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work.
-
-@changelog_1075_li
-#MVStore: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwritten, even if the reference counting algorithm does not work properly.
-
-@changelog_1076_li
-#In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work.
-
-@changelog_1077_li
-#In the multi-threaded mode, database metadata operations did sometimes not work if the schema was changed at the same time (for example, if tables were dropped).
-
-@changelog_1078_li
-#Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode).
-
-@changelog_1079_li
-#The MVStoreTool could throw an IllegalArgumentException.
-
-@changelog_1080_li
-#Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem.
-
-@changelog_1081_li
-#H2 Console: the built-in web server did not work properly if an unknown file was requested.
-
-@changelog_1082_li
-#MVStore: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately.
-
-@changelog_1083_li
-#MVStore: support for concurrent reads and writes is now enabled by default.
-
-@changelog_1084_li
-#Server mode: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot.
-
-@changelog_1085_li
-#H2 Console and server mode: SSL is now disabled and TLS is used to protect against the Poodle SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man in the middle attacks.
-
-@changelog_1086_li
-#MVStore: the R-tree did not correctly measure the memory usage.
-
-@changelog_1087_li
-#MVStore: compacting a store with an R-tree did not always work.
-
-@changelog_1088_li
-#Issue 581: When running in LOCK_MODE=0, JdbcDatabaseMetaData#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false
-
-@changelog_1089_li
-#Fix bug which could generate deadlocks when multiple connections accessed the same table.
-
-@changelog_1090_li
-#Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command
-
-@changelog_1091_li
-#Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations
-
-@changelog_1092_li
-#Fix "USE schema" command for MySQL compatibility, patch by mfulton
-
-@changelog_1093_li
-#Parse and ignore the ROW_FORMAT=DYNAMIC MySQL syntax, patch by mfulton
-
-@changelog_1094_h2
-#Version 1.4.182 Beta (2014-10-17)
-
-@changelog_1095_li
-#MVStore: improved error messages and logging; improved behavior if there is an error when serializing objects.
-
-@changelog_1096_li
-#OSGi: the MVStore packages are now exported.
-
-@changelog_1097_li
-#With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table.
-
-@changelog_1098_li
-#When using the multi-threaded option, the exception "Unexpected code path" could be thrown, especially if the option "analyze_auto" was set to a low value.
-
-@changelog_1099_li
-#In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed.
-
-@changelog_1100_li
-#DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available.
-
-@changelog_1101_li
-#Issue 584: the error message for a wrong sequence definition was wrong.
-
-@changelog_1102_li
-#CSV tool: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator.
-
-@changelog_1103_li
-#Descending indexes on MVStore tables did not work properly.
-
-@changelog_1104_li
-#Issue 579: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore.
-
-@changelog_1105_li
-#Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x.
-
-@changelog_1106_li
-#The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns.
-
-@changelog_1107_li
-#Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes.
-
-@changelog_1108_li
-#Issue 572: MySQL compatibility for "order by" in update statements.
-
-@changelog_1109_li
-#The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time}", or "{ts 'timestamp'}", or "{d 'data'}", then both the client and the server need to be upgraded to version 1.4.181 or later.
-
-@changelog_1110_h2
-#Version 1.4.181 Beta (2014-08-06)
-
-@changelog_1111_li
-#Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch!
-
-@changelog_1112_li
-#Writing to the trace file is now faster, especially with the debug level.
-
-@changelog_1113_li
-#The database option "defrag_always=true" did not work with the MVStore.
-
-@changelog_1114_li
-#The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later.
-
-@changelog_1115_li
-#File system abstraction: support replacing existing files using move (currently not for Windows).
-
-@changelog_1116_li
-#The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental.
-
-@changelog_1117_li
-#The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still work in progress, feedback is welcome!
-
-@changelog_1118_li
-#Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems' block size (PageStore only; the MVStore already used 4096).
-
-@changelog_1119_li
-#Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out of box experience for people with more powerful machines.
-
-@changelog_1120_li
-#Handle tabs like 4 spaces in web console, patch by Martin Grajcar.
-
-@changelog_1121_li
-#Issue 573: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1.
-
-@changelog_1122_h2
-#Version 1.4.180 Beta (2014-07-13)
-
-@changelog_1123_li
-#MVStore: the store is now auto-compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress.
-
-@changelog_1124_li
-#Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database.
-
-@changelog_1125_li
-#MVStore: an IndexOutOfBoundsException could sometimes occur in MVMap.openVersion when concurrently accessing the store.
-
-@changelog_1126_li
-#The LIRS cache now re-sizes the internal hash map if needed.
-
-@changelog_1127_li
-#Optionally persist session history in the H2 console. (patch from Martin Grajcar)
-
-@changelog_1128_li
-#Add a client-info property to get the number of servers currently in the cluster and which servers are available. (patch from Nikolaj Fogh)
-
-@changelog_1129_li
-#Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth).
-
-@changelog_1130_li
-#Issue 567: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation.
-
-@changelog_1131_h2
-#Version 1.4.179 Beta (2014-06-23)
-
-@changelog_1132_li
-#The license was changed to MPL 2.0 (from 1.0) and EPL 1.0.
-
-@changelog_1133_li
-#Issue 565: MVStore: concurrently adding LOB objects (with MULTI_THREADED option) resulted in a NullPointerException.
-
-@changelog_1134_li
-#MVStore: reduced dependencies on other H2 classes.
-
-@changelog_1135_li
-#There was a way to prevent a database from being re-opened, by creating a column constraint that references a table with a higher id, for example with "check" constraints that contain queries. This is now detected, and creating such a table is prohibited. In future versions of H2, creating references to other tables will most likely no longer be supported because of such problems.
-
-@changelog_1136_li
-#MVStore: descending indexes with "nulls first" did not work as expected (null was ordered last).
-
-@changelog_1137_li
-#Large result sets now always create temporary tables instead of temporary files.
-
-@changelog_1138_li
-#When using the PageStore, opening a database failed in some cases with a NullPointerException if temporary tables were used (explicitly, or implicitly when using large result sets).
-
-@changelog_1139_li
-#If a database file in the PageStore file format exists, this file and this mode are now used, even if the database URL does not contain "MV_STORE=FALSE". If an MVStore file exists, it is used.
-
-@changelog_1140_li
-#Databases created with version 1.3.175 and earlier that contained foreign keys in combination with multi-column indexes could not be opened in some cases. This was due to a bugfix in version 1.3.176: Referential integrity constraints sometimes used the wrong index.
-
-@changelog_1141_li
-#MVStore: the ObjectDataType comparison method was incorrect if one key was Serializable and the other was of a common class.
-
-@changelog_1142_li
-#Recursive queries with many result rows (more than the setting "max_memory_rows") did not work correctly.
-
-@changelog_1143_li
-#The license has changed to MPL 2.0 + EPL 1.0.
-
-@changelog_1144_li
-#MVStore: temporary tables from result sets could survive re-opening a database, which could result in a ClassCastException.
-
-@changelog_1145_li
-#Issue 566: MVStore: unique indexes that were created later on did not work correctly if there were over 5000 rows in the table. Existing databases need to be re-created (at least the broken indexes need to be re-built).
-
-@changelog_1146_li
-#MVStore: creating secondary indexes on large tables resulted in missing rows in the index.
-
-@changelog_1147_li
-#Metadata: the password of linked tables is now only visible for admin users.
-
-@changelog_1148_li
-#For Windows, database URLs of the form "jdbc:h2:/test" were considered relative and did not work unless the system property "h2.implicitRelativePath" was used.
-
-@changelog_1149_li
-#Windows: using a base directory of "C:/" and similar did not work as expected.
-
-@changelog_1150_li
-#Follow the JDBC specification on procedure metadata; use P0 as the return type of procedures.
-
-@changelog_1151_li
-#Issue 531: IDENTITY ignored for added column.
-
-@changelog_1152_li
-#FileSystem: improve exception throwing compatibility with the JDK.
-
-@changelog_1153_li
-#Spatial Index: adjust costs so we do not use the spatial index if the query does not contain an intersects operator.
-
-@changelog_1154_li
-#Fix multi-threaded deadlock when using a View that includes a TableFunction.
-
-@changelog_1155_li
-#Fix bug in dividing very-small BigDecimal numbers.
-
-@changelog_1156_h2
-#Version 1.4.178 Beta (2014-05-02)
-
-@changelog_1157_li
-#Issue 559: Make dependency on org.osgi.service.jdbc optional.
-
-@changelog_1158_li
-#Improve error message when the user specifies an unsupported combination of database settings.
-
-@changelog_1159_li
-#MVStore: in the multi-threaded mode, NullPointerException and other exceptions could occur.
-
-@changelog_1160_li
-#MVStore: some database files could not be compacted due to a bug in the bookkeeping of the fill rate. Also, database files were compacted quite slowly. This has been improved, but more changes in this area are expected.
-
-@changelog_1161_li
-#MVStore: support for volatile maps (that don't store changes).
-
-@changelog_1162_li
-#MVStore mode: in-memory databases now also use the MVStore.
-
-@changelog_1163_li
-#In server mode, appending ";autocommit=false" to the database URL was working, but the return value of Connection.getAutoCommit() was wrong.
-
-@changelog_1164_li
-#Issue 561: OSGi: the import package declaration of org.h2 excluded version 1.4.
-
-@changelog_1165_li
-#Issue 558: with the MVStore, a NullPointerException could occur when using LOBs at session commit (LobStorageMap.removeLob).
-
-@changelog_1166_li
-#Remove the "h2.MAX_MEMORY_ROWS_DISTINCT" system property to reduce confusion. We already have the MAX_MEMORY_ROWS setting which does a very similar thing, and is better documented.
-
-@changelog_1167_li
-#Issue 554: Web Console in an IFrame was not fully supported.
-
-@changelog_1168_h2
-#Version 1.4.177 Beta (2014-04-12)
-
-@changelog_1169_li
-#By default, the MV_STORE option is enabled, so the new MVStore storage is used. The MVCC setting is by default set to the same value as the MV_STORE setting, so it is also enabled by default. For testing, both settings can be disabled by appending ";MV_STORE=FALSE" and/or ";MVCC=FALSE" to the database URL.
-
-@changelog_1170_li
-#The file locking method 'serialized' is no longer supported. This mode might return in a future version, however this is not clear right now. A new implementation and new tests would be needed.
-
-@changelog_1171_li
-#Enable the new storage format for dates (system property "h2.storeLocalTime"). For the MVStore mode this is always enabled, and with version 1.4 it is now also enabled in the PageStore mode.
-
-@changelog_1172_li
-#Implicit relative paths are disabled (system property "h2.implicitRelativePath"), so that the database URL jdbc:h2:test now needs to be written as jdbc:h2:./test.
-
-@changelog_1173_li
-#"select ... fetch first 1 row only" is supported with the regular mode. This was disabled so far because "fetch" and "offset" are now keywords. See also Mode.supportOffsetFetch.
-
-@changelog_1174_li
-#Byte arrays are now sorted in unsigned mode (x'99' is larger than x'09'). (System property "h2.sortBinaryUnsigned", Mode.binaryUnsigned, setting "binary_collation").
-
-@changelog_1175_li
-#Csv.getInstance will be removed in future versions of 1.4. Use the public constructor instead.
-
-@changelog_1176_li
-#Remove support for the limited old-style outer join syntax using "(+)". Use "outer join" instead. System property "h2.oldStyleOuterJoin".
-
-@changelog_1177_li
-#Support the data type "DATETIME2" as an alias for "DATETIME", for MS SQL Server compatibility.
-
-@changelog_1178_li
-#Add Oracle-compatible TRANSLATE function, patch by Eric Chatellier.
-
-@changelog_1179_h2
-#Version 1.3.176 (2014-04-05)
-
-@changelog_1180_li
-#The file locking method 'serialized' is no longer documented, as it will not be available in version 1.4.
-
-@changelog_1181_li
-#The static method Csv.getInstance() was removed. Use the public constructor instead.
-
-@changelog_1182_li
-#The default user name for the Script, RunScript, Shell, and CreateCluster tools is no longer "sa" but an empty string.
-
-@changelog_1183_li
-#The stack trace of the exception "The object is already closed" is no longer logged by default.
-
-@changelog_1184_li
-#If a value of a result set was itself a result set, the result could only be read once.
-
-@changelog_1185_li
-#Column constraints are also visible in views (patch from Nicolas Fortin for H2GIS).
-
-@changelog_1186_li
-#Granting an additional right to a role that already had a right for that table was not working.
-
-@changelog_1187_li
-#Spatial index: a few bugs have been fixed (using spatial constraints in views, transferring geometry objects over TCP/IP, the returned geometry object is copied when needed).
-
-@changelog_1188_li
-#Issue 551: the datatype documentation was incorrect (found by Bernd Eckenfels).
-
-@changelog_1189_li
-#Issue 368: ON DUPLICATE KEY UPDATE did not work for multi-row inserts. Test case from Angus Macdonald.
-
-@changelog_1190_li
-#OSGi: the package javax.tools is now imported (as optional).
-
-@changelog_1191_li
-#H2 Console: auto-complete is now disabled by default, but there is a hot-key (Ctrl+Space).
-
-@changelog_1192_li
-#H2 Console: auto-complete did not work with multi-line statements.
-
-@changelog_1193_li
-#CLOB and BLOB data was not immediately removed after a rollback.
-
-@changelog_1194_li
-#There is a new Aggregate API that supports the internal H2 data types (GEOMETRY for example). Thanks a lot to Nicolas Fortin for the patch!
-
-@changelog_1195_li
-#Referential integrity constraints sometimes used the wrong index, such that updating a row in the referenced table incorrectly failed with a constraint violation.
-
-@changelog_1196_li
-#The Polish translation was completed and corrected by Wojtek Jurczyk. Thanks a lot!
-
-@changelog_1197_li
-#Issue 545: Unnecessary duplicate code was removed.
-
-@changelog_1198_li
-#The profiler tool can now process files with full thread dumps.
-
-@changelog_1199_li
-#MVStore: the file format was changed slightly.
-
-@changelog_1200_li
-#MVStore mode: the CLOB and BLOB storage was re-implemented and is now much faster than with the PageStore (which is still the default storage).
-
-@changelog_1201_li
-#MVStore mode: creating indexes is now much faster (in many cases faster than with the default PageStore).
-
-@changelog_1202_li
-#Various bugs in the MVStore storage have been fixed, including a bug in the R-tree implementation. The database could get corrupt if there were transient IO exceptions while storing.
-
-@changelog_1203_li
-#The method org.h2.expression.Function.getCost could throw a NullPointerException.
-
-@changelog_1204_li
-#Storing LOBs in separate files (outside of the main database file) is no longer supported for new databases.
-
-@changelog_1205_li
-#Lucene 2 is no longer supported.
-
-@changelog_1206_li
-#Fix bug in calculating default MIN and MAX values for SEQUENCE.
-
-@changelog_1207_li
-#Fix bug in performing IN queries with multiple values when IGNORECASE=TRUE.
-
-@changelog_1208_li
-#Add an entry point to org.h2.tools.Shell so it can be called from inside an application. Patch by Thomas Gillet.
-
-@changelog_1209_li
-#Fix bug that prevented the PgServer from being stopped and started multiple times.
-
-@changelog_1210_li
-#Support some more DDL syntax for MySQL, patch from Peter Jentsch.
-
-@changelog_1211_li
-#Issue 548: TO_CHAR does not format MM and DD correctly when the month or day of the month is 1 digit, patch from "the.tucc".
-
-@changelog_1212_li
-#Fix bug in varargs support in ALIASes, patch from Nicolas Fortin.
-
-@cheatSheet_1000_h1
-#H2 Database Engine Cheat Sheet
-
-@cheatSheet_1001_h2
-#Using H2
-
-@cheatSheet_1002_a
-H2
-
-@cheatSheet_1003_li
-# is open source, free to use and distribute.
-
-@cheatSheet_1004_a
-ダウンロード
-
-@cheatSheet_1005_li
-#: jar, installer (Windows), zip.
-
-@cheatSheet_1006_li
-#To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar
, h2.bat
, or h2.sh
.
-
-@cheatSheet_1007_a
-#A new database is automatically created
-
-@cheatSheet_1008_a
-#by default
-
-@cheatSheet_1009_li
-#.
-
-@cheatSheet_1010_a
-#Closing the last connection closes the database
-
-@cheatSheet_1011_li
-#.
-
-@cheatSheet_1012_h2
-ドキュメント
-
-@cheatSheet_1013_p
-# Reference: SQL grammar, functions, data types, tools, API
-
-@cheatSheet_1014_a
-特徴
-
-@cheatSheet_1015_p
-#: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions
-
-@cheatSheet_1016_a
-#Database URLs
-
-@cheatSheet_1017_a
-#Embedded
-
-@cheatSheet_1018_code
-jdbc:h2:~/test
-
-@cheatSheet_1019_p
-# 'test' in the user home directory
-
-@cheatSheet_1020_code
-#jdbc:h2:/data/test
-
-@cheatSheet_1021_p
-# 'test' in the directory /data
-
-@cheatSheet_1022_code
-#jdbc:h2:test
-
-@cheatSheet_1023_p
-# in the current(!) working directory
-
-@cheatSheet_1024_a
-#In-Memory
-
-@cheatSheet_1025_code
-#jdbc:h2:mem:test
-
-@cheatSheet_1026_p
-# multiple connections in one process
-
-@cheatSheet_1027_code
-jdbc:h2:mem:
-
-@cheatSheet_1028_p
-# unnamed private; one connection
-
-@cheatSheet_1029_a
-サーバーモード
-
-@cheatSheet_1030_code
-#jdbc:h2:tcp://localhost/~/test
-
-@cheatSheet_1031_p
-# user home dir
-
-@cheatSheet_1032_code
-#jdbc:h2:tcp://localhost//data/test
-
-@cheatSheet_1033_p
-# absolute dir
-
-@cheatSheet_1034_a
-#Server start
-
-@cheatSheet_1035_p
-#: java -cp *.jar org.h2.tools.Server
-
-@cheatSheet_1036_a
-#Settings
-
-@cheatSheet_1037_code
-#jdbc:h2:..;MODE=MySQL
-
-@cheatSheet_1038_a
-#compatibility (or HSQLDB,...)
-
-@cheatSheet_1039_code
-#jdbc:h2:..;TRACE_LEVEL_FILE=3
-
-@cheatSheet_1040_a
-#log to *.trace.db
-
-@cheatSheet_1041_a
-#Using the JDBC API
-
-@cheatSheet_1042_a
-#Connection Pool
-
-@cheatSheet_1043_a
-#Maven 2
-
-@cheatSheet_1044_a
-#Hibernate
-
-@cheatSheet_1045_p
-# hibernate.cfg.xml (or use the HSQLDialect):
-
-@cheatSheet_1046_a
-#TopLink and Glassfish
-
-@cheatSheet_1047_p
-# Datasource class: org.h2.jdbcx.JdbcDataSource
-
-@cheatSheet_1048_code
-#oracle.toplink.essentials.platform.
-
-@cheatSheet_1049_code
-#database.H2Platform
-
-@download_1000_h1
-ダウンロード
-
-@download_1001_h3
-#Version 1.4.187 (2015-04-10), Beta
-
-@download_1002_a
-Windows Installer
-
-@download_1003_a
-Platform-Independent Zip
-
-@download_1004_h3
-#Version 1.3.176 (2014-04-05), Last Stable
-
-@download_1005_a
-Windows Installer
-
-@download_1006_a
-Platform-Independent Zip
-
-@download_1007_h3
-#Download Mirror and Older Versions
-
-@download_1008_a
-Platform-Independent Zip
-
-@download_1009_h3
-#Jar File
-
-@download_1010_a
-#Maven.org
-
-@download_1011_a
-#Sourceforge.net
-
-@download_1012_a
-#Latest Automated Build (not released)
-
-@download_1013_h3
-#Maven (Binary, Javadoc, and Source)
-
-@download_1014_a
-#Binary
-
-@download_1015_a
-#Javadoc
-
-@download_1016_a
-#Sources
-
-@download_1017_h3
-#Database Upgrade Helper File
-
-@download_1018_a
-#Upgrade database from 1.1 to the current version
-
-@download_1019_h3
-サブバージョンのソースリポジトリ
-
-@download_1020_a
-Google Code
-
-@download_1021_p
-# For details about changes, see the Change Log.
-
-@download_1022_h3
-#News and Project Information
-
-@download_1023_a
-#Atom Feed
-
-@download_1024_a
-#RSS Feed
-
-@download_1025_a
-#DOAP File
-
-@download_1026_p
-# (what is this)
-
-@faq_1000_h1
-F A Q
-
-@faq_1001_a
-# I Have a Problem or Feature Request
-
-@faq_1002_a
-# Are there Known Bugs? When is the Next Release?
-
-@faq_1003_a
-# Is this Database Engine Open Source?
-
-@faq_1004_a
-# Is Commercial Support Available?
-
-@faq_1005_a
-# How to Create a New Database?
-
-@faq_1006_a
-# How to Connect to a Database?
-
-@faq_1007_a
-# Where are the Database Files Stored?
-
-@faq_1008_a
-# What is the Size Limit (Maximum Size) of a Database?
-
-@faq_1009_a
-# Is it Reliable?
-
-@faq_1010_a
-# Why is Opening my Database Slow?
-
-@faq_1011_a
-# My Query is Slow
-
-@faq_1012_a
-# H2 is Very Slow
-
-@faq_1013_a
-# Column Names are Incorrect?
-
-@faq_1014_a
-# Float is Double?
-
-@faq_1015_a
-# Is the GCJ Version Stable? Faster?
-
-@faq_1016_a
-# How to Translate this Project?
-
-@faq_1017_a
-# How to Contribute to this Project?
-
-@faq_1018_h3
-#I Have a Problem or Feature Request
-
-@faq_1019_p
-# Please read the support checklist.
-
-@faq_1020_h3
-#Are there Known Bugs? When is the Next Release?
-
-@faq_1021_p
-# Usually, bugs get fixed as they are found. There is a release every few weeks. Here is the list of known and confirmed issues:
-
-@faq_1022_li
-#When opening a database file in a timezone that has different daylight saving rules: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as within the USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. This problem does not occur when using the system property "h2.storeLocalTime" (however such database files are not compatible with older versions of H2).
-
-@faq_1023_li
-#Apache Harmony: there seems to be a bug in Harmony that affects H2. See HARMONY-6505.
-
-@faq_1024_li
-#Tomcat and Glassfish 3 set most static fields (final or non-final) to null
when unloading a web application. This can cause a NullPointerException
in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES=false
, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar
file in a shared lib
directory (common/lib
).
-
-@faq_1025_li
-#Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3.
-
-@faq_1026_li
-#When using Install4j before 4.1.4 on Linux and enabling pack200
, the h2*.jar
becomes corrupted by the install process, causing application failure. A workaround is to add an empty file h2*.jar.nopack
next to the h2*.jar
file. This problem is solved in Install4j 4.1.4.
-
-@faq_1027_p
-# For a complete list, see Open Issues.
-
-@faq_1028_h3
-このデータベースエンジンはオープンソースですか？
-
-@faq_1029_p
-# Yes. It is free to use and distribute, and the source code is included. See also under license.
-
-@faq_1030_h3
-#Is Commercial Support Available?
-
-@faq_1031_p
-# Yes, commercial support is available, see Commercial Support.
-
-@faq_1032_h3
-新規データベースの構築方法は？
-
-@faq_1033_p
-# By default, a new database is automatically created if it does not yet exist. See Creating New Databases.
-
-@faq_1034_h3
-データベースへの接続方法は？
-
-@faq_1035_p
-# The database driver is org.h2.Driver
, and the database URL starts with jdbc:h2:
. To connect to a database using JDBC, use the following code:
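-
-# A minimal sketch of such code (the user name "sa" and the empty password are illustrative defaults):
-import java.sql.Connection;
-import java.sql.DriverManager;
-
-public class Test {
-    public static void main(String[] args) throws Exception {
-        // opens (and, by default, creates) the database 'test' in the user home directory
-        Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
-        // ... use the connection ...
-        conn.close();
-    }
-}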
-
-@faq_1036_h3
-データベースのファイルはどこに保存されますか？
-
-@faq_1037_p
-# When using database URLs like jdbc:h2:~/test
, the database is stored in the user directory. For Windows, this is usually C:\Documents and Settings\<userName>
or C:\Users\<userName>
. If the base directory is not set (as in jdbc:h2:test
), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin
. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc:h2:file:data/sample
, the database is stored in the directory data
(relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). Example: jdbc:h2:file:C:/data/test
-
-@faq_1038_h3
-#What is the Size Limit (Maximum Size) of a Database?
-
-@faq_1039_p
-# See Limits and Limitations.
-
-@faq_1040_h3
-これは信頼できるデータベースですか？
-
-@faq_1041_p
-# That is not easy to say. It is still quite a new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous; they are only supported for situations where performance is more important than reliability. Those dangerous features are:
-
-@faq_1042_li
-#Disabling the transaction log or FileDescriptor.sync() using LOG=0 or LOG=1.
-
-@faq_1043_li
-#Using the transaction isolation level READ_UNCOMMITTED
(LOCK_MODE 0
) while at the same time using multiple connections.
-
-@faq_1044_li
-#Disabling database file protection (setting FILE_LOCK
to NO
in the database URL).
-
-@faq_1045_li
-#Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE
.
-
-@faq_1046_p
-# In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt the database.
-
-@faq_1047_p
-# This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are:
-
-@faq_1048_li
-#Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7
-
-@faq_1049_li
-#The features AUTO_SERVER
and AUTO_RECONNECT
.
-
-@faq_1050_li
-#Cluster mode, 2-phase commit, savepoints.
-
-@faq_1051_li
-#24/7 operation.
-
-@faq_1052_li
-#Fulltext search.
-
-@faq_1053_li
-#Operations on LOBs over 2 GB.
-
-@faq_1054_li
-#The optimizer may not always select the best plan.
-
-@faq_1055_li
-#Using the ICU4J collator.
-
-@faq_1056_p
-# Areas considered experimental are:
-
-@faq_1057_li
-#The PostgreSQL server
-
-@faq_1058_li
-#Clustering (there are cases where transaction isolation can be broken due to timing issues, for example one session overtaking another session).
-
-@faq_1059_li
-#Multi-threading within the engine using SET MULTI_THREADED=1
.
-
-@faq_1060_li
-#Compatibility modes for other databases (only some features are implemented).
-
-@faq_1061_li
-#The soft reference cache (CACHE_TYPE=SOFT_LRU
). It might not improve performance, and out of memory issues have been reported.
-
-@faq_1062_p
-# Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations.
-
-@faq_1063_h3
-#Why is Opening my Database Slow?
-
-@faq_1064_p
-# To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group.
-
-@faq_1065_p
-# Other possible reasons are: the database is very big (many GB), or contains linked tables that are slow to open.
-
-@faq_1066_h3
-#My Query is Slow
-
-@faq_1067_p
-# Slow SELECT
(or DELETE, UPDATE, MERGE
) statements can have multiple causes. Follow this checklist:
-
-@faq_1068_li
-#Run ANALYZE
(see documentation for details).
-
-@faq_1069_li
-#Run the query with EXPLAIN
and check if indexes are used (see documentation for details).
-
-@faq_1070_li
-#If required, create additional indexes and try again using ANALYZE
and EXPLAIN
.
-
-@faq_1071_li
-#If that doesn't help, please report the problem.
-
-@faq_1072_h3
-#H2 is Very Slow
-
-@faq_1073_p
-# By default, H2 closes the database when the last connection is closed. If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning.
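-
-# One way to keep the database open is a connection pool; a sketch using H2's built-in pool (the class name PoolExample is illustrative):
-import java.sql.Connection;
-import org.h2.jdbcx.JdbcConnectionPool;
-
-public class PoolExample {
-    public static void main(String[] args) throws Exception {
-        JdbcConnectionPool pool = JdbcConnectionPool.create("jdbc:h2:~/test", "sa", "");
-        Connection conn = pool.getConnection();
-        // ... use the connection ...
-        conn.close();   // returns the connection to the pool; the database stays open
-        pool.dispose(); // on application shutdown
-    }
-}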
-
-@faq_1074_h3
-#Column Names are Incorrect?
-
-@faq_1075_p
-# For the query SELECT ID AS X FROM TEST
the method ResultSetMetaData.getColumnName()
returns ID
, I expect it to return X
. What's wrong?
-
-@faq_1076_p
-# This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName()
should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel()
. Some other databases don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME=TRUE
to the database URL.
-
-@faq_1077_p
-# This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names.
-
-@faq_1078_h3
-#Float is Double?
-
-@faq_1079_p
-# For a table defined as CREATE TABLE TEST(X FLOAT)
the method ResultSet.getObject()
returns a java.lang.Double
, I expect it to return a java.lang.Float
. What's wrong?
-
-@faq_1080_p
-# This is not a bug. According to the JDBC specification, the JDBC data type FLOAT
is equivalent to DOUBLE
, and both are mapped to java.lang.Double
. See also Mapping SQL and Java Types - 8.3.10 FLOAT.
-
-@faq_1081_h3
-#Is the GCJ Version Stable? Faster?
-
-@faq_1082_p
-# The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without an error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM.
-
-@faq_1083_h3
-このプロジェクトの翻訳方法は？
-
-@faq_1084_p
-# For more information, see Build/Translating.
-
-@faq_1085_h3
-#How to Contribute to this Project?
-
-@faq_1086_p
-# There are various ways to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest starting with very small features that are easy to implement. Keep in mind to provide test cases as well.
-
-@features_1000_h1
-特徴
-
-@features_1001_a
-# Feature List
-
-@features_1002_a
-# Comparison to Other Database Engines
-
-@features_1003_a
-# H2 in Use
-
-@features_1004_a
-# Connection Modes
-
-@features_1005_a
-# Database URL Overview
-
-@features_1006_a
-# Connecting to an Embedded (Local) Database
-
-@features_1007_a
-# In-Memory Databases
-
-@features_1008_a
-# Database Files Encryption
-
-@features_1009_a
-# Database File Locking
-
-@features_1010_a
-# Opening a Database Only if it Already Exists
-
-@features_1011_a
-# Closing a Database
-
-@features_1012_a
-# Ignore Unknown Settings
-
-@features_1013_a
-# Changing Other Settings when Opening a Connection
-
-@features_1014_a
-# Custom File Access Mode
-
-@features_1015_a
-# Multiple Connections
-
-@features_1016_a
-# Database File Layout
-
-@features_1017_a
-# Logging and Recovery
-
-@features_1018_a
-# Compatibility
-
-@features_1019_a
-# Auto-Reconnect
-
-@features_1020_a
-# Automatic Mixed Mode
-
-@features_1021_a
-# Page Size
-
-@features_1022_a
-# Using the Trace Options
-
-@features_1023_a
-# Using Other Logging APIs
-
-@features_1024_a
-# Read Only Databases
-
-@features_1025_a
-# Read Only Databases in Zip or Jar File
-
-@features_1026_a
-# Computed Columns / Function Based Index
-
-@features_1027_a
-# Multi-Dimensional Indexes
-
-@features_1028_a
-# User-Defined Functions and Stored Procedures
-
-@features_1029_a
-# Pluggable or User-Defined Tables
-
-@features_1030_a
-# Triggers
-
-@features_1031_a
-# Compacting a Database
-
-@features_1032_a
-# Cache Settings
-
-@features_1033_h2
-特徴一覧
-
-@features_1034_h3
-主な特徴
-
-@features_1035_li
-#Very fast database engine
-
-@features_1036_li
-#Open source
-
-@features_1037_li
-#Written in Java
-
-@features_1038_li
-#Supports standard SQL, JDBC API
-
-@features_1039_li
-#Embedded and Server mode, Clustering support
-
-@features_1040_li
-#Strong security features
-
-@features_1041_li
-#The PostgreSQL ODBC driver can be used
-
-@features_1042_li
-#Multi version concurrency
-
-@features_1043_h3
-追加された特徴
-
-@features_1044_li
-#Disk based or in-memory databases and tables, read-only database support, temporary tables
-
-@features_1045_li
-#Transaction support (read committed), 2-phase-commit
-
-@features_1046_li
-#Multiple connections, table level locking
-
-@features_1047_li
-#Cost based optimizer, using a genetic algorithm for complex queries, zero-administration
-
-@features_1048_li
-#Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set
-
-@features_1049_li
-#Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL
-
-@features_1050_h3
-SQLサポート
-
-@features_1051_li
-#Support for multiple schemas, information schema
-
-@features_1052_li
-#Referential integrity / foreign key constraints with cascade, check constraints
-
-@features_1053_li
-#Inner and outer joins, subqueries, read only views and inline views
-
-@features_1054_li
-#Triggers and Java functions / stored procedures
-
-@features_1055_li
-#Many built-in functions, including XML and lossless data compression
-
-@features_1056_li
-#Wide range of data types including large objects (BLOB/CLOB) and arrays
-
-@features_1057_li
-#Sequence and autoincrement columns, computed columns (can be used for function based indexes)
-
-@features_1058_code
-ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP
-
-@features_1059_li
-#Collation support, including support for the ICU4J library
-
-@features_1060_li
-#Support for users and roles
-
-@features_1061_li
-#Compatibility modes for IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL.
-
-@features_1062_h3
-セキュリティの特徴
-
-@features_1063_li
-#Includes a solution for the SQL injection problem
-
-@features_1064_li
-#User password authentication uses SHA-256 and salt
-
-@features_1065_li
-#For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL)
-
-@features_1066_li
-#All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm
-
-@features_1067_li
-#The remote JDBC driver supports TCP/IP connections over TLS
-
-@features_1068_li
-#The built-in web server supports connections over TLS
-
-@features_1069_li
-#Passwords can be sent to the database using char arrays instead of Strings
-
-@features_1070_h3
-他の特徴とツール
-
-@features_1071_li
-#Small footprint (smaller than 1.5 MB), low memory requirements
-
-@features_1072_li
-#Multiple index types (b-tree, tree, hash)
-
-@features_1073_li
-#Support for multi-dimensional indexes
-
-@features_1074_li
-#CSV (comma separated values) file support
-
-@features_1075_li
-#Support for linked tables, and a built-in virtual 'range' table
-
-@features_1076_li
-#Supports the EXPLAIN PLAN
statement; sophisticated trace options
-
-@features_1077_li
-#Database closing can be delayed or disabled to improve the performance
-
-@features_1078_li
-#Web-based Console application (translated to many languages) with autocomplete
-
-@features_1079_li
-#The database can generate SQL script files
-
-@features_1080_li
-#Contains a recovery tool that can dump the contents of the database
-
-@features_1081_li
-#Support for variables (for example to calculate running totals)
-
-@features_1082_li
-#Automatic re-compilation of prepared statements
-
-@features_1083_li
-#Uses a small number of database files
-
-@features_1084_li
-#Uses a checksum for each record and log entry for data integrity
-
-@features_1085_li
-#Well tested (high code coverage, randomized stress tests)
-
-@features_1086_h2
-他のデータベースエンジンと比較する
-
-@features_1087_p
-# This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0.
-
-@features_1088_th
-#Feature
-
-@features_1089_th
-H2
-
-@features_1090_th
-Derby
-
-@features_1091_th
-HSQLDB
-
-@features_1092_th
-MySQL
-
-@features_1093_th
-PostgreSQL
-
-@features_1094_td
-Pure Java
-
-@features_1095_td
-対応
-
-@features_1096_td
-対応
-
-@features_1097_td
-対応
-
-@features_1098_td
-非対応
-
-@features_1099_td
-非対応
-
-@features_1100_td
-エンベッドモード (Java)
-
-@features_1101_td
-対応
-
-@features_1102_td
-対応
-
-@features_1103_td
-対応
-
-@features_1104_td
-非対応
-
-@features_1105_td
-非対応
-
-@features_1106_td
-#In-Memory Mode
-
-@features_1107_td
-対応
-
-@features_1108_td
-対応
-
-@features_1109_td
-対応
-
-@features_1110_td
-非対応
-
-@features_1111_td
-非対応
-
-@features_1112_td
-#Explain Plan
-
-@features_1113_td
-対応
-
-@features_1114_td
-#Yes *12
-
-@features_1115_td
-対応
-
-@features_1116_td
-対応
-
-@features_1117_td
-対応
-
-@features_1118_td
-#Built-in Clustering / Replication
-
-@features_1119_td
-対応
-
-@features_1120_td
-対応
-
-@features_1121_td
-非対応
-
-@features_1122_td
-対応
-
-@features_1123_td
-対応
-
-@features_1124_td
-暗号化データベース
-
-@features_1125_td
-対応
-
-@features_1126_td
-#Yes *10
-
-@features_1127_td
-#Yes *10
-
-@features_1128_td
-非対応
-
-@features_1129_td
-非対応
-
-@features_1130_td
-リンクテーブル
-
-@features_1131_td
-対応
-
-@features_1132_td
-非対応
-
-@features_1133_td
-#Partially *1
-
-@features_1134_td
-#Partially *2
-
-@features_1135_td
-非対応
-
-@features_1136_td
-ODBCドライバ
-
-@features_1137_td
-対応
-
-@features_1138_td
-非対応
-
-@features_1139_td
-非対応
-
-@features_1140_td
-対応
-
-@features_1141_td
-対応
-
-@features_1142_td
-フルテキストサーチ
-
-@features_1143_td
-対応
-
-@features_1144_td
-対応
-
-@features_1145_td
-非対応
-
-@features_1146_td
-対応
-
-@features_1147_td
-対応
-
-@features_1148_td
-#Domains (User-Defined Types)
-
-@features_1149_td
-対応
-
-@features_1150_td
-非対応
-
-@features_1151_td
-対応
-
-@features_1152_td
-対応
-
-@features_1153_td
-対応
-
-@features_1154_td
-データベースごとのファイル
-
-@features_1155_td
-少
-
-@features_1156_td
-多
-
-@features_1157_td
-少
-
-@features_1158_td
-多
-
-@features_1159_td
-多
-
-@features_1160_td
-#Row Level Locking
-
-@features_1161_td
-#Yes *9
-
-@features_1162_td
-対応
-
-@features_1163_td
-#Yes *9
-
-@features_1164_td
-対応
-
-@features_1165_td
-対応
-
-@features_1166_td
-#Multi Version Concurrency
-
-@features_1167_td
-対応
-
-@features_1168_td
-非対応
-
-@features_1169_td
-対応
-
-@features_1170_td
-対応
-
-@features_1171_td
-対応
-
-@features_1172_td
-#Multi-Threaded Statement Processing
-
-@features_1173_td
-#No *11
-
-@features_1174_td
-対応
-
-@features_1175_td
-対応
-
-@features_1176_td
-対応
-
-@features_1177_td
-対応
-
-@features_1178_td
-#Role Based Security
-
-@features_1179_td
-対応
-
-@features_1180_td
-#Yes *3
-
-@features_1181_td
-対応
-
-@features_1182_td
-対応
-
-@features_1183_td
-対応
-
-@features_1184_td
-#Updatable Result Sets
-
-@features_1185_td
-対応
-
-@features_1186_td
-#Yes *7
-
-@features_1187_td
-対応
-
-@features_1188_td
-対応
-
-@features_1189_td
-対応
-
-@features_1190_td
-#Sequences
-
-@features_1191_td
-対応
-
-@features_1192_td
-対応
-
-@features_1193_td
-対応
-
-@features_1194_td
-非対応
-
-@features_1195_td
-対応
-
-@features_1196_td
-#Limit and Offset
-
-@features_1197_td
-対応
-
-@features_1198_td
-#Yes *13
-
-@features_1199_td
-対応
-
-@features_1200_td
-対応
-
-@features_1201_td
-対応
-
-@features_1202_td
-#Window Functions
-
-@features_1203_td
-#No *15
-
-@features_1204_td
-#No *15
-
-@features_1205_td
-非対応
-
-@features_1206_td
-非対応
-
-@features_1207_td
-対応
-
-@features_1208_td
-#Temporary Tables
-
-@features_1209_td
-対応
-
-@features_1210_td
-#Yes *4
-
-@features_1211_td
-対応
-
-@features_1212_td
-対応
-
-@features_1213_td
-対応
-
-@features_1214_td
-#Information Schema
-
-@features_1215_td
-対応
-
-@features_1216_td
-#No *8
-
-@features_1217_td
-対応
-
-@features_1218_td
-対応
-
-@features_1219_td
-対応
-
-@features_1220_td
-#Computed Columns
-
-@features_1221_td
-対応
-
-@features_1222_td
-対応
-
-@features_1223_td
-対応
-
-@features_1224_td
-非対応
-
-@features_1225_td
-#Yes *6
-
-@features_1226_td
-#Case Insensitive Columns
-
-@features_1227_td
-対応
-
-@features_1228_td
-#Yes *14
-
-@features_1229_td
-対応
-
-@features_1230_td
-対応
-
-@features_1231_td
-#Yes *6
-
-@features_1232_td
-#Custom Aggregate Functions
-
-@features_1233_td
-対応
-
-@features_1234_td
-非対応
-
-@features_1235_td
-対応
-
-@features_1236_td
-対応
-
-@features_1237_td
-対応
-
-@features_1238_td
-#CLOB/BLOB Compression
-
-@features_1239_td
-対応
-
-@features_1240_td
-非対応
-
-@features_1241_td
-非対応
-
-@features_1242_td
-非対応
-
-@features_1243_td
-対応
-
-@features_1244_td
-フットプリント (jar/dll size)
-
-@features_1245_td
-#~1.5 MB *5
-
-@features_1246_td
-#~3 MB
-
-@features_1247_td
-#~1.5 MB
-
-@features_1248_td
-#~4 MB
-
-@features_1249_td
-#~6 MB
-
-@features_1250_p
-# *1 HSQLDB supports text tables.
-
-@features_1251_p
-# *2 MySQL supports linked MySQL tables under the name 'federated tables'.
-
-@features_1252_p
-# *3 Derby supports role-based security and password checking as an option.
-
-@features_1253_p
-# *4 Derby only supports global temporary tables.
-
-@features_1254_p
-# *5 The default H2 jar file contains debug information; jar files for other databases do not.
-
-@features_1255_p
-# *6 PostgreSQL supports functional indexes.
-
-@features_1256_p
-# *7 Derby only supports updatable result sets if the query is not sorted.
-
-@features_1257_p
-# *8 Derby doesn't support standard-compliant information schema tables.
-
-@features_1258_p
-# *9 When using MVCC (multi version concurrency).
-
-@features_1259_p
-# *10 Derby and HSQLDB don't hide data patterns well.
-
-@features_1260_p
-# *11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC.
-
-@features_1261_p
-# *12 Derby doesn't support the EXPLAIN
statement, but it supports runtime statistics and retrieving statement execution plans.
-
-@features_1262_p
-# *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..]
, however it supports FETCH FIRST .. ROW[S] ONLY
.
-
-@features_1263_p
-# *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER()
.
-
-@features_1264_h3
-DaffodilDbとOne$Db
-
-@features_1265_p
-# It looks like the development of this database has stopped. The last release was February 2006.
-
-@features_1266_h3
-McKoi
-
-@features_1267_p
-# It looks like the development of this database has stopped. The last release was August 2004.
-
-@features_1268_h2
-#H2 in Use
-
-@features_1269_p
-# For a list of applications that work with or use H2, see: Links.
-
-@features_1270_h2
-接続モード
-
-@features_1271_p
-# The following connection modes are supported:
-
-@features_1272_li
-#Embedded mode (local connections using JDBC)
-
-@features_1273_li
-#Server mode (remote connections using JDBC or ODBC over TCP/IP)
-
-@features_1274_li
-#Mixed mode (local and remote connections at the same time)
-
-@features_1275_h3
-エンベッドモード
-
-@features_1276_p
-# In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of databases open concurrently, or on the number of open connections.
-
-@features_1277_h3
-サーバーモード
-
-@features_1278_p
-# When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode.
-
-@features_1279_p
-# The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of databases open concurrently per server, or on the number of open connections.
-
-@features_1280_h3
-#Mixed Mode
-
-@features_1281_p
-# The mixed mode is a combination of the embedded and the server mode. The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower.
-
-@features_1282_p
-# The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's a local or remote connection) can do so using the exact same database URL.
-
-@features_1283_h2
-データベースURL概要
-
-@features_1284_p
-# This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive.
-
-@features_1285_th
-トピック
-
-@features_1286_th
-URLフォーマットと例
-
-@features_1287_a
-エンベッド (ローカル) 接続
-
-@features_1288_td
-# jdbc:h2:[file:][<path>]<databaseName>
-
-@features_1289_td
-# jdbc:h2:~/test
-
-@features_1290_td
-# jdbc:h2:file:/data/sample
-
-@features_1291_td
-# jdbc:h2:file:C:/data/sample (Windows only)
-
-@features_1292_a
-#In-memory (private)
-
-@features_1293_td
-jdbc:h2:mem:
-
-@features_1294_a
-#In-memory (named)
-
-@features_1295_td
-# jdbc:h2:mem:<databaseName>
-
-@features_1296_td
-# jdbc:h2:mem:test_mem
-
-@features_1297_a
-#Server mode (remote connections)
-
-@features_1298_a
-# using TCP/IP
-
-@features_1299_td
-# jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName>
-
-@features_1300_td
-# jdbc:h2:tcp://localhost/~/test
-
-@features_1301_td
-# jdbc:h2:tcp://dbserv:8084/~/sample
-
-@features_1302_td
-# jdbc:h2:tcp://localhost/mem:test
-
-@features_1303_a
-#Server mode (remote connections)
-
-@features_1304_a
-# using TLS
-
-@features_1305_td
-# jdbc:h2:ssl://<server>[:<port>]/<databaseName>
-
-@features_1306_td
-# jdbc:h2:ssl://localhost:8085/~/sample;
-
-@features_1307_a
-#Using encrypted files
-
-@features_1308_td
-# jdbc:h2:<url>;CIPHER=AES
-
-@features_1309_td
-# jdbc:h2:ssl://localhost/~/test;CIPHER=AES
-
-@features_1310_td
-# jdbc:h2:file:~/secure;CIPHER=AES
-
-@features_1311_a
-#File locking methods
-
-@features_1312_td
-# jdbc:h2:<url>;FILE_LOCK={FILE|SOCKET|NO}
-
-@features_1313_td
-# jdbc:h2:file:~/private;CIPHER=AES;FILE_LOCK=SOCKET
-
-@features_1314_a
-#Only open if it already exists
-
-@features_1315_td
-# jdbc:h2:<url>;IFEXISTS=TRUE
-
-@features_1316_td
-# jdbc:h2:file:~/sample;IFEXISTS=TRUE
-
-@features_1317_a
-#Don't close the database when the VM exits
-
-@features_1318_td
-# jdbc:h2:<url>;DB_CLOSE_ON_EXIT=FALSE
-
-@features_1319_a
-#Execute SQL on connection
-
-@features_1320_td
-# jdbc:h2:<url>;INIT=RUNSCRIPT FROM '~/create.sql'
-
-@features_1321_td
-# jdbc:h2:file:~/sample;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM '~/populate.sql'
-
-@features_1322_a
-#User name and/or password
-
-@features_1323_td
-# jdbc:h2:<url>[;USER=<username>][;PASSWORD=<value>]
-
-@features_1324_td
-# jdbc:h2:file:~/sample;USER=sa;PASSWORD=123
-
-@features_1325_a
-#Debug trace settings
-
-@features_1326_td
-# jdbc:h2:<url>;TRACE_LEVEL_FILE=<level 0..3>
-
-@features_1327_td
-# jdbc:h2:file:~/sample;TRACE_LEVEL_FILE=3
-
-@features_1328_a
-#Ignore unknown settings
-
-@features_1329_td
-# jdbc:h2:<url>;IGNORE_UNKNOWN_SETTINGS=TRUE
-
-@features_1330_a
-#Custom file access mode
-
-@features_1331_td
-# jdbc:h2:<url>;ACCESS_MODE_DATA=rws
-
-@features_1332_a
-#Database in a zip file
-
-@features_1333_td
-# jdbc:h2:zip:<zipFileName>!/<databaseName>
-
-@features_1334_td
-# jdbc:h2:zip:~/db.zip!/test
-
-@features_1335_a
-#Compatibility mode
-
-@features_1336_td
-# jdbc:h2:<url>;MODE=<databaseType>
-
-@features_1337_td
-# jdbc:h2:~/test;MODE=MYSQL
-
-@features_1338_a
-#Auto-reconnect
-
-@features_1339_td
-# jdbc:h2:<url>;AUTO_RECONNECT=TRUE
-
-@features_1340_td
-# jdbc:h2:tcp://localhost/~/test;AUTO_RECONNECT=TRUE
-
-@features_1341_a
-#Automatic mixed mode
-
-@features_1342_td
-# jdbc:h2:<url>;AUTO_SERVER=TRUE
-
-@features_1343_td
-# jdbc:h2:~/test;AUTO_SERVER=TRUE
-
-@features_1344_a
-#Page size
-
-@features_1345_td
-# jdbc:h2:<url>;PAGE_SIZE=512
-
-@features_1346_a
-#Changing other settings
-
-@features_1347_td
-# jdbc:h2:<url>;<setting>=<value>[;<setting>=<value>...]
-
-@features_1348_td
-# jdbc:h2:file:~/sample;TRACE_LEVEL_SYSTEM_OUT=3
-
-@features_1349_h2
-エンベッド (ローカル) データベースに接続
-
-@features_1350_p
-# The database URL for connecting to a local database is jdbc:h2:[file:][<path>]<databaseName>
. The prefix file:
is optional. If no or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depends on the operating system; however, it is recommended to use lowercase letters only. The database name must be at least three characters long (a limitation of File.createTempFile
). The database name must not contain a semicolon. To point to the user home directory, use ~/
, as in: jdbc:h2:~/test
.
-
-@features_1351_h2
-#In-Memory Databases
-
-@features_1352_p
-# For certain use cases (for example: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted.
-
-@features_1353_p
-# In some cases, only one connection to an in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc:h2:mem:
Opening two connections within the same virtual machine means opening two different (private) databases.
-
-@features_1354_p
-# Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example: jdbc:h2:mem:db1
. Accessing the same database using this URL only works within the same virtual machine and class loader environment.
-
-@features_1355_p
-# To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process as the in-memory database was created. The other processes then need to access the database over TCP/IP or TLS, using a database URL such as: jdbc:h2:tcp://localhost/mem:db1
.
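-
-# A sketch of this setup (the database name db1 follows the example above; the class name MemServer is illustrative):
-import java.sql.Connection;
-import java.sql.DriverManager;
-import org.h2.tools.Server;
-
-public class MemServer {
-    public static void main(String[] args) throws Exception {
-        // create the named in-memory database and keep it open
-        Connection conn = DriverManager.getConnection("jdbc:h2:mem:db1;DB_CLOSE_DELAY=-1");
-        // start a TCP server in the same process; other processes can now
-        // connect using jdbc:h2:tcp://localhost/mem:db1
-        Server server = Server.createTcpServer().start();
-        // ...
-        server.stop();
-        conn.close();
-    }
-}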
-
-@features_1356_p
-# By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY=-1
to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc:h2:mem:test;DB_CLOSE_DELAY=-1
.
-
-@features_1357_h2
-#Database Files Encryption
-
-@features_1358_p
-# The database files can be encrypted. The encryption algorithm AES is supported. To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database.
-
-@features_1359_h3
-#Creating a New Database with File Encryption
-
-@features_1360_p
-# By default, a new database is automatically created if it does not exist yet. To create an encrypted database, connect to it as if it already existed.
-
-@features_1361_h3
-#Connecting to an Encrypted Database
-
-@features_1362_p
-# The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example to connect to a password-encrypted database:
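-
-# Presumably along these lines (the passwords filepwd and userpwd are illustrative; note the single space between them):
-import java.sql.Connection;
-import java.sql.DriverManager;
-
-public class EncryptedDb {
-    public static void main(String[] args) throws Exception {
-        // password field: "<filePassword> <userPassword>"
-        Connection conn = DriverManager.getConnection(
-                "jdbc:h2:~/test;CIPHER=AES", "sa", "filepwd userpwd");
-        conn.close();
-    }
-}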
-
-@features_1363_h3
-#Encrypting or Decrypting a Database
-
-@features_1364_p
-# To encrypt an existing database, use the ChangeFileEncryption
tool. This tool can also decrypt an encrypted database, or change the file encryption key. The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test
in the user home directory with the file password filepwd
and the encryption algorithm AES:
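-
-# The command line referenced here is presumably:
-java -cp h2*.jar org.h2.tools.ChangeFileEncryption -dir ~ -db test -cipher AES -encrypt filepwd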
-
-@features_1365_h2
-データベースファイルロック
-
-@features_1366_p
-# Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database terminates, this lock file is deleted.
-
-@features_1367_p
-# The following file locking methods are implemented:
-
-@features_1368_li
-#The default method is FILE
and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second.
-
-@features_1369_li
-#The second method is SOCKET
and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer.
-
-@features_1370_li
-#The third method is FS
. This will use native file locking using FileChannel.lock
.
-
-@features_1371_li
-#It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO
forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption.
-
-@features_1372_p
-# To open the database with a different file locking method, use the parameter FILE_LOCK
. The following code opens the database with the 'socket' locking method:
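-
-# The URL presumably looks like this (the database name 'test' is illustrative):
-jdbc:h2:~/test;FILE_LOCK=SOCKET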
-
-@features_1373_p
-# For more information about the algorithms, see Advanced / File Locking Protocols.
-
-@features_1374_h2
-すでに存在する場合にのみデータベースを開く
-
-@features_1375_p
-# By default, when an application calls DriverManager.getConnection(url, ...)
and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow opening existing databases. To do this, add ;IFEXISTS=TRUE
to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. The complete URL may look like this:
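-
-# For example (the path /data/sample is illustrative):
-jdbc:h2:/data/sample;IFEXISTS=TRUE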
-
-@features_1376_h2
-#Closing a Database
-
-@features_1377_h3
-データベースの遅延終了
-
-@features_1378_p
-# Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>
. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed:
-
-@features_1379_p
-# The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL: jdbc:h2:~/test;DB_CLOSE_DELAY=10
.
-
-@features_1380_h3
-#Don't Close a Database when the VM Exits
-
-@features_1381_p
-# By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to store the shutdown process in the database for example). For those cases, the automatic closing of the database can be disabled in the database URL. The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). The database URL to disable database closing on exit is:
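-
-# Presumably:
-jdbc:h2:~/test;DB_CLOSE_ON_EXIT=FALSE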
-
-@features_1382_h2
-#Execute SQL on Connection
-
-@features_1383_p
-# Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below.
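-
-# Presumably along these lines (the file names are illustrative; note the escaped semicolon between the two commands):
-String url = "jdbc:h2:mem:test;INIT=RUNSCRIPT FROM '~/create.sql'\\;RUNSCRIPT FROM '~/populate.sql'";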
-
-@features_1384_p
-# Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required:
-
-@features_1385_p
-# Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead.
-
-@features_1386_h2
-未知の設定を無視
-
-@features_1387_p
-# Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS
and IGNOREDRIVERPRIVILEGES
are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignore such parameters by adding ;IGNORE_UNKNOWN_SETTINGS=TRUE
to the database URL.
-
-@features_1388_h2
-接続が開始された時に他の設定を変更する
-
-@features_1389_p
-# In addition to the settings already described, other database settings can be passed in the database URL. Adding ;setting=value
at the end of a database URL is the same as executing the statement SET setting value
just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc.
-
-@features_1390_h2
-カスタムファイル アクセスモード
-
-@features_1391_p
-# Usually, the database opens the database file with the access mode rw
, meaning read-write (except for read only databases, where the mode r
is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA=r
. Also supported are rws
and rwd
. This setting must be specified in the database URL:
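-
-# For example:
-jdbc:h2:~/test;ACCESS_MODE_DATA=rws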
-
-@features_1392_p
-# For more information see Durability Problems. On many operating systems the access mode rws
does not guarantee that the data is written to the disk.
-
-@features_1393_h2
-複数の接続
-
-@features_1394_h3
-同時に複数のデータベースを開く
-
-@features_1395_p
-# An application can open multiple databases at the same time, including multiple connections to the same database. The number of open databases is only limited by the memory available.
-
-@features_1396_h3
-同じデータベースへの複数の接続: クライアント/サーバー
-
-@features_1397_p
-# If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security).
-
-@features_1398_h3
-マルチスレッドサポート
-
-@features_1399_p
-# This database is multithreading-safe. That means, if an application is multi-threaded, it does not need to worry about synchronizing access to the database. Internally, most requests to the same database are synchronized. That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait.
-
-@features_1400_p
-# An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this.
-
-@features_1401_h3
-#Locking, Lock-Timeout, Deadlocks
-
-@features_1402_p
-# Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement.
-
-@features_1403_p
-# If a connection wants to read from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown.
-
-@features_1404_p
-# Usually, SELECT
statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE
. The statements COMMIT
and ROLLBACK
release all open locks. The commands SAVEPOINT
and ROLLBACK TO SAVEPOINT
don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. The following statements generate locks:
-
-@features_1405_th
-#Type of Lock
-
-@features_1406_th
-#SQL Statement
-
-@features_1407_td
-Read
-
-@features_1408_td
-#SELECT * FROM TEST;
-
-@features_1409_td
-# CALL SELECT MAX(ID) FROM TEST;
-
-@features_1410_td
-# SCRIPT;
-
-@features_1411_td
-Write
-
-@features_1412_td
-#SELECT * FROM TEST WHERE 1=0 FOR UPDATE;
-
-@features_1413_td
-Write
-
-@features_1414_td
-#INSERT INTO TEST VALUES(1, 'Hello');
-
-@features_1415_td
-# INSERT INTO TEST SELECT * FROM TEST;
-
-@features_1416_td
-# UPDATE TEST SET NAME='Hi';
-
-@features_1417_td
-# DELETE FROM TEST;
-
-@features_1418_td
-Write
-
-@features_1419_td
-#ALTER TABLE TEST ...;
-
-@features_1420_td
-# CREATE INDEX ... ON TEST ...;
-
-@features_1421_td
-# DROP INDEX ...;
-
-@features_1422_p
-# The number of milliseconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>
. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>
. The default lock timeout is persistent.
-
-@features_1423_h3
-#Avoiding Deadlocks
-
-@features_1424_p
-# To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved by explicitly locking tables using SELECT ... FOR UPDATE
.
-
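-# For example, if every transaction that needs both tables locks them in the same (alphabetical) order, the transactions queue up instead of deadlocking (a sketch; table names illustrative, stat is an open java.sql.Statement):
-
-    stat.execute("SELECT * FROM ACCOUNTS FOR UPDATE");
-    stat.execute("SELECT * FROM BALANCES FOR UPDATE");
-    // ... update both tables, then COMMIT to release the locks.
-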
-@features_1425_h2
-#Database File Layout
-
-@features_1426_p
-# The following files are created for persistent databases:
-
-@features_1427_th
-#File Name
-
-@features_1428_th
-#Description
-
-@features_1429_th
-#Number of Files
-
-@features_1430_td
-# test.h2.db
-
-@features_1431_td
-# Database file.
-
-@features_1432_td
-# Contains the transaction log, indexes, and data for all tables.
-
-@features_1433_td
-# Format: <database>.h2.db
-
-@features_1434_td
-# 1 per database
-
-@features_1435_td
-# test.lock.db
-
-@features_1436_td
-# Database lock file.
-
-@features_1437_td
-# Automatically (re-)created while the database is in use.
-
-@features_1438_td
-# Format: <database>.lock.db
-
-@features_1439_td
-# 1 per database (only if in use)
-
-@features_1440_td
-# test.trace.db
-
-@features_1441_td
-# Trace file (if the trace option is enabled).
-
-@features_1442_td
-# Contains trace information.
-
-@features_1443_td
-# Format: <database>.trace.db
-
-@features_1444_td
-# Renamed to <database>.trace.db.old
if it is too big.
-
-@features_1445_td
-# 0 or 1 per database
-
-@features_1446_td
-# test.lobs.db/*
-
-@features_1447_td
-# Directory containing one file for each
-
-@features_1448_td
-# BLOB or CLOB value larger than a certain size.
-
-@features_1449_td
-# Format: <id>.t<tableId>.lob.db
-
-@features_1450_td
-# 1 per large object
-
-@features_1451_td
-# test.123.temp.db
-
-@features_1452_td
-# Temporary file.
-
-@features_1453_td
-# Contains a temporary blob or a large result set.
-
-@features_1454_td
-# Format: <database>.<id>.temp.db
-
-@features_1455_td
-# 1 per object
-
-@features_1456_h3
-#Moving and Renaming Database Files
-
-@features_1457_p
-# Database name and location are not stored inside the database files.
-
-@features_1458_p
-# While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged).
-
-@features_1459_p
-# As there is no platform specific data in the files, they can be moved to other operating systems without problems.
-
-@features_1460_h3
-#Backup
-
-@features_1461_p
-# When the database is closed, it is possible to back up the database files.
-
-@features_1462_p
-# To back up data while the database is running, the SQL commands SCRIPT
and BACKUP
can be used.
-
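-# A sketch of both statements (file names illustrative; stat is an open java.sql.Statement):
-
-    stat.execute("SCRIPT TO 'backup.sql'"); // schema and data as SQL statements
-    stat.execute("BACKUP TO 'backup.zip'"); // zip file containing the database file
-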
-@features_1463_h2
-#Logging and Recovery
-
-@features_1464_p
-# Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically.
-
-@features_1465_h2
-#Compatibility
-
-@features_1466_p
-# All database engines behave a little bit differently. Where possible, H2 supports the ANSI SQL standard and tries to be compatible with other databases. There are still a few differences, however:
-
-@features_1467_p
-# In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE=TRUE
to the database URL (example: jdbc:h2:~/test;IGNORECASE=TRUE
).
-
-@features_1468_h3
-#Compatibility Modes
-
-@features_1469_p
-# For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases is implemented in this way. Here is the list of currently supported modes and the differences to the regular mode:
-
-@features_1470_h3
-#DB2 Compatibility Mode
-
-@features_1471_p
-# To use the IBM DB2 mode, use the database URL jdbc:h2:~/test;MODE=DB2
or the SQL statement SET MODE DB2
.
-
-@features_1472_li
-#For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-
-@features_1473_li
-#Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY]
as an alternative for LIMIT .. OFFSET
.
-
-@features_1474_li
-#Concatenating NULL
with another value results in the other value.
-
-@features_1475_li
-#Support the pseudo-table SYSIBM.SYSDUMMY1.
-
-@features_1476_h3
-#Derby Compatibility Mode
-
-@features_1477_p
-# To use the Apache Derby mode, use the database URL jdbc:h2:~/test;MODE=Derby
or the SQL statement SET MODE Derby
.
-
-@features_1478_li
-#For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-
-@features_1479_li
-#For unique indexes, NULL
is distinct. That means only one row with NULL
in one of the columns is allowed.
-
-@features_1480_li
-#Concatenating NULL
with another value results in the other value.
-
-@features_1481_li
-#Support the pseudo-table SYSIBM.SYSDUMMY1.
-
-@features_1482_h3
-#HSQLDB Compatibility Mode
-
-@features_1483_p
-# To use the HSQLDB mode, use the database URL jdbc:h2:~/test;MODE=HSQLDB
or the SQL statement SET MODE HSQLDB
.
-
-@features_1484_li
-#For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-
-@features_1485_li
-#When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required.
-
-@features_1486_li
-#For unique indexes, NULL
is distinct. That means only one row with NULL
in one of the columns is allowed.
-
-@features_1487_li
-#Text can be concatenated using '+'.
-
-@features_1488_h3
-#MS SQL Server Compatibility Mode
-
-@features_1489_p
-# To use the MS SQL Server mode, use the database URL jdbc:h2:~/test;MODE=MSSQLServer
or the SQL statement SET MODE MSSQLServer
.
-
-@features_1490_li
-#For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-
-@features_1491_li
-#Identifiers may be quoted using square brackets as in [Test]
.
-
-@features_1492_li
-#For unique indexes, NULL
is distinct. That means only one row with NULL
in one of the columns is allowed.
-
-@features_1493_li
-#Concatenating NULL
with another value results in the other value.
-
-@features_1494_li
-#Text can be concatenated using '+'.
-
-@features_1495_h3
-#MySQL Compatibility Mode
-
-@features_1496_p
-# To use the MySQL mode, use the database URL jdbc:h2:~/test;MODE=MySQL
or the SQL statement SET MODE MySQL
.
-
-@features_1497_li
-#When inserting data, if a column is defined to be NOT NULL
and NULL
is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown.
-
-@features_1498_li
-#Creating indexes in the CREATE TABLE
statement is allowed using INDEX(..)
or KEY(..)
. Example: create table test(id int primary key, name varchar(255), key idx_name(name));
-
-@features_1499_li
-#Meta data calls return identifiers in lower case.
-
-@features_1500_li
-#When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded.
-
-@features_1501_li
-#Concatenating NULL
with another value results in the other value.
-
-@features_1502_p
-# Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE
. This affects comparison using =, LIKE, REGEXP
.
-
-@features_1503_h3
-#Oracle Compatibility Mode
-
-@features_1504_p
-# To use the Oracle mode, use the database URL jdbc:h2:~/test;MODE=Oracle
or the SQL statement SET MODE Oracle
.
-
-@features_1505_li
-#For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-
-@features_1506_li
-#When using unique indexes, multiple rows with NULL
in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise.
-
-@features_1507_li
-#Concatenating NULL
with another value results in the other value.
-
-@features_1508_li
-#Empty strings are treated like NULL
values.
-
-@features_1509_h3
-#PostgreSQL Compatibility Mode
-
-@features_1510_p
-# To use the PostgreSQL mode, use the database URL jdbc:h2:~/test;MODE=PostgreSQL
or the SQL statement SET MODE PostgreSQL
.
-
-@features_1511_li
-#For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-
-@features_1512_li
-#When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded.
-
-@features_1513_li
-#The system columns CTID
and OID
are supported.
-
-@features_1514_li
-#LOG(x) is base 10 in this mode.
-
-@features_1515_h2
-#Auto-Reconnect
-
-@features_1516_p
-# The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT=TRUE
to the database URL.
-
-@features_1517_p
-# Re-connecting will open a new session. After an automatic re-connect, variables and local temporary table definitions (excluding data) are re-created. The system table INFORMATION_SCHEMA.SESSION_STATE
contains all the client-side state that is re-created.
-
-@features_1518_p
-# If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1
or SET EXCLUSIVE 2
), then this connection will try to re-connect until the exclusive mode ends.
-
-@features_1519_h2
-#Automatic Mixed Mode
-
-@features_1520_p
-# Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER=TRUE
to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL:
-
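-# A sketch of such a URL:
-
-    String url = "jdbc:h2:/data/test;AUTO_SERVER=TRUE";
-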
-@features_1521_p
-# Use the same URL for all connections to this database. Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db
, which is why in-memory databases can't be supported.
-
-@features_1522_p
-# The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, the client reads the .lock.db
file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically).
-
-@features_1523_p
-# All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc:h2:tcp://
or ssl://
) are not supported. This mode is not supported for in-memory databases.
-
-@features_1524_p
-# Here is an example of how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process).
-
-@features_1525_p
-# When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT=9090
.
-
-@features_1526_h2
-#Page Size
-
-@features_1527_p
-# The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE=
when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created.
-
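-# For example (value illustrative):
-
-    String url = "jdbc:h2:~/test;PAGE_SIZE=4096";
-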
-@features_1528_h2
-#Using the Trace Options
-
-@features_1529_p
-# To find problems in an application, it is sometimes helpful to see which database operations were executed. This database offers the following trace features:
-
-@features_1530_li
-#Trace to System.out
and/or to a file
-
-@features_1531_li
-#Support for trace levels OFF, ERROR, INFO, DEBUG
-
-@features_1532_li
-#The maximum size of the trace file can be set
-
-@features_1533_li
-#It is possible to generate Java source code from the trace file
-
-@features_1534_li
-#Trace can be enabled at runtime by manually creating a file
-
-@features_1535_h3
-#Trace Options
-
-@features_1536_p
-# The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out
(TRACE_LEVEL_SYSTEM_OUT
) tracing, and one for file tracing (TRACE_LEVEL_FILE
). The trace levels are 0 for OFF
, 1 for ERROR
(the default), 2 for INFO
, and 3 for DEBUG
. A database URL with both levels set to DEBUG
is:
-
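-# For example:
-
-    String url = "jdbc:h2:~/test;TRACE_LEVEL_FILE=3;TRACE_LEVEL_SYSTEM_OUT=3";
-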
-@features_1537_p
-# The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level
(for System.out
tracing) or SET TRACE_LEVEL_FILE level
(for file tracing). Example:
-
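-# A sketch, assuming an open java.sql.Statement named stat:
-
-    stat.execute("SET TRACE_LEVEL_SYSTEM_OUT 3");
-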
-@features_1538_h3
-#Setting the Maximum Size of the Trace File
-
-@features_1539_p
-# When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB, if the trace file exceeds this limit, it is renamed to .old
and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb
. Example:
-
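-# For example, to limit the trace file to about 1 MB:
-
-    stat.execute("SET TRACE_MAX_FILE_SIZE 1");
-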
-@features_1540_h3
-#Java Code Generation
-
-@features_1541_p
-# When setting the trace level to INFO
or DEBUG
, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this:
-
-@features_1542_p
-# To filter the Java source code, use the ConvertTraceFile
tool as follows:
-
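-# A sketch of calling the tool from Java (file and class names illustrative); the same options work on the command line:
-
-    org.h2.tools.ConvertTraceFile.main("-traceFile", "test.trace.db",
-            "-javaClass", "Test", "-script", "test.sql");
-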
-@features_1543_p
-# The generated file Test.java
will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split into multiple methods. The password is not listed in the trace file and therefore not included in the source code.
-
-@features_1544_h2
-#Using Other Logging APIs
-
-@features_1545_p
-# By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out
. In most cases this is sufficient; however, sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database supports SLF4J.
-
-@features_1546_a
-#SLF4J
-
-@features_1547_p
-# is a simple facade for various logging APIs that allows plugging in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log.
-
-@features_1548_p
-# To enable SLF4J, set the file trace level to 4 in the database URL:
-
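-# For example:
-
-    String url = "jdbc:h2:~/test;TRACE_LEVEL_FILE=4";
-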
-@features_1549_p
-# Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4
when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database
. If it does not work, check the file <database>.trace.db
for error messages.
-
-@features_1550_h2
-#Read Only Databases
-
-@features_1551_p
-# If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, or to add or modify data, in such a database. Only SELECT
and CALL
statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether a database is read-only: by calling Connection.isReadOnly()
or by executing the SQL statement CALL READONLY()
.
-
-@features_1552_p
-# Using the Custom Access Mode r
the database can also be opened in read-only mode, even if the database file is not read only.
-
-@features_1553_h2
-#Read Only Databases in Zip or Jar File
-
-@features_1554_p
-# To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG
. If you are using a database named test
, an easy way to create a zip file is using the Backup
tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO
can not be used.
-
-@features_1555_p
-# When the zip file is created, you can open the database in the zip file using the following database URL:
-
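-# For example (zip file and database names illustrative):
-
-    String url = "jdbc:h2:zip:~/data.zip!/test";
-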
-@features_1556_p
-# Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read in memory; therefore large databases are supported as well. The same indexes are used as when using a regular database.
-
-@features_1557_p
-# If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip.
-
-@features_1558_h3
-#Opening a Corrupted Database
-
-@features_1559_p
-# If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. The exceptions are logged, but opening the database will continue.
-
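-# A sketch of such a URL, assuming the listener class of the ShowProgress sample:
-
-    String url = "jdbc:h2:~/test;DATABASE_EVENT_LISTENER="
-            + "'org.h2.samples.ShowProgress'";
-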
-@features_1560_h2
-#Computed Columns / Function Based Index
-
-@features_1561_p
-# A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time:
-
-@features_1562_p
-# Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column:
-
-@features_1563_p
-# When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table:
-
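-# A sketch covering all three steps (table and column names illustrative; stat is an open java.sql.Statement):
-
-    // Computed column emulating a function index on UPPER(NAME).
-    stat.execute("CREATE TABLE ADDRESS(ID INT PRIMARY KEY, "
-            + "NAME VARCHAR, UPPER_NAME VARCHAR AS UPPER(NAME))");
-    stat.execute("CREATE INDEX IDX_UPPER_NAME ON ADDRESS(UPPER_NAME)");
-    // The computed column must not be assigned a value on insert ...
-    stat.execute("INSERT INTO ADDRESS(ID, NAME) VALUES(1, 'Miller')");
-    // ... but it can be used when querying:
-    ResultSet rs = stat.executeQuery(
-            "SELECT * FROM ADDRESS WHERE UPPER_NAME = 'MILLER'");
-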
-@features_1564_h2
-#Multi-Dimensional Indexes
-
-@features_1565_p
-# A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve.
-
-@features_1566_p
-# Currently, Z-order (also called N-order or Morton-order) is used; Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column).
-
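-# For two dimensions, bit-interleaving can be sketched as follows (a hypothetical helper, not the API of the tool itself):
-
-    // Interleave the lower 16 bits of x and y into one scalar value on a
-    // Z-order (Morton) curve: bit i of x maps to bit 2*i, bit i of y to 2*i+1.
-    static long interleave(int x, int y) {
-        long z = 0;
-        for (int i = 0; i < 16; i++) {
-            z |= (long) ((x >> i) & 1) << (2 * i);
-            z |= (long) ((y >> i) & 1) << (2 * i + 1);
-        }
-        return z;
-    }
-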
-@features_1567_p
-# The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and the number of dimensions, the improvement is usually more than a factor of 5. The tool generates a SQL query from a specified multi-dimensional range. The method used is not database dependent, and the tool can easily be ported to other databases. For an example of how to use the tool, please have a look at the sample code provided in TestMultiDimension.java
.
-
-@features_1568_h2
-#User-Defined Functions and Stored Procedures
-
-@features_1569_p
-# In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema.
-
-@features_1570_h3
-#Referencing a Compiled Method
-
-@features_1571_p
-# When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class:
-
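-# A sketch along the lines of the Function sample referenced below:
-
-    import java.math.BigInteger;
-
-    public class Function {
-        public static boolean isPrime(int value) {
-            return new BigInteger(String.valueOf(value)).isProbablePrime(100);
-        }
-    }
-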
-@features_1572_p
-# The Java function must be registered in the database by calling CREATE ALIAS ... FOR
:
-
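-# For example, for the method above (package name as in the sample):
-
-    stat.execute("CREATE ALIAS IS_PRIME FOR \"org.h2.samples.Function.isPrime\"");
-    ResultSet rs = stat.executeQuery("SELECT IS_PRIME(17)");
-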
-@features_1573_p
-# For a complete sample application, see src/test/org/h2/samples/Function.java
.
-
-@features_1574_h3
-#Declaring Functions as Source Code
-
-@features_1575_p
-# When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main
) if the tools.jar
is in the classpath. If not, javac
is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar quoted text to avoid escaping problems, however single quotes can be used as well. Example:
-
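-# A sketch of such a dollar-quoted alias (it matches the nextPrime example referred to in the next paragraph):
-
-    stat.execute("CREATE ALIAS NEXT_PRIME AS $$ "
-            + "String nextPrime(String value) { "
-            + "  return new BigInteger(value).nextProbablePrime().toString(); "
-            + "} $$");
-    ResultSet rs = stat.executeQuery("SELECT NEXT_PRIME('10')");
-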
-@features_1576_p
-# By default, the three packages java.util, java.math, java.sql
are imported. The method name (nextPrime
in the example above) is ignored. Method overloading is not supported when declaring functions as source code, that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE
:
-
-@features_1577_p
-# The following template is used to create a complete Java class:
-
-@features_1578_h3
-#Method Overloading
-
-@features_1579_p
-# Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code.
-
-@features_1580_h3
-#Data Type Mapping Functions
-
-@features_1581_p
-# Functions that accept non-nullable parameters such as int
will not be called if one of those parameters is NULL
. Instead, the result of the function is NULL
. If the function should be called if a parameter is NULL
, you need to use java.lang.Integer
instead.
-
-@features_1582_p
-# SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases: java.lang.Object
is mapped to OTHER
(a serialized object). Therefore, java.lang.Object
can not be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]
: arrays of any class are mapped to ARRAY
. Objects of type org.h2.value.Value
(the internal value class) are passed through without conversion.
-
-@features_1583_h3
-#Functions That Require a Connection
-
-@features_1584_p
-# If the first parameter of a Java function is a java.sql.Connection
, then the connection to the database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (and can not be) specified.
-
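-# A hedged sketch (function, alias, and table names illustrative):
-
-    // Registered as: CREATE ALIAS ROW_COUNT FOR "com.acme.Functions.getRowCount"
-    // Called from SQL as: ROW_COUNT('TEST'), without the connection parameter.
-    public static int getRowCount(Connection conn, String table)
-            throws SQLException {
-        ResultSet rs = conn.createStatement().executeQuery(
-                "SELECT COUNT(*) FROM " + table);
-        rs.next();
-        return rs.getInt(1);
-    }
-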
-@features_1585_h3
-#Functions Throwing an Exception
-
-@features_1586_p
-# If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLExceptions are re-thrown directly to the calling application; all other exceptions are first converted to a SQLException.
-
-@features_1587_h3
-#Functions Returning a Result Set
-
-@features_1588_p
-# Functions may return a result set. Such a function can be called with the CALL
statement:
-
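-# A sketch, assuming an alias MY_QUERY was registered for such a method:
-
-    ResultSet rs = stat.executeQuery("CALL MY_QUERY()");
-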
-@features_1589_h3
-#Using SimpleResultSet
-
-@features_1590_p
-# A function can create a result set using the SimpleResultSet
tool:
-
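-# A sketch using the tool's addColumn and addRow methods:
-
-    import java.sql.ResultSet;
-    import java.sql.Types;
-    import org.h2.tools.SimpleResultSet;
-
-    public class Function {
-        public static ResultSet simpleResultSet() {
-            SimpleResultSet rs = new SimpleResultSet();
-            rs.addColumn("ID", Types.INTEGER, 10, 0);
-            rs.addColumn("NAME", Types.VARCHAR, 255, 0);
-            rs.addRow(0, "Hello");
-            rs.addRow(1, "World");
-            return rs;
-        }
-    }
-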
-@features_1591_h3
-#Using a Function as a Table
-
-@features_1592_p
-# A function that returns a result set can be used like a table. However, in this case the function is called at least twice: first while parsing the statement to collect the column names (with parameters set to null
where not known at compile time), and then while executing the statement to get the data (maybe multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc:columnlist:connection
. Otherwise, the URL of the connection is jdbc:default:connection
.
-
-@features_1593_h2
-#Pluggable or User-Defined Tables
-
-@features_1594_p
-# For situations where you need to expose other data-sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines
.
-
-@features_1595_p
-# In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine
interface, e.g. something like this:
-
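-# A skeleton only (class names illustrative; the actual org.h2.table.Table implementation is omitted):
-
-    public class MyTableEngine implements org.h2.api.TableEngine {
-        public org.h2.table.Table createTable(
-                org.h2.command.ddl.CreateTableData data) {
-            // inspect data.tableName, data.columns, data.tableEngineParams ...
-            return new MyTable(data); // your Table subclass
-        }
-    }
-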
-@features_1596_p
-# and then create the table from SQL like this:
-
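-# For example (engine class name illustrative):
-
-    stat.execute("CREATE TABLE TEST(ID INT, NAME VARCHAR) "
-            + "ENGINE \"com.acme.MyTableEngine\"");
-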
-@features_1597_p
-# It is also possible to pass in parameters to the table engine, like so:
-
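-# A sketch per the CREATE TABLE grammar (parameter values illustrative):
-
-    stat.execute("CREATE TABLE TEST2(ID INT) "
-            + "ENGINE \"com.acme.MyTableEngine\" WITH \"param1\", \"param2\"");
-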
-@features_1598_p
-# In that case, the parameters are passed down in the tableEngineParams field of the CreateTableData object.
-
-@features_1599_h2
-#Triggers
-
-@features_1600_p
-# This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java
. A Java trigger must implement the interface org.h2.api.Trigger
. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server).
-
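-# A minimal sketch of the interface (signatures as in org.h2.api.Trigger):
-
-    import java.sql.Connection;
-    import java.sql.SQLException;
-
-    public class MyTrigger implements org.h2.api.Trigger {
-        public void init(Connection conn, String schemaName, String triggerName,
-                String tableName, boolean before, int type) {
-            // remember the table name etc. if needed
-        }
-        public void fire(Connection conn, Object[] oldRow, Object[] newRow)
-                throws SQLException {
-            // query or update other tables here; throw to veto the change
-        }
-        public void close() {}
-        public void remove() {}
-    }
-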
-@features_1601_p
-# The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database:
-
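-# For example (trigger, table, and class names illustrative):
-
-    stat.execute("CREATE TRIGGER TRIG_INS AFTER INSERT ON TEST "
-            + "FOR EACH ROW CALL \"com.acme.MyTrigger\"");
-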
-@features_1602_p
-# The trigger can be used to veto a change by throwing a SQLException
.
-
-@features_1603_p
-# As an alternative to implementing the Trigger
interface, an application can extend the abstract class org.h2.tools.TriggerAdapter
. This allows using the ResultSet
interface within trigger implementations. In this case, only the fire
method needs to be implemented:
-
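-# A sketch (oldRow is null for inserts, newRow is null for deletes; column name illustrative):
-
-    import java.sql.Connection;
-    import java.sql.ResultSet;
-    import java.sql.SQLException;
-
-    public class MyTrigger extends org.h2.tools.TriggerAdapter {
-        public void fire(Connection conn, ResultSet oldRow, ResultSet newRow)
-                throws SQLException {
-            if (newRow != null) {
-                System.out.println("row is now: " + newRow.getString("NAME"));
-            }
-        }
-    }
-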
-@features_1604_h2
-#Compacting a Database
-
-@features_1605_p
-# Empty space in the database file is re-used automatically. When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However, re-creating the database may further reduce the database size, because this will re-build the indexes. Here is a sample function to do this:
-
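-# A hedged sketch modeled on the org.h2.samples.Compact sample (paths illustrative):
-
-    // import org.h2.tools.DeleteDbFiles; import org.h2.tools.Script;
-    // import org.h2.tools.RunScript;
-    public static void compact(String dir, String dbName,
-            String user, String password) throws Exception {
-        String url = "jdbc:h2:" + dir + "/" + dbName;
-        String file = "data/test.sql";
-        Script.process(url, user, password, file, "", "");
-        DeleteDbFiles.execute(dir, dbName, true);
-        RunScript.execute(url, user, password, file, null, false);
-    }
-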
-@features_1606_p
-# See also the sample application org.h2.samples.Compact
. The commands SCRIPT / RUNSCRIPT
can be used as well to create a backup of a database and re-build the database from the script.
-
-@features_1607_h2
-#Cache Settings
-
-@features_1608_p
-# The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE
. This setting can be set in the database connection URL (jdbc:h2:~/test;CACHE_SIZE=131072
), or it can be changed at runtime using SET CACHE_SIZE size
. The size of the cache, as represented by CACHE_SIZE
is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available for the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; however the setting stored in the database is kept. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE
overrides this value (even if larger than the physical memory). To get the currently used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME = 'info.CACHE_MAX_SIZE'
-
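-# A sketch of the three ways (values illustrative):
-
-    String url = "jdbc:h2:~/test;CACHE_SIZE=131072"; // 128 MB, set in the URL
-    stat.execute("SET CACHE_SIZE 65536");            // changed at runtime
-    ResultSet rs = stat.executeQuery(
-            "SELECT * FROM INFORMATION_SCHEMA.SETTINGS "
-            + "WHERE NAME = 'info.CACHE_MAX_SIZE'");  // currently used maximum
-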
-@features_1609_p
-# An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE=TQ
to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first.
-
-@features_1610_p
-# Also included is an experimental second level soft reference cache. Rows in this cache are only garbage collected on low memory. By default the second level cache is disabled. To enable it, use the prefix SOFT_
. Example: jdbc:h2:~/test;CACHE_TYPE=SOFT_LRU
. The cache might not actually improve performance. If you plan to use it, please run your own test cases first.
-
-@features_1611_p
-# To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS
. The number of pages read / written is listed.
-
-@fragments_1000_div
-# ▲
-
-@fragments_1001_label
-#Search:
-
-@fragments_1002_label
-#Highlight keyword(s)
-
-@fragments_1003_a
-#Home
-
-@fragments_1004_a
-#Download
-
-@fragments_1005_a
-#Cheat Sheet
-
-@fragments_1006_b
-#Documentation
-
-@fragments_1007_a
-#Quickstart
-
-@fragments_1008_a
-#Installation
-
-@fragments_1009_a
-#Tutorial
-
-@fragments_1010_a
-#Features
-
-@fragments_1011_a
-#Performance
-
-@fragments_1012_a
-#Advanced
-
-@fragments_1013_b
-#Reference
-
-@fragments_1014_a
-#SQL Grammar
-
-@fragments_1015_a
-#Functions
-
-@fragments_1016_a
-#Data Types
-
-@fragments_1017_a
-#Javadoc
-
-@fragments_1018_a
-#PDF (1 MB)
-
-@fragments_1019_b
-#Support
-
-@fragments_1020_a
-#FAQ
-
-@fragments_1021_a
-#Error Analyzer
-
-@fragments_1022_a
-#Google Group (English)
-
-@fragments_1023_a
-#Google Group (Japanese)
-
-@fragments_1024_a
-#Google Group (Chinese)
-
-@fragments_1025_b
-#Appendix
-
-@fragments_1026_a
-#History & Roadmap
-
-@fragments_1027_a
-#License
-
-@fragments_1028_a
-#Build
-
-@fragments_1029_a
-#Links
-
-@fragments_1030_a
-#JaQu
-
-@fragments_1031_a
-#MVStore
-
-@fragments_1032_a
-#Architecture
-
-@fragments_1033_td
-
-
-@frame_1000_h1
-#H2 Database Engine
-
-@frame_1001_p
-# Welcome to H2, the free SQL database. The main features of H2 are:
-
-@frame_1002_li
-#It is free to use for everybody, source code is included
-
-@frame_1003_li
-#Written in Java, but also available as native executable
-
-@frame_1004_li
-#JDBC and (partial) ODBC API
-
-@frame_1005_li
-#Embedded and client/server modes
-
-@frame_1006_li
-#Clustering is supported
-
-@frame_1007_li
-#A web client is included
-
-@frame_1008_h2
-#No Javascript
-
-@frame_1009_p
-# If you are not automatically redirected to the main page, then Javascript is currently disabled or your browser does not support Javascript. Some features (for example the integrated search) require Javascript.
-
-@frame_1010_p
-# Please enable Javascript, or go ahead without it: H2 Database Engine
-
-@history_1000_h1
-#History and Roadmap
-
-@history_1001_a
-# Change Log
-
-@history_1002_a
-# Roadmap
-
-@history_1003_a
-# History of this Database Engine
-
-@history_1004_a
-# Why Java
-
-@history_1005_a
-# Supporters
-
-@history_1006_h2
-#Change Log
-
-@history_1007_p
-# The up-to-date change log is available at http://www.h2database.com/html/changelog.html
-
-@history_1008_h2
-#Roadmap
-
-@history_1009_p
-# The current roadmap is available at http://www.h2database.com/html/roadmap.html
-
-@history_1010_h2
-#History of this Database Engine
-
-@history_1011_p
-# The development of H2 was started in May 2004, but it was first published on December 14th 2005. The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc. where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continue working on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2; however, H2 does not share code with Hypersonic SQL or HSQLDB. H2 is built from scratch.
-
-@history_1012_h2
-#Why Java
-
-@history_1013_p
-# The main reasons to use a Java database are:
-
-@history_1014_li
-#Very simple to integrate in Java applications
-
-@history_1015_li
-#Support for many different platforms
-
-@history_1016_li
-#More secure than native applications (no buffer overflows)
-
-@history_1017_li
-#User defined functions (or triggers) run very fast
-
-@history_1018_li
-#Unicode support
-
-@history_1019_p
-# Some think Java is too slow for low level operations, but this is no longer true. Garbage collection for example is now faster than manual memory management.
-
-@history_1020_p
-# Developing Java code is faster than developing C or C++ code. When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. Features such as Unicode and network libraries are already built-in. In Java, writing secure code is easier because buffer overflows can not occur. Features such as reflection can be used for randomized testing.
-
-@history_1021_p
-# Java is future proof: a lot of companies support Java. Java is now open source.
-
-@history_1022_p
-# To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features.
-
-@history_1023_h2
-#Supporters
-
-@history_1024_p
-# Many thanks for those who reported bugs, gave valuable feedback, spread the word, and translated this project. Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page).
-
-@history_1025_a
-#xso; xBase Software Ontwikkeling, Netherlands
-
-@history_1026_a
-#Cognitect, USA
-
-@history_1027_a
-#Code 42 Software, Inc., Minneapolis
-
-@history_1028_li
-#Martin Wildam, Austria
-
-@history_1029_a
-#Code Lutin, France
-
-@history_1030_a
-#NetSuxxess GmbH, Germany
-
-@history_1031_a
-#Poker Copilot, Steve McLeod, Germany
-
-@history_1032_a
-#SkyCash, Poland
-
-@history_1033_a
-#Lumber-mill, Inc., Japan
-
-@history_1034_a
-#StockMarketEye, USA
-
-@history_1035_a
-#Eckenfelder GmbH & Co.KG, Germany
-
-@history_1036_li
-#Anthony Goubard, Netherlands
-
-@history_1037_li
-#Richard Hickey, USA
-
-@history_1038_li
-#Alessio Jacopo D'Adamo, Italy
-
-@history_1039_li
-#Ashwin Jayaprakash, USA
-
-@history_1040_li
-#Donald Bleyl, USA
-
-@history_1041_li
-#Frank Berger, Germany
-
-@history_1042_li
-#Florent Ramiere, France
-
-@history_1043_li
-#Jun Iyama, Japan
-
-@history_1044_li
-#Antonio Casqueiro, Portugal
-
-@history_1045_li
-#Oliver Computing LLC, USA
-
-@history_1046_li
-#Harpal Grover Consulting Inc., USA
-
-@history_1047_li
-#Elisabetta Berlini, Italy
-
-@history_1048_li
-#William Gilbert, USA
-
-@history_1049_li
-#Antonio Dieguez Rojas, Chile
-
-@history_1050_a
-#Ontology Works, USA
-
-@history_1051_li
-#Pete Haidinyak, USA
-
-@history_1052_li
-#William Osmond, USA
-
-@history_1053_li
-#Joachim Ansorg, Germany
-
-@history_1054_li
-#Oliver Soerensen, Germany
-
-@history_1055_li
-#Christos Vasilakis, Greece
-
-@history_1056_li
-#Fyodor Kupolov, Denmark
-
-@history_1057_li
-#Jakob Jenkov, Denmark
-
-@history_1058_li
-#Stéphane Chartrand, Switzerland
-
-@history_1059_li
-#Glenn Kidd, USA
-
-@history_1060_li
-#Gustav Trede, Sweden
-
-@history_1061_li
-#Joonas Pulakka, Finland
-
-@history_1062_li
-#Bjorn Darri Sigurdsson, Iceland
-
-@history_1063_li
-#Iyama Jun, Japan
-
-@history_1064_li
-#Gray Watson, USA
-
-@history_1065_li
-#Erik Dick, Germany
-
-@history_1066_li
-#Pengxiang Shao, China
-
-@history_1067_li
-#Bilingual Marketing Group, USA
-
-@history_1068_li
-#Philippe Marschall, Switzerland
-
-@history_1069_li
-#Knut Staring, Norway
-
-@history_1070_li
-#Theis Borg, Denmark
-
-@history_1071_li
-#Mark De Mendonca Duske, USA
-
-@history_1072_li
-#Joel A. Garringer, USA
-
-@history_1073_li
-#Olivier Chafik, France
-
-@history_1074_li
-#Rene Schwietzke, Germany
-
-@history_1075_li
-#Jalpesh Patadia, USA
-
-@history_1076_li
-#Takanori Kawashima, Japan
-
-@history_1077_li
-#Terrence JC Huang, China
-
-@history_1078_a
-#JiaDong Huang, Australia
-
-@history_1079_li
-#Laurent van Roy, Belgium
-
-@history_1080_li
-#Qian Chen, China
-
-@history_1081_li
-#Clinton Hyde, USA
-
-@history_1082_li
-#Kritchai Phromros, Thailand
-
-@history_1083_li
-#Alan Thompson, USA
-
-@history_1084_li
-#Ladislav Jech, Czech Republic
-
-@history_1085_li
-#Dimitrijs Fedotovs, Latvia
-
-@history_1086_li
-#Richard Manley-Reeve, United Kingdom
-
-@installation_1000_h1
-#Installation
-
-@installation_1001_a
-# Requirements
-
-@installation_1002_a
-# Supported Platforms
-
-@installation_1003_a
-# Installing the Software
-
-@installation_1004_a
-# Directory Structure
-
-@installation_1005_h2
-#Requirements
-
-@installation_1006_p
-# To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much.
-
-@installation_1007_h3
-#Database Engine
-
-@installation_1008_li
-#Windows XP or Vista, Mac OS X, or Linux
-
-@installation_1009_li
-#Sun Java 6 or newer
-
-@installation_1010_li
-#Recommended Windows file system: NTFS (FAT32 only supports files up to 4 GB)
-
-@installation_1011_h3
-#H2 Console
-
-@installation_1012_li
-#Mozilla Firefox
-
-@installation_1013_h2
-#Supported Platforms
-
-@installation_1014_p
-# As this database is written in Java, it can run on many different platforms. It is tested with Java 6 and 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 6, but it also works in many other operating systems and using other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported.
-
-@installation_1015_h2
-#Installing the Software
-
-@installation_1016_p
-# To install the software, run the installer or unzip it to a directory of your choice.
-
-@installation_1017_h2
-#Directory Structure
-
-@installation_1018_p
-# After installing, you should get the following directory structure:
-
-@installation_1019_th
-#Directory
-
-@installation_1020_th
-#Contents
-
-@installation_1021_td
-bin
-
-@installation_1022_td
-#JAR and batch files
-
-@installation_1023_td
-docs
-
-@installation_1024_td
-#Documentation
-
-@installation_1025_td
-docs/html
-
-@installation_1026_td
-#HTML pages
-
-@installation_1027_td
-docs/javadoc
-
-@installation_1028_td
-#Javadoc files
-
-@installation_1029_td
-#ext
-
-@installation_1030_td
-#External dependencies (downloaded when building)
-
-@installation_1031_td
-service
-
-@installation_1032_td
-#Tools to run the database as a Windows Service
-
-@installation_1033_td
-src
-
-@installation_1034_td
-#Source files
-
-@installation_1035_td
-#src/docsrc
-
-@installation_1036_td
-#Documentation sources
-
-@installation_1037_td
-#src/installer
-
-@installation_1038_td
-#Installer, shell, and release build script
-
-@installation_1039_td
-#src/main
-
-@installation_1040_td
-#Database engine source code
-
-@installation_1041_td
-#src/test
-
-@installation_1042_td
-#Test source code
-
-@installation_1043_td
-#src/tools
-
-@installation_1044_td
-#Tools and database adapters source code
-
-@jaqu_1000_h1
-#JaQu
-
-@jaqu_1001_a
-# What is JaQu
-
-@jaqu_1002_a
-# Differences to Other Data Access Tools
-
-@jaqu_1003_a
-# Current State
-
-@jaqu_1004_a
-# Building the JaQu Library
-
-@jaqu_1005_a
-# Requirements
-
-@jaqu_1006_a
-# Example Code
-
-@jaqu_1007_a
-# Configuration
-
-@jaqu_1008_a
-# Natural Syntax
-
-@jaqu_1009_a
-# Other Ideas
-
-@jaqu_1010_a
-# Similar Projects
-
-@jaqu_1011_h2
-#What is JaQu
-
-@jaqu_1012_p
-# Note: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql.
-
-@jaqu_1013_p
-# JaQu stands for Java Query and allows accessing databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code:
-
-@jaqu_1014_p
-# stands for the SQL statement:
-
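-# A sketch of both, following the shape of the JaQu samples (class and field names illustrative):
-
-    Product p = new Product();
-    List<Product> soldOut = db.from(p).where(p.unitsInStock).is(0).select();
-    // roughly: SELECT * FROM PRODUCTS P WHERE P.UNITS_IN_STOCK = 0
-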
-@jaqu_1015_h2
-#Differences to Other Data Access Tools
-
-@jaqu_1016_p
-# Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection.
-
-@jaqu_1017_p
-# JaQu is meant as a replacement for JDBC and SQL, not so much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead the configuration (if required at all) is done in pure Java, within the application.
-
-@jaqu_1018_p
-# JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings).
-
-@jaqu_1019_h3
-#Restrictions
-
-@jaqu_1020_p
-# Primitive types (e.g. boolean, int, long, double
) are not supported. Use java.lang.Boolean, Integer, Long, Double
instead.
-
-@jaqu_1021_h3
-#Why in Java?
-
-@jaqu_1022_p
-# Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated: you would need to split the application and database code, and write adapter / wrapper code.
-
-@jaqu_1023_h2
-#Current State
-
-@jaqu_1024_p
-# Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file, however the source code is included in H2, under:
-
-@jaqu_1025_code
-#src/test/org/h2/test/jaqu/*
-
-@jaqu_1026_li
-# (samples and tests)
-
-@jaqu_1027_code
-#src/tools/org/h2/jaqu/*
-
-@jaqu_1028_li
-# (framework)
-
-@jaqu_1029_h2
-#Building the JaQu Library
-
-@jaqu_1030_p
-# To create the JaQu jar file, run: build jarJaqu
. This will create the file bin/h2jaqu.jar
.
-
-@jaqu_1031_h2
-#Requirements
-
-@jaqu_1032_p
-# JaQu requires Java 6. Annotations are not needed. Currently, JaQu is only tested with the H2 database engine, however in theory it should work with any database that supports the JDBC API.
-
-@jaqu_1033_h2
-#Example Code
-
-@jaqu_1034_h2
-#Configuration
-
-@jaqu_1035_p
-# JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define
in the data class. Example:
-
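-# A hedged sketch, assuming the Define API under src/tools/org/h2/jaqu:
-
-    import static org.h2.jaqu.Define.*;
-    import org.h2.jaqu.Table;
-
-    public class Product implements Table {
-        public Integer productId;
-        public String category;
-
-        public void define() {
-            tableName("AnnotatedProduct"); // map the class to another table name
-            primaryKey(productId);
-            index(productId, category);    // a multi-column index
-        }
-    }
-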
-@jaqu_1036_p
-# The method define()
contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself.
-
-@jaqu_1037_h2
-#Natural Syntax
-
-@jaqu_1038_p
-#The plan is to support more natural (pure Java) syntax in conditions. To do that, the condition class is de-compiled to a SQL condition. A proof of concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is:
-
-@jaqu_1039_h2
-#Other Ideas
-
-@jaqu_1040_p
-# This project has just been started, and nothing is fixed yet. Some ideas are:
-
-@jaqu_1041_li
-#Support queries on collections (instead of using a database).
-
-@jaqu_1042_li
-#Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA).
-
-@jaqu_1043_li
-#Internally use a JPA implementation (for example Hibernate) instead of SQL directly.
-
-@jaqu_1044_li
-#Use PreparedStatements and cache them.
-
-@jaqu_1045_h2
-#Similar Projects
-
-@jaqu_1046_a
-#iciql (a friendly fork of JaQu)
-
-@jaqu_1047_a
-#Cement Framework
-
-@jaqu_1048_a
-#Dreamsource ORM
-
-@jaqu_1049_a
-#Empire-db
-
-@jaqu_1050_a
-#JEQUEL: Java Embedded QUEry Language
-
-@jaqu_1051_a
-#Joist
-
-@jaqu_1052_a
-#jOOQ
-
-@jaqu_1053_a
-#JoSQL
-
-@jaqu_1054_a
-#LIQUidFORM
-
-@jaqu_1055_a
-#Quaere (Alias implementation)
-
-@jaqu_1056_a
-#Quaere
-
-@jaqu_1057_a
-#Querydsl
-
-@jaqu_1058_a
-#Squill
-
-@license_1000_h1
-#License
-
-@license_1001_a
-# Summary and License FAQ
-
-@license_1002_a
-# Mozilla Public License Version 2.0
-
-@license_1003_a
-# Eclipse Public License - Version 1.0
-
-@license_1004_a
-# Export Control Classification Number (ECCN)
-
-@license_1005_h2
-#Summary and License FAQ
-
-@license_1006_p
-# H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL.
-
-@license_1007_li
-#You can use H2 for free.
-
-@license_1008_li
-#You can integrate it into your applications (including in commercial applications) and distribute it.
-
-@license_1009_li
-#Files containing only your code are not covered by this license (it is 'commercial friendly').
-
-@license_1010_li
-#Modifications to the H2 source code must be published.
-
-@license_1011_li
-#You don't need to provide the source code of H2 if you did not modify anything.
-
-@license_1012_li
-#If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below.
-
-@license_1013_p
-# However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http://www.bungisoft.com
.
-
-@license_1014_p
-# About porting the source code to another language (for example C# or C++): converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code.
-
-@license_1015_p
-# If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt
in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license.
-
-@license_1016_h2
-#Mozilla Public License Version 2.0
-
-@license_1017_h3
-#1. Definitions
-
-@license_1018_p
-#1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.
-
-@license_1019_p
-#1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution.
-
-@license_1020_p
-#1.3. "Contribution" means Covered Software of a particular Contributor.
-
-@license_1021_p
-#1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.
-
-@license_1022_p
-#1.5. "Incompatible With Secondary Licenses" means
-
-@license_1023_p
-#a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or
-
-@license_1024_p
-#b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.
-
-@license_1025_p
-#1.6. "Executable Form" means any form of the work other than Source Code Form.
-
-@license_1026_p
-#1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.
-
-@license_1027_p
-#1.8. "License" means this document.
-
-@license_1028_p
-#1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.
-
-@license_1029_p
-#1.10. "Modifications" means any of the following:
-
-@license_1030_p
-#a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or
-
-@license_1031_p
-#b. any new file in Source Code Form that contains any Covered Software.
-
-@license_1032_p
-#1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.
-
-@license_1033_p
-#1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.
-
-@license_1034_p
-#1.13. "Source Code Form" means the form of the work preferred for making modifications.
-
-@license_1035_p
-#1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-
-@license_1036_h3
-#2. License Grants and Conditions
-
-@license_1037_h4
-#2.1. Grants
-
-@license_1038_p
-#Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
-
-@license_1039_p
-#under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and
-
-@license_1040_p
-#under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.
-
-@license_1041_h4
-#2.2. Effective Date
-
-@license_1042_p
-#The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.
-
-@license_1043_h4
-#2.3. Limitations on Grant Scope
-
-@license_1044_p
-#The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor:
-
-@license_1045_p
-#for any code that a Contributor has removed from Covered Software; or
-
-@license_1046_p
-#for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or
-
-@license_1047_p
-#under Patent Claims infringed by Covered Software in the absence of its Contributions.
-
-@license_1048_p
-#This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).
-
-@license_1049_h4
-#2.4. Subsequent Licenses
-
-@license_1050_p
-#No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).
-
-@license_1051_h4
-#2.5. Representation
-
-@license_1052_p
-#Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.
-
-@license_1053_h4
-#2.6. Fair Use
-
-@license_1054_p
-#This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.
-
-@license_1055_h4
-#2.7. Conditions
-
-@license_1056_p
-#Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.
-
-@license_1057_h3
-#3. Responsibilities
-
-@license_1058_h4
-#3.1. Distribution of Source Form
-
-@license_1059_p
-#All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form.
-
-@license_1060_h4
-#3.2. Distribution of Executable Form
-
-@license_1061_p
-#If You distribute Covered Software in Executable Form then:
-
-@license_1062_p
-#such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and
-
-@license_1063_p
-#You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License.
-
-@license_1064_h4
-#3.3. Distribution of a Larger Work
-
-@license_1065_p
-#You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).
-
-@license_1066_h4
-#3.4. Notices
-
-@license_1067_p
-#You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.
-
-@license_1068_h4
-#3.5. Application of Additional Terms
-
-@license_1069_p
-#You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.
-
-@license_1070_h3
-#4. Inability to Comply Due to Statute or Regulation
-
-@license_1071_p
-#If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.
-
-@license_1072_h3
-#5. Termination
-
-@license_1073_p
-#5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.
-
-@license_1074_p
-#5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.
-
-@license_1075_p
-#5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.
-
-@license_1076_h3
-#6. Disclaimer of Warranty
-
-@license_1077_p
-#Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.
-
-@license_1078_h3
-#7. Limitation of Liability
-
-@license_1079_p
-#Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.
-
-@license_1080_h3
-#8. Litigation
-
-@license_1081_p
-#Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims.
-
-@license_1082_h3
-#9. Miscellaneous
-
-@license_1083_p
-#This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.
-
-@license_1084_h3
-#10. Versions of the License
-
-@license_1085_h4
-#10.1. New Versions
-
-@license_1086_p
-#Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.
-
-@license_1087_h4
-#10.2. Effect of New Versions
-
-@license_1088_p
-#You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.
-
-@license_1089_h4
-#10.3. Modified Versions
-
-@license_1090_p
-#If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).
-
-@license_1091_h4
-#10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
-
-@license_1092_p
-#If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.
-
-@license_1093_h3
-#Exhibit A - Source Code Form License Notice
-
-@license_1094_p
-#If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.
-
-@license_1095_p
-#You may add additional accurate notices of copyright ownership.
-
-@license_1096_h3
-#Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-@license_1097_h2
-#Eclipse Public License - Version 1.0
-
-@license_1098_p
-# THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-
-@license_1099_h3
-#1. DEFINITIONS
-
-@license_1100_p
-# "Contribution" means:
-
-@license_1101_p
-# a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
-
-@license_1102_p
-# b) in the case of each subsequent Contributor:
-
-@license_1103_p
-# i) changes to the Program, and
-
-@license_1104_p
-# ii) additions to the Program;
-
-@license_1105_p
-# where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
-
-@license_1106_p
-# "Contributor" means any person or entity that distributes the Program.
-
-@license_1107_p
-# "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
-
-@license_1108_p
-# "Program" means the Contributions distributed in accordance with this Agreement.
-
-@license_1109_p
-# "Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
-
-@license_1110_h3
-#2. GRANT OF RIGHTS
-
-@license_1111_p
-# a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
-
-@license_1112_p
-# b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
-
-@license_1113_p
-# c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
-
-@license_1114_p
-# d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
-
-@license_1115_h3
-#3. REQUIREMENTS
-
-@license_1116_p
-# A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that:
-
-@license_1117_p
-# a) it complies with the terms and conditions of this Agreement; and
-
-@license_1118_p
-# b) its license agreement:
-
-@license_1119_p
-# i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
-
-@license_1120_p
-# ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
-
-@license_1121_p
-# iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
-
-@license_1122_p
-# iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
-
-@license_1123_p
-# When the Program is made available in source code form:
-
-@license_1124_p
-# a) it must be made available under this Agreement; and
-
-@license_1125_p
-# b) a copy of this Agreement must be included with each copy of the Program.
-
-@license_1126_p
-# Contributors may not remove or alter any copyright notices contained within the Program.
-
-@license_1127_p
-# Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
-
-@license_1128_h3
-#4. COMMERCIAL DISTRIBUTION
-
-@license_1129_p
-# Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
-
-@license_1130_p
-# For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
-
-@license_1131_h3
-#5. NO WARRANTY
-
-@license_1132_p
-# EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
-
-@license_1133_h3
-#6. DISCLAIMER OF LIABILITY
-
-@license_1134_p
-# EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-
-@license_1135_h3
-#7. GENERAL
-
-@license_1136_p
-# If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
-
-@license_1137_p
-# If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
-
-@license_1138_p
-# All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
-
-@license_1139_p
-# Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
-
-@license_1140_p
-# This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
-
-@license_1141_h2
-#Export Control Classification Number (ECCN)
-
-@license_1142_p
-# As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page.
-
-@links_1000_h1
-#Links
-
-@links_1001_p
-# If you want to add a link, please send it to the support email address or post it to the group.
-
-@links_1002_a
-# Commercial Support
-
-@links_1003_a
-# Quotes
-
-@links_1004_a
-# Books
-
-@links_1005_a
-# Extensions
-
-@links_1006_a
-# Blog Articles, Videos
-
-@links_1007_a
-# Database Frontends / Tools
-
-@links_1008_a
-# Products and Projects
-
-@links_1009_h2
-#Commercial Support
-
-@links_1010_a
-#Commercial support for H2 is available
-
-@links_1011_p
-# from Steve McLeod (steve dot mcleod at gmail dot com). Please note he is not one of the main developers of H2. He describes himself as follows:
-
-@links_1012_li
-#I'm a long time user of H2, routinely working with H2 databases several gigabytes in size.
-
-@links_1013_li
-#I'm the creator of popular commercial desktop software that uses H2.
-
-@links_1014_li
-#I'm a certified Java developer (SCJP).
-
-@links_1015_li
-#I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany.
-
-@links_1016_li
-#I'm based in Germany, and willing to travel within Europe. I can work remotely with teams in the USA and other locations.
-
-@links_1017_h2
-#Quotes
-
-@links_1018_a
-# Quote
-
-@links_1019_p
-#: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... "
-
-@links_1020_h2
-#Books
-
-@links_1021_a
-# Seam In Action
-
-@links_1022_h2
-#Extensions
-
-@links_1023_a
-# Grails H2 Database Plugin
-
-@links_1024_a
-# h2osgi: OSGi for the H2 Database
-
-@links_1025_a
-# H2Sharp: ADO.NET interface for the H2 database engine
-
-@links_1026_a
-# A spatial extension of the H2 database.
-
-@links_1027_h2
-#Blog Articles, Videos
-
-@links_1028_a
-# Youtube: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2
-
-@links_1029_a
-# Analyzing CSVs with H2 in under 10 minutes (2009-12-07)
-
-@links_1030_a
-# Efficient sorting and iteration on large databases (2009-06-15)
-
-@links_1031_a
-# Porting Flexive to the H2 Database (2008-12-05)
-
-@links_1032_a
-# H2 Database with GlassFish (2008-11-24)
-
-@links_1033_a
-# H2 Database - Performance Tracing (2008-04-30)
-
-@links_1034_a
-# Open Source Databases Comparison (2007-09-11)
-
-@links_1035_a
-# The Codist: The Open Source Frameworks I Use (2007-07-23)
-
-@links_1036_a
-# The Codist: SQL Injections: How Not To Get Stuck (2007-05-08)
-
-@links_1037_a
-# David Coldrick's Weblog: New Version of H2 Database Released (2007-01-06)
-
-@links_1038_a
-# The Codist: Write Your Own Database, Again (2006-11-13)
-
-@links_1039_h2
-#Project Pages
-
-@links_1040_a
-# Ohloh
-
-@links_1041_a
-# Freshmeat Project Page
-
-@links_1042_a
-# Wikipedia
-
-@links_1043_a
-# Java Source Net
-
-@links_1044_a
-# Linux Package Manager
-
-@links_1045_h2
-#Database Frontends / Tools
-
-@links_1046_a
-# Dataflyer
-
-@links_1047_p
-# A tool to browse databases and export data.
-
-@links_1048_a
-# DB Solo
-
-@links_1049_p
-# SQL query tool.
-
-@links_1050_a
-# DbVisualizer
-
-@links_1051_p
-# Database tool.
-
-@links_1052_a
-# Execute Query
-
-@links_1053_p
-# Database utility written in Java.
-
-@links_1054_a
-# Flyway
-
-@links_1055_p
-# The agile database migration framework for Java.
-
-@links_1056_a
-# [fleXive]
-
-@links_1057_p
-# JavaEE 5 open source framework for the development of complex and evolving (web-)applications.
-
-@links_1058_a
-# JDBC Console
-
-@links_1059_p
-# This small webapp provides the ability to execute SQL against data sources bound in the container's JNDI. Based on H2 Console.
-
-@links_1060_a
-# HenPlus
-
-@links_1061_p
-# HenPlus is a SQL shell written in Java.
-
-@links_1062_a
-# JDBC lint
-
-@links_1063_p
-# Helps write correct and efficient code when using the JDBC API.
-
-@links_1064_a
-# OpenOffice
-
-@links_1065_p
-# Base is OpenOffice.org's database application. It provides access to relational data sources.
-
-@links_1066_a
-# RazorSQL
-
-@links_1067_p
-# An SQL query tool, database browser, SQL editor, and database administration tool.
-
-@links_1068_a
-# SQL Developer
-
-@links_1069_p
-# Universal Database Frontend.
-
-@links_1070_a
-# SQL Workbench/J
-
-@links_1071_p
-# Free DBMS-independent SQL tool.
-
-@links_1072_a
-# SQuirreL SQL Client
-
-@links_1073_p
-# Graphical tool to view the structure of a database, browse the data, issue SQL commands etc.
-
-@links_1074_a
-# SQuirreL DB Copy Plugin
-
-@links_1075_p
-# Tool to copy data from one database to another.
-
-@links_1076_h2
-#Products and Projects
-
-@links_1077_a
-# AccuProcess
-
-@links_1078_p
-# Visual business process modeling and simulation software for business users.
-
-@links_1079_a
-# Adeptia BPM
-
-@links_1080_p
-# A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows.
-
-@links_1081_a
-# Adeptia Integration
-
-@links_1082_p
-# Process-centric, services-based application integration suite.
-
-@links_1083_a
-# Aejaks
-
-@links_1084_p
-# A server-side scripting environment to build AJAX enabled web applications.
-
-@links_1085_a
-# Axiom Stack
-
-@links_1086_p
-# A web framework that lets you write dynamic web applications with Zen-like simplicity.
-
-@links_1087_a
-# Apache Cayenne
-
-@links_1088_p
-# Open source persistence framework providing object-relational mapping (ORM) and remoting services.
-
-@links_1089_a
-# Apache Jackrabbit
-
-@links_1090_p
-# Open source implementation of the Java Content Repository API (JCR).
-
-@links_1091_a
-# Apache OpenJPA
-
-@links_1092_p
-# Open source implementation of the Java Persistence API (JPA).
-
-@links_1093_a
-# AppFuse
-
-@links_1094_p
-# Helps build web applications.
-
-@links_1095_a
-# BGBlitz
-
-@links_1096_p
-# The Swiss army knife of Backgammon.
-
-@links_1097_a
-# Bonita
-
-@links_1098_p
-# Open source workflow solution for handling long-running, user-oriented processes, providing out-of-the-box workflow and business process management features.
-
-@links_1099_a
-# Bookmarks Portlet
-
-@links_1100_p
-# JSR 168 compliant bookmarks management portlet application.
-
-@links_1101_a
-# Claros inTouch
-
-@links_1102_p
-# Ajax communication suite with mail, addresses, notes, IM, and RSS reader.
-
-@links_1103_a
-# CrashPlan PRO Server
-
-@links_1104_p
-# Easy and cross platform backup solution for business and service providers.
-
-@links_1105_a
-# DataNucleus
-
-@links_1106_p
-# Java persistent objects.
-
-@links_1107_a
-# DbUnit
-
-@links_1108_p
-# A JUnit extension (also usable with Ant) targeted for database-driven projects.
-
-@links_1109_a
-# DiffKit
-
-@links_1110_p
-# DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text.
-
-@links_1111_a
-# Dinamica Framework
-
-@links_1112_p
-# Ajax/J2EE framework for RAD development (mainly oriented toward Hispanic markets).
-
-@links_1113_a
-# District Health Information Software 2 (DHIS)
-
-@links_1114_p
-# The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities.
-
-@links_1115_a
-# Ebean ORM Persistence Layer
-
-@links_1116_p
-# Open source Java Object Relational Mapping tool.
-
-@links_1117_a
-# Eclipse CDO
-
-@links_1118_p
-# The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution.
-
-@links_1119_a
-# Fabric3
-
-@links_1120_p
-# Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http://www.osoa.org).
-
-@links_1121_a
-# FIT4Data
-
-@links_1122_p
-# A testing framework for data management applications built on the Java implementation of FIT.
-
-@links_1123_a
-# Flux
-
-@links_1124_p
-# Java job scheduler, file transfer, workflow, and BPM.
-
-@links_1125_a
-# GeoServer
-
-@links_1126_p
-# GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing.
-
-@links_1127_a
-# GBIF Integrated Publishing Toolkit (IPT)
-
-@links_1128_p
-# The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data: taxon primary occurrence data, taxon checklists and general resource metadata.
-
-@links_1129_a
-# GNU Gluco Control
-
-@links_1130_p
-# Helps you to manage your diabetes.
-
-@links_1131_a
-# Golden T Studios
-
-@links_1132_p
-# Fun-to-play games with a simple interface.
-
-@links_1133_a
-# GridGain
-
-@links_1134_p
-# GridGain is an easy-to-use Cloud Application Platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure.
-
-@links_1135_a
-# Group Session
-
-@links_1136_p
-# Open source web groupware.
-
-@links_1137_a
-# HA-JDBC
-
-@links_1138_p
-# High-Availability JDBC: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver.
-
-@links_1139_a
-# Hibernate
-
-@links_1140_p
-# Relational persistence for idiomatic Java (O-R mapping tool).
-
-@links_1141_a
-# Hibicius
-
-@links_1142_p
-# Online Banking Client for the HBCI protocol.
-
-@links_1143_a
-# ImageMapper
-
-@links_1144_p
-# ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user friendly interface.
-
-@links_1145_a
-# JAMWiki
-
-@links_1146_p
-# Java-based Wiki engine.
-
-@links_1147_a
-# Jaspa
-
-@links_1148_p
-# Java Spatial. Jaspa potentially brings around 200 spatial functions.
-
-@links_1149_a
-# Java Simon
-
-@links_1150_p
-# Simple Monitoring API.
-
-@links_1151_a
-# JBoss jBPM
-
-@links_1152_p
-# A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration.
-
-@links_1153_a
-# JBoss Jopr
-
-@links_1154_p
-# An enterprise management solution for JBoss middleware projects and other application technologies.
-
-@links_1155_a
-# JGeocoder
-
-@links_1156_p
-# Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location.
-
-@links_1157_a
-# JGrass
-
-@links_1158_p
-# Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig.
-
-@links_1159_a
-# Jena
-
-@links_1160_p
-# Java framework for building Semantic Web applications.
-
-@links_1161_a
-# JMatter
-
-@links_1162_p
-# Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern.
-
-@links_1163_a
-# jOOQ (Java Object Oriented Querying)
-
-@links_1164_p
-# jOOQ is a fluent API for typesafe SQL query construction and execution.
-
-@links_1165_a
-# Liftweb
-
-@links_1166_p
-# A Scala-based, secure, developer friendly web framework.
-
-@links_1167_a
-# LiquiBase
-
-@links_1168_p
-# A tool to manage database changes and refactorings.
-
-@links_1169_a
-# Luntbuild
-
-@links_1170_p
-# Build automation and management tool.
-
-@links_1171_a
-# localdb
-
-@links_1172_p
-# A tool that locates the full file path of the folder containing the database files.
-
-@links_1173_a
-# Magnolia
-
-@links_1174_p
-# Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays.
-
-@links_1175_a
-# MiniConnectionPoolManager
-
-@links_1176_p
-# A lightweight standalone JDBC connection pool manager.
-
-@links_1177_a
-# Mr. Persister
-
-@links_1178_p
-# Simple, small and fast object relational mapping.
-
-@links_1179_a
-# Myna Application Server
-
-@links_1180_p
-# Java web app that provides dynamic web content and access to Java libraries from JavaScript.
-
-@links_1181_a
-# MyTunesRss
-
-@links_1182_p
-# MyTunesRSS lets you listen to your music wherever you are.
-
-@links_1183_a
-# NCGC CurveFit
-
-@links_1184_p
-# From: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds with the potential to handle million+ compounds. It utilizes an embedded H2 database to enable flexible query/retrieval of all data including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop software that handle a few dose response curves at a time. A couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of analysis by the user, require a license to Oracle, and lack both advanced query/retrieval and the ability to handle chemical structures.
-
-@links_1185_a
-# Nuxeo
-
-@links_1186_p
-# Standards-based, open source platform for building ECM applications.
-
-@links_1187_a
-# nWire
-
-@links_1188_p
-# Eclipse plug-in which expedites Java development. Its main purpose is to help developers find code quickly and easily understand how it relates to the rest of the application, and thus understand the application structure.
-
-@links_1189_a
-# Ontology Works
-
-@links_1190_p
-# This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise.
-
-@links_1191_a
-# Ontoprise OntoBroker
-
-@links_1192_p
-# SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations: OWL, RDF, RDFS, SPARQL, and F-Logic.
-
-@links_1193_a
-# Open Anzo
-
-@links_1194_p
-# Semantic Application Server.
-
-@links_1195_a
-# OpenGroove
-
-@links_1196_p
-# OpenGroove is a groupware program that allows users to synchronize data.
-
-@links_1197_a
-# OpenSocial Development Environment (OSDE)
-
-@links_1198_p
-# Development tool for OpenSocial applications.
-
-@links_1199_a
-# Orion
-
-@links_1200_p
-# J2EE Application Server.
-
-@links_1201_a
-# P5H2
-
-@links_1202_p
-# A library for the Processing programming language and environment.
-
-@links_1203_a
-# Phase-6
-
-@links_1204_p
-# Computer-based learning software.
-
-@links_1205_a
-# Pickle
-
-@links_1206_p
-# Pickle is a Java library containing classes for persistence, concurrency, and logging.
-
-@links_1207_a
-# Piman
-
-@links_1208_p
-# Water treatment projects data management.
-
-@links_1209_a
-# PolePosition
-
-@links_1210_p
-# Open source database benchmark.
-
-@links_1211_a
-# Poormans
-
-@links_1212_p
-# Very basic CMS running as a SWT application and generating static HTML pages.
-
-@links_1213_a
-# Railo
-
-@links_1214_p
-# Railo is an alternative engine for the Cold Fusion Markup Language that compiles code programmed in CFML into Java bytecode and executes it on a servlet engine.
-
-@links_1215_a
-# Razuna
-
-@links_1216_p
-# Open source Digital Asset Management System with integrated Web Content Management.
-
-@links_1217_a
-# RIFE
-
-@links_1218_p
-# A full-stack web application framework with tools and APIs to implement most common web features.
-
-@links_1219_a
-# Sava
-
-@links_1220_p
-# Open-source web-based content management system.
-
-@links_1221_a
-# Scriptella
-
-@links_1222_p
-# ETL (Extract-Transform-Load) and script execution tool.
-
-@links_1223_a
-# Sesar
-
-@links_1224_p
-# Dependency Injection Container with Aspect Oriented Programming.
-
-@links_1225_a
-# SemmleCode
-
-@links_1226_p
-# Eclipse plugin to help you improve software quality.
-
-@links_1227_a
-# SeQuaLite
-
-@links_1228_p
-# A free, lightweight Java data access framework.
-
-@links_1229_a
-# ShapeLogic
-
-@links_1230_p
-# Toolkit for declarative programming, image processing and computer vision.
-
-@links_1231_a
-# Shellbook
-
-@links_1232_p
-# Desktop publishing application.
-
-@links_1233_a
-# Signsoft intelliBO
-
-@links_1234_p
-# Persistence middleware supporting the JDO specification.
-
-@links_1235_a
-# SimpleORM
-
-@links_1236_p
-# Simple Java Object Relational Mapping.
-
-@links_1237_a
-# SymmetricDS
-
-@links_1238_p
-# Web-enabled, database-independent data synchronization/replication software.
-
-@links_1239_a
-# SmartFoxServer
-
-@links_1240_p
-# Platform for developing multiuser applications and games with Macromedia Flash.
-
-@links_1241_a
-# Social Bookmarks Friend Finder
-
-@links_1242_p
-# A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com).
-
-@links_1243_a
-# sormula
-
-@links_1244_p
-# Simple object relational mapping.
-
-@links_1245_a
-# Springfuse
-
-@links_1246_p
-# Code generation for Spring, Spring MVC & Hibernate.
-
-@links_1247_a
-# SQLOrm
-
-@links_1248_p
-# Java Object Relation Mapping.
-
-@links_1249_a
-# StelsCSV and StelsXML
-
-@links_1250_p
-# StelsCSV is a CSV JDBC type 4 driver that allows performing SQL queries and other JDBC operations on text files. StelsXML is an XML JDBC type 4 driver that does the same for XML files. Both use H2 as the SQL engine.
-
-@links_1251_a
-# StorYBook
-
-@links_1252_p
-# A summary-based tool for novelists and script writers. It helps keep an overview of the various plot threads of a story.
-
-@links_1253_a
-# StreamCruncher
-
-@links_1254_p
-# Event (stream) processing kernel.
-
-@links_1255_a
-# SUSE Manager, part of Linux Enterprise Server 11
-
-@links_1256_p
-# The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies.
-
-@links_1257_a
-# Tune Backup
-
-@links_1258_p
-# Easy-to-use backup solution for your iTunes library.
-
-@links_1259_a
-# weblica
-
-@links_1260_p
-# Desktop CMS.
-
-@links_1261_a
-# Web of Web
-
-@links_1262_p
-# Collaborative and realtime interactive media platform for the web.
-
-@links_1263_a
-# Werkzeugkasten
-
-@links_1264_p
-# Minimum Java Toolset.
-
-@links_1265_a
-# VPDA
-
-@links_1266_p
-# View Providers Driven Applications (VPDA) is a Java-based application framework for building applications composed of server components (view providers).
-
-@links_1267_a
-# Volunteer database
-
-@links_1268_p
-# A database front end to register volunteers, partnerships, and donations for a non-profit organization.
-
-@mainWeb_1000_h1
-H2 データベース エンジン
-
-@mainWeb_1001_p
-# Welcome to H2, the Java SQL database. The main features of H2 are:
-
-@mainWeb_1002_li
-#Very fast, open source, JDBC API
-
-@mainWeb_1003_li
-#Embedded and server modes; in-memory databases
-
-@mainWeb_1004_li
-#Browser based Console application
-
-@mainWeb_1005_li
-#Small footprint: around 1.5 MB jar file size
-
-@mainWeb_1006_h2
-ダウンロード
-
-@mainWeb_1007_td
-# Version 1.4.187 (2015-04-10), Beta
-
-@mainWeb_1008_a
-#Windows Installer (5 MB)
-
-@mainWeb_1009_a
-#All Platforms (zip, 8 MB)
-
-@mainWeb_1010_a
-#All Downloads
-
-@mainWeb_1011_td
-
-
-@mainWeb_1012_h2
-サポート
-
-@mainWeb_1013_a
-#Stack Overflow (tag H2)
-
-@mainWeb_1014_a
-#Google Group English
-
-@mainWeb_1015_p
-#, Japanese
-
-@mainWeb_1016_p
-# For non-technical issues, use:
-
-@mainWeb_1017_h2
-特徴
-
-@mainWeb_1018_th
-H2
-
-@mainWeb_1019_a
-Derby
-
-@mainWeb_1020_a
-HSQLDB
-
-@mainWeb_1021_a
-MySQL
-
-@mainWeb_1022_a
-PostgreSQL
-
-@mainWeb_1023_td
-Pure Java
-
-@mainWeb_1024_td
-対応
-
-@mainWeb_1025_td
-対応
-
-@mainWeb_1026_td
-対応
-
-@mainWeb_1027_td
-非対応

-@mainWeb_1028_td
-非対応
-
-@mainWeb_1029_td
-#Memory Mode
-
-@mainWeb_1030_td
-対応
-
-@mainWeb_1031_td
-対応
-
-@mainWeb_1032_td
-対応
-
-@mainWeb_1033_td
-非対応

-@mainWeb_1034_td
-非対応
-
-@mainWeb_1035_td
-暗号化データベース
-
-@mainWeb_1036_td
-対応
-
-@mainWeb_1037_td
-対応
-
-@mainWeb_1038_td
-対応
-
-@mainWeb_1039_td
-非対応

-@mainWeb_1040_td
-非対応
-
-@mainWeb_1041_td
-ODBCドライバ
-
-@mainWeb_1042_td
-対応
-
-@mainWeb_1043_td
-非対応

-@mainWeb_1044_td
-非対応
-
-@mainWeb_1045_td
-対応
-
-@mainWeb_1046_td
-対応
-
-@mainWeb_1047_td
-フルテキストサーチ
-
-@mainWeb_1048_td
-対応
-
-@mainWeb_1049_td
-非対応

-@mainWeb_1050_td
-非対応
-
-@mainWeb_1051_td
-対応
-
-@mainWeb_1052_td
-対応
-
-@mainWeb_1053_td
-#Multi Version Concurrency
-
-@mainWeb_1054_td
-対応
-
-@mainWeb_1055_td
-非対応
-
-@mainWeb_1056_td
-対応
-
-@mainWeb_1057_td
-対応
-
-@mainWeb_1058_td
-対応
-
-@mainWeb_1059_td
-フットプリント (jar/dll size)
-
-@mainWeb_1060_td
-#~1 MB
-
-@mainWeb_1061_td
-#~2 MB
-
-@mainWeb_1062_td
-#~1 MB
-
-@mainWeb_1063_td
-#~4 MB
-
-@mainWeb_1064_td
-#~6 MB
-
-@mainWeb_1065_p
-# See also the detailed comparison.
-
-@mainWeb_1066_h2
-ニュース
-
-@mainWeb_1067_b
-ニュースフィード:
-
-@mainWeb_1068_a
-#Full text (Atom)
-
-@mainWeb_1069_p
-# or Header only (RSS).
-
-@mainWeb_1070_b
-Email ニュースレター:
-
-@mainWeb_1071_p
-# Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context.
-
-@mainWeb_1072_td
-
-
-@mainWeb_1073_h2
-寄稿する
-
-@mainWeb_1074_p
-# You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter:
-
-@main_1000_h1
-H2 データベース エンジン
-
-@main_1001_p
-# Welcome to H2, the free Java SQL database engine.
-
-@main_1002_a
-クイックスタート
-
-@main_1003_p
-# Get a fast overview.
-
-@main_1004_a
-チュートリアル
-
-@main_1005_p
-# Go through the samples.
-
-@main_1006_a
-特徴
-
-@main_1007_p
-# See what this database can do and how to use these features.
-
-@mvstore_1000_h1
-#MVStore
-
-@mvstore_1001_a
-# Overview
-
-@mvstore_1002_a
-# Example Code
-
-@mvstore_1003_a
-# Store Builder
-
-@mvstore_1004_a
-# R-Tree
-
-@mvstore_1005_a
-# Features
-
-@mvstore_1006_a
-#- Maps
-
-@mvstore_1007_a
-#- Versions
-
-@mvstore_1008_a
-#- Transactions
-
-@mvstore_1009_a
-#- In-Memory Performance and Usage
-
-@mvstore_1010_a
-#- Pluggable Data Types
-
-@mvstore_1011_a
-#- BLOB Support
-
-@mvstore_1012_a
-#- R-Tree and Pluggable Map Implementations
-
-@mvstore_1013_a
-#- Concurrent Operations and Caching
-
-@mvstore_1014_a
-#- Log Structured Storage
-
-@mvstore_1015_a
-#- Off-Heap and Pluggable Storage
-
-@mvstore_1016_a
-#- File System Abstraction, File Locking and Online Backup
-
-@mvstore_1017_a
-#- Encrypted Files
-
-@mvstore_1018_a
-#- Tools
-
-@mvstore_1019_a
-#- Exception Handling
-
-@mvstore_1020_a
-#- Storage Engine for H2
-
-@mvstore_1021_a
-# File Format
-
-@mvstore_1022_a
-# Similar Projects and Differences to Other Storage Engines
-
-@mvstore_1023_a
-# Current State
-
-@mvstore_1024_a
-# Requirements
-
-@mvstore_1025_h2
-#Overview
-
-@mvstore_1026_p
-# The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL.
-
-@mvstore_1027_li
-#MVStore stands for "multi-version store".
-
-@mvstore_1028_li
-#Each store contains a number of maps that can be accessed using the java.util.Map interface.
-
-@mvstore_1029_li
-#Both file-based persistence and in-memory operation are supported.
-
-@mvstore_1030_li
-#It is intended to be fast, simple to use, and small.
-
-@mvstore_1031_li
-#Concurrent read and write operations are supported.
-
-@mvstore_1032_li
-#Transactions are supported (including concurrent transactions and 2-phase commit).
-
-@mvstore_1033_li
-#The tool is very modular. It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files.
-
-@mvstore_1034_h2
-#Example Code
-
-@mvstore_1035_p
-# The following sample code shows how to use the tool:
-
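-# As a minimal sketch (closely following the documented MVStore sample; the file name is illustrative), basic usage looks like this:
-
-    import org.h2.mvstore.MVMap;
-    import org.h2.mvstore.MVStore;
-
-    // open the store (the file is created if it does not exist)
-    MVStore s = MVStore.open("data.mv.db");
-    // create/get the map named "data"
-    MVMap<Integer, String> map = s.openMap("data");
-    // add and read some data
-    map.put(1, "Hello World");
-    System.out.println(map.get(1));
-    // close the store (this persists pending changes)
-    s.close();
-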
-@mvstore_1036_h2
-#Store Builder
-
-@mvstore_1037_p
-# The MVStore.Builder provides a fluent interface to build a store if configuration options are needed. Example usage:
-
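-# For instance (a sketch; the option values are illustrative):
-
-    MVStore s = new MVStore.Builder()
-            .fileName("data.mv.db")
-            .cacheSize(32)      // read cache size, in MB
-            .compress()         // compress pages with LZF when storing
-            .open();
-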
-@mvstore_1038_p
-# The list of available options is:
-
-@mvstore_1039_li
-#autoCommitBufferSize: the size of the write buffer.
-
-@mvstore_1040_li
-#autoCommitDisabled: to disable auto-commit.
-
-@mvstore_1041_li
-#backgroundExceptionHandler: a handler for exceptions that could occur while writing in the background.
-
-@mvstore_1042_li
-#cacheSize: the cache size in MB.
-
-@mvstore_1043_li
-#compress: compress the data when storing using a fast algorithm (LZF).
-
-@mvstore_1044_li
-#compressHigh: compress the data when storing using a slower algorithm (Deflate).
-
-@mvstore_1045_li
-#encryptionKey: the key for file encryption.
-
-@mvstore_1046_li
-#fileName: the name of the file, for file based stores.
-
-@mvstore_1047_li
-#fileStore: the storage implementation to use.
-
-@mvstore_1048_li
-#pageSplitSize: the point where pages are split.
-
-@mvstore_1049_li
-#readOnly: open the file in read-only mode.
-
-@mvstore_1050_h2
-#R-Tree
-
-@mvstore_1051_p
-# The MVRTreeMap is an R-tree implementation that supports fast spatial queries. It can be used as follows:
-
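-# A sketch adapted from the documented R-tree sample; a SpatialKey takes an id followed by min/max values for each dimension:
-
-    MVStore s = MVStore.open(null); // in-memory store
-    MVRTreeMap<String> r = s.openMap("data",
-            new MVRTreeMap.Builder<String>());
-    // add two keys: SpatialKey(id, minX, maxX, minY, maxY)
-    r.add(new SpatialKey(0, -3f, -2f, 2f, 3f), "left");
-    r.add(new SpatialKey(1, 3f, 4f, 4f, 5f), "right");
-    // iterate over the keys intersecting a bounding box
-    Iterator<SpatialKey> it =
-            r.findIntersectingKeys(new SpatialKey(0, 0f, 9f, 3f, 6f));
-    while (it.hasNext()) {
-        System.out.println(r.get(it.next()));
-    }
-    s.close();
-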
-@mvstore_1052_p
-# The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder&lt;String&gt;().dimensions(3). The minimum number of dimensions is 1, the maximum is 32.
-
-@mvstore_1053_h2
-特徴
-
-@mvstore_1054_h3
-#Maps
-
-@mvstore_1055_p
-# Each store contains a set of named maps. A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterating over some or all keys, and so on.
-
-@mvstore_1056_p
-# Also supported, and very uncommon for maps, is fast index lookup: the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree.
-
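-# For illustration, a sketch of the index-lookup calls; getKeyIndex, getKey, and sizeAsLong are assumed to be the MVMap methods backing this feature:
-
-    // given an open MVStore s
-    MVMap<Integer, String> map = s.openMap("data");
-    long index = map.getKeyIndex(42);                  // position of a key in the sorted map
-    Integer median = map.getKey(map.sizeAsLong() / 2); // key at a given position
-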
-@mvstore_1057_p
-# In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key).
-
-@mvstore_1058_h3
-#Versions
-
-@mvstore_1059_p
-# A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported.
-
-@mvstore_1060_p
-# The following sample code shows how to create a store, open a map, add some data, and access the current and an old version:
-
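-# A sketch closely following the documented sample; getCurrentVersion and openVersion are the relevant calls:
-
-    MVStore s = MVStore.open(null);
-    MVMap<Integer, String> map = s.openMap("data");
-    map.put(1, "Hello");
-    map.put(2, "World");
-    // remember the current version for later use
-    long oldVersion = s.getCurrentVersion();
-    // from now on, the old version is read-only
-    s.commit();
-    // more changes, in the new version
-    map.put(1, "Hi");
-    map.remove(2);
-    // access the old data (before the commit)
-    MVMap<Integer, String> oldMap = map.openVersion(oldVersion);
-    System.out.println(oldMap.get(1)); // prints "Hello"
-    System.out.println(map.get(1));    // prints "Hi"
-    s.close();
-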
-@mvstore_1061_h3
-#Transactions
-
-@mvstore_1062_p
-# To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions).
-
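-# A sketch of the utility, assuming the 1.4-era org.h2.mvstore.db.TransactionStore API (treat the exact class and method names as assumptions):
-
-    MVStore s = MVStore.open(null);
-    TransactionStore ts = new TransactionStore(s);
-    ts.init();
-    // each transaction sees its own uncommitted changes
-    Transaction tx = ts.begin();
-    TransactionMap<Integer, String> map = tx.openMap("data");
-    map.put(1, "Hello");
-    tx.commit(); // or tx.rollback()
-    s.close();
-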
-@mvstore_1063_p
-# Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log.
-
-@mvstore_1064_h3
-#In-Memory Performance and Usage
-
-@mvstore_1065_p
-# Performance of in-memory operations is about 50% slower than java.util.TreeMap.
-
-@mvstore_1066_p
-# The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with less than about 25 entries, the regular map implementations need less memory.
-
-@mvstore_1067_p
-# If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted.
-
-@mvstore_1068_p
-# As in all map implementations, keys need to be immutable; that is, changing the key object after an entry has been added is not allowed. If a file name is specified, the value may also not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled).
-
-@mvstore_1069_h3
-#Pluggable Data Types
-
-@mvstore_1070_p
-# Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average.
-
-@mvstore_1071_p
-# Parameterized data types are supported (for example one could build a string data type that limits the length).
-
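-# A sketch of opening a map with explicit serializers instead of the default; StringDataType (org.h2.mvstore.type) and the keyType/valueType builder methods are assumptions about the exact API:
-
-    // given an open MVStore s
-    MVMap<String, String> map = s.openMap("names",
-            new MVMap.Builder<String, String>()
-                    .keyType(StringDataType.INSTANCE)
-                    .valueType(StringDataType.INSTANCE));
-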
-@mvstore_1072_p
-# The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages.
-
-@mvstore_1073_h3
-#BLOB Support
-
-@mvstore_1074_p
-# There is a mechanism that stores large binary objects by splitting them into smaller blocks. This makes it possible to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface.
-
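-# A sketch of this mechanism, assuming the org.h2.mvstore.StreamStore helper; put returns a small id that references the stored blocks:
-
-    MVStore s = MVStore.open("data.mv.db");
-    MVMap<Long, byte[]> blocks = s.openMap("blobBlocks");
-    StreamStore ss = new StreamStore(blocks);
-    byte[] payload = new byte[8 * 1024 * 1024];
-    byte[] id = ss.put(new ByteArrayInputStream(payload)); // write (throws IOException)
-    InputStream in = ss.get(id); // stream the data back
-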
-@mvstore_1075_h3
-#R-Tree and Pluggable Map Implementations
-
-@mvstore_1076_p
-# The map implementation is pluggable. In addition to the default MVMap (multi-version map), there is a map that supports concurrent write operations, and a multi-version R-tree map implementation for spatial operations.
-
-@mvstore_1077_h3
-#Concurrent Operations and Caching
-
-@mvstore_1078_p
-# Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot.
-
-@mvstore_1079_p
-# Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant to scan operations.
-
-@mvstore_1080_p
-# For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). The plan is to add such a mechanism later when needed.
-
-@mvstore_1081_h3
-#Log Structured Storage
-
-@mvstore_1082_p
-# Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, up to a block size of 2 MB, and then does not increase further.) By default, changes are automatically written when more than a number of pages are modified, and once every second in a background thread, even if only a little data was changed. Changes can also be written explicitly by calling commit().
-
-@mvstore_1083_p
-# When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks).
-
-@mvstore_1084_p
-# There are usually two write operations per chunk: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default).
-
-@mvstore_1085_p
-# Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data.
-
-@mvstore_1086_p
-# Compared to traditional storage engines (that use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs fewer disk operations per change, as data is written only once instead of two or three times, and because the B-tree pages are always full (they are stored next to each other) and can be easily compressed. But temporarily, disk space usage might actually be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates).
-
-@mvstore_1087_h3
-#Off-Heap and Pluggable Storage
-
-@mvstore_1088_p
-# Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file.
-
-@mvstore_1089_p
-# An off-heap storage implementation is available. This storage keeps the data in off-heap memory, meaning outside of the regular garbage collected heap. This allows using very large in-memory stores without having to increase the JVM heap, which would significantly increase Java garbage collection pauses. Memory is allocated using ByteBuffer.allocateDirect. One chunk is allocated at a time (each chunk is usually a few MB large), so that the allocation cost is low. To use the off-heap storage, call:
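# A minimal sketch using the org.h2.mvstore API; the store keeps its chunks in direct memory instead of a file:

    import org.h2.mvstore.MVStore;
    import org.h2.mvstore.OffHeapStore;

    // chunks are allocated in off-heap memory via ByteBuffer.allocateDirect
    OffHeapStore offHeap = new OffHeapStore();
    MVStore s = new MVStore.Builder().fileStore(offHeap).open();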
-
-@mvstore_1090_h3
-#File System Abstraction, File Locking and Online Backup
-
-@mvstore_1091_p
-# The file system is pluggable. It uses the same file system abstraction as H2. The file can be encrypted using an encrypting file system wrapper. Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API.
-
-@mvstore_1092_p
-# Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used.
-
-@mvstore_1093_p
-# The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse first needs to be disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream to do this. For encrypted databases, both the encrypted (raw) file content and the clear text content can be backed up.
-
-@mvstore_1094_h3
-#Encrypted Files
-
-@mvstore_1095_p
-# File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows:
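# For example (a minimal sketch; the file name is a placeholder):

    import org.h2.mvstore.MVStore;

    // the password is passed as a char array so that it can be cleared after use
    MVStore s = new MVStore.Builder().
            fileName("secure.mv.db").
            encryptionKey("007".toCharArray()).
            open();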
-
-@mvstore_1096_p
-# The following algorithms and settings are used:
-
-@mvstore_1097_li
-#The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory.
-
-@mvstore_1098_li
-#The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm.
-
-@mvstore_1099_li
-#The length of the salt is 64 bits, so that an attacker cannot use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator.
-
-@mvstore_1100_li
-#To speed up opening encrypted stores on Android, the number of PBKDF2 iterations is 10. The higher the value, the better the protection against brute-force password cracking attacks, but the slower opening a file becomes.
-
-@mvstore_1101_li
-#The file itself is encrypted using the standardized disk encryption mode XTS-AES. Only slightly more than one AES-128 operation per block is needed.
-
-@mvstore_1102_h3
-#Tools
-
-@mvstore_1103_p
-# There is a tool, the MVStoreTool
, to dump the contents of a file.
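# For example (a sketch; the file name is a placeholder, and the static convenience method MVStoreTool.dump(String) is an assumption):

    import org.h2.mvstore.MVStoreTool;

    // print a human-readable dump of the store file
    MVStoreTool.dump("test.mv.db");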
-
-@mvstore_1104_h3
-#Exception Handling
-
-@mvstore_1105_p
-# The MVStore does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the library. The following exceptions can occur:
-
-@mvstore_1106_code
-#IllegalStateException
-
-@mvstore_1107_li
-# if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases.
-
-@mvstore_1108_code
-#IllegalArgumentException
-
-@mvstore_1109_li
-# if a method was called with an illegal argument.
-
-@mvstore_1110_code
-#UnsupportedOperationException
-
-@mvstore_1111_li
-# if a method was called that is not supported, for example trying to modify a read-only map.
-
-@mvstore_1112_code
-#ConcurrentModificationException
-
-@mvstore_1113_li
-# if a map is modified concurrently.
-
-@mvstore_1114_h3
-#Storage Engine for H2
-
-@mvstore_1115_p
-# For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE=TRUE to the database URL. Even though the MVStore can be used with the default table-level locking, the MVCC mode is enabled by default when using the MVStore.
-
-@mvstore_1116_h2
-#File Format
-
-@mvstore_1117_p
-# The data is stored in one file. The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version.
-
-@mvstore_1118_p
-# Each chunk contains a number of B-tree pages. As an example, the following code:
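# As a sketch (the file name and exact loop bounds are illustrative), code along these lines produces the two chunks described below:

    import org.h2.mvstore.MVMap;
    import org.h2.mvstore.MVStore;

    MVStore s = MVStore.open("test.mv.db");         // hypothetical file name
    MVMap<Integer, String> map = s.openMap("data");
    for (int i = 0; i < 400; i++) {
        map.put(i, "Hello");
    }
    s.commit();                                     // writes chunk 1
    for (int i = 0; i < 100; i++) {
        map.put(i, "Hi");                           // all updates fall into the leaf with keys 0 - 139
    }
    s.commit();                                     // writes chunk 2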
-
-@mvstore_1119_p
-# will result in the following two chunks (excluding metadata):
-
-@mvstore_1120_b
-#Chunk 1:
-
-@mvstore_1121_p
-# - Page 1: (root) node with 2 entries pointing to page 2 and 3
-
-@mvstore_1122_p
-# - Page 2: leaf with 140 entries (keys 0 - 139)
-
-@mvstore_1123_p
-# - Page 3: leaf with 260 entries (keys 140 - 399)
-
-@mvstore_1124_b
-#Chunk 2:
-
-@mvstore_1125_p
-# - Page 4: (root) node with 2 entries pointing to page 3 and 5
-
-@mvstore_1126_p
-# - Page 5: leaf with 140 entries (keys 0 - 139)
-
-@mvstore_1127_p
-# That means each chunk contains the changes of one version: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks.
-
-@mvstore_1128_h3
-#File Header
-
-@mvstore_1129_p
-# There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data:
-
-@mvstore_1130_p
-# The data is stored in the form of key-value pairs. Each value is stored as a hexadecimal number. The entries are listed below; a hypothetical example header follows the list.
-
-@mvstore_1131_li
-#H: The entry "H:2" stands for the the H2 database.
-
-@mvstore_1132_li
-#block: The block number where one of the newest chunks starts (but not necessarily the newest).
-
-@mvstore_1133_li
-#blockSize: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks.
-
-@mvstore_1134_li
-#chunk: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't.
-
-@mvstore_1135_li
-#created: The number of milliseconds since 1970 when the file was created.
-
-@mvstore_1136_li
-#format: The file format number. Currently 1.
-
-@mvstore_1137_li
-#version: The version number of the chunk.
-
-@mvstore_1138_li
-#fletcher: The Fletcher-32 checksum of the header.
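# A hypothetical example header, with all values in hexadecimal (the concrete values here are illustrative only):

    H:2,block:2,blockSize:1000,chunk:7,created:13fa8a37e60,format:1,version:7,fletcher:6a2e3f4b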
-
-@mvstore_1139_p
-# When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (see below for details), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file.
-
-@mvstore_1140_h3
-#Chunk Format
-
-@mvstore_1141_p
-# There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk.
-
-@mvstore_1142_p
-# The footer makes it possible to verify that the chunk is completely written (a chunk is written as one write operation), and to find the start position of the very last chunk in the file. The chunk header and footer contain the following data:
-
-@mvstore_1143_p
-# The fields of the chunk header and footer are:
-
-@mvstore_1144_li
-#chunk: The chunk id.
-
-@mvstore_1145_li
-#block: The first block of the chunk (multiply by the block size to get the position in the file).
-
-@mvstore_1146_li
-#len: The size of the chunk in number of blocks.
-
-@mvstore_1147_li
-#map: The id of the newest map; incremented when a new map is created.
-
-@mvstore_1148_li
-#max: The sum of all maximum page sizes (see page format).
-
-@mvstore_1149_li
-#next: The predicted start block of the next chunk.
-
-@mvstore_1150_li
-#pages: The number of pages in the chunk.
-
-@mvstore_1151_li
-#root: The position of the metadata root page (see page format).
-
-@mvstore_1152_li
-#time: The time the chunk was written, in milliseconds after the file was created.
-
-@mvstore_1153_li
-#version: The version this chunk represents.
-
-@mvstore_1154_li
-#fletcher: The checksum of the footer.
-
-@mvstore_1155_p
-# Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first.
-
-@mvstore_1156_p
-# How the newest chunk is located when opening a store: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers, and the chunk footer of the very last chunk (at the end of the file), are read. From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), that chunk's header and footer are read as well. If it turns out to be a newer valid chunk, this is repeated until the newest chunk is found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. When the next chunk is written and the previous prediction turns out to be incorrect, the file header is updated as well. In any case, the file header is updated if the "next" chain gets longer than 20 hops.
-
-@mvstore_1157_h3
-#Page Format
-
-@mvstore_1158_p
-# Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is:
-
-@mvstore_1159_li
-#length (int): Length of the page in bytes.
-
-@mvstore_1160_li
-#checksum (short): Checksum (chunk id xor offset within the chunk xor page length).
-
-@mvstore_1161_li
-#mapId (variable size int): The id of the map this page belongs to.
-
-@mvstore_1162_li
-#len (variable size int): The number of keys in the page.
-
-@mvstore_1163_li
-#type (byte): The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm).
-
-@mvstore_1164_li
-#children (array of long; internal nodes only): The position of the children.
-
-@mvstore_1165_li
-#childCounts (array of variable size long; internal nodes only): The total number of entries for the given child page.
-
-@mvstore_1166_li
-#keys (byte array): All keys, stored depending on the data type.
-
-@mvstore_1167_li
-#values (byte array; leaf pages only): All values, stored depending on the data type.
-
-@mvstore_1168_p
-# Even though this is not required by the file format, pages are stored in the following order: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk.
-
-@mvstore_1169_p
-# Pointers to pages are stored as a long, using a special format: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, and 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2: 64, 3: 96, 4: 128, 5: 192, and so on up to 31, which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This allows estimating the amount of free space within a block, in addition to the number of free pages.
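# The following sketch shows one bit layout consistent with this description (the exact packing is an assumption for illustration):

    // 64-bit page pointer: [chunk id: 26 bits][offset: 32 bits][length code: 5 bits][type: 1 bit]
    static int chunkId(long pos)    { return (int) (pos >>> 38); }       // top 26 bits
    static int offset(long pos)     { return (int) (pos >>> 6); }        // middle 32 bits
    static int lengthCode(long pos) { return (int) ((pos >>> 1) & 31); } // 5-bit length code
    static int pageType(long pos)   { return (int) (pos & 1); }          // 0 = leaf, 1 = internal node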
-
-@mvstore_1170_p
-# The total number of entries in child pages is kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree.
-
-@mvstore_1171_p
-# Data compression: The data after the page type is optionally compressed using the LZF algorithm.
-
-@mvstore_1172_h3
-#Metadata Map
-
-@mvstore_1173_p
-# In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries:
-
-@mvstore_1174_li
-#chunk.1: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length.
-
-@mvstore_1175_li
-#map.1: The metadata of map 1. The entries are: name, createVersion, and type.
-
-@mvstore_1176_li
-#name.data: The map id of the map named "data". The value is "1".
-
-@mvstore_1177_li
-#root.1: The root position of map 1.
-
-@mvstore_1178_li
-#setting.storeVersion: The store version (a user defined value).
-
-@mvstore_1179_h2
-#Similar Projects and Differences to Other Storage Engines
-
-@mvstore_1180_p
-# Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in a Java or Android application.
-
-@mvstore_1181_p
-# The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal.
-
-@mvstore_1182_p
-# Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses a log structured storage. The plan is to make the MVStore both easier to use as well as faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android.
-
-@mvstore_1183_p
-# The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses a log structured storage. The MVStore does not have a record size limit.
-
-@mvstore_1184_h2
-#Current State
-
-@mvstore_1185_p
-# The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay).
-
-@mvstore_1186_h2
Requirements
-
-@mvstore_1187_p
-# The MVStore is included in the latest H2 jar file.
-
-@mvstore_1188_p
-# There are no special requirements to use it. The MVStore should run on any JVM as well as on Android.
-
-@mvstore_1189_p
-# To build just the MVStore (without the database engine), run:
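# The build target name below is an assumption, based on the name of the generated jar file:

    cd h2
    ./build.sh jarMVStore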
-
-@mvstore_1190_p
-# This will create the file bin/h2mvstore-1.4.187.jar
(about 200 KB).
-
-@performance_1000_h1
Performance
-
-@performance_1001_a
-# Performance Comparison
-
-@performance_1002_a
-# PolePosition Benchmark
-
-@performance_1003_a
-# Database Performance Tuning
-
-@performance_1004_a
-# Using the Built-In Profiler
-
-@performance_1005_a
-# Application Profiling
-
-@performance_1006_a
-# Database Profiling
-
-@performance_1007_a
-# Statement Execution Plans
-
-@performance_1008_a
-# How Data is Stored and How Indexes Work
-
-@performance_1009_a
-# Fast Database Import
-
-@performance_1010_h2
-#Performance Comparison
-
-@performance_1011_p
-# In many cases H2 is faster than other (open source and not open source) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced.
-
-@performance_1012_h3
-#Embedded
-
-@performance_1013_th
-#Test Case
-
-@performance_1014_th
-#Unit
-
-@performance_1015_th
-H2
-
-@performance_1016_th
-HSQLDB
-
-@performance_1017_th
-Derby
-
-@performance_1018_td
-#Simple: Init
-
-@performance_1019_td
-#ms
-
-@performance_1020_td
-#1019
-
-@performance_1021_td
-#1907
-
-@performance_1022_td
-#8280
-
-@performance_1023_td
-#Simple: Query (random)
-
-@performance_1024_td
-#ms
-
-@performance_1025_td
-#1304
-
-@performance_1026_td
-#873
-
-@performance_1027_td
-#1912
-
-@performance_1028_td
-#Simple: Query (sequential)
-
-@performance_1029_td
-#ms
-
-@performance_1030_td
-#835
-
-@performance_1031_td
-#1839
-
-@performance_1032_td
-#5415
-
-@performance_1033_td
-#Simple: Update (sequential)
-
-@performance_1034_td
-#ms
-
-@performance_1035_td
-#961
-
-@performance_1036_td
-#2333
-
-@performance_1037_td
-#21759
-
-@performance_1038_td
-#Simple: Delete (sequential)
-
-@performance_1039_td
-#ms
-
-@performance_1040_td
-#950
-
-@performance_1041_td
-#1922
-
-@performance_1042_td
-#32016
-
-@performance_1043_td
-#Simple: Memory Usage
-
-@performance_1044_td
-#MB
-
-@performance_1045_td
-#21
-
-@performance_1046_td
-#10
-
-@performance_1047_td
-#8
-
-@performance_1048_td
-#BenchA: Init
-
-@performance_1049_td
-#ms
-
-@performance_1050_td
-#919
-
-@performance_1051_td
-#2133
-
-@performance_1052_td
-#7528
-
-@performance_1053_td
-#BenchA: Transactions
-
-@performance_1054_td
-#ms
-
-@performance_1055_td
-#1219
-
-@performance_1056_td
-#2297
-
-@performance_1057_td
-#8541
-
-@performance_1058_td
-#BenchA: Memory Usage
-
-@performance_1059_td
-#MB
-
-@performance_1060_td
-#12
-
-@performance_1061_td
-#15
-
-@performance_1062_td
-#7
-
-@performance_1063_td
-#BenchB: Init
-
-@performance_1064_td
-#ms
-
-@performance_1065_td
-#905
-
-@performance_1066_td
-#1993
-
-@performance_1067_td
-#8049
-
-@performance_1068_td
-#BenchB: Transactions
-
-@performance_1069_td
-#ms
-
-@performance_1070_td
-#1091
-
-@performance_1071_td
-#583
-
-@performance_1072_td
-#1165
-
-@performance_1073_td
-#BenchB: Memory Usage
-
-@performance_1074_td
-#MB
-
-@performance_1075_td
-#17
-
-@performance_1076_td
-#11
-
-@performance_1077_td
-#8
-
-@performance_1078_td
-#BenchC: Init
-
-@performance_1079_td
-#ms
-
-@performance_1080_td
-#2491
-
-@performance_1081_td
-#4003
-
-@performance_1082_td
-#8064
-
-@performance_1083_td
-#BenchC: Transactions
-
-@performance_1084_td
-#ms
-
-@performance_1085_td
-#1979
-
-@performance_1086_td
-#803
-
-@performance_1087_td
-#2840
-
-@performance_1088_td
-#BenchC: Memory Usage
-
-@performance_1089_td
-#MB
-
-@performance_1090_td
-#19
-
-@performance_1091_td
-#22
-
-@performance_1092_td
-#9
-
-@performance_1093_td
-#Executed statements
-
-@performance_1094_td
-##
-
-@performance_1095_td
-#1930995
-
-@performance_1096_td
-#1930995
-
-@performance_1097_td
-#1930995
-
-@performance_1098_td
-#Total time
-
-@performance_1099_td
-#ms
-
-@performance_1100_td
-#13673
-
-@performance_1101_td
-#20686
-
-@performance_1102_td
-#105569
-
-@performance_1103_td
-#Statements per second
-
-@performance_1104_td
-##
-
-@performance_1105_td
-#141226
-
-@performance_1106_td
-#93347
-
-@performance_1107_td
-#18291
-
-@performance_1108_h3
-#Client-Server
-
-@performance_1109_th
-#Test Case
-
-@performance_1110_th
-#Unit
-
-@performance_1111_th
-#H2 (Server)
-
-@performance_1112_th
-HSQLDB
-
-@performance_1113_th
-Derby
-
-@performance_1114_th
-PostgreSQL
-
-@performance_1115_th
-MySQL
-
-@performance_1116_td
-#Simple: Init
-
-@performance_1117_td
-#ms
-
-@performance_1118_td
-#16338
-
-@performance_1119_td
-#17198
-
-@performance_1120_td
-#27860
-
-@performance_1121_td
-#30156
-
-@performance_1122_td
-#29409
-
-@performance_1123_td
-#Simple: Query (random)
-
-@performance_1124_td
-#ms
-
-@performance_1125_td
-#3399
-
-@performance_1126_td
-#2582
-
-@performance_1127_td
-#6190
-
-@performance_1128_td
-#3315
-
-@performance_1129_td
-#3342
-
-@performance_1130_td
-#Simple: Query (sequential)
-
-@performance_1131_td
-#ms
-
-@performance_1132_td
-#21841
-
-@performance_1133_td
-#18699
-
-@performance_1134_td
-#42347
-
-@performance_1135_td
-#30774
-
-@performance_1136_td
-#32611
-
-@performance_1137_td
-#Simple: Update (sequential)
-
-@performance_1138_td
-#ms
-
-@performance_1139_td
-#6913
-
-@performance_1140_td
-#7745
-
-@performance_1141_td
-#28576
-
-@performance_1142_td
-#32698
-
-@performance_1143_td
-#11350
-
-@performance_1144_td
-#Simple: Delete (sequential)
-
-@performance_1145_td
-#ms
-
-@performance_1146_td
-#8051
-
-@performance_1147_td
-#9751
-
-@performance_1148_td
-#42202
-
-@performance_1149_td
-#44480
-
-@performance_1150_td
-#16555
-
-@performance_1151_td
-#Simple: Memory Usage
-
-@performance_1152_td
-#MB
-
-@performance_1153_td
-#22
-
-@performance_1154_td
-#11
-
-@performance_1155_td
-#9
-
-@performance_1156_td
-#0
-
-@performance_1157_td
-#1
-
-@performance_1158_td
-#BenchA: Init
-
-@performance_1159_td
-#ms
-
-@performance_1160_td
-#12996
-
-@performance_1161_td
-#14720
-
-@performance_1162_td
-#24722
-
-@performance_1163_td
-#26375
-
-@performance_1164_td
-#26060
-
-@performance_1165_td
-#BenchA: Transactions
-
-@performance_1166_td
-#ms
-
-@performance_1167_td
-#10134
-
-@performance_1168_td
-#10250
-
-@performance_1169_td
-#18452
-
-@performance_1170_td
-#21453
-
-@performance_1171_td
-#15877
-
-@performance_1172_td
-#BenchA: Memory Usage
-
-@performance_1173_td
-#MB
-
-@performance_1174_td
-#13
-
-@performance_1175_td
-#15
-
-@performance_1176_td
-#9
-
-@performance_1177_td
-#0
-
-@performance_1178_td
-#1
-
-@performance_1179_td
-#BenchB: Init
-
-@performance_1180_td
-#ms
-
-@performance_1181_td
-#15264
-
-@performance_1182_td
-#16889
-
-@performance_1183_td
-#28546
-
-@performance_1184_td
-#31610
-
-@performance_1185_td
-#29747
-
-@performance_1186_td
-#BenchB: Transactions
-
-@performance_1187_td
-#ms
-
-@performance_1188_td
-#3017
-
-@performance_1189_td
-#3376
-
-@performance_1190_td
-#1842
-
-@performance_1191_td
-#2771
-
-@performance_1192_td
-#1433
-
-@performance_1193_td
-#BenchB: Memory Usage
-
-@performance_1194_td
-#MB
-
-@performance_1195_td
-#17
-
-@performance_1196_td
-#12
-
-@performance_1197_td
-#11
-
-@performance_1198_td
-#1
-
-@performance_1199_td
-#1
-
-@performance_1200_td
-#BenchC: Init
-
-@performance_1201_td
-#ms
-
-@performance_1202_td
-#14020
-
-@performance_1203_td
-#10407
-
-@performance_1204_td
-#17655
-
-@performance_1205_td
-#19520
-
-@performance_1206_td
-#17532
-
-@performance_1207_td
-#BenchC: Transactions
-
-@performance_1208_td
-#ms
-
-@performance_1209_td
-#5076
-
-@performance_1210_td
-#3160
-
-@performance_1211_td
-#6411
-
-@performance_1212_td
-#6063
-
-@performance_1213_td
-#4530
-
-@performance_1214_td
-#BenchC: Memory Usage
-
-@performance_1215_td
-#MB
-
-@performance_1216_td
-#19
-
-@performance_1217_td
-#21
-
-@performance_1218_td
-#11
-
-@performance_1219_td
-#1
-
-@performance_1220_td
-#1
-
-@performance_1221_td
-#Executed statements
-
-@performance_1222_td
-##
-
-@performance_1223_td
-#1930995
-
-@performance_1224_td
-#1930995
-
-@performance_1225_td
-#1930995
-
-@performance_1226_td
-#1930995
-
-@performance_1227_td
-#1930995
-
-@performance_1228_td
-#Total time
-
-@performance_1229_td
-#ms
-
-@performance_1230_td
-#117049
-
-@performance_1231_td
-#114777
-
-@performance_1232_td
-#244803
-
-@performance_1233_td
-#249215
-
-@performance_1234_td
-#188446
-
-@performance_1235_td
-#Statements per second
-
-@performance_1236_td
-##
-
-@performance_1237_td
-#16497
-
-@performance_1238_td
-#16823
-
-@performance_1239_td
-#7887
-
-@performance_1240_td
-#7748
-
-@performance_1241_td
-#10246
-
-@performance_1242_h3
-#Benchmark Results and Comments
-
-@performance_1243_h4
-H2
-
-@performance_1244_p
-# Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is: there is no limit on the result set size.
-
-@performance_1245_h4
-HSQLDB
-
-@performance_1246_p
-# Version 2.3.2 was used for the test. Cached tables are used in this test (hsqldb.default_table_type=cached
), and the write delay is 1 second (SET WRITE_DELAY 1
).
-
-@performance_1247_h4
-Derby
-
-@performance_1248_p
-# Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false)
, but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync()
on each checkpoint. Derby supports a testing mode (system property derby.system.durability=test
) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode.
-
-@performance_1249_h4
-PostgreSQL
-
-@performance_1250_p
-# Version 9.1.5 was used for the test. The following options were changed in postgresql.conf: fsync = off, commit_delay = 1000. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
-
-@performance_1251_h4
-MySQL
-
-@performance_1252_p
-# Version 5.1.65-log was used for the test. MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit (found in the my.ini / my.cnf file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Too bad this setting is not listed in the configuration wizard, and it is always overwritten when using the wizard. You need to change this setting manually in the my.ini / my.cnf file, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
-
-@performance_1253_h4
-#Firebird
-
-@performance_1254_p
-# Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance is welcome.
-
-@performance_1255_h4
-#Why Oracle / MS SQL Server / DB2 are Not Listed
-
-@performance_1256_p
-# The license of these databases does not allow publishing benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions.
-
-@performance_1257_h3
-#About this Benchmark
-
-@performance_1258_h4
-#How to Run
-
-@performance_1259_p
-# This test was run as follows:
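# Presumably with the build tool used elsewhere in this documentation (the target name is an assumption):

    cd h2
    ./build.sh benchmark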
-
-@performance_1260_h4
-#Separate Process per Database
-
-@performance_1261_p
-# For each database, a new process is started, to ensure the previous test does not impact the current test.
-
-@performance_1262_h4
-#Number of Connections
-
-@performance_1263_p
-# This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection.
-
-@performance_1264_h4
-#Real-World Tests
-
-@performance_1265_p
-# Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also: www.tpc.org). BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded.
-
-@performance_1266_h4
-#Comparing Embedded with Server Databases
-
-@performance_1267_p
-# This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested.
-
-@performance_1268_h4
-#Test Platform
-
-@performance_1269_p
-# This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6.
-
-@performance_1270_h4
-#Multiple Runs
-
-@performance_1271_p
-# When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured.
-
-@performance_1272_h4
-#Memory Usage
-
-@performance_1273_p
-# It is not enough to measure the time taken; the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases.
-
-@performance_1274_h4
-#Delayed Operations
-
-@performance_1275_p
-# Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially).
-
-@performance_1276_h4
-#Transaction Commit / Durability
-
-@performance_1277_p
-# Durability means a transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync() to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to keep this effect in mind. Many databases suggest batching operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However, many applications need 'short' transactions at runtime (a commit after each update). This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used.
-
-@performance_1278_h4
-#Using Prepared Statements
-
-@performance_1279_p
-# Wherever possible, the test cases use prepared statements.
-
-@performance_1280_h4
-#Currently Not Tested: Startup Time
-
-@performance_1281_p
-# The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed.
-
-@performance_1282_h2
-#PolePosition Benchmark
-
-@performance_1283_p
-# PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. This test has not been run for some time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4).
-
-@performance_1284_th
-#Test Case
-
-@performance_1285_th
-#Unit
-
-@performance_1286_th
-H2
-
-@performance_1287_th
-HSQLDB
-
-@performance_1288_th
-MySQL
-
-@performance_1289_td
-#Melbourne write
-
-@performance_1290_td
-#ms
-
-@performance_1291_td
-#369
-
-@performance_1292_td
-#249
-
-@performance_1293_td
-#2022
-
-@performance_1294_td
-#Melbourne read
-
-@performance_1295_td
-#ms
-
-@performance_1296_td
-#47
-
-@performance_1297_td
-#49
-
-@performance_1298_td
-#93
-
-@performance_1299_td
-#Melbourne read_hot
-
-@performance_1300_td
-#ms
-
-@performance_1301_td
-#24
-
-@performance_1302_td
-#43
-
-@performance_1303_td
-#95
-
-@performance_1304_td
-#Melbourne delete
-
-@performance_1305_td
-#ms
-
-@performance_1306_td
-#147
-
-@performance_1307_td
-#133
-
-@performance_1308_td
-#176
-
-@performance_1309_td
-#Sepang write
-
-@performance_1310_td
-#ms
-
-@performance_1311_td
-#965
-
-@performance_1312_td
-#1201
-
-@performance_1313_td
-#3213
-
-@performance_1314_td
-#Sepang read
-
-@performance_1315_td
-#ms
-
-@performance_1316_td
-#765
-
-@performance_1317_td
-#948
-
-@performance_1318_td
-#3455
-
-@performance_1319_td
-#Sepang read_hot
-
-@performance_1320_td
-#ms
-
-@performance_1321_td
-#789
-
-@performance_1322_td
-#859
-
-@performance_1323_td
-#3563
-
-@performance_1324_td
-#Sepang delete
-
-@performance_1325_td
-#ms
-
-@performance_1326_td
-#1384
-
-@performance_1327_td
-#1596
-
-@performance_1328_td
-#6214
-
-@performance_1329_td
-#Bahrain write
-
-@performance_1330_td
-#ms
-
-@performance_1331_td
-#1186
-
-@performance_1332_td
-#1387
-
-@performance_1333_td
-#6904
-
-@performance_1334_td
-#Bahrain query_indexed_string
-
-@performance_1335_td
-#ms
-
-@performance_1336_td
-#336
-
-@performance_1337_td
-#170
-
-@performance_1338_td
-#693
-
-@performance_1339_td
-#Bahrain query_string
-
-@performance_1340_td
-#ms
-
-@performance_1341_td
-#18064
-
-@performance_1342_td
-#39703
-
-@performance_1343_td
-#41243
-
-@performance_1344_td
-#Bahrain query_indexed_int
-
-@performance_1345_td
-#ms
-
-@performance_1346_td
-#104
-
-@performance_1347_td
-#134
-
-@performance_1348_td
-#678
-
-@performance_1349_td
-#Bahrain update
-
-@performance_1350_td
-#ms
-
-@performance_1351_td
-#191
-
-@performance_1352_td
-#87
-
-@performance_1353_td
-#159
-
-@performance_1354_td
-#Bahrain delete
-
-@performance_1355_td
-#ms
-
-@performance_1356_td
-#1215
-
-@performance_1357_td
-#729
-
-@performance_1358_td
-#6812
-
-@performance_1359_td
-#Imola retrieve
-
-@performance_1360_td
-#ms
-
-@performance_1361_td
-#198
-
-@performance_1362_td
-#194
-
-@performance_1363_td
-#4036
-
-@performance_1364_td
-#Barcelona write
-
-@performance_1365_td
-#ms
-
-@performance_1366_td
-#413
-
-@performance_1367_td
-#832
-
-@performance_1368_td
-#3191
-
-@performance_1369_td
-#Barcelona read
-
-@performance_1370_td
-#ms
-
-@performance_1371_td
-#119
-
-@performance_1372_td
-#160
-
-@performance_1373_td
-#1177
-
-@performance_1374_td
-#Barcelona query
-
-@performance_1375_td
-#ms
-
-@performance_1376_td
-#20
-
-@performance_1377_td
-#5169
-
-@performance_1378_td
-#101
-
-@performance_1379_td
-#Barcelona delete
-
-@performance_1380_td
-#ms
-
-@performance_1381_td
-#388
-
-@performance_1382_td
-#319
-
-@performance_1383_td
-#3287
-
-@performance_1384_td
-#Total
-
-@performance_1385_td
-#ms
-
-@performance_1386_td
-#26724
-
-@performance_1387_td
-#53962
-
-@performance_1388_td
-#87112
-
-@performance_1389_p
-# There are a few problems with the PolePosition test:
-
-@performance_1390_li
-# HSQLDB uses in-memory tables by default while H2 uses persistent tables. The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar
with a newer version (for example hsqldb-1.8.0.7.jar
), and then use the setting hsqldb.connecturl=jdbc:hsqldb:file:data/hsqldb/dbbench2;hsqldb.default_table_type=cached;sql.enforce_size=true
in the file Jdbc.properties
.
-
-@performance_1391_li
-#HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). To change that, use the database URL jdbc:h2:file:data/h2/dbbench;DB_CLOSE_DELAY=-1
-
-@performance_1392_li
-#The amount of cache memory is quite important, especially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account.
-
-@performance_1393_h2
-#Database Performance Tuning
-
-@performance_1394_h3
-#Keep Connections Open or Use a Connection Pool
-
-@performance_1395_p
-# If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection is especially slow if the database is closed. By default the database is closed when the last connection is closed.
-
-@performance_1396_p
-# If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database.
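# A minimal sketch using the connection pool included with H2 (URL and credentials are placeholders):

    import java.sql.Connection;
    import org.h2.jdbcx.JdbcConnectionPool;

    JdbcConnectionPool pool = JdbcConnectionPool.create("jdbc:h2:~/test", "sa", "");
    try (Connection conn = pool.getConnection()) {
        // run statements; closing the connection returns it to the pool
    }
    pool.dispose();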
-
-@performance_1397_h3
-#Use a Modern JVM
-
-@performance_1398_p
-# Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server
command-line option improves performance at the cost of a slight increase in start-up time.
-
-@performance_1399_h3
-#Virus Scanners
-
-@performance_1400_p
-# Some virus scanners scan files every time they are accessed. It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs; that means even if somebody stored a virus in a database file, it would be harmless (if the virus does not run, it cannot spread). Some virus scanners allow excluding files by suffix. Ensure files ending with .db are not scanned.
-
-@performance_1401_h3
Use the Trace Options
-
-@performance_1402_p
-# If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. For more information, see Using the Trace Options.
-
-@performance_1403_h3
-#Index Usage
-
-@performance_1404_p
-# This database uses indexes to improve the performance of SELECT, UPDATE, DELETE
. If a column is used in the WHERE
clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all or the first columns of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX
statement.
-
-@performance_1405_h3
-#How Data is Stored Internally
-
-@performance_1406_p
-# For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT
, then the data of the table is organized in this way. This is sometimes also called a "clustered index" or "index organized table".
-
-@performance_1407_p
-# H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long
. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT
is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT
is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB
columns, which are stored externally).
-
-@performance_1408_p
-# For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple columns, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree.
-
-@performance_1409_h3
-#Optimizer
-
-@performance_1410_p
-# This database uses a cost based optimizer. For simple queries and queries with medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables are added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated.
-
-@performance_1411_h3
-#Expression Optimization
-
-@performance_1412_p
-# After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE
clause is always false, then the table is not accessed at all.
-
-@performance_1413_h3
-#COUNT(*) Optimization
-
-@performance_1414_p
-# If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE
clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table
.
-
-@performance_1415_h3
-#Updating Optimizer Statistics / Column Selectivity
-
-@performance_1416_p
-# When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME='A' AND T2.ID=T1.ID, two indexes can be used, in this case the index on NAME for T1 and the index on ID for T2.
-
-@performance_1417_p
-# If a table has multiple indexes, sometimes more than one index could be used. Example: if there is a table TEST(ID, NAME, FIRSTNAME) and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME='A' AND FIRSTNAME='B': either the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names.
-
-@performance_1418_p
-# The SQL statement ANALYZE
can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer.
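# For example (a sketch; it assumes an open Connection conn, and uses the optional SAMPLE_SIZE clause):

    try (java.sql.Statement stat = conn.createStatement()) {
        // re-estimate column selectivities, scanning up to 10000 rows per table
        stat.execute("ANALYZE SAMPLE_SIZE 10000");
    }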
-
-@performance_1419_h3
-#In-Memory (Hash) Indexes
-
-@performance_1420_p
-# Using in-memory indexes, especially in-memory hash indexes, can speed up queries and data manipulation.
-
-@performance_1421_p
-#In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE. In many cases, the rows themselves will also be kept in memory. Please note this may cause memory problems for large tables.
-
-@performance_1422_p
-# In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only support direct lookup (WHERE ID = ?) but not range scans (WHERE ID < ?). To use hash indexes, use HASH as in: CREATE UNIQUE HASH INDEX and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...).
-
-@performance_1423_h3
-#Use Prepared Statements
-
-@performance_1424_p
-# If possible, use prepared statements with parameters.
-
-@performance_1425_h3
-#Prepared Statements and IN(...)
-
-@performance_1426_p
-# Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example:
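# A sketch of this pattern using H2's TABLE function (it assumes a table TEST with an ID column and an open Connection conn):

    PreparedStatement prep = conn.prepareStatement(
            "SELECT * FROM TABLE(X INT = ?) T INNER JOIN TEST ON T.X = TEST.ID");
    prep.setObject(1, new Object[] { "1", "2" }); // the IN(...) values, as an array
    ResultSet rs = prep.executeQuery();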
-
-@performance_1427_h3
-#Optimization Examples
-
-@performance_1428_p
-# See src/test/org/h2/samples/optimizations.sql
for a few examples of queries that benefit from special optimizations built into the database.
-
-@performance_1429_h3
-#Cache Size and Type
-
-@performance_1430_p
-# By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings.
-
-@performance_1431_h3
Data Types
-
-@performance_1432_p
-# Each data type has different storage and performance characteristics:
-
-@performance_1433_li
-#The DECIMAL/NUMERIC
type is slower and requires more storage than the REAL
and DOUBLE
types.
-
-@performance_1434_li
-#Text types are slower to read, write, and compare than numeric types and generally require more storage.
-
-@performance_1435_li
-#See Large Objects for information on BINARY
vs. BLOB
and VARCHAR
vs. CLOB
performance.
-
-@performance_1436_li
-#Parsing and formatting takes longer for the TIME
, DATE
, and TIMESTAMP
types than the numeric types.
-
-@performance_1437_code
-#SMALLINT/TINYINT/BOOLEAN
-
-@performance_1438_li
-# are not significantly smaller or faster to work with than INTEGER
in most modes.
-
-@performance_1439_h3
-#Sorted Insert Optimization
-
-@performance_1440_p
-# To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED
before the SELECT
statement:
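# For example (a sketch; it assumes a table TEST(ID INT PRIMARY KEY, NAME VARCHAR) and an open Connection conn):

    try (java.sql.Statement stat = conn.createStatement()) {
        // bulk-load pre-sorted rows; SORTED splits b-tree pages at the insertion point
        stat.execute("INSERT INTO TEST SORTED SELECT X, SPACE(100) FROM SYSTEM_RANGE(1, 100000)");
    }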
-
-@performance_1441_h2
-#Using the Built-In Profiler
-
-@performance_1442_p
-# A very simple Java profiler is built-in. To use it, use the following template:
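# A sketch using org.h2.util.Profiler:

    Profiler prof = new Profiler();
    prof.startCollecting();
    // .... some long running process, at least a few seconds
    prof.stopCollecting();
    System.out.println(prof.getTop(3)); // print the 3 most common stack traces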
-
-@performance_1443_h2
-#Application Profiling
-
-@performance_1444_h3
-#Analyze First
-
-@performance_1445_p
-# Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis()
. But this does not work for complex applications with many modules, and for memory problems.
-
-@performance_1446_p
-# A simple way to profile an application is to use the built-in profiling tool of java. Example:
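# For example, with the hprof agent of older JVMs (com.acme.Test is a placeholder main class):

    java -Xrunhprof:cpu=samples,depth=16 com.acme.Test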
-
-@performance_1447_p
-# Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l
to get the process id, and then run jstack <pid>
or kill -QUIT <pid>
(Linux) or press Ctrl+C (Windows).
-
-@performance_1448_p
-# A simple profiling tool is included in H2. To use it, the application needs to be changed slightly. Example:
-
-@performance_1449_p
-# The profiler is built into the H2 Console tool, to analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which help to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds.
-
-@performance_1450_h2
-#Database Profiling
-
-@performance_1451_p
-# The ConvertTraceFile
tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof
. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE=2
). The easiest way to set the trace level is to append the setting to the database URL, for example: jdbc:h2:~/test;TRACE_LEVEL_FILE=2
or jdbc:h2:tcp://localhost/~/test;TRACE_LEVEL_FILE=2
. As an example, execute the following script using the H2 Console:
-
-@performance_1452_p
-# After running the test case, convert the .trace.db
file using the ConvertTraceFile
tool. The trace file is located in the same directory as the database file.
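# For example (file names are placeholders; -traceFile and -script are options of the tool):

    java -cp h2*.jar org.h2.tools.ConvertTraceFile \
        -traceFile "~/test.trace.db" -script "~/test.sql"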
-
-@performance_1453_p
-# The generated file test.sql
will contain the SQL statements as well as the following profiling data (results vary):
-
-@performance_1454_h2
-#Statement Execution Plans
-
-@performance_1455_p
-# The SQL statement EXPLAIN
displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN
: SELECT, UPDATE, DELETE, MERGE, INSERT
. The following query shows that the database uses the primary key index to search for rows:
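# For example (a sketch; it assumes a table TEST with primary key ID and an open Statement stat):

    try (ResultSet rs = stat.executeQuery("EXPLAIN SELECT * FROM TEST WHERE ID = 1")) {
        rs.next();
        System.out.println(rs.getString(1)); // the plan names the index that will be used
    }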
-
-@performance_1456_p
-# For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE
(using the primary key). For each row, it will additionally check that the value of the column AMOUNT
is larger than zero, and for those rows the database will search in the table CUSTOMER
(using the primary key). The query plan contains some redundancy so it is a valid statement.
-
-@performance_1457_h3
-#Displaying the Scan Count
-
-@performance_1458_code
-#EXPLAIN ANALYZE
-
-@performance_1459_p
-# additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN
which only prepares it. The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table. Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan
means this query doesn't use an index.
-
-@performance_1460_p
-# The cache will prevent the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. Exceptions are large CLOB and BLOB columns, which are not stored in the table.
-
-@performance_1461_h3
-#Special Optimizations
-
-@performance_1462_p
-# For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY
is used.
-
-@performance_1463_p
-# For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST
, the query plan includes the line /* direct lookup */
if the data can be read from an index.
-
-@performance_1464_p
-# For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE, the query plan includes the line /* distinct */ if there is a non-unique or multi-column index on this column, and if this column has a low selectivity.
-
-@performance_1465_p
-# For queries of the form SELECT * FROM TEST ORDER BY ID
, the query plan includes the line /* index sorted */
to indicate there is no separate sorting required.
-
-@performance_1466_p
-# For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID
, the query plan includes the line /* group sorted */
to indicate there is no separate sorting required.
-
-@performance_1467_h2
-#How Data is Stored and How Indexes Work
-
-@performance_1468_p
-# Internally, each row in a table is identified by a unique number, the row id. The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT
or BIGINT
, then the value of this column is the row id, otherwise the database generates the row id automatically. There is a (non-standard) way to access the row id: using the _ROWID_
pseudo-column:
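# For example (a sketch; the table name ADDRESS matches the example data below, and stat is an open Statement):

    try (ResultSet rs = stat.executeQuery("SELECT _ROWID_, * FROM ADDRESS")) {
        while (rs.next()) {
            System.out.println(rs.getLong(1) + ": " + rs.getString("NAME"));
        }
    }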
-
-@performance_1469_p
-# The data is stored in the database as follows:
-
-@performance_1470_th
-#_ROWID_
-
-@performance_1471_th
-#FIRST_NAME
-
-@performance_1472_th
-#NAME
-
-@performance_1473_th
-#CITY
-
-@performance_1474_th
-#PHONE
-
-@performance_1475_td
-#1
-
-@performance_1476_td
-#John
-
-@performance_1477_td
-#Miller
-
-@performance_1478_td
-#Berne
-
-@performance_1479_td
-#123 456 789
-
-@performance_1480_td
-#2
-
-@performance_1481_td
-#Philip
-
-@performance_1482_td
-#Jones
-
-@performance_1483_td
-#Berne
-
-@performance_1484_td
-#123 012 345
-
-@performance_1485_p
-# Access by row id is fast because the data is sorted by this key. Please note the row id is not available until after the row was added (that means, it cannot be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT:
-
-@performance_1486_h3
-#Indexes
-
-@performance_1487_p
-# An index internally is basically just a table that contains the indexed column(s), plus the row id:
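# For example (a sketch; the index and table names are illustrative, and stat is an open Statement):

    // a multi-column index matching the layout shown below
    stat.execute("CREATE INDEX IDX_PLACE ON ADDRESS(CITY, NAME, FIRST_NAME)");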
-
-@performance_1488_p
-# In the index, the data is sorted by the indexed columns. So this index contains the following data:
-
-@performance_1489_th
-#CITY
-
-@performance_1490_th
-#NAME
-
-@performance_1491_th
-#FIRST_NAME
-
-@performance_1492_th
-#_ROWID_
-
-@performance_1493_td
-#Berne
-
-@performance_1494_td
-#Jones
-
-@performance_1495_td
-#Philip
-
-@performance_1496_td
-#2
-
-@performance_1497_td
-#Berne
-
-@performance_1498_td
-#Miller
-
-@performance_1499_td
-#John
-
-@performance_1500_td
-#1
-
-@performance_1501_p
-# When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) makes it possible to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. However, if only the first name is known, then this index is not used:
-
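-# A sketch of conditions that can and cannot use such an index (names are assumptions as above; EXPLAIN shows the chosen plan):
-
import java.sql.*;

public class IndexUsageSketch {
    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:demo");
             Statement stat = conn.createStatement()) {
            stat.execute("CREATE TABLE ADDRESS(FIRST_NAME VARCHAR, NAME VARCHAR, CITY VARCHAR, PHONE VARCHAR)");
            stat.execute("CREATE INDEX IDX_PLACE ON ADDRESS(CITY, NAME, FIRST_NAME)");
            // the leading index columns are known: the index can be used
            printPlan(stat, "EXPLAIN SELECT * FROM ADDRESS WHERE CITY = 'Berne' AND NAME = 'Miller'");
            // only a non-leading column is known: the index is not used
            printPlan(stat, "EXPLAIN SELECT * FROM ADDRESS WHERE FIRST_NAME = 'John'");
        }
    }

    static void printPlan(Statement stat, String sql) throws SQLException {
        try (ResultSet rs = stat.executeQuery(sql)) {
            rs.next();
            System.out.println(rs.getString(1));
        }
    }
}
-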
-@performance_1502_p
-# If your application often queries the table for a phone number, then it makes sense to create an additional index on it:
-
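-# For example (the index name is an illustrative assumption): CREATE INDEX IDX_PHONE ON ADDRESS(PHONE)
-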
-@performance_1503_p
-# This index contains the phone number, and the row id:
-
-@performance_1504_th
-#PHONE
-
-@performance_1505_th
-#_ROWID_
-
-@performance_1506_td
-#123 012 345
-
-@performance_1507_td
-#2
-
-@performance_1508_td
-#123 456 789
-
-@performance_1509_td
-#1
-
-@performance_1510_h3
-#Using Multiple Indexes
-
-@performance_1511_p
-# Within a query, only one index per logical table is used. Using the condition PHONE = '123 567 789' OR CITY = 'Berne'
would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION
. In this case, each individual query uses a different index:
-
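-# For example, instead of SELECT * FROM ADDRESS WHERE PHONE = '123 567 789' OR CITY = 'Berne', write (names as in the examples above): SELECT * FROM ADDRESS WHERE PHONE = '123 567 789' UNION SELECT * FROM ADDRESS WHERE CITY = 'Berne'
-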
-@performance_1512_h2
-#Fast Database Import
-
-@performance_1513_p
-# To speed up large imports, consider using the following options temporarily:
-
-@performance_1514_code
-#SET LOG 0
-
-@performance_1515_li
-# (disabling the transaction log)
-
-@performance_1516_code
-#SET CACHE_SIZE
-
-@performance_1517_li
-# (a large cache is faster)
-
-@performance_1518_code
-#SET LOCK_MODE 0
-
-@performance_1519_li
-# (disable locking)
-
-@performance_1520_code
-#SET UNDO_LOG 0
-
-@performance_1521_li
-# (disable the session undo log)
-
-@performance_1522_p
-# These options can be set in the database URL: jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0
. Most of these options are not recommended for regular use; that means you need to reset them after the import.
-
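-# A sketch of using these options only for the import, then resetting them (the reset values below are the usual defaults, assumed here; the file name and import step are placeholders):
-
import java.sql.*;

public class FastImportSketch {
    public static void main(String[] args) throws SQLException {
        String url = "jdbc:h2:~/test;LOG=0;CACHE_SIZE=65536;LOCK_MODE=0;UNDO_LOG=0";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stat = conn.createStatement()) {
            // run the import here, for example using RUNSCRIPT or CSVREAD
            // afterwards, reset the dangerous options for regular use
            stat.execute("SET LOG 1");
            stat.execute("SET LOCK_MODE 3");
            stat.execute("SET UNDO_LOG 1");
            // a large CACHE_SIZE is harmless and can be kept
        }
    }
}
-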
-@performance_1523_p
-# If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ...
is faster than CREATE TABLE(...); INSERT INTO ... SELECT ...
.
-
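-# For example (names are illustrative): CREATE TABLE TEST2(ID INT, NAME VARCHAR) AS SELECT ID, NAME FROM TEST is typically faster than CREATE TABLE TEST2(ID INT, NAME VARCHAR) followed by INSERT INTO TEST2 SELECT ID, NAME FROM TEST
-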
-@quickstart_1000_h1
Quick Start
-
-@quickstart_1001_a
-# Embedding H2 in an Application
-
-@quickstart_1002_a
-# The H2 Console Application
-
-@quickstart_1003_h2
Embedding H2 in an Application
-
-@quickstart_1004_p
-# This database can be used in embedded mode, or in server mode. To use it in embedded mode, you need to take the steps below (a minimal connection sketch follows the list):
-
-@quickstart_1005_li
-#Add the h2*.jar
to the classpath (H2 does not have any dependencies)
-
-@quickstart_1006_li
-#Use the JDBC driver class: org.h2.Driver
-
-@quickstart_1007_li
-#The database URL jdbc:h2:~/test
opens the database test
in your user home directory
-
-@quickstart_1008_li
-#A new database is automatically created
-
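-# A minimal sketch of opening an embedded connection (the user name sa with an empty password is assumed here, the usual default for a new database):
-
import java.sql.Connection;
import java.sql.DriverManager;

public class QuickStartSketch {
    public static void main(String[] args) throws Exception {
        // load the driver class (not needed on modern JVMs, but matches the step above)
        Class.forName("org.h2.Driver");
        // opens (and creates, if needed) the database 'test' in the user home directory
        try (Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "")) {
            // use the database here via regular JDBC
        }
    }
}
-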
-@quickstart_1009_h2
The H2 Console Application
-
-@quickstart_1010_p
-# The Console lets you access a SQL database using a browser interface.
-
-@quickstart_1011_p
-# If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial.
-
-@quickstart_1012_h3
Step-by-Step
-
-@quickstart_1013_h4
Installation
-
-@quickstart_1014_p
-# Install the software using the Windows Installer (if you have not done so yet).
-
-@quickstart_1015_h4
Start the Console
-
-@quickstart_1016_p
-# Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]:
-
-@quickstart_1017_p
-# A new console window appears:
-
-@quickstart_1018_p
-# Also, a new browser page should open with the URL http://localhost:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time.
-
-@quickstart_1019_h4
Login
-
-@quickstart_1020_p
-# Select [Generic H2] and click [Connect]:
-
-@quickstart_1021_p
-# You are now logged in.
-
-@quickstart_1022_h4
Sample
-
-@quickstart_1023_p
-# Click on the [Sample SQL Script]:
-
-@quickstart_1024_p
-# The SQL commands appear in the command area.
-
-@quickstart_1025_h4
Execute
-
-@quickstart_1026_p
-# Click [Run]
-
-@quickstart_1027_p
-# On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script.
-
-@quickstart_1028_h4
Disconnect
-
-@quickstart_1029_p
-# Click on [Disconnect]:
-
-@quickstart_1030_p
-# to close the connection.
-
-@quickstart_1031_h4
End
-
-@quickstart_1032_p
-# Close the console window. For more information, see the Tutorial.
-
-@roadmap_1000_h1
Roadmap
-
-@roadmap_1001_p
-# New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches.
-
-@roadmap_1002_h2
-#Version 1.5.x: Planned Changes
-
-@roadmap_1003_li
-#Replace file password hash with file encryption key; validate encryption key when connecting.
-
-@roadmap_1004_li
-#Remove "set binary collation" feature.
-
-@roadmap_1005_li
-#Remove the encryption algorithm XTEA.
-
-@roadmap_1006_li
-#Disallow referencing other tables in a table (via constraints for example).
-
-@roadmap_1007_li
-#Remove PageStore features like compress_lob.
-
-@roadmap_1008_h2
-#Version 1.4.x: Planned Changes
-
-@roadmap_1009_li
-#Change license to MPL 2.0.
-
-@roadmap_1010_li
-#Automatic migration from 1.3 databases to 1.4.
-
-@roadmap_1011_li
-#Option to disable the file name suffix somehow (issue 447).
-
-@roadmap_1012_h2
-#Priority 1
-
-@roadmap_1013_li
-#Bugfixes.
-
-@roadmap_1014_li
-#More tests with MULTI_THREADED=1 (and MULTI_THREADED with MVCC): Online backup (using the 'backup' statement).
-
-@roadmap_1015_li
-#Server side cursors.
-
-@roadmap_1016_h2
-#Priority 2
-
-@roadmap_1017_li
-#Support hints for the optimizer (which index to use, enforce the join order).
-
-@roadmap_1018_li
-#Full outer joins.
-
-@roadmap_1019_li
-#Access rights: remember the owner of an object. Create, alter and drop privileges. COMMENT: allow owner of object to change it. Issue 208: Access rights for schemas.
-
-@roadmap_1020_li
-#Test multi-threaded in-memory db access.
-
-@roadmap_1021_li
-#MySQL, MS SQL Server compatibility: support case sensitive (mixed case) identifiers without quotes.
-
-@roadmap_1022_li
-#Support GRANT SELECT, UPDATE ON [schemaName.] *.
-
-@roadmap_1023_li
-#Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL.
-
-@roadmap_1024_li
-#Clustering: support mixed clustering mode (one embedded, others in server mode).
-
-@roadmap_1025_li
-#Clustering: reads should be randomly distributed (optional) or to a designated database on RAM (parameter: READ_FROM=3).
-
-@roadmap_1026_li
-#Window functions: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4;
-
-@roadmap_1027_li
-#PostgreSQL catalog: use BEFORE SELECT triggers instead of views over metadata tables.
-
-@roadmap_1028_li
-#Compatibility: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211.
-
-@roadmap_1029_li
-#Test very large databases and LOBs (up to 256 GB).
-
-@roadmap_1030_li
-#Store all temp files in the temp directory.
-
-@roadmap_1031_li
-#Don't use temp files, especially not deleteOnExit (bug 4513817: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs.
-
-@roadmap_1032_li
-#Make DDL (Data Definition) operations transactional.
-
-@roadmap_1033_li
-#Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED).
-
-@roadmap_1034_li
-#Groovy Stored Procedures: http://groovy.codehaus.org/GSQL
-
-@roadmap_1035_li
-#Add a migration guide (list differences between databases).
-
-@roadmap_1036_li
-#Optimization: automatic index creation suggestion using the trace file?
-
-@roadmap_1037_li
-#Fulltext search Lucene: analyzer configuration, mergeFactor.
-
-@roadmap_1038_li
-#Compression performance: don't allocate buffers, compress / expand in to out buffer.
-
-@roadmap_1039_li
-#Rebuild index functionality to shrink index size and improve performance.
-
-@roadmap_1040_li
-#Console: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA).
-
-@roadmap_1041_li
-#Test performance again with SQL Server, Oracle, DB2.
-
-@roadmap_1042_li
-#Test with Spatial DB in a box / JTS: http://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification.
-
-@roadmap_1043_li
-#Write more tests and documentation for MVCC (Multi Version Concurrency Control).
-
-@roadmap_1044_li
-#Find a tool to view large text files (larger than 100 MB), with find, page up and down (like less), truncate before / after.
-
-@roadmap_1045_li
-#Implement, test, document XAConnection and so on.
-
-@roadmap_1046_li
-#Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption).
-
-@roadmap_1047_li
-#CHECK: find out what makes CHECK=TRUE slow, move to CHECK2.
-
-@roadmap_1048_li
-#Drop with invalidate views (so that source code is not lost). Check what other databases do exactly.
-
-@roadmap_1049_li
-#Index usage for (ID, NAME)=(1, 'Hi'); document.
-
-@roadmap_1050_li
-#Set a connection read only (Connection.setReadOnly) or using a connection parameter.
-
-@roadmap_1051_li
-#Access rights: finer grained access control (grant access for specific functions).
-
-@roadmap_1052_li
-#ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]).
-
-@roadmap_1053_li
-#Version check: docs / web console (using Javascript), and maybe in the library (using TCP/IP).
-
-@roadmap_1054_li
-#Web server classloader: override findResource / getResourceFrom.
-
-@roadmap_1055_li
-#Cost for embedded temporary view is calculated wrong if the result is constant.
-
-@roadmap_1056_li
-#Count index range query (count(*) where id between 10 and 20).
-
-@roadmap_1057_li
-#Performance: update in-place.
-
-@roadmap_1058_li
-#Clustering: when a database is back alive, automatically synchronize with the master (requires readable transaction log).
-
-@roadmap_1059_li
-#Database file name suffix: a way to use no or a different suffix (for example using a slash).
-
-@roadmap_1060_li
-#Eclipse plugin.
-
-@roadmap_1061_li
-#Asynchronous queries to support publish/subscribe: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification".
-
-@roadmap_1062_li
-#Fulltext search (native): reader / tokenizer / filter.
-
-@roadmap_1063_li
-#Linked schema using CSV files: one schema for a directory of files; support indexes for CSV files.
-
-@roadmap_1064_li
-#iReport to support H2.
-
-@roadmap_1065_li
-#Include SMTP (mail) client (alert on cluster failure, low disk space,...).
-
-@roadmap_1066_li
-#Option for SCRIPT to only process one or a set of schemas or tables, and append to a file.
-
-@roadmap_1067_li
-#JSON parser and functions.
-
-@roadmap_1068_li
-#Copy database: tool with config GUI and batch mode, extensible (example: compare).
-
-@roadmap_1069_li
-#Document, implement tool for long running transactions using user-defined compensation statements.
-
-@roadmap_1070_li
-#Support SET TABLE DUAL READONLY.
-
-@roadmap_1071_li
-#GCJ: what is the state now?
-
-@roadmap_1072_li
-#Events for: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http://docs.openlinksw.com/virtuoso/fn_dbev_startup.html
-
-@roadmap_1073_li
-#Optimization: simpler log compression.
-
-@roadmap_1074_li
-#Support standard INFORMATION_SCHEMA tables, as defined in http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - especially KEY_COLUMN_USAGE: http://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif
-
-@roadmap_1075_li
-#Compatibility: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby: division by zero. HSQLDB: 0.0e1 / 0.0e1 is NaN.
-
-@roadmap_1076_li
-#Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R).
-
-@roadmap_1077_li
-#Custom class loader to reload functions on demand.
-
-@roadmap_1078_li
-#Test http://mysql-je.sourceforge.net/
-
-@roadmap_1079_li
-#H2 Console: the webclient could support more features like phpMyAdmin.
-
-@roadmap_1080_li
-#Support Oracle functions: TO_DATE, TO_NUMBER.
-
-@roadmap_1081_li
-#Work on the Java to C converter.
-
-@roadmap_1082_li
-#The HELP information schema can be directly exposed in the Console.
-
-@roadmap_1083_li
-#Maybe use the 0x1234 notation for binary fields, see MS SQL Server.
-
-@roadmap_1084_li
-#Support Oracle CONNECT BY in some way: http://www.adp-gmbh.ch/ora/sql/connect_by.html http://philip.greenspun.com/sql/trees.html
-
-@roadmap_1085_li
-#SQL Server 2005, Oracle: support COUNT(*) OVER(). See http://www.orafusion.com/art_anlytc.htm
-
-@roadmap_1086_li
-#SQL 2003: http://www.wiscorp.com/sql_2003_standard.zip
-
-@roadmap_1087_li
-#Version column (number/sequence and timestamp based).
-
-@roadmap_1088_li
-#Optimize getGeneratedKey: send last identity after each execute (server).
-
-@roadmap_1089_li
-#Test and document UPDATE TEST SET (ID, NAME) = (SELECT ID*10, NAME || '!' FROM TEST T WHERE T.ID=TEST.ID).
-
-@roadmap_1090_li
-#Max memory rows / max undo log size: use block count / row size not row count.
-
-@roadmap_1091_li
-#Implement point-in-time recovery.
-
-@roadmap_1092_li
-#Support PL/SQL (programming language / control flow statements).
-
-@roadmap_1093_li
-#LIKE: improved version for larger texts (currently using naive search).
-
-@roadmap_1094_li
-#Throw an exception when the application calls getInt on a Long (optional).
-
-@roadmap_1095_li
-#Default date format for input and output (local date constants).
-
-@roadmap_1096_li
-#Document ROWNUM usage for reports: SELECT ROWNUM, * FROM (subquery).
-
-@roadmap_1097_li
-#File system that writes to two file systems (replication, replicating file system).
-
-@roadmap_1098_li
-#Standalone tool to get relevant system properties and add them to the trace output.
-
-@roadmap_1099_li
-#Support 'call proc(1=value)' (PostgreSQL, Oracle).
-
-@roadmap_1100_li
-#Console: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?).
-
-@roadmap_1101_li
-#Console: autocomplete Ctrl+Space inserts template.
-
-@roadmap_1102_li
-#Option to encrypt .trace.db file.
-
-@roadmap_1103_li
-#Auto-Update feature for database, .jar file.
-
-@roadmap_1104_li
-#ResultSet SimpleResultSet.readFromURL(String url): id varchar, state varchar, released timestamp.
-
-@roadmap_1105_li
-#Partial indexing (see PostgreSQL).
-
-@roadmap_1106_li
-#Add GUI to build a custom version (embedded, fulltext,...) using build flags.
-
-@roadmap_1107_li
-#http://rubyforge.org/projects/hypersonic/
-
-@roadmap_1108_li
-#Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app).
-
-@roadmap_1109_li
-#Table order: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility).
-
-@roadmap_1110_li
-#Backup tool should work with other databases as well.
-
-@roadmap_1111_li
-#Console: -ifExists doesn't work for the console. Add a flag to disable other dbs.
-
-@roadmap_1112_li
-#Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess).
-
-@roadmap_1113_li
-#Java static code analysis: http://pmd.sourceforge.net/
-
-@roadmap_1114_li
-#Java static code analysis: http://www.eclipse.org/tptp/
-
-@roadmap_1115_li
-#Compatibility for CREATE SCHEMA AUTHORIZATION.
-
-@roadmap_1116_li
-#Implement Clob / Blob truncate and the remaining functionality.
-
-@roadmap_1117_li
-#Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ...
-
-@roadmap_1118_li
-#File locking: writing a system property to detect concurrent access from the same VM (different classloaders).
-
-@roadmap_1119_li
-#Pure SQL triggers (example: update parent table if the child table is changed).
-
-@roadmap_1120_li
-#Add H2 to Gem (Ruby install system).
-
-@roadmap_1121_li
-#Support linked JCR tables.
-
-@roadmap_1122_li
-#Native fulltext search: min word length; store word positions.
-
-@roadmap_1123_li
-#Add an option to the SCRIPT command to generate only portable / standard SQL.
-
-@roadmap_1124_li
-#Updatable views: create 'instead of' triggers automatically if possible (simple cases first).
-
-@roadmap_1125_li
-#Improve create index performance.
-
-@roadmap_1126_li
-#Compact databases without having to close the database (vacuum).
-
-@roadmap_1127_li
-#Implement more JDBC 4.0 features.
-
-@roadmap_1128_li
-#Support TRANSFORM / PIVOT as in MS Access.
-
-@roadmap_1129_li
-#SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...).
-
-@roadmap_1130_li
-#Support updatable views with join on primary keys (to extend a table).
-
-@roadmap_1131_li
-#Public interface for functions (not public static).
-
-@roadmap_1132_li
-#Support reading the transaction log.
-
-@roadmap_1133_li
-#Feature matrix as in i-net software.
-
-@roadmap_1134_li
-#Updatable result set on table without primary key or unique index.
-
-@roadmap_1135_li
-#Compatibility with Derby and PostgreSQL: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221.
-
-@roadmap_1136_li
-#Allow execution-time prepare for SELECT * FROM CSVREAD(?, 'columnNameString')
-
-@roadmap_1137_li
-#Support data type INTERVAL
-
-@roadmap_1138_li
-#Support nested transactions (possibly using savepoints internally).
-
-@roadmap_1139_li
-#Add a benchmark for bigger databases, and one for many users.
-
-@roadmap_1140_li
-#Compression in the result set over TCP/IP.
-
-@roadmap_1141_li
-#Support curtimestamp (like curtime, curdate).
-
-@roadmap_1142_li
-#Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options.
-
-@roadmap_1143_li
-#Release locks (shared or exclusive) on demand
-
-@roadmap_1144_li
-#Support OUTER UNION
-
-@roadmap_1145_li
-#Support parameterized views (similar to CSVREAD, but using just SQL for the definition)
-
-@roadmap_1146_li
-#A way (JDBC driver) to map a URL (jdbc:h2map:c1) to a connection object
-
-@roadmap_1147_li
-#Support dynamic linked schema (automatically adding/updating/removing tables)
-
-@roadmap_1148_li
-#Clustering: adding a node should be very fast and without interrupting clients (very short lock)
-
-@roadmap_1149_li
-#Compatibility: # is the start of a single line comment (MySQL) but date quote (Access). Mode specific
-
-@roadmap_1150_li
-#Run benchmarks with Android, Java 7, java -server
-
-@roadmap_1151_li
-#Optimizations: faster hash function for strings.
-
-@roadmap_1152_li
-#DatabaseEventListener: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality
-
-@roadmap_1153_li
-#Benchmark: add a graph to show how databases scale (performance/database size)
-
-@roadmap_1154_li
-#Implement a SQLData interface to map your data over to a custom object
-
-@roadmap_1155_li
-#In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers = true)
-
-@roadmap_1156_li
-#Support multiple directories (on different hard drives) for the same database
-
-@roadmap_1157_li
-#Server protocol: use challenge response authentication, but client sends hash(user+password) encrypted with response
-
-@roadmap_1158_li
-#Support EXEC[UTE] (doesn't return a result set, compatible with MS SQL Server)
-
-@roadmap_1159_li
-#Support native XML data type - see http://en.wikipedia.org/wiki/SQL/XML
-
-@roadmap_1160_li
-#Support triggers with a string property or option: SpringTrigger, OSGITrigger
-
-@roadmap_1161_li
-#MySQL compatibility: update test1 t1, test2 t2 set t1.id = t2.id where t1.id = t2.id;
-
-@roadmap_1162_li
-#Ability to resize the cache array when resizing the cache
-
-@roadmap_1163_li
-#Time based cache writing (one second after writing the log)
-
-@roadmap_1164_li
-#Check state of H2 driver for DDLUtils: http://issues.apache.org/jira/browse/DDLUTILS-185
-
-@roadmap_1165_li
-#Index usage for REGEXP LIKE.
-
-@roadmap_1166_li
-#Compatibility: add a role DBA (like ADMIN).
-
-@roadmap_1167_li
-#Better support multiple processors for in-memory databases.
-
-@roadmap_1168_li
-#Support N'text'
-
-@roadmap_1169_li
-#Support compatibility for jdbc:hsqldb:res:
-
-@roadmap_1170_li
-#HSQLDB compatibility: automatically convert to the next 'higher' data type. Example: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB: long; PostgreSQL: integer out of range)
-
-@roadmap_1171_li
-#Provide a Java SQL builder with standard and H2 syntax
-
-@roadmap_1172_li
-#Trace: write OS, file system, JVM,... when opening the database
-
-@roadmap_1173_li
-#Support indexes for views (probably requires materialized views)
-
-@roadmap_1174_li
-#Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters
-
-@roadmap_1175_li
-#Server: use one listener (detect if the request comes from a PG or TCP client)
-
-@roadmap_1176_li
-#Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200
-
-@roadmap_1177_li
-#Sequence: PostgreSQL compatibility (rename, create) http://www.postgresql.org/docs/8.2/static/sql-altersequence.html
-
-@roadmap_1178_li
-#DISTINCT: support large result sets by sorting on all columns (additionally) and then removing duplicates.
-
-@roadmap_1179_li
-#Support a special trigger on all tables to allow building a transaction log reader.
-
-@roadmap_1180_li
-#File system with a background writer thread; test if this is faster
-
-@roadmap_1181_li
-#Better document the source code (high level documentation).
-
-@roadmap_1182_li
-#Support select * from dual a left join dual b on b.x=(select max(x) from dual)
-
-@roadmap_1183_li
-#Optimization: don't lock when the database is read-only
-
-@roadmap_1184_li
-#Issue 146: Support merge join.
-
-@roadmap_1185_li
-#Integrate spatial functions from http://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download
-
-@roadmap_1186_li
-#Cluster: hot deploy (adding a node at runtime).
-
-@roadmap_1187_li
-#Support DatabaseMetaData.insertsAreDetected: updatable result sets should detect inserts.
-
-@roadmap_1188_li
-#Oracle: support DECODE method (convert to CASE WHEN).
-
-@roadmap_1189_li
-#Native search: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping
-
-@roadmap_1190_li
-#Improve documentation of access rights.
-
-@roadmap_1191_li
-#Support opening a database that is in the classpath, maybe using a new file system. Workaround: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation().
-
-@roadmap_1192_li
-#Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others).
-
-@roadmap_1193_li
-#Remember the user defined data type (domain) of a column.
-
-@roadmap_1194_li
-#MVCC: support multi-threaded kernel with multi-version concurrency.
-
-@roadmap_1195_li
-#Auto-server: add option to define the port range or list.
-
-@roadmap_1196_li
-#Support Jackcess (MS Access databases)
-
-@roadmap_1197_li
-#Built-in methods to write large objects (BLOB and CLOB): FILE_WRITE('test.txt', 'Hello World')
-
-@roadmap_1198_li
-#Improve time to open large databases (see mail 'init time for distributed setup')
-
-@roadmap_1199_li
-#Move Maven 2 repository from hsql.sf.net to h2database.sf.net
-
-@roadmap_1200_li
-#Java 1.5 tool: JdbcUtils.closeSilently(s1, s2,...)
-
-@roadmap_1201_li
-#Optimize A=? OR B=? to UNION if the cost is lower.
-
-@roadmap_1202_li
-#Javadoc: document design patterns used
-
-@roadmap_1203_li
-#Support custom collators, for example for natural sort (for text that contains numbers).
-
-@roadmap_1204_li
-#Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt)
-
-@roadmap_1205_li
-#Convert SQL-injection-2.txt to html document, include SQLInjection.java sample
-
-@roadmap_1206_li
-#Support OUT parameters in user-defined procedures.
-
-@roadmap_1207_li
-#Web site design: http://www.igniterealtime.org/projects/openfire/index.jsp
-
-@roadmap_1208_li
-#HSQLDB compatibility: Openfire server uses: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC
-
-@roadmap_1209_li
-#Translation: use ?? in help.csv
-
-@roadmap_1210_li
-#Translated .pdf
-
-@roadmap_1211_li
-#Recovery tool: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file
-
-@roadmap_1212_li
-#Issue 357: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Neither PostgreSQL nor HSQLDB supports getGeneratedKeys. Also support it when using INSERT ... SELECT.
-
-@roadmap_1213_li
-#RECOVER=2 to backup the database, run recovery, open the database
-
-@roadmap_1214_li
-#Recovery should work with encrypted databases
-
-@roadmap_1215_li
-#Corruption: new error code, add help
-
-@roadmap_1216_li
-#Space reuse: after init, scan all storages and free those that don't belong to a live database object
-
-@roadmap_1217_li
-#Access rights: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects)
-
-@roadmap_1218_li
-#Support NOCACHE table option (Oracle).
-
-@roadmap_1219_li
-#Support table partitioning.
-
-@roadmap_1220_li
-#Add regular javadocs (using the default doclet, but another css) to the homepage.
-
-@roadmap_1221_li
-#The database should be kept open for a longer time when using the server mode.
-
-@roadmap_1222_li
-#Javadocs: for each tool, add a copy & paste sample in the class level.
-
-@roadmap_1223_li
-#Javadocs: add @author tags.
-
-@roadmap_1224_li
-#Fluent API for tools: Server.createTcpServer().setPort(9081).setPassword(password).start();
-
-@roadmap_1225_li
-#MySQL compatibility: real SQL statement for DESCRIBE TEST
-
-@roadmap_1226_li
-#Use a default delay of 1 second before closing a database.
-
-@roadmap_1227_li
-#Write (log) to system table before adding to internal data structures.
-
-@roadmap_1228_li
-#Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup).
-
-@roadmap_1229_li
-#Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case).
-
-@roadmap_1230_li
-#MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem).
-
-@roadmap_1231_li
-#Oracle compatibility: support NLS_DATE_FORMAT.
-
-@roadmap_1232_li
-#Support for Thread.interrupt to cancel running statements.
-
-@roadmap_1233_li
-#Cluster: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process).
-
-@roadmap_1234_li
-#H2 Console: support CLOB/BLOB download using a link.
-
-@roadmap_1235_li
-#Support flashback queries as in Oracle.
-
-@roadmap_1236_li
-#Import / Export of fixed width text files.
-
-@roadmap_1237_li
-#HSQLDB compatibility: automatic data type for SUM if the value is too big (by default use the same type as the data).
-
-@roadmap_1238_li
-#Improve the optimizer to select the right index for special cases: where id between 2 and 4 and booleanColumn
-
-@roadmap_1239_li
-#Linked tables: make hidden columns available (Oracle: rowid and ora_rowscn columns).
-
-@roadmap_1240_li
-#H2 Console: in-place autocomplete.
-
-@roadmap_1241_li
-#Support large databases: split database files to multiple directories / disks (similar to tablespaces).
-
-@roadmap_1242_li
-#H2 Console: support configuration option for fixed width (monospace) font.
-
-@roadmap_1243_li
-#Native fulltext search: support analyzers (especially for Chinese, Japanese).
-
-@roadmap_1244_li
-#Automatically compact databases from time to time (as a background process).
-
-@roadmap_1245_li
-#Test Eclipse DTP.
-
-@roadmap_1246_li
-#H2 Console: autocomplete: keep the previous setting
-
-@roadmap_1247_li
-#executeBatch: option to stop at the first failed statement.
-
-@roadmap_1248_li
-#Implement OLAP features as described here: http://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5
-
-@roadmap_1249_li
-#Support Oracle ROWID (unique identifier for each row).
-
-@roadmap_1250_li
-#MySQL compatibility: alter table add index i(c), add constraint c foreign key(c) references t(c);
-
-@roadmap_1251_li
-#Server mode: improve performance for batch updates.
-
-@roadmap_1252_li
-#Applets: support read-only databases in a zip file (accessed as a resource).
-
-@roadmap_1253_li
-#Long running queries / errors / trace system table.
-
-@roadmap_1254_li
-#H2 Console should support JaQu directly.
-
-@roadmap_1255_li
-#Better document FTL_SEARCH, FTL_SEARCH_DATA.
-
-@roadmap_1256_li
-#Sequences: CURRVAL should be session specific. Compatibility with PostgreSQL.
-
-@roadmap_1257_li
-#Index creation using deterministic functions.
-
-@roadmap_1258_li
-#ANALYZE: for unique indexes that allow null, count the number of null.
-
-@roadmap_1259_li
-#MySQL compatibility: multi-table delete: DELETE .. FROM .. [,...] USING - See http://dev.mysql.com/doc/refman/5.0/en/delete.html
-
-@roadmap_1260_li
-#AUTO_SERVER: support changing IP addresses (disable a network while the database is open).
-
-@roadmap_1261_li
-#Avoid using java.util.Calendar internally because it's slow, complicated, and buggy.
-
-@roadmap_1262_li
-#Support TRUNCATE .. CASCADE like PostgreSQL.
-
-@roadmap_1263_li
-#Fulltext search: lazy result generation using SimpleRowSource.
-
-@roadmap_1264_li
-#Fulltext search: support alternative syntax: WHERE FTL_CONTAINS(name, 'hello').
-
-@roadmap_1265_li
-#MySQL compatibility: support REPLACE, see http://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73.
-
-@roadmap_1266_li
-#MySQL compatibility: support INSERT INTO table SET column1 = value1, column2 = value2
-
-@roadmap_1267_li
-#Docs: add a one-line description for each function and SQL statement at the top (in the link section).
-
-@roadmap_1268_li
-#Javadoc search: weight for titles should be higher ('random' should list Functions as the best match).
-
-@roadmap_1269_li
-#Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes.
-
-@roadmap_1270_li
-#Issue 50: Oracle compatibility: support calling 0-parameters functions without parenthesis. Make constants obsolete.
-
-@roadmap_1271_li
-#MySQL, HSQLDB compatibility: support where 'a'=1 (not supported by Derby, PostgreSQL)
-
-@roadmap_1272_li
-#Support a data type "timestamp with timezone" using java.util.Calendar.
-
-@roadmap_1273_li
-#Finer granularity for SLF4J trace - See http://code.google.com/p/h2database/issues/detail?id=62
-
-@roadmap_1274_li
-#Add database creation date and time to the database.
-
-@roadmap_1275_li
-#Support ASSERTION.
-
-@roadmap_1276_li
-#MySQL compatibility: support comparing 1='a'
-
-@roadmap_1277_li
-#Support PostgreSQL lock modes: http://www.postgresql.org/docs/8.3/static/explicit-locking.html
-
-@roadmap_1278_li
-#PostgreSQL compatibility: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver.
-
-@roadmap_1279_li
-#RunScript should be able to read from System.in (or support a quiet mode for the Shell tool).
-
-@roadmap_1280_li
-#Natural join: support select x from dual natural join dual.
-
-@roadmap_1281_li
-#Support using system properties in database URLs (may be a security problem).
-
-@roadmap_1282_li
-#Natural join: somehow support this: select a.x, b.x, x from dual a natural join dual b
-
-@roadmap_1283_li
-#Use the Java service provider mechanism to register file systems and function libraries.
-
-@roadmap_1284_li
-#MySQL compatibility: for auto_increment columns, convert 0 to next value (as when inserting NULL).
-
-@roadmap_1285_li
-#Optimization for multi-column IN: use an index if possible. Example: (A, B) IN((1, 2), (2, 3)).
-
-@roadmap_1286_li
-#Optimization for EXISTS: convert to inner join or IN(..) if possible.
-
-@roadmap_1287_li
-#Functions: support hashcode(value); cryptographic and fast
-
-@roadmap_1288_li
-#Serialized file lock: support long running queries.
-
-@roadmap_1289_li
-#Network: use 127.0.0.1 if other addresses don't work.
-
-@roadmap_1290_li
-#Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication.
-
-@roadmap_1291_li
-#Support reading JCR data: one table per node type; query table; cache option
-
-@roadmap_1292_li
-#OSGi: create a sample application, test, document.
-
-@roadmap_1293_li
-#help.csv: use complete examples for functions; run as test case.
-
-@roadmap_1294_li
-#Functions to calculate the memory and disk space usage of a table, a row, or a value.
-
-@roadmap_1295_li
-#Re-implement PooledConnection; use a lightweight connection object.
-
-@roadmap_1296_li
-#Doclet: convert tests in javadocs to a java class.
-
-@roadmap_1297_li
-#Doclet: format fields like methods, but support sorting by name and value.
-
-@roadmap_1298_li
-#Doclet: shrink the html files.
-
-@roadmap_1299_li
-#MySQL compatibility: support SET NAMES 'latin1' - See also http://code.google.com/p/h2database/issues/detail?id=56
-
-@roadmap_1300_li
-#Allow to scan index backwards starting with a value (to better support ORDER BY DESC).
-
-@roadmap_1301_li
-#Java Service Wrapper: try http://yajsw.sourceforge.net/
-
-@roadmap_1302_li
-#Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE.
-
-@roadmap_1303_li
-#MySQL compatibility: support ALTER TABLE .. MODIFY COLUMN.
-
-@roadmap_1304_li
-#Use a lazy and auto-close input stream (open resource when reading, close on eof).
-
-@roadmap_1305_li
-#Connection pool: 'reset session' command (delete temp tables, rollback, auto-commit true).
-
-@roadmap_1306_li
-#Improve SQL documentation, see http://www.w3schools.com/sql/
-
-@roadmap_1307_li
-#MySQL compatibility: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL.
-
-@roadmap_1308_li
-#MS SQL Server compatibility: support DATEPART syntax.
-
-@roadmap_1309_li
-#Sybase/DB2/Oracle compatibility: support out parameters in stored procedures - See http://code.google.com/p/h2database/issues/detail?id=83
-
-@roadmap_1310_li
-#Support INTERVAL data type (see Oracle and others).
-
-@roadmap_1311_li
-#Combine Server and Console tool (only keep Server).
-
-@roadmap_1312_li
-#Store the Lucene index in the database itself.
-
-@roadmap_1313_li
-#Support standard MERGE statement: http://en.wikipedia.org/wiki/Merge_%28SQL%29
-
-@roadmap_1314_li
-#Oracle compatibility: support DECODE(x, ...).
-
-@roadmap_1315_li
-#MVCC: compare concurrent update behavior with PostgreSQL and Oracle.
-
-@roadmap_1316_li
-#HSQLDB compatibility: CREATE FUNCTION (maybe using a Function interface).
-
-@roadmap_1317_li
-#HSQLDB compatibility: support CALL "java.lang.Math.sqrt"(2.0)
-
-@roadmap_1318_li
-#Support comma as the decimal separator in the CSV tool.
-
-@roadmap_1319_li
-#Compatibility: Java functions with SQLJ Part1 http://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz
-
-@roadmap_1320_li
-#Compatibility: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation.
-
-@roadmap_1321_li
-#CACHE_SIZE: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache.
-
-@roadmap_1322_li
-#Support date/time/timestamp as documented in http://en.wikipedia.org/wiki/ISO_8601
-
-@roadmap_1323_li
-#PostgreSQL compatibility: when in PG mode, treat BYTEA data like PG.
-
-@roadmap_1324_li
-#Support =ANY(array) as in PostgreSQL. See also http://www.postgresql.org/docs/8.0/interactive/arrays.html
-
-@roadmap_1325_li
-#IBM DB2 compatibility: support PREVIOUS VALUE FOR sequence.
-
-@roadmap_1326_li
-#Compatibility: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer).
-
-@roadmap_1327_li
-#Oracle compatibility: support CREATE SYNONYM table FOR schema.table.
-
-@roadmap_1328_li
-#FTP: document the server, including -ftpTask option to execute / kill remote processes
-
-@roadmap_1329_li
-#FTP: problems with multithreading?
-
-@roadmap_1330_li
-#FTP: implement SFTP / FTPS
-
-@roadmap_1331_li
-#FTP: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file).
-
-@roadmap_1332_li
-#More secure default configuration if remote access is enabled.
-
-@roadmap_1333_li
-#Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future').
-
-@roadmap_1334_li
-#Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE.
-
-@roadmap_1335_li
-#Issue 107: Prefer using the ORDER BY index if LIMIT is used.
-
-@roadmap_1336_li
-#An index on (id, name) should be used for a query: select * from t where s=? order by i
-
-@roadmap_1337_li
-#Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL.
-
-@roadmap_1338_li
-#Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true).
-
-@roadmap_1339_li
-#Maybe disallow = within database names (jdbc:h2:mem:MODE=DB2 means database name MODE=DB2).
-
-@roadmap_1340_li
-#Fast alter table add column.
-
-@roadmap_1341_li
-#Improve concurrency for in-memory database operations.
-
-@roadmap_1342_li
-#Issue 122: Support for connection aliases for remote tcp connections.
-
-@roadmap_1343_li
-#Fast scrambling (strong encryption doesn't help if the password is included in the application).
-
-@roadmap_1344_li
-#H2 Console: support -webPassword to require a password to access preferences or shutdown.
-
-@roadmap_1345_li
-#Issue 126: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number.
-
-@roadmap_1346_li
-#Issue 127: Support activation/deactivation of triggers
-
-@roadmap_1347_li
-#Issue 130: Custom log event listeners
-
-@roadmap_1348_li
-#Issue 131: IBM DB2 compatibility: sysibm.sysdummy1
-
-@roadmap_1349_li
-#Issue 132: Use Java enum trigger type.
-
-@roadmap_1350_li
-#Issue 134: IBM DB2 compatibility: session global variables.
-
-@roadmap_1351_li
-#Cluster: support load balance with values for each server / auto detect.
-
-@roadmap_1352_li
-#FTL_SET_OPTION(keyString, valueString) with key stopWords at first.
-
-@roadmap_1353_li
-#Pluggable access control mechanism.
-
-@roadmap_1354_li
-#Fulltext search (Lucene): support streaming CLOB data.
-
-@roadmap_1355_li
-#Document/example how to create and read an encrypted script file.
-
-@roadmap_1356_li
-#Check state of http://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins).
-
-@roadmap_1357_li
-#Fulltext search (Lucene): only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible.
-
-@roadmap_1358_li
-#Support a way to create or read compressed encrypted script files using an API.
-
-@roadmap_1359_li
-#Scripting language support (Javascript).
-
-@roadmap_1360_li
-#The network client should better detect if the server is not an H2 server and fail early.
-
-@roadmap_1361_li
-#H2 Console: support CLOB/BLOB upload.
-
-@roadmap_1362_li
-#Database file lock: detect hibernate / standby / very slow threads (compare system time).
-
-@roadmap_1363_li
-#Automatic detection of redundant indexes.
-
-@roadmap_1364_li
-#Maybe reject join without "on" (except natural join).
-
-@roadmap_1365_li
-#Implement GiST (Generalized Search Tree for Secondary Storage).
-
-@roadmap_1366_li
-#Function to read a number of bytes/characters from a BLOB or CLOB.
-
-@roadmap_1367_li
-#Issue 156: Support SELECT ? UNION SELECT ?.
-
-@roadmap_1368_li
-#Automatic mixed mode: support a port range list (to avoid firewall problems).
-
-@roadmap_1369_li
-#Support the pseudo column rowid, oid, _rowid_.
-
-@roadmap_1370_li
-#H2 Console / large result sets: stream early instead of keeping a whole result in-memory
-
-@roadmap_1371_li
-#Support TRUNCATE for linked tables.
-
-@roadmap_1372_li
-#UNION: evaluate INTERSECT before UNION (like most other databases except Oracle).
-
-@roadmap_1373_li
-#Delay creating the information schema, and share metadata columns.
-
-@roadmap_1374_li
-#TCP Server: use a nonce (number used once) to protect unencrypted channels against replay attacks.
-
-@roadmap_1375_li
-#Simplify running scripts and recovery: CREATE FORCE USER (overwrites an existing user).
-
-@roadmap_1376_li
-#Support CREATE DATABASE LINK (a custom JDBC driver is already supported).
-
-@roadmap_1377_li
-#Support large GROUP BY operations. Issue 216.
-
-@roadmap_1378_li
-#Issue 163: Allow to create foreign keys on metadata types.
-
-@roadmap_1379_li
-#Logback: write a native DBAppender.
-
-@roadmap_1380_li
-#Cache size: don't use more cache than what is available.
-
-@roadmap_1381_li
-#Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread.
-
-@roadmap_1382_li
-#Tree index: Instead of an AVL tree, use a general balanced tree or a scapegoat tree.
-
-@roadmap_1383_li
-#User defined functions: allow to store the bytecode (of just the class, or the jar file of the extension) in the database.
-
-@roadmap_1384_li
-#Compatibility: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL.
-
-@roadmap_1385_li
-#Optimizer: for WHERE X=? AND Y IN(?), the index on Y is always used. This should be cost based.
-
-@roadmap_1386_li
-#Common Table Expression (CTE) / recursive queries: support parameters. Issue 314.
-
-@roadmap_1387_li
-#Oracle compatibility: support INSERT ALL.
-
-@roadmap_1388_li
-#Issue 178: Optimizer: index usage when both ascending and descending indexes are available.
-
-@roadmap_1389_li
-#Issue 179: Related subqueries in HAVING clause.
-
-@roadmap_1390_li
-#IBM DB2 compatibility: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero.
-
-@roadmap_1391_li
-#Creating primary key: always create a constraint.
-
-@roadmap_1392_li
-#Maybe use a different page layout: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system.
-
-@roadmap_1393_li
-#Indexes of temporary tables are currently kept in-memory. Is this how it should be?
-
-@roadmap_1394_li
-#The Shell tool should support the same built-in commands as the H2 Console.
-
-@roadmap_1395_li
-#Maybe use PhantomReference instead of finalize.
-
-@roadmap_1396_li
-#Database file name suffix: should only have one dot by default. Example: .h2db
-
-@roadmap_1397_li
-#Issue 196: Function based indexes
-
-@roadmap_1398_li
-#ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName.
-
-@roadmap_1399_li
-#Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java
-
-@roadmap_1400_li
-#ROWNUM: Oracle compatibility when used within a subquery. Issue 198.
-
-@roadmap_1401_li
-#Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way.
-
-@roadmap_1402_li
-#ODBC: encrypted databases are not supported because the ;CIPHER= can not be set.
-
-@roadmap_1403_li
-#Support CLOB and BLOB update, especially conn.createBlob().setBinaryStream(1);
-
-@roadmap_1404_li
-#Optimizer: index usage when both ascending and descending indexes are available. Issue 178.
-
-@roadmap_1405_li
-#Issue 306: Support schema specific domains.
-
-@roadmap_1406_li
-#Triggers: support user defined execution order. Oracle: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby: triggers are fired in the order in which they were created.
-
-@roadmap_1407_li
-#PostgreSQL compatibility: combine "users" and "roles". See: http://www.postgresql.org/docs/8.1/interactive/user-manag.html
-
-@roadmap_1408_li
-#Improve documentation of system properties: only list the property names, default values, and description.
-
-@roadmap_1409_li
-#Support running totals / cumulative sum using SUM(..) OVER(..).
-
-@roadmap_1410_li
-#Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize)
-
-@roadmap_1411_li
-#Triggers: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others).
-
-@roadmap_1412_li
-#Common Table Expression (CTE) / recursive queries: support INSERT INTO ... SELECT ... Issue 219.
-
-@roadmap_1413_li
-#Common Table Expression (CTE) / recursive queries: support non-recursive queries. Issue 217.
-
-@roadmap_1414_li
-#Common Table Expression (CTE) / recursive queries: avoid endless loop. Issue 218.
-
-@roadmap_1415_li
-#Common Table Expression (CTE) / recursive queries: support multiple named queries. Issue 220.
-
-@roadmap_1416_li
-#Common Table Expression (CTE) / recursive queries: identifier scope may be incorrect. Issue 222.
-
-@roadmap_1417_li
-#Log long running transactions (similar to long running statements).
-
-@roadmap_1418_li
-#Parameter data type is data type of other operand. Issue 205.
-
-@roadmap_1419_li
-#Some combinations of nested join with right outer join are not supported.
-
-@roadmap_1420_li
-#DatabaseEventListener.openConnection(id) and closeConnection(id).
-
-@roadmap_1421_li
-#Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent logging in with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API.
-
-@roadmap_1422_li
-#Compatibility for data type CHAR (Derby, HSQLDB). Issue 212.
-
-@roadmap_1423_li
-#Compatibility with MySQL TIMESTAMPDIFF. Issue 209.
-
-@roadmap_1424_li
-#Optimizer: use a histogram of the data, especially for non-normal distributions.
-
-@roadmap_1425_li
-#Trigger: allow declaring as source code (like functions).
-
-@roadmap_1426_li
-#User defined aggregate: allow declaring as source code (like functions).
-
-@roadmap_1427_li
-#The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable.
-
-@roadmap_1428_li
-#MySQL + PostgreSQL compatibility: support string literal escape with \n.
-
-@roadmap_1429_li
-#PostgreSQL compatibility: support string literal escape with double \\.
-
-@roadmap_1430_li
-#Document the TCP server "management_db". Maybe include the IP address of the client.
-
-@roadmap_1431_li
-#Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main
-
-@roadmap_1432_li
-#If a database object was not found in the current schema, but one with the same name exists in another schema, include that in the error message.
-
-@roadmap_1433_li
-#Optimization to use an index for OR when using multiple keys: where (key1 = ? and key2 = ?) OR (key1 = ? and key2 = ?)
-
-@roadmap_1434_li
-#Issue 302: Support optimizing queries with both inner and outer joins, as in: select * from test a inner join test b on a.id=b.id inner join o on o.id=a.id where b.x=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables".
-
-@roadmap_1435_li
-#JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool).
-
-@roadmap_1436_li
-#Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...;
-
-@roadmap_1437_li
-#nioMapped file system: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example).
-
-@roadmap_1438_li
-#Column as parameter of function table. Issue 228.
-
-@roadmap_1439_li
-#Connection pool: detect ;AUTOCOMMIT=FALSE in the database URL, and if set, disable autocommit for all connections.
-
-@roadmap_1440_li
-#Compatibility with MS Access: support "&" to concatenate text.
-
-@roadmap_1441_li
-#The BACKUP statement should not synchronize on the database, and therefore should not block other users.
-
-@roadmap_1442_li
-#Document the database file format.
-
-@roadmap_1443_li
-#Support reading LOBs.
-
-@roadmap_1444_li
-#Require appending DANGEROUS=TRUE when using certain dangerous settings such as LOG=0, LOG=1, LOCK_MODE=0, disabling FILE_LOCK,...
-
-@roadmap_1445_li
-#Support UDT (user defined types) similar to how Apache Derby supports it: check constraint, allow to use it in Java functions as parameters (return values already seem to work).
-
-@roadmap_1446_li
-#Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files).
-
-@roadmap_1447_li
-#Issue 229: SELECT with simple OR tests uses tableScan when it could use indexes.
-
-@roadmap_1448_li
-#GROUP BY queries should use a temporary table if there are too many rows.
-
-@roadmap_1449_li
-#BLOB: support random access when reading.
-
-@roadmap_1450_li
-#CLOB: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form).
-
-@roadmap_1451_li
-#Compatibility: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...).
-
-@roadmap_1452_li
-#Compatibility with MySQL: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...).
-
-@roadmap_1453_li
-#Compatibility with MySQL: support non-strict mode (sql_mode = ""): any data that is too large for the column will just be truncated or set to the default value.
-
-@roadmap_1454_li
-#The full condition should be sent to the linked table, not just the indexed condition. Example: TestLinkedTableFullCondition
-
-@roadmap_1455_li
-#Compatibility with IBM DB2: CREATE PROCEDURE.
-
-@roadmap_1456_li
-#Compatibility with IBM DB2: SQL cursors.
-
-@roadmap_1457_li
-#Single-column primary key values are always stored explicitly. This is not required.
-
-@roadmap_1458_li
-#Compatibility with MySQL: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8).
-
-@roadmap_1459_li
-#CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true.
-
-@roadmap_1460_li
-#Optimization for large lists for column IN(1, 2, 3, 4,...) - currently a list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated).
-
-@roadmap_1461_li
-#Compatibility for ARRAY data type (Oracle: VARRAY(n) of VARCHAR(m); HSQLDB: VARCHAR(n) ARRAY; Postgres: VARCHAR(n)[]).
-
-@roadmap_1462_li
-#PostgreSQL compatible array literal syntax: ARRAY[['a', 'b'], ['c', 'd']]
-
-@roadmap_1463_li
-#PostgreSQL compatibility: UPDATE with FROM.
-
-@roadmap_1464_li
-#Issue 297: Oracle compatibility for "at time zone".
-
-@roadmap_1465_li
-#IBM DB2 compatibility: IDENTITY_VAL_LOCAL().
-
-@roadmap_1466_li
-#Support SQL/XML.
-
-@roadmap_1467_li
-#Support concurrent opening of databases.
-
-@roadmap_1468_li
-#Improved error message and diagnostics in case of network configuration problems.
-
-@roadmap_1469_li
-#TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases).
-
-@roadmap_1470_li
-#Adding a primary key should make the columns 'not null' unless there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby).
-
-@roadmap_1471_li
-#ARRAY data type: support Integer[] and so on in Java functions (currently only Object[] is supported).
-
-@roadmap_1472_li
-#MySQL compatibility: LOCK TABLES a READ, b READ - see also http://dev.mysql.com/doc/refman/5.0/en/lock-tables.html
-
-@roadmap_1473_li
-#The HTML to PDF converter should use http://code.google.com/p/wkhtmltopdf/
-
-@roadmap_1474_li
-#Issue 303: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)".
-
-@roadmap_1475_li
-#MySQL compatibility: update test1 t1, test2 t2 set t1.name=t2.name where t1.id=t2.id.
-
-@roadmap_1476_li
-#Issue 283: Improve performance of H2 on Android.
-
-@roadmap_1477_li
-#Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s).
-
-@roadmap_1478_li
-#Column compression option - see http://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d
-
-@roadmap_1479_li
-#PostgreSQL compatibility: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID).
-
-@roadmap_1480_li
-#MS SQL Server compatibility: support @@ROWCOUNT.
-
-@roadmap_1481_li
-#PostgreSQL compatibility: LOG(x) is LOG10(x) and not LN(x).
-
-@roadmap_1482_li
-#Issue 311: Serialized lock mode: executeQuery of write operations fails.
-
-@roadmap_1483_li
-#PostgreSQL compatibility: support PgAdmin III (especially the function current_setting).
-
-@roadmap_1484_li
-#MySQL compatibility: support TIMESTAMPADD.
-
-@roadmap_1485_li
-#Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-
-@roadmap_1486_li
-#Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-
-@roadmap_1487_li
-#Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase).
-
-@roadmap_1488_li
-#TRANSACTION_ID() for in-memory databases.
-
-@roadmap_1489_li
-#TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL).
-
-@roadmap_1490_li
-#Support [INNER | OUTER] JOIN USING(column [,...]).
-
-@roadmap_1491_li
-#Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle)
-
-@roadmap_1492_li
-#GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby).
-
-@roadmap_1493_li
-#Sybase / MS SQL Server compatibility: CONVERT(..) parameters are swapped.
-
-@roadmap_1494_li
-#Index conditions: WHERE AGE>1 should not scan through all rows with AGE=1.
-
-@roadmap_1495_li
-#PHP support: H2 should support PDO, or test with PostgreSQL PDO.
-
-@roadmap_1496_li
-#Outer joins: if no column of the outer join table is referenced, the outer join table could be removed from the query.
-
-@roadmap_1497_li
-#Cluster: allow using auto-increment and identity columns by ensuring they are executed in lock-step.
-
-@roadmap_1498_li
-#MySQL compatibility: index names only need to be unique for the given table.
-
-@roadmap_1499_li
-#Issue 352: constraints: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases.
-
-@roadmap_1500_li
-#Oracle compatibility: support MEDIAN aggregate function.
-
-@roadmap_1501_li
-#Issue 348: Oracle compatibility: division should return a decimal result.
-
-@roadmap_1502_li
-#Read rows on demand: instead of reading the whole row, only read up to the column that is requested. Keep a pointer to the data area and the column id that is already read.
-
-@roadmap_1503_li
-#Long running transactions: log session id when detected.
-
-@roadmap_1504_li
-#Optimization: "select id from test" should use the index on id even without "order by".
-
-@roadmap_1505_li
-#Issue 362: LIMIT support for UPDATE statements (MySQL compatibility).
-
-@roadmap_1506_li
-#Sybase SQL Anywhere compatibility: SELECT TOP ... START AT ...
-
-@roadmap_1507_li
-#Use Java 6 SQLException subclasses.
-
-@roadmap_1508_li
-#Issue 390: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR
-
-@roadmap_1509_li
-#Use Java 6 exceptions: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,..
-
-@roadmap_1510_li
-#Support index-only selects (i.e. without needing to load the actual table data)
-
-@roadmap_1511_h2
-#Not Planned
-
-@roadmap_1512_li
-#HSQLDB (did) support this: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
-
-@roadmap_1513_li
-#String.intern (so that Strings can be compared with ==) will not be used because some VMs have problems when used extensively.
-
-@roadmap_1514_li
-#In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements.
-
-@sourceError_1000_h1
-#Error Analyzer
-
-@sourceError_1001_a
Home
-
-@sourceError_1002_a
-#Input
-
-@sourceError_1003_h2
-# Details Source Code
-
-@sourceError_1004_p
-#Paste the error message and stack trace below and click on 'Details' or 'Source Code':
-
-@sourceError_1005_b
-#Error Code:
-
-@sourceError_1006_b
-#Product Version:
-
-@sourceError_1007_b
-#Message:
-
-@sourceError_1008_b
-#More Information:
-
-@sourceError_1009_b
-#Stack Trace:
-
-@sourceError_1010_b
-#Source File:
-
-@sourceError_1011_p
-# Inline
-
-@tutorial_1000_h1
Tutorial
-
-@tutorial_1001_a
-# Starting and Using the H2 Console
-
-@tutorial_1002_a
-# Special H2 Console Syntax
-
-@tutorial_1003_a
-# Settings of the H2 Console
-
-@tutorial_1004_a
-# Connecting to a Database using JDBC
-
-@tutorial_1005_a
-# Creating New Databases
-
-@tutorial_1006_a
-# Using the Server
-
-@tutorial_1007_a
-# Using Hibernate
-
-@tutorial_1008_a
-# Using TopLink and Glassfish
-
-@tutorial_1009_a
-# Using EclipseLink
-
-@tutorial_1010_a
-# Using Apache ActiveMQ
-
-@tutorial_1011_a
-# Using H2 within NetBeans
-
-@tutorial_1012_a
-# Using H2 with jOOQ
-
-@tutorial_1013_a
-# Using Databases in Web Applications
-
-@tutorial_1014_a
-# Android
-
-@tutorial_1015_a
-# CSV (Comma Separated Values) Support
-
-@tutorial_1016_a
-# Upgrade, Backup, and Restore
-
-@tutorial_1017_a
-# Command Line Tools
-
-@tutorial_1018_a
-# The Shell Tool
-
-@tutorial_1019_a
-# Using OpenOffice Base
-
-@tutorial_1020_a
-# Java Web Start / JNLP
-
-@tutorial_1021_a
-# Using a Connection Pool
-
-@tutorial_1022_a
-# Fulltext Search
-
-@tutorial_1023_a
-# User-Defined Variables
-
-@tutorial_1024_a
-# Date and Time
-
-@tutorial_1025_a
-# Using Spring
-
-@tutorial_1026_a
-# OSGi
-
-@tutorial_1027_a
-# Java Management Extension (JMX)
-
-@tutorial_1028_h2
-Starting and Using the H2 Console
-
-@tutorial_1029_p
-# The H2 Console application lets you access a database using a browser. This can be a H2 database, or another database that supports the JDBC API.
-
-@tutorial_1030_p
-# This is a client/server application, so both a server and a client (a browser) are required to run it.
-
-@tutorial_1031_p
-# Depending on your platform and environment, there are multiple ways to start the H2 Console:
-
-@tutorial_1032_th
-OS
-
-@tutorial_1033_th
-Start
-
-@tutorial_1034_td
-Windows
-
-@tutorial_1035_td
-# Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]
-
-@tutorial_1036_td
-# An icon will be added to the system tray:
-
-@tutorial_1037_td
-# If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http://localhost:8082.
-
-@tutorial_1038_td
-Windows
-
-@tutorial_1039_td
-# Open a file browser, navigate to h2/bin, and double click on h2.bat.
-
-@tutorial_1040_td
-# A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL: http://localhost:8082
).
-
-@tutorial_1041_td
-Any
-
-@tutorial_1042_td
-# Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java.
-
-@tutorial_1043_td
-Any
-
-@tutorial_1044_td
-# Open a console window, navigate to the directory h2/bin, and type:
-
-@tutorial_1045_h3
-Firewall
-
-@tutorial_1046_p
-# If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer do you need to allow remote connections in the firewall.
-
-@tutorial_1047_p
-# It has been reported that when using Kaspersky 7.0 with firewall, the H2 Console is very slow when connecting over the IP address. A workaround is to connect using 'localhost'.
-
-@tutorial_1048_p
-# A small firewall is already built into the server: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'.
-
-@tutorial_1049_h3
-Testing Java
-
-@tutorial_1050_p
-# To find out which version of Java is installed, open a command prompt and type:
-
-@tutorial_1051_p
-# If you get an error message, you may need to add the Java binary directory to the path environment variable.
-
-@tutorial_1052_h3
-#Error Message 'Port may be in use'
-
-@tutorial_1053_p
-# You can only start one instance of the H2 Console, otherwise you will get the following error message: "The Web server could not be started. Possible cause: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections.
-
-@tutorial_1054_h3
-Using another Port
-
-@tutorial_1055_p
-# If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort.
-
-@tutorial_1056_p
-# If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used.
-
-@tutorial_1057_h3
-Connecting to the Server using a Browser
-
-@tutorial_1058_p
-# If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http://localhost:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example: http://192.168.0.2:8082. If you enabled TLS on the server side, the URL needs to start with https://.
-
-@tutorial_1059_h3
-Multiple Concurrent Sessions
-
-@tutorial_1060_p
-# Multiple concurrent browser sessions are supported. As the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application.
-
-@tutorial_1061_h3
-Login
-
-@tutorial_1062_p
-# At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. If you are done, click [Connect].
-
-@tutorial_1063_p
-# You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console).
-
-@tutorial_1064_h3
-Error Messages
-
-@tutorial_1065_p
-# Error messages are shown in red. You can show/hide the stack trace of the exception by clicking on the message.
-
-@tutorial_1066_h3
-Adding Database Drivers
-
-@tutorial_1067_p
-# To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the HSQLDB JDBC driver C:\Programs\hsqldb\lib\hsqldb.jar, set the environment variable H2DRIVERS to C:\Programs\hsqldb\lib\hsqldb.jar.
-
-@tutorial_1068_p
-# Multiple drivers can be set; entries need to be separated by ; (Windows) or : (other operating systems). Spaces in the path names are supported. The settings must not be quoted.
-
-@tutorial_1069_h3
-#Using the H2 Console
-
-@tutorial_1070_p
-# The H2 Console application has three main panels: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command.
-
-@tutorial_1071_h3
-Inserting Table Names or Column Names
-
-@tutorial_1072_p
-# To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded.
-
-@tutorial_1073_h3
-Disconnecting and Stopping the Application
-
-@tutorial_1074_p
-# To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions.
-
-@tutorial_1075_p
-# To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window.
-
-@tutorial_1076_h2
-#Special H2 Console Syntax
-
-@tutorial_1077_p
-# The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command.
-
-@tutorial_1078_th
-#Command(s)
-
-@tutorial_1079_th
-Description
-
-@tutorial_1080_td
-# @autocommit_true;
-
-@tutorial_1081_td
-# @autocommit_false;
-
-@tutorial_1082_td
-# Enable or disable autocommit.
-
-@tutorial_1083_td
-# @cancel;
-
-@tutorial_1084_td
-# Cancel the currently running statement.
-
-@tutorial_1085_td
-# @columns null null TEST;
-
-@tutorial_1086_td
-# @index_info null null TEST;
-
-@tutorial_1087_td
-# @tables;
-
-@tutorial_1088_td
-# @tables null null TEST;
-
-@tutorial_1089_td
-# Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. Missing parameters at the end of the line are set to null. The complete list of metadata commands is: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns
-
-@tutorial_1090_td
-# @edit select * from test;
-
-@tutorial_1091_td
-# Use an updatable result set.
-
-@tutorial_1092_td
-# @generated insert into test() values();
-
-@tutorial_1093_td
-# Show the result of Statement.getGeneratedKeys().
-
-@tutorial_1094_td
-# @history;
-
-@tutorial_1095_td
-# List the command history.
-
-@tutorial_1096_td
-# @info;
-
-@tutorial_1097_td
-# Display the result of various Connection and DatabaseMetaData methods.
-
-@tutorial_1098_td
-# @list select * from test;
-
-@tutorial_1099_td
-# Show the result set in list format (each column on its own line, with row numbers).
-
-@tutorial_1100_td
-# @loop 1000 select ?, ?/*rnd*/;
-
-@tutorial_1101_td
-# @loop 1000 @statement select ?;
-
-@tutorial_1102_td
-# Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed.
-
-@tutorial_1103_td
-# @maxrows 20;
-
-@tutorial_1104_td
-# Set the maximum number of rows to display.
-
-@tutorial_1105_td
-# @memory;
-
-@tutorial_1106_td
-# Show the used and free memory. This will call System.gc().
-
-@tutorial_1107_td
-# @meta select 1;
-
-@tutorial_1108_td
-# List the ResultSetMetaData after running the query.
-
-@tutorial_1109_td
-# @parameter_meta select ?;
-
-@tutorial_1110_td
-# Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed.
-
-@tutorial_1111_td
-# @prof_start;
-
-@tutorial_1112_td
-# call hash('SHA256', '', 1000000);
-
-@tutorial_1113_td
-# @prof_stop;
-
-@tutorial_1114_td
-# Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3).
-
-@tutorial_1115_td
-# @prof_start;
-
-@tutorial_1116_td
-# @sleep 10;
-
-@tutorial_1117_td
-# @prof_stop;
-
-@tutorial_1118_td
-# Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process).
-
-@tutorial_1119_td
-# @transaction_isolation;
-
-@tutorial_1120_td
-# @transaction_isolation 2;
-
-@tutorial_1121_td
-# Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level.
-
-@tutorial_1122_h2
-#Settings of the H2 Console
-
-@tutorial_1123_p
-# The settings of the H2 Console are stored in a configuration file called .h2.server.properties in your user home directory. For Windows installations, the user home directory is usually C:\Documents and Settings\[username] or C:\Users\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are:
-
-@tutorial_1124_code
-#webAllowOthers
-
-@tutorial_1125_li
-#: allow other computers to connect.
-
-@tutorial_1126_code
-#webPort
-
-@tutorial_1127_li
-#: the port of the H2 Console
-
-@tutorial_1128_code
-#webSSL
-
-@tutorial_1129_li
-#: use encrypted TLS (HTTPS) connections.
-
-@tutorial_1130_p
-# In addition to those settings, the properties of the most recently used connection are listed in the form <number>=<name>|<driver>|<url>|<user> using the escape character \. Example: 1=Generic H2 (Embedded)|org.h2.Driver|jdbc\:h2\:~/test|sa
-
-@tutorial_1131_h2
-Connecting to a Database using JDBC
-
-@tutorial_1132_p
-# To connect to a database, a Java application first needs to load the database driver, and then get a connection. A simple way to do that is using the following code:
-
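A minimal sketch of that code, following the conventions used elsewhere in this tutorial (database URL jdbc:h2:~/test, user sa, empty password):

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class Connect {
    public static void main(String... args) throws Exception {
        Class.forName("org.h2.Driver"); // load the driver; optional with JDBC 4 and later
        Connection conn = DriverManager.getConnection(
                "jdbc:h2:~/test", "sa", "");
        // ... use the connection ...
        conn.close();
    }
}
```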
-@tutorial_1133_p
-# This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc:h2: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are.
-
-@tutorial_1134_h2
-Creating New Databases
-
-@tutorial_1135_p
-# By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database.
-
-@tutorial_1136_p
-# Automatic creation of new databases can be disabled, see Opening a Database Only if it Already Exists.
-
-@tutorial_1137_h2
-Using the Server
-
-@tutorial_1138_p
-# H2 currently supports three servers: a web server (for the H2 Console), a TCP server (for client/server connections) and a PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways, one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects.
-
-@tutorial_1139_h3
-#Starting the Server Tool from Command Line
-
-@tutorial_1140_p
-# To start the Server tool from the command line with the default settings, run:
-
-@tutorial_1141_p
-# This will start the tool with the default options. To get the list of options and default values, run:
-
-@tutorial_1142_p
-# There are options available to use other ports, and to start or not start individual servers.
-
-@tutorial_1143_h3
-Connecting to the TCP Server
-
-@tutorial_1144_p
-# To remotely connect to a database using the TCP server, use the following driver and database URL:
-
-@tutorial_1145_li
-#JDBC driver class: org.h2.Driver
-
-@tutorial_1146_li
-#Database URL: jdbc:h2:tcp://localhost/~/test
-
-@tutorial_1147_p
-# For details about the database URL, see also in Features. Please note that you can't connect to this URL with a web browser. You can only connect using a H2 client (over JDBC).
-
-@tutorial_1148_h3
-#Starting the TCP Server within an Application
-
-@tutorial_1149_p
-# Servers can also be started and stopped from within an application. Sample code:
-
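A sketch of what such sample code can look like, using the org.h2.tools.Server API (the port 9092 here is just an example):

```java
import java.sql.SQLException;
import org.h2.tools.Server;

public class StartStopServer {
    public static void main(String... args) throws SQLException {
        // start a TCP server on an explicit port
        Server server = Server.createTcpServer("-tcpPort", "9092").start();
        // ... clients can now connect to tcp://localhost:9092 ...
        server.stop(); // stop the server when the application shuts down
    }
}
```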
-@tutorial_1150_h3
-Stopping a TCP Server from Another Process
-
-@tutorial_1151_p
-# The TCP server can be stopped from another process. To stop the server from the command line, run:
-
-@tutorial_1152_p
-# To stop the server from a user application, use the following code:
-
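A sketch of such a call (the URL and the empty password are illustrative; force closes open connections, and the last parameter selects whether all servers are stopped):

```java
import java.sql.SQLException;
import org.h2.tools.Server;

class StopServer {
    void stopRemote() throws SQLException {
        // stop the TCP server listening on port 9092
        Server.shutdownTcpServer("tcp://localhost:9092", "", true, false);
    }
}
```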
-@tutorial_1153_p
-# This function will only stop the TCP server. If other servers were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server).
-
-@tutorial_1154_h2
-Using Hibernate
-
-@tutorial_1155_p
-# This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed.
-
-@tutorial_1156_p
-# When using Hibernate, try to use the H2Dialect if possible. When using the H2Dialect, compatibility modes such as MODE=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases.
-
-@tutorial_1157_h2
-#Using TopLink and Glassfish
-
-@tutorial_1158_p
-# To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource.
-
-@tutorial_1159_p
-# The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml:
-
-@tutorial_1160_p
-# In old versions of Glassfish, the property name is toplink.platform.class.name.
-
-@tutorial_1161_p
-# To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib.
-
-@tutorial_1162_h2
-#Using EclipseLink
-
-@tutorial_1163_p
-# To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many cases. See also H2Platform.
-
-@tutorial_1164_h2
-#Using Apache ActiveMQ
-
-@tutorial_1165_p
-# When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE, which is not problematic. To use it, change the ActiveMQ configuration element <jdbcPersistenceAdapter>, setting the property databaseLocker="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false.
-
-@tutorial_1166_h2
-#Using H2 within NetBeans
-
-@tutorial_1167_p
-# The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE.
-
-@tutorial_1168_p
-# There is a known issue when using the Netbeans SQL Execution Window: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one.
-
-@tutorial_1169_h2
-#Using H2 with jOOQ
-
-@tutorial_1170_p
-# jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema:
-
-@tutorial_1171_p
-# then run the jOOQ code generator on the command line using this command:
-
-@tutorial_1172_p
-# ...where codegen.xml is on the classpath and contains this information:
-
-@tutorial_1173_p
-# Using the generated source, you can query the database as follows:
-
-@tutorial_1174_p
-# See more details on the jOOQ homepage and in the jOOQ tutorial.
-
-@tutorial_1175_h2
-Using Databases in Web Applications
-
-@tutorial_1176_p
-# There are multiple ways to access a database from within web applications. Here are some examples if you use Tomcat or JBoss.
-
-@tutorial_1177_h3
-Embedded Mode
-
-@tutorial_1178_p
-# The (currently) simplest solution is to use the database in the embedded mode, that means open a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) use just one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, one option is to use one connection per session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed).
-
-@tutorial_1179_h3
-Server Mode
-
-@tutorial_1180_p
-# The server mode is similar, but it allows you to run the server in another process.
-
-@tutorial_1181_h3
-Using a Servlet Listener to Start and Stop a Database
-
-@tutorial_1182_p
-# Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section):
-
-@tutorial_1183_p
-# For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc:h2:~/test, user name sa, and password sa. If you want to use this connection within your servlet, you can access it as follows:
-
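A sketch of such access, assuming DbStarter publishes the opened connection under the servlet context attribute named "connection":

```java
import java.sql.Connection;
import javax.servlet.http.HttpServlet;

public class MyServlet extends HttpServlet {
    Connection getDbConnection() {
        // DbStarter stores the opened connection in the servlet context
        return (Connection) getServletContext().getAttribute("connection");
    }
}
```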
-@tutorial_1184_code
-#DbStarter
-
-@tutorial_1185_p
-# can also start the TCP server, however this is disabled by default. To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags:
-
-@tutorial_1186_p
-# When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically.
-
-@tutorial_1187_h3
-#Using the H2 Console Servlet
-
-@tutorial_1188_p
-# The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the h2*.jar file in your application, and add the following configuration to your web.xml:
-
-@tutorial_1189_p
-# For details, see also src/tools/WEB-INF/web.xml.
-
-@tutorial_1190_p
-# To create a web application with just the H2 Console, run the following command:
-
-@tutorial_1191_h2
-#Android
-
-@tutorial_1192_p
-# You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but everything seems to work as expected. Performance appears to be similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. Fulltext search was not yet tested, however the native fulltext search should work.
-
-@tutorial_1193_p
-# Reasons to use H2 instead of SQLite are:
-
-@tutorial_1194_li
-#Full Unicode support including UPPER() and LOWER().
-
-@tutorial_1195_li
-#Streaming API for BLOB and CLOB data.
-
-@tutorial_1196_li
-#Fulltext search.
-
-@tutorial_1197_li
-#Multiple connections.
-
-@tutorial_1198_li
-#User defined functions and triggers.
-
-@tutorial_1199_li
-#Database file encryption.
-
-@tutorial_1200_li
-#Reading and writing CSV files (this feature can be used outside the database as well).
-
-@tutorial_1201_li
-#Referential integrity and check constraints.
-
-@tutorial_1202_li
-#Better data type and SQL support.
-
-@tutorial_1203_li
-#In-memory databases, read-only databases, linked tables.
-
-@tutorial_1204_li
-#Better compatibility with other databases which simplifies porting applications.
-
-@tutorial_1205_li
-#Possibly better performance (so far for read operations).
-
-@tutorial_1206_li
-#Server mode (accessing a database on a different machine over TCP/IP).
-
-@tutorial_1207_p
-# Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows).
-
-@tutorial_1208_p
-# The database files need to be stored in a place that is accessible for the application. Example:
-
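A sketch of such a connection; the package name com.example.hello is hypothetical and stands for the application's own package, whose private data directory is writable:

```java
import java.sql.Connection;
import java.sql.DriverManager;

class AndroidExample {
    Connection open() throws Exception {
        Class.forName("org.h2.Driver");
        // store the database in the application's private data directory
        return DriverManager.getConnection(
                "jdbc:h2:/data/data/com.example.hello/data/hello", "sa", "sa");
    }
}
```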
-@tutorial_1209_p
-# Limitations: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android.
-
-@tutorial_1210_h2
-CSV (Comma Separated Values) Support
-
-@tutorial_1211_p
-# The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool.
-
-@tutorial_1212_h3
-Reading a CSV File from Within a Database
-
-@tutorial_1213_p
-# A CSV file can be read using the function CSVREAD. Example:
-
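A minimal sketch of such a query from JDBC (the file name test.csv is an example):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class CsvReadExample {
    void readCsv(Connection conn) throws SQLException {
        try (Statement stat = conn.createStatement();
                ResultSet rs = stat.executeQuery("SELECT * FROM CSVREAD('test.csv')")) {
            while (rs.next()) {
                System.out.println(rs.getString(1)); // first column of each row
            }
        }
    }
}
```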
-@tutorial_1214_p
-# Please note that, for performance reasons, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table.
-
-@tutorial_1215_h3
-#Importing Data from a CSV File
-
-@tutorial_1216_p
-# A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT.
-
-@tutorial_1217_h3
-Writing a CSV File from Within a Database
-
-@tutorial_1218_p
-# The built-in function CSVWRITE can be used to create a CSV file from a query. Example:
-
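A sketch of the corresponding call (file name and query are examples):

```java
import java.sql.Connection;
import java.sql.SQLException;

class WriteCsvFromDb {
    void export(Connection conn) throws SQLException {
        // export the result of a query to a CSV file
        conn.createStatement().execute(
                "CALL CSVWRITE('test.csv', 'SELECT * FROM TEST')");
    }
}
```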
-@tutorial_1219_h3
-Writing a CSV File from a Java Application
-
-@tutorial_1220_p
-# The Csv tool can be used in a Java application even when not using a database at all. Example:
-
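A sketch using org.h2.tools.SimpleResultSet to build rows in memory; recent versions use new Csv(), while older versions used Csv.getInstance(). Column names and file name are illustrative:

```java
import java.sql.Types;
import org.h2.tools.Csv;
import org.h2.tools.SimpleResultSet;

public class WriteCsv {
    public static void main(String... args) throws Exception {
        SimpleResultSet rs = new SimpleResultSet();
        rs.addColumn("NAME", Types.VARCHAR, 255, 0);
        rs.addColumn("EMAIL", Types.VARCHAR, 255, 0);
        rs.addRow("Bob Meier", "bob.meier@example.com");
        rs.addRow("John Jones", "john.jones@example.com");
        new Csv().write("test.csv", rs, null); // null: default character set
    }
}
```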
-@tutorial_1221_h3
-Reading a CSV File from a Java Application
-
-@tutorial_1222_p
-# It is possible to read a CSV file without opening a database. Example:
-
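A sketch of reading the same file back without a database:

```java
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import org.h2.tools.Csv;

public class ReadCsvFile {
    public static void main(String... args) throws Exception {
        ResultSet rs = new Csv().read("test.csv", null, null);
        ResultSetMetaData meta = rs.getMetaData();
        while (rs.next()) {
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                System.out.println(meta.getColumnLabel(i) + ": " + rs.getString(i));
            }
        }
        rs.close();
    }
}
```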
-@tutorial_1223_h2
-Upgrade, Backup, and Restore
-
-@tutorial_1224_h3
-Database Upgrade
-
-@tutorial_1225_p
-# The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine.
-
-@tutorial_1226_h3
-Backup
-
-@tutorial_1227_p
-# The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is run as follows:
-
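A sketch of an equivalent programmatic call; the script name backup.zip and the compression options are illustrative, and the option names follow the Script tool's command line documentation:

```java
import org.h2.tools.Script;

public class BackupDb {
    public static void main(String... args) throws Exception {
        // same arguments as the command line tool
        Script.main("-url", "jdbc:h2:~/test", "-user", "sa",
                "-script", "backup.zip", "-options", "compression", "zip");
    }
}
```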
-@tutorial_1228_p
-# It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built-in FTP server could be used to retrieve the file from the server.
-
-@tutorial_1229_h3
-Restore
-
-@tutorial_1230_p
-# To restore a database from a SQL script file, you can use the RunScript tool:
-
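A sketch of the corresponding call (same illustrative file name and options as in the backup example above):

```java
import org.h2.tools.RunScript;

public class RestoreDb {
    public static void main(String... args) throws Exception {
        RunScript.main("-url", "jdbc:h2:~/test", "-user", "sa",
                "-script", "backup.zip", "-options", "compression", "zip");
    }
}
```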
-@tutorial_1231_p
-# For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built-in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the referenced script files need to be available on the server side.
-
-@tutorial_1232_h3
-Online Backup
-
-@tutorial_1233_p
-# The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable.
-
-@tutorial_1234_p
-# The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply.
-
-@tutorial_1235_p
-# The Backup tool (org.h2.tools.Backup) cannot be used to create an online backup; the database must not be in use while running this program.
-
-@tutorial_1236_p
-# Creating a backup by copying the database files while the database is running is not supported, except if the file system supports creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order.
-
-@tutorial_1237_h2
-#Command Line Tools
-
-@tutorial_1238_p
-# This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example:
-
-@tutorial_1239_p
-# The command line tools are:
-
-@tutorial_1240_code
-Backup
-
-@tutorial_1241_li
-# creates a backup of a database.
-
-@tutorial_1242_code
-#ChangeFileEncryption
-
-@tutorial_1243_li
-# allows changing the file encryption password or algorithm of a database.
-
-@tutorial_1244_code
-#Console
-
-@tutorial_1245_li
-# starts the browser based H2 Console.
-
-@tutorial_1246_code
-#ConvertTraceFile
-
-@tutorial_1247_li
-# converts a .trace.db file to a Java application and SQL script.
-
-@tutorial_1248_code
-#CreateCluster
-
-@tutorial_1249_li
-# creates a cluster from a standalone database.
-
-@tutorial_1250_code
-#DeleteDbFiles
-
-@tutorial_1251_li
-# deletes all files belonging to a database.
-
-@tutorial_1252_code
-#Recover
-
-@tutorial_1253_li
-# helps to recover a corrupted database.
-
-@tutorial_1254_code
-#Restore
-
-@tutorial_1255_li
-# restores a backup of a database.
-
-@tutorial_1256_code
-#RunScript
-
-@tutorial_1257_li
-# runs a SQL script against a database.
-
-@tutorial_1258_code
-#Script
-
-@tutorial_1259_li
-# allows converting a database to a SQL script for backup or migration.
-
-@tutorial_1260_code
-Server
-
-@tutorial_1261_li
-# is used in the server mode to start a H2 server.
-
-@tutorial_1262_code
-#Shell
-
-@tutorial_1263_li
-# is a command line database tool.
-
-@tutorial_1264_p
-# The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation.
-
-@tutorial_1265_h2
-#The Shell Tool
-
-@tutorial_1266_p
-# The Shell tool is a simple interactive command line tool. To start it, type:
-
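For example, the connection settings can also be passed programmatically instead of being entered interactively (values are illustrative):

```java
import org.h2.tools.Shell;

public class StartShell {
    public static void main(String... args) throws Exception {
        Shell.main("-url", "jdbc:h2:~/test", "-user", "sa", "-password", "");
    }
}
```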
-@tutorial_1267_p
-# You will be asked for a database URL, JDBC driver, user name, and password. The connection settings can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows entering multi-line statements:
-
-@tutorial_1268_p
-# By default, results are printed as a table. For results with many columns, consider using the list mode:
-
-@tutorial_1269_h2
-Using OpenOffice Base
-
-@tutorial_1270_p
-# OpenOffice.org Base supports database access over the JDBC API. To connect to a H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to a H2 database are:
-
-@tutorial_1271_li
-#Start OpenOffice Writer, go to [Tools], [Options]
-
-@tutorial_1272_li
-#Make sure you have selected a Java runtime environment in OpenOffice.org / Java
-
-@tutorial_1273_li
-#Click [Class Path...], [Add Archive...]
-
-@tutorial_1274_li
-#Select your h2 jar file (location is up to you, could be wherever you choose)
-
-@tutorial_1275_li
-#Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter)
-
-@tutorial_1276_li
-#Start OpenOffice Base
-
-@tutorial_1277_li
-#Connect to an existing database; select [JDBC]; [Next]
-
-@tutorial_1278_li
-#Example datasource URL: jdbc:h2:~/test
-
-@tutorial_1279_li
-#JDBC driver class: org.h2.Driver
-
-@tutorial_1280_p
-# Now you can access the database stored in the current user's home directory.
-
-@tutorial_1281_p
-# To use H2 in NeoOffice (OpenOffice without X11):
-
-@tutorial_1282_li
-#In NeoOffice, go to [NeoOffice], [Preferences]
-
-@tutorial_1283_li
-#Look for the page under [NeoOffice], [Java]
-
-@tutorial_1284_li
-#Click [Class Path], [Add Archive...]
-
-@tutorial_1285_li
-#Select your h2 jar file (location is up to you, could be wherever you choose)
-
-@tutorial_1286_li
-#Click [OK] (as much as needed), restart NeoOffice.
-
-@tutorial_1287_p
-# Now, when creating a new database using the "Database Wizard":
-
-@tutorial_1288_li
-#Click [File], [New], [Database].
-
-@tutorial_1289_li
-#Select [Connect to existing database] and then select [JDBC]. Click [Next].
-
-@tutorial_1290_li
-#Example datasource URL: jdbc:h2:~/test
-
-@tutorial_1291_li
-#JDBC driver class: org.h2.Driver
-
-@tutorial_1292_p
-# Another solution to use H2 in NeoOffice is:
-
-@tutorial_1293_li
-#Package the h2 jar within an extension package
-
-@tutorial_1294_li
-#Install it as a Java extension in NeoOffice
-
-@tutorial_1295_p
-# This can be done by creating it using the NetBeans OpenOffice plugin. See also Extensions Development.
-
-@tutorial_1296_h2
-Java Web Start / JNLP
-
-@tutorial_1297_p
-# When using Java Web Start / JNLP (Java Network Launch Protocol), permissions tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur: java.security.AccessControlException: access denied (java.io.FilePermission ... read). Example permission tags:
-
-@tutorial_1298_h2
-#Using a Connection Pool
-
-@tutorial_1299_p
-# For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as fast to get a connection from the built-in connection pool as to get one using DriverManager.getConnection(). The built-in connection pool is used as follows:
-
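A sketch of that usage, based on org.h2.jdbcx.JdbcConnectionPool (URL and credentials are illustrative):

```java
import java.sql.Connection;
import java.sql.SQLException;
import org.h2.jdbcx.JdbcConnectionPool;

public class PoolExample {
    public static void main(String... args) throws SQLException {
        JdbcConnectionPool cp = JdbcConnectionPool.create(
                "jdbc:h2:~/test", "sa", "sa");
        Connection conn = cp.getConnection();
        // ... use the connection ...
        conn.close(); // returns the connection to the pool
        cp.dispose(); // closes all unused pooled connections
    }
}
```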
-@tutorial_1300_h2
-Fulltext Search
-
-@tutorial_1301_p
-# H2 includes two fulltext search implementations. One uses Apache Lucene, and the other (the native implementation) stores the index data in special tables in the database.
-
-@tutorial_1302_h3
-#Using the Native Fulltext Search
-
-@tutorial_1303_p
-# To initialize, call:
-
-@tutorial_1304_p
-# You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using:
-
-@tutorial_1305_p
-# PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in real time. To search the index, use the following query:
-
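Putting the above steps together, a sketch from JDBC; the table PUBLIC.TEST and the search term are examples:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class FullTextExample {
    public static void main(String... args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
        Statement stat = conn.createStatement();
        // initialize the native fulltext search in this database
        stat.execute("CREATE ALIAS IF NOT EXISTS FT_INIT "
                + "FOR \"org.h2.fulltext.FullText.init\"");
        stat.execute("CALL FT_INIT()");
        // index all columns of PUBLIC.TEST
        stat.execute("CALL FT_CREATE_INDEX('PUBLIC', 'TEST', NULL)");
        // search the index
        ResultSet rs = stat.executeQuery("SELECT * FROM FT_SEARCH('Hello', 0, 0)");
        while (rs.next()) {
            System.out.println(rs.getString(1)); // query to retrieve the row
        }
        conn.close();
    }
}
```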
-@tutorial_1306_p
-# This will produce a result set that contains the query needed to retrieve the data:
-
-@tutorial_1307_p
-# To drop an index on a table:
-
-@tutorial_1308_p
-# To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0). The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0];
-
-@tutorial_1309_p
-# You can also call the index from within a Java application:
-
-@tutorial_1310_h3
-Using the Lucene Fulltext Search
-
-@tutorial_1311_p
-# To use the Lucene full text search, you need the Lucene library in the classpath. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. How to do that depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call:
-
-@tutorial_1312_p
-# You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using:
-
-@tutorial_1313_p
-# PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional, in this case all columns are indexed. The index is updated in real time. To search the index, use the following query:
-
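A sketch of the corresponding calls for the Lucene variant (same assumptions as the native example above, with the FTL_ prefix instead of FT_):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class FullTextLuceneExample {
    void run(Connection conn) throws SQLException {
        Statement stat = conn.createStatement();
        stat.execute("CREATE ALIAS IF NOT EXISTS FTL_INIT "
                + "FOR \"org.h2.fulltext.FullTextLucene.init\"");
        stat.execute("CALL FTL_INIT()");
        stat.execute("CALL FTL_CREATE_INDEX('PUBLIC', 'TEST', NULL)");
        ResultSet rs = stat.executeQuery("SELECT * FROM FTL_SEARCH('Hello', 0, 0)");
        while (rs.next()) {
            System.out.println(rs.getString(1));
        }
    }
}
```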
-@tutorial_1314_p
-# This will produce a result set that contains the query needed to retrieve the data:
-
-@tutorial_1315_p
-# To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database):
-
-@tutorial_1316_p
-# To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0). The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE='TEST' AND T.ID=FT.KEYS[0];
-
-@tutorial_1317_p
-# You can also call the index from within a Java application:
-
-@tutorial_1318_p
-# The Lucene fulltext search supports searching in specific columns only. Column names must be uppercase (except if the original columns are double quoted). For column names starting with an underscore (_), another underscore needs to be added. Example:
-
-@tutorial_1319_p
-# The Lucene fulltext search implementation is not synchronized internally. If you update the database and query the fulltext search concurrently (directly using the Java API of H2 or Lucene itself), you need to ensure operations are properly synchronized. If this is not the case, you may get exceptions such as org.apache.lucene.store.AlreadyClosedException: this IndexReader is closed.
-
-@tutorial_1320_h2
-#User-Defined Variables
-
-@tutorial_1321_p
-# This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and are session-scoped, that means they are only visible within the session in which they are defined. A value is usually assigned using the SET command:
-
-@tutorial_1322_p
-# The value can also be changed using the SET() method. This is useful in queries:
-
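A sketch of both forms; the variable names @user and @I, and the running-total query over SYSTEM_RANGE, are examples:

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class VariablesExample {
    void run(Connection conn) throws SQLException {
        Statement stat = conn.createStatement();
        stat.execute("SET @user = 'Joe'"); // assign with the SET command
        // update the variable inside a query to compute a running total
        ResultSet rs = stat.executeQuery(
                "SELECT X, SET(@I, IFNULL(@I, 0) + X) RUNNING_TOTAL "
                + "FROM SYSTEM_RANGE(1, 10)");
        while (rs.next()) {
            System.out.println(rs.getInt(1) + " " + rs.getInt(2));
        }
    }
}
```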
-@tutorial_1323_p
-# Variables that are not set evaluate to NULL. The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable.
-
-@tutorial_1324_h2
-#Date and Time
-
-@tutorial_1325_p
-# Date, time and timestamp values support ISO 8601 formatting, including time zone:
-
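For example (the literal below is one possible ISO 8601 value with an explicit offset):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;

class DateTimeExample {
    void run(Connection conn) throws SQLException {
        // timestamp literal with the time zone offset +01:00
        ResultSet rs = conn.createStatement().executeQuery(
                "CALL TIMESTAMP '2008-01-01 12:00:00+01:00'");
        rs.next();
        System.out.println(rs.getTimestamp(1));
    }
}
```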
-@tutorial_1326_p
-# If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. That means if you store the value '2000-01-01 12:00:00' in one time zone, then close the database and open the database again in a different time zone, you will also get '2000-01-01 12:00:00'. Please note that changing the time zone after the H2 driver is loaded is not supported.
-
-@tutorial_1327_h2
-#Using Spring
-
-@tutorial_1328_h3
-#Using the TCP Server
-
-@tutorial_1329_p
-# Use the following configuration to start and stop the H2 TCP server using the Spring Framework:
-
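An equivalent sketch using Spring Java configuration instead of the XML bean definition; the port 8043 and the server flags are illustrative:

```java
import java.sql.SQLException;
import org.h2.tools.Server;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class H2ServerConfig {
    // Spring starts the server when the context starts and stops it
    // cleanly on shutdown or redeployment (see destroy-method below)
    @Bean(initMethod = "start", destroyMethod = "stop")
    public Server h2TcpServer() throws SQLException {
        return Server.createTcpServer("-tcp", "-tcpAllowOthers", "-tcpPort", "8043");
    }
}
```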
-@tutorial_1330_p
-# The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server.
-
-@tutorial_1331_h3
-#Error Code Incompatibility
-
-@tutorial_1332_p
-# There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath:
-
-@tutorial_1333_h2
-#OSGi
-
-@tutorial_1334_p
-# The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties: OSGI_JDBC_DRIVER_CLASS=org.h2.Driver and OSGI_JDBC_DRIVER_NAME=H2. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is.
-
-@tutorial_1335_p
-# The following standard configuration properties are supported: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL.
-
-@tutorial_1336_h2
-#Java Management Extension (JMX)
-
-@tutorial_1337_p
-# Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX=TRUE to the database URL when opening the database. Various tools support JMX, one such tool is the jconsole. When opening the jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character).
-
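For example (illustrative URL):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

class JmxExample {
    Connection open() throws SQLException {
        // appending ;JMX=TRUE registers the database MBean
        return DriverManager.getConnection("jdbc:h2:~/test;JMX=TRUE", "sa", "");
    }
}
```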
-@tutorial_1338_p
-# The following attributes and operations are supported:
-
-@tutorial_1339_code
-#CacheSize
-
-@tutorial_1340_li
-#: the cache size currently in use in KB.
-
-@tutorial_1341_code
-#CacheSizeMax
-
-@tutorial_1342_li
-# (read/write): the maximum cache size in KB.
-
-@tutorial_1343_code
-#Exclusive
-
-@tutorial_1344_li
-#: whether this database is open in exclusive mode or not.
-
-@tutorial_1345_code
-#FileReadCount
-
-@tutorial_1346_li
-#: the number of file read operations since the database was opened.
-
-@tutorial_1347_code
-#FileSize
-
-@tutorial_1348_li
-#: the file size in KB.
-
-@tutorial_1349_code
-#FileWriteCount
-
-@tutorial_1350_li
-#: the number of file write operations since the database was opened.
-
-@tutorial_1351_code
-#FileWriteCountTotal
-
-@tutorial_1352_li
-#: the number of file write operations since the database was created.
-
-@tutorial_1353_code
-#LogMode
-
-@tutorial_1354_li
-# (read/write): the current transaction log mode. See SET LOG for details.
-
-@tutorial_1355_code
-#Mode
-
-@tutorial_1356_li
-#: the compatibility mode (REGULAR if no compatibility mode is used).
-
-@tutorial_1357_code
-#MultiThreaded
-
-@tutorial_1358_li
-#: true if multi-threaded is enabled.
-
-@tutorial_1359_code
-#Mvcc
-
-@tutorial_1360_li
-#: true if MVCC is enabled.
-
-@tutorial_1361_code
-#ReadOnly
-
-@tutorial_1362_li
-#: true if the database is read-only.
-
-@tutorial_1363_code
-#TraceLevel
-
-@tutorial_1364_li
-# (read/write): the file trace level.
-
-@tutorial_1365_code
-#Version
-
-@tutorial_1366_li
-#: the database version in use.
-
-@tutorial_1367_code
-#listSettings
-
-@tutorial_1368_li
-#: list the database settings.
-
-@tutorial_1369_code
-#listSessions
-
-@tutorial_1370_li
-#: list the open sessions, including currently executing statement (if any) and locked tables (if any).
-
-@tutorial_1371_p
-# To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM.
-
diff --git a/h2/src/docsrc/textbase/_docs_en.properties b/h2/src/docsrc/textbase/_docs_en.properties
deleted file mode 100644
index 77324149c2..0000000000
--- a/h2/src/docsrc/textbase/_docs_en.properties
+++ /dev/null
@@ -1,3995 +0,0 @@
-advanced_1000_h1=Advanced
-advanced_1001_a=\ Result Sets
-advanced_1002_a=\ Large Objects
-advanced_1003_a=\ Linked Tables
-advanced_1004_a=\ Spatial Features
-advanced_1005_a=\ Recursive Queries
-advanced_1006_a=\ Updatable Views
-advanced_1007_a=\ Transaction Isolation
-advanced_1008_a=\ Multi-Version Concurrency Control (MVCC)
-advanced_1009_a=\ Clustering / High Availability
-advanced_1010_a=\ Two Phase Commit
-advanced_1011_a=\ Compatibility
-advanced_1012_a=\ Standards Compliance
-advanced_1013_a=\ Run as Windows Service
-advanced_1014_a=\ ODBC Driver
-advanced_1015_a=\ Using H2 in Microsoft .NET
-advanced_1016_a=\ ACID
-advanced_1017_a=\ Durability Problems
-advanced_1018_a=\ Using the Recover Tool
-advanced_1019_a=\ File Locking Protocols
-advanced_1020_a=\ Using Passwords
-advanced_1021_a=\ Password Hash
-advanced_1022_a=\ Protection against SQL Injection
-advanced_1023_a=\ Protection against Remote Access
-advanced_1024_a=\ Restricting Class Loading and Usage
-advanced_1025_a=\ Security Protocols
-advanced_1026_a=\ TLS Connections
-advanced_1027_a=\ Universally Unique Identifiers (UUID)
-advanced_1028_a=\ Settings Read from System Properties
-advanced_1029_a=\ Setting the Server Bind Address
-advanced_1030_a=\ Pluggable File System
-advanced_1031_a=\ Split File System
-advanced_1032_a=\ Database Upgrade
-advanced_1033_a=\ Java Objects Serialization
-advanced_1034_a=\ Limits and Limitations
-advanced_1035_a=\ Glossary and Links
-advanced_1036_h2=Result Sets
-advanced_1037_h3=Statements that Return a Result Set
-advanced_1038_p=\ The following statements return a result set\: SELECT, EXPLAIN, CALL, SCRIPT, SHOW, HELP. All other statements return an update count.
-advanced_1039_h3=Limiting the Number of Rows
-advanced_1040_p=\ Before the result is returned to the application, all rows are read by the database. Server side cursors are not supported currently. If only the first few rows are interesting for the application, then the result set size should be limited to improve the performance. This can be done using LIMIT in a query (example\: SELECT * FROM TEST LIMIT 100), or by using Statement.setMaxRows(max).
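A sketch showing both options (table name TEST is an example):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class LimitExample {
    void run(Connection conn) throws SQLException {
        Statement stat = conn.createStatement();
        stat.setMaxRows(100); // limit on the JDBC statement
        ResultSet rs = stat.executeQuery(
                "SELECT * FROM TEST LIMIT 100"); // or limit in the query itself
        while (rs.next()) {
            // ... process at most 100 rows ...
        }
    }
}
```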
-advanced_1041_h3=Large Result Sets and External Sorting
-advanced_1042_p=\ For large result sets, the result is buffered to disk. The threshold can be defined using the statement SET MAX_MEMORY_ROWS. If ORDER BY is used, the sorting is done using an external sort algorithm. In this case, each block of rows is sorted using quick sort, then written to disk; when reading the data, the blocks are merged together.
-advanced_1043_h2=Large Objects
-advanced_1044_h3=Storing and Reading Large Objects
-advanced_1045_p=\ If it is possible that the objects don't fit into memory, then the data type CLOB (for textual data) or BLOB (for binary data) should be used. For these data types, the objects are not fully read into memory; streams are used instead. To store a BLOB, use PreparedStatement.setBinaryStream. To store a CLOB, use PreparedStatement.setCharacterStream. To read a BLOB, use ResultSet.getBinaryStream, and to read a CLOB, use ResultSet.getCharacterStream. When using the client/server mode, large BLOB and CLOB data is stored in a temporary file on the client side.
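A sketch of the streaming calls, assuming a table TEST(ID INT PRIMARY KEY, DATA BLOB) and that -1 is accepted as an unknown stream length (both are assumptions for illustration):

```java
import java.io.FileInputStream;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

class LobExample {
    void storeAndRead(Connection conn) throws Exception {
        // store a BLOB without materializing it in memory
        PreparedStatement prep = conn.prepareStatement(
                "INSERT INTO TEST(ID, DATA) VALUES(?, ?)");
        prep.setInt(1, 1);
        prep.setBinaryStream(2, new FileInputStream("data.bin"), -1);
        prep.execute();
        // read it back as a stream
        ResultSet rs = conn.createStatement().executeQuery(
                "SELECT DATA FROM TEST WHERE ID = 1");
        rs.next();
        try (InputStream in = rs.getBinaryStream(1)) {
            // ... process the stream ...
        }
    }
}
```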
-advanced_1046_h3=When to use CLOB/BLOB
-advanced_1047_p=\ By default, this database stores large LOB (CLOB and BLOB) objects separate from the main table data. Small LOB objects are stored in-place, the threshold can be set using MAX_LENGTH_INPLACE_LOB, but there is still an overhead to use CLOB/BLOB. Because of this, BLOB and CLOB should never be used for columns with a maximum size below about 200 bytes. The best threshold depends on the use case; reading in-place objects is faster than reading from separate files, but slows down the performance of operations that don't involve this column.
-advanced_1048_h3=Large Object Compression
-advanced_1049_p=\ The following feature is only available for the PageStore storage engine. For the MVStore engine (the default for H2 version 1.4.x), append ;COMPRESS\=TRUE to the database URL instead. CLOB and BLOB values can be compressed by using SET COMPRESS_LOB. The LZF algorithm is faster but needs more disk space. By default compression is disabled, which usually speeds up write operations. If you store many large compressible values such as XML, HTML, text, and uncompressed binary files, then compressing can save a lot of disk space (sometimes more than 50%), and read operations may even be faster.
-advanced_1050_h2=Linked Tables
-advanced_1051_p=\ This database supports linked tables, which means tables that don't exist in the current database but are just links to another database. To create such a link, use the CREATE LINKED TABLE statement\:
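A sketch of such a statement from JDBC; the driver, URL, and credentials of the target database are illustrative:

```java
import java.sql.Connection;
import java.sql.SQLException;

class LinkedTableExample {
    void createLink(Connection conn) throws SQLException {
        // LINK becomes a local alias for the table TEST in the other database
        conn.createStatement().execute(
                "CREATE LINKED TABLE LINK('org.postgresql.Driver', "
                + "'jdbc:postgresql:test', 'sa', 'sa', 'TEST')");
    }
}
```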
-advanced_1052_p=\ You can then access the table in the usual way. Whenever the linked table is accessed, the database issues specific queries over JDBC. Using the example above, if you issue the query SELECT * FROM LINK WHERE ID\=1, then the following query is run against the PostgreSQL database\: SELECT * FROM TEST WHERE ID\=?. The same happens for insert and update statements. Only simple statements are executed against the target database, that means no joins (queries that contain joins are converted to simple queries). Prepared statements are used where possible.
-advanced_1053_p=\ To view the statements that are executed against the target table, set the trace level to 3.
-advanced_1054_p=\ If multiple linked tables point to the same database (using the same database URL), the connection is shared. To disable this, set the system property h2.shareLinkedConnections\=false.
-advanced_1055_p=\ The statement CREATE LINKED TABLE supports an optional schema name parameter.
-advanced_1056_p=\ The following are not supported because they may result in a deadlock\: creating a linked table to the same database, and creating a linked table to another database using the server mode if the other database is open in the same server (use the embedded mode instead).
-advanced_1057_p=\ Data types that are not supported in H2 are also not supported for linked tables, for example unsigned data types if the value is outside the range of the signed type. In such cases, the columns need to be cast to a supported type.
-advanced_1058_h2=Updatable Views
-advanced_1059_p=\ By default, views are not updatable. To make a view updatable, use an "instead of" trigger as follows\:
-advanced_1060_p=\ Update the base table(s) within the trigger as required. For details, see the sample application org.h2.samples.UpdatableView.
-advanced_1061_h2=Transaction Isolation
-advanced_1062_p=\ Please note that most data definition language (DDL) statements, such as "create table", commit the current transaction. See the Grammar for details.
-advanced_1063_p=\ Transaction isolation is provided for all data manipulation language (DML) statements.
-advanced_1064_p=\ Please note MVCC is enabled in version 1.4.x by default, when using the MVStore. In this case, table level locking is not used. Instead, rows are locked for update, and read committed is used in all cases (changing the isolation level has no effect).
-advanced_1065_p=\ This database supports the following transaction isolation levels\:
-advanced_1066_b=Read Committed
-advanced_1067_li=\ This is the default level. Read locks are released immediately after executing the statement, but write locks are kept until the transaction commits. Higher concurrency is possible when using this level.
-advanced_1068_li=\ To enable, execute the SQL statement SET LOCK_MODE 3
-advanced_1069_li=\ or append ;LOCK_MODE\=3 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=3
-advanced_1070_b=Serializable
-advanced_1071_li=\ Both read locks and write locks are kept until the transaction commits. To enable, execute the SQL statement SET LOCK_MODE 1
-advanced_1072_li=\ or append ;LOCK_MODE\=1 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=1
-advanced_1073_b=Read Uncommitted
-advanced_1074_li=\ This level means that transaction isolation is disabled.
-advanced_1075_li=\ To enable, execute the SQL statement SET LOCK_MODE 0
-advanced_1076_li=\ or append ;LOCK_MODE\=0 to the database URL\: jdbc\:h2\:~/test;LOCK_MODE\=0
-advanced_1077_p=\ When using the isolation level 'serializable', dirty reads, non-repeatable reads, and phantom reads are prohibited.
-advanced_1078_b=Dirty Reads
-advanced_1079_li=\ Means a connection can read uncommitted changes made by another connection.
-advanced_1080_li=\ Possible with\: read uncommitted
-advanced_1081_b=Non-Repeatable Reads
-advanced_1082_li=\ A connection reads a row, another connection changes a row and commits, and the first connection re-reads the same row and gets the new result.
-advanced_1083_li=\ Possible with\: read uncommitted, read committed
-advanced_1084_b=Phantom Reads
-advanced_1085_li=\ A connection reads a set of rows using a condition, another connection inserts a row that falls in this condition and commits, then the first connection re-reads using the same condition and gets the new row.
-advanced_1086_li=\ Possible with\: read uncommitted, read committed
-advanced_1087_h3=Table Level Locking
-advanced_1088_p=\ The database allows multiple concurrent connections to the same database. To make sure all connections only see consistent data, table level locking is used by default. This mechanism does not allow high concurrency, but is very fast. Shared locks and exclusive locks are supported. Before reading from a table, the database tries to add a shared lock to the table (this is only possible if there is no exclusive lock on the object by another connection). If the shared lock is added successfully, the table can be read. It is allowed that other connections also have a shared lock on the same object. If a connection wants to write to a table (update or delete a row), an exclusive lock is required. To get the exclusive lock, other connections must not have any locks on the object. After the connection commits, all locks are released. This database keeps all locks in memory. When a lock is released, and multiple connections are waiting for it, one of them is picked at random.
-advanced_1089_h3=Lock Timeout
-advanced_1090_p=\ If a connection cannot get a lock on an object, the connection waits for some amount of time (the lock timeout). During this time, hopefully the connection holding the lock commits and it is then possible to get the lock. If this is not possible because the other connection does not release the lock for some time, the unsuccessful connection will get a lock timeout exception. The lock timeout can be set individually for each connection.
-advanced_1091_h2=Multi-Version Concurrency Control (MVCC)
-advanced_1092_p=\ The MVCC feature allows higher concurrency than using (table level or row level) locks. When using MVCC in this database, delete, insert and update operations will only issue a shared lock on the table. An exclusive lock is still used when adding or removing columns, when dropping the table, and when using SELECT ... FOR UPDATE. Connections only 'see' committed data, and their own changes. That means, if connection A updates a row but doesn't commit this change yet, connection B will see the old value. Only when the change is committed, the new value is visible by other connections (read committed). If multiple connections concurrently try to update the same row, the database waits until it can apply the change, but at most until the lock timeout expires.
-advanced_1093_p=\ To use the MVCC feature, append ;MVCC\=TRUE to the database URL\:
-advanced_1094_p=\ The setting must be specified in the first connection (the one that opens the database). It is not possible to enable or disable this setting while the database is already open.
-advanced_1095_p=\ If MVCC is enabled, changing the lock mode (LOCK_MODE) has no effect.
-advanced_1096_div=\ The MVCC mode is enabled by default in version 1.4.x, with the default MVStore storage engine. MVCC is disabled by default when using the PageStore storage engine (which is the default in version 1.3.x). The following applies when using the PageStore storage engine\: The MVCC feature is not fully tested yet. The limitations of the MVCC mode are\: with the PageStore storage engine, it can not be used at the same time as MULTI_THREADED\=TRUE; the complete undo log (the list of uncommitted changes) must fit in memory when using multi-version concurrency. The setting MAX_MEMORY_UNDO has no effect. Clustering / High Availability
-advanced_1097_p=\ This database supports a simple clustering / high availability mechanism. The architecture is\: two database servers run on two different computers, and each computer holds a copy of the same database. While both servers are running, each database operation is executed on both computers. If one server fails (power, hardware, or network failure), the other server can continue to work. From this point on, operations are executed only on the remaining server until the other server is back up.
-advanced_1098_p=\ Clustering can only be used in the server mode (the embedded mode does not support clustering). The cluster can be re-created using the CreateCluster tool without stopping the remaining server. Applications that are still connected are automatically disconnected; however, if ;AUTO_RECONNECT\=TRUE is appended to the database URL, they will reconnect automatically.
-advanced_1099_p=\ To initialize the cluster, use the following steps\:
-advanced_1100_li=Create a database
-advanced_1101_li=Use the CreateCluster
tool to copy the database to another location and initialize the clustering. Afterwards, you have two databases containing the same data.
-advanced_1102_li=Start two servers (one for each copy of the database)
-advanced_1103_li=You are now ready to connect to the databases with the client application(s)
-advanced_1104_h3=Using the CreateCluster Tool
-advanced_1105_p=\ To understand how clustering works, please try out the following example. In this example, the two databases reside on the same computer, but usually, the databases will be on different servers.
-advanced_1106_li=Create two directories\: server1, server2
. Each directory will simulate a directory on a computer.
-advanced_1107_li=Start a TCP server pointing to the first directory. You can do this using the command line (this command and the ones for the following steps are shown in the example after this list)\:
-advanced_1108_li=Start a second TCP server pointing to the second directory. This will simulate a server running on a second (redundant) computer. You can do this using the command line\:
-advanced_1109_li=Use the CreateCluster
tool to initialize clustering. This will automatically create a new, empty database if it does not exist. Run the tool on the command line\:
-advanced_1110_li=You can now connect to the databases using an application or the H2 Console using the JDBC URL jdbc\:h2\:tcp\://localhost\:9101,localhost\:9102/~/test
-advanced_1111_li=If you stop a server (by killing the process), you will notice that the other machine continues to work, and therefore the database is still accessible.
-advanced_1112_li=To restore the cluster, you first need to delete the database that failed, then restart the server that was stopped, and re-run the CreateCluster
tool.
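A sketch of the commands for the steps above, using the ports and directories from this example (the exact jar file name and classpath depend on your installation):

    java -cp h2*.jar org.h2.tools.Server -tcp -tcpPort 9101 -baseDir server1
    java -cp h2*.jar org.h2.tools.Server -tcp -tcpPort 9102 -baseDir server2
    java -cp h2*.jar org.h2.tools.CreateCluster
        -urlSource jdbc:h2:tcp://localhost:9101/~/test
        -urlTarget jdbc:h2:tcp://localhost:9102/~/test
        -user sa
        -serverList localhost:9101,localhost:9102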
-advanced_1113_h3=Detect Which Cluster Instances are Running
-advanced_1114_p=\ To find out which cluster nodes are currently running, execute the following SQL statement\:
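The statement reads the CLUSTER setting:

    SELECT VALUE FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME='CLUSTER'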
-advanced_1115_p=\ If the result is '' (two single quotes), then the cluster mode is disabled. Otherwise, the list of servers is returned, enclosed in single quotes. Example\: 'server1\:9191,server2\:9191'.
-advanced_1116_p=\ It is also possible to get the list of servers by using Connection.getClientInfo().
-advanced_1117_p=\ The property list returned from getClientInfo() contains a numServers property with the number of servers in the connection list. To get the actual servers, getClientInfo() also has the properties server0..serverX, where X is the number of servers minus 1.
-advanced_1118_p=\ Example\: to get the second server in the connection list, use getClientInfo('server1'). Note\: the serverX properties only return IP addresses and ports, not hostnames.
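A minimal sketch of reading the server list (assuming an open Connection conn):

    import java.sql.Connection;
    import java.util.Properties;

    Properties info = conn.getClientInfo();
    int numServers = Integer.parseInt(info.getProperty("numServers"));
    for (int i = 0; i < numServers; i++) {
        // serverX properties contain IP addresses and ports, not hostnames
        System.out.println(info.getProperty("server" + i));
    }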
-advanced_1119_h3=Clustering Algorithm and Limitations
-advanced_1120_p=\ Read-only queries are only executed against the first cluster node, but all other statements are executed against all nodes. To avoid problems with transactions, no load balancing is currently performed. The following functions may yield different results on different cluster nodes and must be used with care\: RANDOM_UUID(), SECURE_RAND(), SESSION_ID(), MEMORY_FREE(), MEMORY_USED(), CSVREAD(), CSVWRITE(), RAND() [when not using a seed]. Those functions should not be used directly in modifying statements (for example INSERT, UPDATE, MERGE). However, they can be used in read-only statements, and the result can then be used for modifying statements. Using auto-increment and identity columns is currently not supported. Instead, sequence values need to be requested manually and then used to insert data (using two statements).
-advanced_1121_p=\ When using the cluster mode, result sets are read fully into memory by the client, so that there is no problem if the server that executed the query dies. Result sets must fit in memory on the client side.
-advanced_1122_p=\ The SQL statement SET AUTOCOMMIT FALSE
is not supported in the cluster mode. To disable autocommit, the method Connection.setAutoCommit(false)
needs to be called.
-advanced_1123_p=\ It is possible that a transaction from one connection overtakes a transaction from a different connection. Depending on the operations, this might result in different results, for example when conditionally incrementing a value in a row.
-advanced_1124_h2=Two Phase Commit
-advanced_1125_p=\ The two-phase commit protocol is supported. It works as follows (a JDBC sketch is shown after this list)\:
-advanced_1126_li=Autocommit needs to be switched off
-advanced_1127_li=A transaction is started, for example by inserting a row
-advanced_1128_li=The transaction is marked 'prepared' by executing the SQL statement PREPARE COMMIT transactionName
-advanced_1129_li=The transaction can now be committed or rolled back
-advanced_1130_li=If a problem occurs before the transaction was successfully committed or rolled back (for example because a network problem occurred), the transaction is in the state 'in-doubt'
-advanced_1131_li=When re-connecting to the database, the in-doubt transactions can be listed with SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT
-advanced_1132_li=Each transaction in this list must now be committed or rolled back by executing COMMIT TRANSACTION transactionName
or ROLLBACK TRANSACTION transactionName
-advanced_1133_li=The database needs to be closed and re-opened to apply the changes
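A minimal JDBC sketch of the sequence above (the table TEST and the transaction name tx1 are illustrative):

    conn.setAutoCommit(false);                  // switch off autocommit
    stat.execute("INSERT INTO TEST VALUES(1)"); // start a transaction
    stat.execute("PREPARE COMMIT tx1");         // mark it 'prepared'
    conn.commit();                              // commit (or roll back)
    // After a failure, list in-doubt transactions with
    // SELECT * FROM INFORMATION_SCHEMA.IN_DOUBT and resolve each one using
    // COMMIT TRANSACTION tx1 or ROLLBACK TRANSACTION tx1.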
-advanced_1134_h2=Compatibility
-advanced_1135_p=\ This database is (up to a certain point) compatible with other databases such as HSQLDB, MySQL and PostgreSQL. There are certain areas where H2 is incompatible.
-advanced_1136_h3=Transaction Commit when Autocommit is On
-advanced_1137_p=\ At this time, this database engine commits a transaction (if autocommit is switched on) just before returning the result. For a query, this means the transaction is committed even before the application scans through the result set, and before the result set is closed. Other database engines may commit the transaction in this case when the result set is closed.
-advanced_1138_h3=Keywords / Reserved Words
-advanced_1139_p=\ There is a list of keywords that can't be used as identifiers (table names, column names and so on), unless they are quoted (surrounded with double quotes). The list is currently\:
-advanced_1140_code=\ CROSS, CURRENT_DATE, CURRENT_TIME, CURRENT_TIMESTAMP, DISTINCT, EXCEPT, EXISTS, FALSE, FETCH, FOR, FROM, FULL, GROUP, HAVING, INNER, INTERSECT, IS, JOIN, LIKE, LIMIT, MINUS, NATURAL, NOT, NULL, OFFSET, ON, ORDER, PRIMARY, ROWNUM, SELECT, SYSDATE, SYSTIME, SYSTIMESTAMP, TODAY, TRUE, UNION, UNIQUE, WHERE
-advanced_1141_p=\ Certain words of this list are keywords because they are functions that can be used without '()' for compatibility, for example CURRENT_TIMESTAMP
.
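For example, ORDER is in this list, so it must be quoted to be used as an identifier (illustrative SQL):

    CREATE TABLE "ORDER"(ID INT PRIMARY KEY);
    SELECT * FROM "ORDER";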
-advanced_1142_h2=Standards Compliance
-advanced_1143_p=\ This database tries to be as standards-compliant as possible. For the SQL language, ANSI/ISO is the main standard. There are several versions that refer to the release date\: SQL-92, SQL\:1999, and SQL\:2003. Unfortunately, the standard documentation is not freely available. Another problem is that important features are not standardized. Whenever this is the case, this database tries to be compatible with other databases.
-advanced_1144_h3=Supported Character Sets, Character Encoding, and Unicode
-advanced_1145_p=\ H2 internally uses Unicode, and supports all character encoding systems and character sets supported by the virtual machine you use.
-advanced_1146_h2=Run as Windows Service
-advanced_1147_p=\ Using a native wrapper / adapter, Java applications can be run as a Windows Service. There are various tools available to do that. The Java Service Wrapper from Tanuki Software, Inc. is included in the installation. Batch files are provided to install, start, stop and uninstall the H2 Database Engine Service. This service contains the TCP Server and the H2 Console web application. The batch files are located in the directory h2/service
.
-advanced_1148_p=\ The service wrapper bundled with H2 is a 32-bit version. On 64-bit versions of Windows (x64), you need to use a 64-bit version of the wrapper, for example the one from Simon Krenger.
-advanced_1149_p=\ When running the database as a service, absolute paths should be used. Using ~ in the database URL is problematic in this case, because it refers to the home directory of the current user. The service might run as the wrong user or no user at all, so that the database files might end up in an unexpected place.
-advanced_1150_h3=Install the Service
-advanced_1151_p=\ The service needs to be registered as a Windows Service first. To do that, double click on 1_install_service.bat
. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear.
-advanced_1152_h3=Start the Service
-advanced_1153_p=\ You can start the H2 Database Engine Service using the service manager of Windows, or by double clicking on 2_start_service.bat
. Please note that the batch file does not print an error message if the service is not installed.
-advanced_1154_h3=Connect to the H2 Console
-advanced_1155_p=\ After installing and starting the service, you can connect to the H2 Console application using a browser. Double click on 3_start_browser.bat to do that. The default port (8082) is hard coded in the batch file.
-advanced_1156_h3=Stop the Service
-advanced_1157_p=\ To stop the service, double click on 4_stop_service.bat
. Please note that the batch file does not print an error message if the service is not installed or started.
-advanced_1158_h3=Uninstall the Service
-advanced_1159_p=\ To uninstall the service, double click on 5_uninstall_service.bat
. If successful, a command prompt window will pop up and disappear immediately. If not, a message will appear.
-advanced_1160_h3=Additional JDBC drivers
-advanced_1161_p=\ To use other databases (for example MySQL), the location of the JDBC drivers of those databases needs to be added to the environment variables H2DRIVERS or CLASSPATH before installing the service. Multiple drivers can be set; each entry needs to be separated with a ; (Windows) or \: (other operating systems). Spaces in the path names are supported. The settings must not be quoted.
-advanced_1162_h2=ODBC Driver
-advanced_1163_p=\ This database does not come with its own ODBC driver at this time, but it supports the PostgreSQL network protocol. Therefore, the PostgreSQL ODBC driver can be used. Support for the PostgreSQL network protocol is quite new and should be viewed as experimental. It should not be used for production applications.
-advanced_1164_p=\ To use the PostgreSQL ODBC driver on 64 bit versions of Windows, first run c\:/windows/syswow64/odbcad32.exe
. At this point you set up your DSN just like you would on any other system. See also\: Re\: ODBC Driver on Windows 64 bit
-advanced_1165_h3=ODBC Installation
-advanced_1166_p=\ First, the ODBC driver must be installed. Any recent PostgreSQL ODBC driver should work, however version 8.2 (psqlodbc-08_02*
) or newer is recommended. The Windows version of the PostgreSQL ODBC driver is available at http\://www.postgresql.org/ftp/odbc/versions/msi.
-advanced_1167_h3=Starting the Server
-advanced_1168_p=\ After installing the ODBC driver, start the H2 Server using the command line\:
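For example (the jar file name depends on the version):

    java -cp h2*.jar org.h2.tools.Server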
-advanced_1169_p=\ The PG Server (PG for PostgreSQL protocol) is started as well. By default, databases are stored in the current working directory where the server is started. Use -baseDir
to save databases in another directory, for example the user home directory\:
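For example:

    java -cp h2*.jar org.h2.tools.Server -baseDir ~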
-advanced_1170_p=\ The PG server can be started and stopped from within a Java application as follows\:
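A minimal sketch using the org.h2.tools.Server API (the -baseDir argument is illustrative):

    import java.sql.SQLException;
    import org.h2.tools.Server;

    // start the PG server; add "-pgAllowOthers" to allow remote connections
    Server server = Server.createPgServer("-baseDir", "~").start();
    // ... use the server ...
    server.stop();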
-advanced_1171_p=\ By default, only connections from localhost are allowed. To allow remote connections, use -pgAllowOthers
when starting the server.
-advanced_1172_p=\ To map an ODBC database name to a different JDBC database name, use the option -key
when starting the server. Please note only one mapping is allowed. The following will map the ODBC database named TEST
to the database URL jdbc\:h2\:~/data/test;cipher\=aes
\:
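A sketch of such a command (an assumption, not taken from the original text; the exact quoting depends on your shell):

    java -cp h2*.jar org.h2.tools.Server -key TEST "~/data/test;cipher=aes"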
-advanced_1173_h3=ODBC Configuration
-advanced_1174_p=\ After installing the driver, a new Data Source must be added. In Windows, run odbcad32.exe
to open the Data Source Administrator. Then click on 'Add...' and select the PostgreSQL Unicode driver. Then click 'Finish'. You will be able to change the connection properties. The property column represents the property key in the odbc.ini
file (which may be different from the GUI).
-advanced_1175_th=Property
-advanced_1176_th=Example
-advanced_1177_th=Remarks
-advanced_1178_td=Data Source
-advanced_1179_td=H2 Test
-advanced_1180_td=The name of the ODBC Data Source
-advanced_1181_td=Database
-advanced_1182_td=~/test;ifexists\=true
-advanced_1183_td=\ The database name. This can include connection settings. By default, the database is stored in the current working directory where the Server is started, except when the -baseDir setting is used. The name must be at least 3 characters.
-advanced_1184_td=Servername
-advanced_1185_td=localhost
-advanced_1186_td=The server name or IP address.
-advanced_1187_td=By default, only local connections are allowed
-advanced_1188_td=Username
-advanced_1189_td=sa
-advanced_1190_td=The database user name.
-advanced_1191_td=SSL
-advanced_1192_td=false (disabled)
-advanced_1193_td=At this time, SSL is not supported.
-advanced_1194_td=Port
-advanced_1195_td=5435
-advanced_1196_td=The port where the PG Server is listening.
-advanced_1197_td=Password
-advanced_1198_td=sa
-advanced_1199_td=The database password.
-advanced_1200_p=\ To improve performance, please enable 'server side prepare' under Options / Datasource / Page 2 / Server side prepare.
-advanced_1201_p=\ Afterwards, you may use this data source.
-advanced_1202_h3=PG Protocol Support Limitations
-advanced_1203_p=\ At this time, only a subset of the PostgreSQL network protocol is implemented. Also, there may be compatibility problems on the SQL level, with the catalog, or with text encoding. Problems are fixed as they are found. Currently, statements cannot be canceled when using the PG protocol. Also, H2 does not provide index metadata over ODBC.
-advanced_1204_p=\ The PostgreSQL ODBC driver setup requires a database password; that means it is not possible to connect to H2 databases without a password. This is a limitation of the ODBC driver.
-advanced_1205_h3=Security Considerations
-advanced_1206_p=\ Currently, the PG Server does not support challenge-response authentication or password encryption. This may be a problem if an attacker can listen to the data transferred between the ODBC driver and the server, because the password is readable to the attacker. Also, it is currently not possible to use encrypted SSL connections. Therefore, the ODBC driver should not be used where security is important.
-advanced_1207_p=\ The first connection that opens a database using the PostgreSQL server needs to be an administrator user. Subsequent connections don't need to be opened by an administrator.
-advanced_1208_h3=Using Microsoft Access
-advanced_1209_p=\ When using Microsoft Access to edit data in a linked H2 table, you may need to enable the following option\: Tools - Options - Edit/Find - ODBC fields.
-advanced_1210_h2=Using H2 in Microsoft .NET
-advanced_1211_p=\ The database can be used from Microsoft .NET even without using Java, by using IKVM.NET. You can access an H2 database on .NET using the JDBC API, or using the ADO.NET interface.
-advanced_1212_h3=Using the ADO.NET API on .NET
-advanced_1213_p=\ An implementation of the ADO.NET interface is available in the open source project H2Sharp.
-advanced_1214_h3=Using the JDBC API on .NET
-advanced_1215_li=Install the .NET Framework from Microsoft. Mono has not yet been tested.
-advanced_1216_li=Install IKVM.NET.
-advanced_1217_li=Copy the h2*.jar
file to ikvm/bin
-advanced_1218_li=Run the H2 Console using\: ikvm -jar h2*.jar
-advanced_1219_li=Convert the H2 Console to an .exe
file using\: ikvmc -target\:winexe h2*.jar
. You may ignore the warnings.
-advanced_1220_li=Create a .dll
file using (change the version accordingly)\: ikvmc.exe -target\:library -version\:1.0.69.0 h2*.jar
-advanced_1221_p=\ If you want your C\# application to use H2, you need to add the h2.dll and the IKVM.OpenJDK.ClassLibrary.dll to your C\# solution. Here is some sample code\:
-advanced_1222_h2=ACID
-advanced_1223_p=\ In the database world, ACID stands for\:
-advanced_1224_li=Atomicity\: transactions must be atomic, meaning either all tasks are performed or none.
-advanced_1225_li=Consistency\: all operations must comply with the defined constraints.
-advanced_1226_li=Isolation\: transactions must be isolated from each other.
-advanced_1227_li=Durability\: committed transactions will not be lost.
-advanced_1228_h3=Atomicity
-advanced_1229_p=\ Transactions in this database are always atomic.
-advanced_1230_h3=Consistency
-advanced_1231_p=\ By default, this database is always in a consistent state. Referential integrity rules are enforced except when explicitly disabled.
-advanced_1232_h3=Isolation
-advanced_1233_p=\ For H2, as with most other database systems, the default isolation level is 'read committed'. This provides better performance, but also means that transactions are not completely isolated. H2 supports the transaction isolation levels 'serializable', 'read committed', and 'read uncommitted'.
-advanced_1234_h3=Durability
-advanced_1235_p=\ This database does not guarantee that all committed transactions survive a power failure. Tests show that all databases sometimes lose transactions on power failure (for details, see below). Where losing transactions is not acceptable, a laptop or UPS (uninterruptible power supply) should be used. If durability is required for all possible cases of hardware failure, clustering should be used, such as the H2 clustering mode.
-advanced_1236_h2=Durability Problems
-advanced_1237_p=\ Complete durability means all committed transaction survive a power failure. Some databases claim they can guarantee durability, but such claims are wrong. A durability test was run against H2, HSQLDB, PostgreSQL, and Derby. All of those databases sometimes lose committed transactions. The test is included in the H2 download, see org.h2.test.poweroff.Test
.
-advanced_1238_h3=Ways to (Not) Achieve Durability
-advanced_1239_p=\ Making sure that committed transactions are not lost is more complicated than it first seems. To guarantee complete durability, a database must ensure that the log record is on the hard drive before the commit call returns. To do that, databases use different methods. One is to use the 'synchronous write' file access mode. In Java, RandomAccessFile supports the modes rws and rwd\:
-advanced_1240_code=rwd
-advanced_1241_li=\: every update to the file's content is written synchronously to the underlying storage device.
-advanced_1242_code=rws
-advanced_1243_li=\: in addition to rwd
, every update to the metadata is written synchronously.
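A minimal sketch of opening a file in one of these modes (the file name is illustrative):

    import java.io.IOException;
    import java.io.RandomAccessFile;

    // 'rwd': the file's content (but not necessarily the metadata) is
    // written synchronously to the underlying storage device
    RandomAccessFile file = new RandomAccessFile("test.dat", "rwd");
    file.write(new byte[] { 1 });
    file.close();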
-advanced_1244_p=\ A test (org.h2.test.poweroff.TestWrite) with one of those modes achieves around 50 thousand write operations per second. Even when the operating system write buffer is disabled, the write rate is around 50 thousand operations per second. This feature does not force changes to disk because it does not flush all buffers. The test updates the same byte in the file again and again. If the hard drive were able to write at this rate, then the disk would need to make at least 50 thousand revolutions per second, or 3 million RPM (revolutions per minute). There are no such hard drives. The hard drive used for the test is about 7200 RPM, or about 120 revolutions per second. Because there is an overhead, the maximum write rate must be lower than that.
-advanced_1245_p=\ Calling fsync
flushes the buffers. There are two ways to do that in Java\:
-advanced_1246_code=FileDescriptor.sync()
-advanced_1247_li=. The documentation says that this forces all system buffers to synchronize with the underlying device. This method is supposed to return after all in-memory modified copies of buffers associated with this file descriptor have been written to the physical medium.
-advanced_1248_code=FileChannel.force()
-advanced_1249_li=. This method is supposed to force any updates to this channel's file to be written to the storage device that contains it.
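A minimal sketch of both calls (the file name is illustrative):

    import java.io.FileOutputStream;
    import java.io.IOException;

    FileOutputStream out = new FileOutputStream("test.dat");
    out.write(1);
    out.getFD().sync();           // FileDescriptor.sync(): flush system buffers
    out.getChannel().force(true); // FileChannel.force(): same, via NIO
    out.close();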
-advanced_1250_p=\ By default, MySQL calls fsync
for each commit. When using one of those methods, only around 60 write operations per second can be achieved, which is consistent with the RPM rate of the hard drive used. Unfortunately, even when calling FileDescriptor.sync()
or FileChannel.force()
, data is not always persisted to the hard drive, because most hard drives do not obey fsync()
\: see Your Hard Drive Lies to You. In Mac OS X, fsync
does not flush hard drive buffers. See Bad fsync?. So the situation is confusing, and tests prove there is a problem.
-advanced_1251_p=\ Flushing hard drive buffers reliably is hard, and when you do, performance is very bad. First you need to make sure that the hard drive actually flushes all buffers. Tests show that this cannot be done in a reliable way. Then the maximum number of transactions is around 60 per second. Because of those reasons, the default behavior of H2 is to delay writing committed transactions.
-advanced_1252_p=\ In H2, after a power failure, a bit more than one second of committed transactions may be lost. To change the behavior, use SET WRITE_DELAY
and CHECKPOINT SYNC
. Most other databases support commit delay as well. In the performance comparison, commit delay was used for all databases that support it.
-advanced_1253_h3=Running the Durability Test
-advanced_1254_p=\ To test the durability / non-durability of this and other databases, you can use the test application in the package org.h2.test.poweroff
. Two computers with network connection are required to run this test. One computer just listens, while the test application is run (and power is cut) on the other computer. The computer with the listener application opens a TCP/IP port and listens for an incoming connection. The second computer first connects to the listener, and then created the databases and starts inserting records. The connection is set to 'autocommit', which means after each inserted record a commit is performed automatically. Afterwards, the test computer notifies the listener that this record was inserted successfully. The listener computer displays the last inserted record number every 10 seconds. Now, switch off the power manually, then restart the computer, and run the application again. You will find out that in most cases, none of the databases contains all the records that the listener computer knows about. For details, please consult the source code of the listener and test application.
-advanced_1255_h2=Using the Recover Tool
-advanced_1256_p=\ The Recover
tool can be used to extract the contents of a database file, even if the database is corrupted. It also extracts the content of the transaction log and large objects (CLOB or BLOB). To run the tool, type on the command line\:
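For example (the jar file name depends on the version):

    java -cp h2*.jar org.h2.tools.Recover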
-advanced_1257_p=\ For each database in the current directory, a text file will be created. This file contains raw insert statements (for the data) and data definition (DDL) statements to recreate the schema of the database. This file can be executed using the RunScript
tool or a RUNSCRIPT FROM
SQL statement. The script includes at least one CREATE USER
statement. If you run the script against a database that was created with the same user, or if there are conflicting users, running the script will fail. Consider running the script against a database that was created with a user name that is not in the script.
-advanced_1258_p=\ The Recover tool creates a SQL script from the database file. It also processes the transaction log.
-advanced_1259_p=\ To verify the database can recover at any time, append ;RECOVER_TEST\=64 to the database URL in your test environment. This will simulate an application crash after every 64 writes to the database file. A log file named databaseName.h2.db.log is created that lists the operations. The recovery is tested using an in-memory file system, which means it may require a larger heap setting.
-advanced_1260_h2=File Locking Protocols
-advanced_1261_p=\ Multiple concurrent connections to the same database are supported, however a database file can only be open for reading and writing (in embedded mode) by one process at the same time. Otherwise, the processes would overwrite each other's data and corrupt the database file. To protect against this problem, whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database stops normally, this lock file is deleted.
-advanced_1262_p=\ In special cases (if the process did not terminate normally, for example because there was a power failure), the lock file is not deleted by the process that created it. That means the existence of the lock file is not a safe protocol for file locking. However, this software uses a challenge-response protocol to protect the database files. There are two methods (algorithms) implemented to provide both security (that is, the same database files cannot be opened by two processes at the same time) and simplicity (that is, the lock file does not need to be deleted manually by the user). The two methods are the 'file' method and the 'socket' method.
-advanced_1263_p=\ The file locking protocols (except the file locking method 'FS') have the following limitation\: if a shared file system is used, and the machine with the lock owner is sent to sleep (standby or hibernate), another machine may take over. If the machine that originally held the lock wakes up, the database may become corrupt. If this situation can occur, the application must ensure the database is closed when the application is put to sleep.
-advanced_1264_h3=File Locking Method 'File'
-advanced_1265_p=\ The default method for database file locking for version 1.3 and older is the 'File Method'. The algorithm is\:
-advanced_1266_li=If the lock file does not exist, it is created (using the atomic operation File.createNewFile). Then, the process waits a little bit (20 ms) and checks the file again. If the file was changed during this time, the operation is aborted. This protects against a race condition where one process deletes the lock file just after another one creates it, and a third process creates the file again. It does not occur if there are only two writers.
-advanced_1267_li=\ If the file can be created, a random number is inserted together with the locking method ('file'). Afterwards, a watchdog thread is started that checks regularly (once every second by default) if the file was deleted or modified by another (challenger) thread / process. Whenever that occurs, the file is overwritten with the old data. The watchdog thread runs with high priority so that a change to the lock file does not go undetected even if the system is very busy. However, the watchdog thread uses very few resources (CPU time), because it waits most of the time. Also, the watchdog only reads from the hard disk and does not write to it.
-advanced_1268_li=\ If the lock file exists and was recently modified, the process waits for some time (up to two seconds). If it was still changed, an exception is thrown (database is locked). This is done to eliminate race conditions with many concurrent writers. Afterwards, the file is overwritten with a new version (challenge). After that, the thread waits for 2 seconds. If there is a watchdog thread protecting the file, it will overwrite the change and this process will fail to lock the database. However, if there is no watchdog thread, the lock file will still be as written by this thread. In this case, the file is deleted and atomically created again. The watchdog thread is started in this case and the file is locked.
-advanced_1269_p=\ This algorithm is tested with over 100 concurrent threads. In some cases, when there are many concurrent threads trying to lock the database, they block each other (meaning the file cannot be locked by any of them) for some time. However, the file never gets locked by two threads at the same time. Using that many concurrent threads / processes is not the common use case; generally, an application should throw an error to the user if it cannot open a database, and not retry in a (fast) loop.
-advanced_1270_h3=File Locking Method 'Socket'
-advanced_1271_p=\ There is a second locking mechanism implemented, but disabled by default. To use it, append ;FILE_LOCK\=SOCKET
to the database URL. The algorithm is\:
-advanced_1272_li=If the lock file does not exist, it is created. Then a server socket is opened on a defined port, and kept open. The port and IP address of the process that opened the database is written into the lock file.
-advanced_1273_li=If the lock file exists, and the lock method is 'file', then the software switches to the 'file' method.
-advanced_1274_li=If the lock file exists, and the lock method is 'socket', then the process checks if the port is in use. If the original process is still running, the port is in use and this process throws an exception (database is in use). If the original process died (for example due to a power failure, or abnormal termination of the virtual machine), then the port was released. The new process deletes the lock file and starts again.
-advanced_1275_p=\ This method does not require a watchdog thread actively polling (reading) the same file every second. The problem with this method is that if the file is stored on a network share, two processes (running on different computers) could still open the same database files if they do not have a direct TCP/IP connection.
-advanced_1276_h3=File Locking Method 'FS'
-advanced_1277_p=\ This is the default mode for version 1.4 and newer. This database file locking mechanism uses a native file system lock on the database file. No *.lock.db file is created in this case, and no background thread is started. This mechanism may not work on all systems as expected. Some systems allow locking the same file multiple times within the same virtual machine, and on some systems native file locking is not supported or files are not unlocked after a power failure.
-advanced_1278_p=\ To enable this feature, append ;FILE_LOCK\=FS
to the database URL.
-advanced_1279_p=\ This feature is relatively new. When using it for production, please ensure your system does in fact lock files as expected.
-advanced_1280_h2=Using Passwords
-advanced_1281_h3=Using Secure Passwords
-advanced_1282_p=\ Remember that weak passwords can be broken regardless of the encryption and security protocols. Don't use passwords that can be found in a dictionary. Appending numbers does not make passwords secure. A way to create good passwords that can be remembered is\: take the first letters of a sentence, use upper and lower case characters, and creatively include special characters (but it's more important to use a long password than to use special characters). Example\:
-advanced_1283_code=i'sE2rtPiUKtT
-advanced_1284_p=\ from the sentence it's easy to remember this password if you know the trick
.
-advanced_1285_h3=Passwords\: Using Char Arrays instead of Strings
-advanced_1286_p=\ Java strings are immutable objects and cannot be safely 'destroyed' by the application. After creating a string, it will remain in the main memory of the computer at least until it is garbage collected. The garbage collection cannot be controlled by the application, and even if it is garbage collected the data may still remain in memory. It might also be possible that the part of memory containing the password is swapped to disk (if not enough main memory is available), which is a problem if the attacker has access to the swap file of the operating system.
-advanced_1287_p=\ It is a good idea to use char arrays instead of strings for passwords. Char arrays can be cleared (filled with zeros) after use, and therefore the password will not be stored in the swap file.
-advanced_1288_p=\ This database supports using char arrays instead of strings to pass user and file passwords. The following code can be used to do that\:
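A minimal sketch (the URL and password are illustrative); the char array is passed in the Properties object and cleared after use:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Arrays;
    import java.util.Properties;

    char[] password = {'1', '2', '3'};
    Properties prop = new Properties();
    prop.setProperty("user", "sa");
    // pass the password as a char array instead of a String
    prop.put("password", password);
    Connection conn;
    try {
        conn = DriverManager.getConnection("jdbc:h2:~/test", prop);
    } finally {
        Arrays.fill(password, (char) 0); // clear the password from memory
    }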
-advanced_1289_p=\ This example requires Java 1.6. When using Swing, use javax.swing.JPasswordField
.
-advanced_1290_h3=Passing the User Name and/or Password in the URL
-advanced_1291_p=\ Instead of passing the user name as a separate parameter as in Connection conn \= DriverManager. getConnection("jdbc\:h2\:~/test", "sa", "123");
the user name (and/or password) can be supplied in the URL itself\: Connection conn \= DriverManager. getConnection("jdbc\:h2\:~/test;USER\=sa;PASSWORD\=123");
The settings in the URL override the settings passed as a separate parameter.
-advanced_1292_h2=Password Hash
-advanced_1293_p=\ Sometimes the database password needs to be stored in a configuration file (for example in the web.xml
file). In addition to connecting with the plain text password, this database supports connecting with the password hash. This means that only the hash of the password (and not the plain text password) needs to be stored in the configuration file. This will only protect others from reading or re-constructing the plain text password (even if they have access to the configuration file); it does not protect others from accessing the database using the password hash.
-advanced_1294_p=\ To connect using the password hash instead of plain text password, append ;PASSWORD_HASH\=TRUE
to the database URL, and replace the password with the password hash. To calculate the password hash from a plain text password, run the following command within the H2 Console tool\: @password_hash <upperCaseUserName> <password>
. As an example, if the user name is sa
and the password is test
, run the command @password_hash SA test
. Then use the resulting password hash as you would use the plain text password. When using an encrypted database, then the user password and file password need to be hashed separately. To calculate the hash of the file password, run\: @password_hash file <filePassword>
.
-advanced_1295_h2=Protection against SQL Injection
-advanced_1296_h3=What is SQL Injection
-advanced_1297_p=\ This database engine provides a solution for the security vulnerability known as 'SQL Injection'. Here is a short description of what SQL injection means. Some applications build SQL statements with embedded user input such as\:
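For example (a sketch; the table USERS and the variable pwd are illustrative):

    // vulnerable: user input is concatenated directly into the statement
    String sql = "SELECT * FROM USERS WHERE PASSWORD='" + pwd + "'";
    ResultSet rs = conn.createStatement().executeQuery(sql);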
-advanced_1298_p=\ If this mechanism is used anywhere in the application, and user input is not correctly filtered or encoded, it is possible for a user to inject SQL functionality or statements by using specially built input such as (in this example) this password\: ' OR ''\='
. In this case the statement becomes\:
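Continuing the sketch above, the resulting statement is:

    SELECT * FROM USERS WHERE PASSWORD='' OR ''=''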
-advanced_1299_p=\ This statement is always true, no matter what password is stored in the database. For more information about SQL Injection, see Glossary and Links.
-advanced_1300_h3=Disabling Literals
-advanced_1301_p=\ SQL Injection is not possible if user input is not directly embedded in SQL statements. A simple solution for the problem above is to use a prepared statement\:
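Continuing the sketch above:

    // safe: the user input is passed as a parameter and never parsed as SQL
    PreparedStatement prep = conn.prepareStatement(
            "SELECT * FROM USERS WHERE PASSWORD=?");
    prep.setString(1, pwd);
    ResultSet rs = prep.executeQuery();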
-advanced_1302_p=\ This database provides a way to enforce usage of parameters when passing user input to the database. This is done by disabling embedded literals in SQL statements. To do this, execute the statement\:
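The statement is:

    SET ALLOW_LITERALS NONE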
-advanced_1303_p=\ Afterwards, SQL statements with text and number literals are not allowed any more. That means SQL statements of the form WHERE NAME\='abc' or WHERE CustomerId\=10 will fail. It is still possible to use prepared statements and parameters as described above. Also, it is still possible to generate SQL statements dynamically and use the Statement API, as long as the SQL statements do not include literals. There is also a second mode where number literals are allowed\: SET ALLOW_LITERALS NUMBERS. To allow all literals, execute SET ALLOW_LITERALS ALL (this is the default setting). Literals can only be enabled or disabled by an administrator.
-advanced_1304_h3=Using Constants
-advanced_1305_p=\ Disabling literals also means disabling hard-coded 'constant' literals. This database supports defining constants using the CREATE CONSTANT
command. Constants can be defined only when literals are enabled, but used even when literals are disabled. To avoid name clashes with column names, constants can be defined in other schemas\:
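For example (a sketch; the schema, constant, and table names are illustrative):

    CREATE SCHEMA CONST;
    CREATE CONSTANT CONST.ACTIVE VALUE 'Active';
    -- the constant can be used even when literals are disabled
    SELECT * FROM USERS WHERE STATUS = CONST.ACTIVE;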
-advanced_1306_p=\ Even when literals are enabled, it is better to use constants instead of hard-coded number or text literals in queries or views. With constants, typos are found at compile time, and the source code is easier to understand and change.
-advanced_1307_h3=Using the ZERO() Function
-advanced_1308_p=\ It is not required to create a constant for the number 0 as there is already a built-in function ZERO()
\:
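For example (illustrative):

    -- no number literal is needed to compare against zero
    SELECT * FROM USERS WHERE LENGTH(PASSWORD) > ZERO();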
-advanced_1309_h2=Protection against Remote Access
-advanced_1310_p=\ By default this database does not allow connections from other machines when starting the H2 Console, the TCP server, or the PG server. Remote access can be enabled using the command line options -webAllowOthers, -tcpAllowOthers, -pgAllowOthers
.
-advanced_1311_p=\ If you enable remote access using -tcpAllowOthers
or -pgAllowOthers
, please also consider using the options -baseDir, -ifExists
, so that remote users can not create new databases or access existing databases with weak passwords. When using the option -baseDir
, only databases within that directory may be accessed. Ensure the existing accessible databases are protected using strong passwords.
-advanced_1312_p=\ If you enable remote access using -webAllowOthers, please ensure the web server can only be accessed from trusted networks. The options -baseDir, -ifExists do not protect access to the tools section, and do not prevent remote shutdown of the web server, changes to the preferences or the saved connection settings, or access to other databases accessible from the system.
-advanced_1313_h2=Restricting Class Loading and Usage
-advanced_1314_p=\ By default there is no restriction on loading classes and executing Java code for admins. That means an admin may call system functions such as System.setProperty
by executing\:
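For example (a sketch of how an admin could reach System.setProperty from SQL; the alias name and values are illustrative):

    CREATE ALIAS SET_PROPERTY FOR "java.lang.System.setProperty";
    CALL SET_PROPERTY('abc', '1');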
-advanced_1315_p=\ To restrict users (including admins) from loading classes and executing code, the list of allowed classes can be set in the system property h2.allowedClasses
in the form of a comma separated list of classes or patterns (items ending with *
). By default all classes are allowed. Example\:
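For example (com.company.* is an illustrative pattern):

    java -Dh2.allowedClasses=java.lang.Math,com.company.* ...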
-advanced_1316_p=\ This mechanism is used for all user classes, including database event listeners, trigger classes, user-defined functions, user-defined aggregate functions, and JDBC driver classes (with the exception of the H2 driver) when using the H2 Console.
-advanced_1317_h2=Security Protocols
-advanced_1318_p=\ The following paragraphs document the security protocols used in this database. These descriptions are very technical and only intended for security experts who already know the underlying security primitives.
-advanced_1319_h3=User Password Encryption
-advanced_1320_p=\ When a user tries to connect to a database, the combination of user name, @, and password are hashed using SHA-256, and this hash value is transmitted to the database. This step does not protect against an attacker that re-uses the value if he is able to listen to the (unencrypted) transmission between the client and the server. But, the passwords are never transmitted as plain text, even when using an unencrypted connection between client and server. That means if a user reuses the same password for different things, this password is still protected up to some point. See also 'RFC 2617 - HTTP Authentication\: Basic and Digest Access Authentication' for more information.
-advanced_1321_p=\ When a new database or user is created, a new random salt value is generated. The size of the salt is 64 bits. Using the random salt reduces the risk of an attacker pre-calculating hash values for many different (commonly used) passwords.
-advanced_1322_p=\ The combination of user-password hash value (see above) and salt is hashed using SHA-256. The resulting value is stored in the database. When a user tries to connect to the database, the database combines user-password hash value with the stored salt value and calculates the hash value. Other products use multiple iterations (hash the hash value again and again), but this is not done in this product to reduce the risk of denial of service attacks (where the attacker tries to connect with bogus passwords, and the server spends a lot of time calculating the hash value for each password). The reasoning is\: if the attacker has access to the hashed passwords, he also has access to the data in plain text, and therefore does not need the password any more. If the data is protected by storing it on another computer and only accessible remotely, then the iteration count is not required at all.
-advanced_1323_h3=File Encryption
-advanced_1324_p=\ The database files can be encrypted using the AES-128 algorithm.
-advanced_1325_p=\ When a user tries to connect to an encrypted database, the combination of file@
and the file password is hashed using SHA-256. This hash value is transmitted to the server.
-advanced_1326_p=\ When a new database file is created, a new cryptographically secure random salt value is generated. The size of the salt is 64 bits. The combination of the file password hash and the salt value is hashed 1024 times using SHA-256. The reason for the iteration is to make it harder for an attacker to calculate hash values for common passwords.
-advanced_1327_p=\ The resulting hash value is used as the key for the block cipher algorithm. Then, an initialization vector (IV) key is calculated by hashing the key again using SHA-256. This is to make sure the IV is unknown to the attacker. The reason for using a secret IV is to protect against watermark attacks.
-advanced_1328_p=\ Before saving a block of data (each block is 8 bytes long), the following operations are executed\: first, the IV is calculated by encrypting the block number with the IV key (using the same block cipher algorithm). This IV is combined with the plain text using XOR. The resulting data is encrypted using the AES-128 algorithm.
-advanced_1329_p=\ When decrypting, the operation is done in reverse. First, the block is decrypted using the key, and then the IV is calculated and combined with the decrypted text using XOR.
-advanced_1330_p=\ Therefore, the block cipher mode of operation is CBC (cipher-block chaining), but each chain is only one block long. The advantage over the ECB (electronic codebook) mode is that patterns in the data are not revealed, and the advantage over multi block CBC is that flipped cipher text bits are not propagated to flipped plaintext bits in the next block.
-advanced_1331_p=\ Database encryption is meant for securing the database while it is not in use (stolen laptop and so on). It is not meant for cases where the attacker has access to files while the database is in use. An attacker with write access can, for example, replace pieces of files with pieces of older versions and manipulate data in this way.
-advanced_1332_p=\ File encryption slows down the performance of the database engine. Compared to unencrypted mode, database operations take about 2.5 times longer using AES (embedded mode).
-advanced_1333_h3=Wrong Password / User Name Delay
-advanced_1334_p=\ To protect against remote brute force password attacks, the delay after each unsuccessful login doubles. Use the system properties h2.delayWrongPasswordMin and h2.delayWrongPasswordMax to change the minimum (the default is 250 milliseconds) or maximum delay (the default is 4000 milliseconds, or 4 seconds). The delay only applies to users using the wrong password. Normally there is no delay for a user that knows the correct password, with one exception\: after using the wrong password, there is a delay of up to (randomly distributed) the same delay as for a wrong password. This is to protect against parallel brute force attacks, so that an attacker needs to wait for the whole delay. Delays are synchronized; this is also required to protect against parallel attacks.
-advanced_1335_p=\ There is only one exception message for both a wrong user name and a wrong password, to make it harder to get the list of user names. It is not possible to see from the stack trace whether the user name or the password was wrong.
-advanced_1336_h3=HTTPS Connections
-advanced_1337_p=\ The web server supports HTTP and HTTPS connections using SSLServerSocket. There is a default self-signed certificate to provide an easy starting point, but custom certificates are supported as well.
-advanced_1338_h2=TLS Connections
-advanced_1339_p=\ Remote TLS connections are supported using the Java Secure Socket Extension (SSLServerSocket, SSLSocket
). By default, anonymous TLS is enabled.
-advanced_1340_p=\ To use your own keystore, set the system properties javax.net.ssl.keyStore
and javax.net.ssl.keyStorePassword
before starting the H2 server and client. See also Customizing the Default Key and Trust Stores, Store Types, and Store Passwords for more information.
-advanced_1341_p=\ To disable anonymous TLS, set the system property h2.enableAnonymousTLS
to false.
-advanced_1342_h2=Universally Unique Identifiers (UUID)
-advanced_1343_p=\ This database supports UUIDs. Also supported is a function to create new UUIDs using a cryptographically strong pseudo random number generator. With random UUIDs, the chance of two having the same value can be calculated using the probability theory. See also 'Birthday Paradox'. Standardized randomly generated UUIDs have 122 random bits. 4 bits are used for the version (Randomly generated UUID), and 2 bits for the variant (Leach-Salz). This database supports generating such UUIDs using the built-in function RANDOM_UUID()
. Here is a small program to estimate the probability of having two identical UUIDs after generating a number of values\:
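A small stand-alone sketch of such a program (not the one shipped with H2), using the birthday-paradox approximation p ~ 1 - exp(-n^2 / (2 * 2^122)):

    public class UuidCollisionProbability {
        public static void main(String[] args) {
            // a standardized randomly generated UUID has 122 random bits
            double possibleValues = Math.pow(2, 122);
            for (int exp = 36; exp <= 46; exp += 5) {
                double n = Math.pow(2, exp);
                // probability of at least one duplicate among n UUIDs
                double p = -Math.expm1(-(n * n) / (2 * possibleValues));
                System.out.printf("2^%d UUIDs: p ~ %.1e%n", exp, p);
            }
        }
    }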
-advanced_1344_p=\ Some values are\:
-advanced_1345_th=Number of UUIDs
-advanced_1346_th=Probability of Duplicates
-advanced_1347_td=2^36\=68'719'476'736
-advanced_1348_td=0.000'000'000'000'000'4
-advanced_1349_td=2^41\=2'199'023'255'552
-advanced_1350_td=0.000'000'000'000'4
-advanced_1351_td=2^46\=70'368'744'177'664
-advanced_1352_td=0.000'000'000'4
-advanced_1353_p=\ To help non-mathematicians understand what those numbers mean, here is a comparison\: one's annual risk of being hit by a meteorite is estimated to be one chance in 17 billion, which means the probability is about 0.000'000'000'06.
-advanced_1354_h2=Spatial Features
-advanced_1355_p=\ H2 supports the geometry data type and spatial indexes if the JTS Topology Suite is in the classpath. To run the H2 Console tool with JTS, you need to download the JTS 1.13 jar file and place it in the h2 bin directory. Then edit the h2.sh file as follows\:
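For example (an assumption about the shipped h2.sh, not taken from the original text; jar file names depend on the versions used):

    #!/bin/sh
    dir=$(dirname "$0")
    java -cp "$dir/h2.jar:$dir/jts-1.13.jar" org.h2.tools.Console "$@"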
-advanced_1356_p=\ Here is an example SQL script to create a table with a spatial column and index\:
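For example (a sketch; table and index names are illustrative):

    CREATE TABLE GEO_TABLE(GID SERIAL, THE_GEOM GEOMETRY);
    INSERT INTO GEO_TABLE(THE_GEOM) VALUES ('POINT(500 505)');
    CREATE SPATIAL INDEX GEO_TABLE_SPATIAL_INDEX ON GEO_TABLE(THE_GEOM);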
-advanced_1357_p=\ To query the table using geometry envelope intersection, use the operation &&
, as in PostGIS\:
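Continuing the example above:

    SELECT * FROM GEO_TABLE
    WHERE THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';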
-advanced_1358_p=\ You can verify that the spatial index is used using the "explain plan" feature\:
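For example:

    EXPLAIN SELECT * FROM GEO_TABLE
    WHERE THE_GEOM && 'POLYGON ((490 490, 536 490, 536 515, 490 515, 490 490))';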
-advanced_1359_p=\ For persistent databases, the spatial index is stored on disk; for in-memory databases, the index is kept in memory.
-advanced_1360_h2=Recursive Queries
-advanced_1361_p=\ H2 has experimental support for recursive queries using so-called "common table expressions" (CTE). Examples\:
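For example (the classic counter CTE; illustrative):

    WITH RECURSIVE T(N) AS (
        SELECT 1
        UNION ALL
        SELECT N + 1 FROM T WHERE N < 10
    )
    SELECT * FROM T;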
-advanced_1362_p=\ Limitations\: Recursive queries need to be of the type UNION ALL
, and the recursion needs to be on the second part of the query. No tables or views with the name of the table expression may exist. Different table expression names need to be used when using multiple distinct table expressions within the same transaction and for the same session. All columns of the table expression are of type VARCHAR
, and may need to be cast to the required data type. Views with recursive queries are not supported. Subqueries and INSERT INTO ... FROM
with recursive queries are not supported. Parameters are only supported within the last SELECT
statement (a workaround is to use session variables like @start
within the table expression). The syntax is\:
-advanced_1363_h2=Settings Read from System Properties
-advanced_1364_p=\ Some settings of the database can be set on the command line using -DpropertyName\=value
. It is usually not required to change those settings manually. The settings are case sensitive. Example\:
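For example (h2.baseDir is one such setting):

    java -Dh2.baseDir=/temp org.h2.tools.Server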
-advanced_1365_p=\ The current value of the settings can be read in the table INFORMATION_SCHEMA.SETTINGS
.
-advanced_1366_p=\ For a complete list of settings, see SysProperties.
-advanced_1367_h2=Setting the Server Bind Address
-advanced_1368_p=\ Usually server sockets accept connections on any/all local addresses. This may be a problem on multi-homed hosts. To bind only to one address, use the system property h2.bindAddress
. This setting is used for both regular server sockets and for TLS server sockets. IPv4 and IPv6 address formats are supported.
-advanced_1369_h2=Pluggable File System
-advanced_1370_p=\ This database supports a pluggable file system API. The file system implementation is selected using a file name prefix. Internally, the interfaces are very similar to the Java 7 NIO2 API, but do not (yet) use or require Java 7. The following file systems are included\:
-advanced_1371_code=zip\:
-advanced_1372_li=\ read-only zip-file based file system. Format\: zip\:/zipFileName\!/fileName
.
-advanced_1373_code=split\:
-advanced_1374_li=\ file system that splits files in 1 GB files (stackable with other file systems).
-advanced_1375_code=nio\:
-advanced_1376_li=\ file system that uses FileChannel
instead of RandomAccessFile
(faster in some operating systems).
-advanced_1377_code=nioMapped\:
-advanced_1378_li=\ file system that uses memory mapped files (faster in some operating systems). Please note that there currently is a file size limitation of 2 GB when using this file system with a 32-bit JVM. To work around this limitation, combine it with the split file system\: split\:nioMapped\:test.
-advanced_1379_code=memFS\:
-advanced_1380_li=\ in-memory file system (slower than mem; experimental; mainly used for testing the database engine itself).
-advanced_1381_code=memLZF\:
-advanced_1382_li=\ compressing in-memory file system (slower than memFS but uses less memory; experimental; mainly used for testing the database engine itself).
-advanced_1383_p=\ As an example, to use the nio file system, use the following database URL\: jdbc\:h2\:nio\:~/test.
-advanced_1384_p=\ To register a new file system, extend the classes org.h2.store.fs.FilePath, FileBase
, and call the method FilePath.register
before using it.
-advanced_1385_p=\ For input streams (but not for random access files), URLs may be used in addition to the registered file systems. Example\: jar\:file\:///c\:/temp/example.zip\!/org/example/nested.csv
. To read a stream from the classpath, use the prefix classpath\:
, as in classpath\:/org/h2/samples/newsfeed.sql
.
-advanced_1386_h2=Split File System
-advanced_1387_p=\ The file system prefix split\:
is used to split logical files into multiple physical files, for example so that a database can get larger than the maximum file system size of the operating system. If the logical file is larger than the maximum file size, then the file is split as follows\:
-advanced_1388_code=<fileName>
-advanced_1389_li=\ (first block, is always created)
-advanced_1390_code=<fileName>.1.part
-advanced_1391_li=\ (second block)
-advanced_1392_p=\ More physical files (*.2.part, *.3.part) are automatically created / deleted if needed. The maximum physical file size of a block is 2^30 bytes (1 GiB). However, this can be changed if required, by specifying the block size in the file name. The file name format is\: split\:<x>\:<fileName> where the file size per block is 2^x. For 1 MiB block sizes, use x \= 20 (because 2^20 is 1 MiB). The following file name means the logical file is split into 1 MiB blocks\: split\:20\:test.h2.db. An example database URL for this case is jdbc\:h2\:split\:20\:~/test.
-advanced_1393_h2=Database Upgrade
-advanced_1394_p=\ In version 1.2, H2 introduced a new file store implementation which is incompatible with the one used in versions < 1.2. To automatically convert databases to the new file store, it is necessary to include an additional jar file. The file can be found at http\://h2database.com/h2mig_pagestore_addon.jar . If this file is in the classpath, every connection to an older database will result in a conversion process.
-advanced_1395_p=\ The conversion itself is done internally via 'script to'
and 'runscript from'
. After the conversion process, the files will be renamed from
-advanced_1396_code=dbName.data.db
-advanced_1397_li=\ to dbName.data.db.backup
-advanced_1398_code=dbName.index.db
-advanced_1399_li=\ to dbName.index.db.backup
-advanced_1400_p=\ by default. Also, the temporary script will be written to the database directory instead of a temporary directory. Both defaults can be customized via
-advanced_1401_code=org.h2.upgrade.DbUpgrade.setDeleteOldDb(boolean)
-advanced_1402_code=org.h2.upgrade.DbUpgrade.setScriptInTmpDir(boolean)
-advanced_1403_p=\ prior to opening a database connection.
-advanced_1404_p=\ Since version 1.2.140 it is possible to let the old H2 classes (v 1.2.128) connect to the database. The automatic upgrade .jar file must be present, and the URL must start with jdbc\:h2v1_1\: (the JDBC driver class is org.h2.upgrade.v1_1.Driver). To automatically connect using the old version when a database in the old format exists (without upgrading), and use the new version otherwise, append ;NO_UPGRADE\=TRUE to the database URL. Please note the old driver did not process the system property "h2.baseDir" correctly, so using this setting is not supported when upgrading.
-advanced_1405_h2=Java Objects Serialization
-advanced_1406_p=\ Java objects serialization is enabled by default for columns of type OTHER
, using standard Java serialization/deserialization semantics.
-advanced_1407_p=\ To disable this feature set the system property h2.serializeJavaObject\=false
(default\: true).
-advanced_1408_p=\ Serialization and deserialization of Java objects is customizable both at the system level and at the database level by providing a JavaObjectSerializer implementation\:
-advanced_1409_li=\ At the system level, set the system property h2.javaObjectSerializer to the fully qualified name of the JavaObjectSerializer interface implementation. It will be used over the entire JVM session to (de)serialize Java objects stored in columns of type OTHER. Example\: h2.javaObjectSerializer\=com.acme.SerializerClassName.
-advanced_1410_li=\ At database level execute the SQL statement SET JAVA_OBJECT_SERIALIZER 'com.acme.SerializerClassName'
or append ;JAVA_OBJECT_SERIALIZER\='com.acme.SerializerClassName'
to the database URL\: jdbc\:h2\:~/test;JAVA_OBJECT_SERIALIZER\='com.acme.SerializerClassName'
.
-advanced_1411_p=\ Please note that this SQL statement can only be executed before any tables are defined.
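A sketch of such an implementation (assuming the org.h2.api.JavaObjectSerializer interface with serialize/deserialize methods; this example simply delegates to standard Java serialization):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.h2.api.JavaObjectSerializer;

    public class SerializerClassName implements JavaObjectSerializer {
        @Override
        public byte[] serialize(Object obj) throws Exception {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bytes);
            out.writeObject(obj);
            out.flush();
            return bytes.toByteArray();
        }

        @Override
        public Object deserialize(byte[] bytes) throws Exception {
            return new ObjectInputStream(new ByteArrayInputStream(bytes)).readObject();
        }
    }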
-advanced_1412_h2=Limits and Limitations
-advanced_1413_p=\ This database has the following known limitations\:
-advanced_1414_li=Database file size limit\: 4 TB (using the default page size of 2 KB) or higher (when using a larger page size). This limit includes CLOB and BLOB data.
-advanced_1415_li=The maximum file size for FAT or FAT32 file systems is 4 GB. That means when using FAT or FAT32, the limit is 4 GB for the data. This is a limitation of the file system. The database provides a workaround for this problem\: use the file name prefix split\:
. In that case files are split into files of 1 GB by default. An example database URL is\: jdbc\:h2\:split\:~/test
.
-advanced_1416_li=The maximum number of rows per table is 2^64.
-advanced_1417_li=The maximum number of open transactions is 65535.
-advanced_1418_li=Main memory requirements\: The larger the database, the more main memory is required. With the current storage mechanism (the page store), the minimum main memory required is around 1 MB for each 8 GB database file size.
-advanced_1419_li=Limit on the complexity of SQL statements. Statements of the following form will result in a stack overflow exception\:
-advanced_1420_li=There is no limit for the following entities, except the memory and storage capacity\: maximum identifier length (table name, column name, and so on); maximum number of tables, columns, indexes, triggers, and other database objects; maximum statement length, number of parameters per statement, tables per statement, expressions in order by, group by, having, and so on; maximum rows per query; maximum columns per table, columns per index, indexes per table, lob columns per table, and so on; maximum row length, index row length, select row length; maximum length of a varchar column, decimal column, literal in a statement.
-advanced_1421_li=Querying from the metadata tables is slow if there are many tables (thousands).
-advanced_1422_li=For limitations on data types, see the documentation of the respective Java data type or the data type documentation of this database.
-advanced_1423_h2=Glossary and Links
-advanced_1424_th=Term
-advanced_1425_th=Description
-advanced_1426_td=AES-128
-advanced_1427_td=A block encryption algorithm. See also\: Wikipedia\: AES
-advanced_1428_td=Birthday Paradox
-advanced_1429_td=Describes the higher than expected probability that two persons in a room have the same birthday. Also valid for randomly generated UUIDs. See also\: Wikipedia\: Birthday Paradox
-advanced_1430_td=Digest
-advanced_1431_td=Protocol to protect a password (but not to protect data). See also\: RFC 2617\: HTTP Digest Access Authentication
-advanced_1432_td=GCJ
-advanced_1433_td=Compiler for Java. Examples\: the GNU Compiler for Java (GCJ) and NativeJ (commercial)
-advanced_1434_td=HTTPS
-advanced_1435_td=A protocol to provide security to HTTP connections. See also\: RFC 2818\: HTTP Over TLS
-advanced_1436_td=Modes of Operation
-advanced_1437_a=Wikipedia\: Block cipher modes of operation
-advanced_1438_td=Salt
-advanced_1439_td=Random number to increase the security of passwords. See also\: Wikipedia\: Key derivation function
-advanced_1440_td=SHA-256
-advanced_1441_td=A cryptographic one-way hash function. See also\: Wikipedia\: SHA hash functions
-advanced_1442_td=SQL Injection
-advanced_1443_td=A security vulnerability where an application embeds user input in SQL statements or expressions without proper escaping. See also\: Wikipedia\: SQL Injection
-advanced_1444_td=Watermark Attack
-advanced_1445_td=Security problem of certain encryption programs where the existence of certain data can be proven without decrypting. For more information, search the internet for 'watermark attack cryptoloop'
-advanced_1446_td=SSL/TLS
-advanced_1447_td=Secure Sockets Layer / Transport Layer Security. See also\: Java Secure Socket Extension (JSSE)
-architecture_1000_h1=Architecture
-architecture_1001_a=\ Introduction
-architecture_1002_a=\ Top-down overview
-architecture_1003_a=\ JDBC driver
-architecture_1004_a=\ Connection/session management
-architecture_1005_a=\ Command execution and planning
-architecture_1006_a=\ Table/index/constraints
-architecture_1007_a=\ Undo log, redo log, and transactions layer
-architecture_1008_a=\ B-tree engine and page-based storage allocation
-architecture_1009_a=\ Filesystem abstraction
-architecture_1010_h2=Introduction
-architecture_1011_p=\ H2 implements an embedded and standalone ANSI-SQL89 compliant SQL engine on top of a B-tree based disk store.
-architecture_1012_p=\ As of October 2013, Thomas is still working on our next-generation storage engine called MVStore. This will in time replace the B-tree based storage engine.
-architecture_1013_h2=Top-down Overview
-architecture_1014_p=\ Working from the top down, the layers look like this\:
-architecture_1015_li=JDBC driver.
-architecture_1016_li=Connection/session management.
-architecture_1017_li=SQL Parser.
-architecture_1018_li=Command execution and planning.
-architecture_1019_li=Table/Index/Constraints.
-architecture_1020_li=Undo log, redo log, and transactions layer.
-architecture_1021_li=B-tree engine and page-based storage allocation.
-architecture_1022_li=Filesystem abstraction.
-architecture_1023_h2=JDBC Driver
-architecture_1024_p=\ The JDBC driver implementation lives in org.h2.jdbc, org.h2.jdbcx
-architecture_1025_h2=Connection/session management
-architecture_1026_p=\ The primary classes of interest are\:
-architecture_1027_th=Package
-architecture_1028_th=Description
-architecture_1029_td=org.h2.engine.Database
-architecture_1030_td=the root/global class
-architecture_1031_td=org.h2.engine.SessionInterface
-architecture_1032_td=abstracts over the differences between embedded and remote sessions
-architecture_1033_td=org.h2.engine.Session
-architecture_1034_td=local/embedded session
-architecture_1035_td=org.h2.engine.SessionRemote
-architecture_1036_td=remote session
-architecture_1037_h2=Parser
-architecture_1038_p=\ The parser lives in org.h2.command.Parser
. It uses a straightforward recursive-descent design.
-architecture_1039_p=\ See the Wikipedia article on recursive-descent parsers.
-architecture_1040_h2=Command execution and planning
-architecture_1041_p=\ Unlike other databases, we do not have an intermediate step where we generate some kind of IR (intermediate representation) of the query. The parser class directly generates a command execution object. Then we run some optimisation steps over the command to possibly generate a more efficient command. The primary packages of interest are\:
-architecture_1042_th=Package
-architecture_1043_th=Description
-architecture_1044_td=org.h2.command.ddl
-architecture_1045_td=Commands that modify schema data structures
-architecture_1046_td=org.h2.command.dml
-architecture_1047_td=Commands that modify data
-architecture_1048_h2=Table/Index/Constraints
-architecture_1049_p=\ One thing to note here is that indexes are simply stored as special kinds of tables.
-architecture_1050_p=\ The primary packages of interest are\:
-architecture_1051_th=Package
-architecture_1052_th=Description
-architecture_1053_td=org.h2.table
-architecture_1054_td=Implementations of different kinds of tables
-architecture_1055_td=org.h2.index
-architecture_1056_td=Implementations of different kinds of indices
-architecture_1057_h2=Undo log, redo log, and transactions layer
-architecture_1058_p=\ We have a transaction log, which is shared among all sessions. See also http\://en.wikipedia.org/wiki/Transaction_log and http\://h2database.com/html/grammar.html\#set_log
-architecture_1059_p=\ We also have an undo log, which is per session, to undo an operation (an update that fails, for example) and to roll back a transaction. Theoretically, the transaction log could be used, but for simplicity, H2 currently uses its own "list of operations" (usually in-memory).
-architecture_1060_p=\ With the MVStore, this is no longer needed (just the transaction log).
-architecture_1061_h2=B-tree engine and page-based storage allocation.
-architecture_1062_p=\ The primary package of interest is org.h2.store
.
-architecture_1063_p=\ This implements a storage mechanism which allocates pages of storage (typically 2 KB in size) and also implements a B-tree over those pages to allow fast retrieval and update.
-architecture_1064_h2=Filesystem abstraction.
-architecture_1065_p=\ The primary class of interest is org.h2.store.FileStore
.
-architecture_1066_p=\ This implements an abstraction of a random-access file. This allows the higher layers to treat in-memory vs. on-disk vs. zip-file databases the same.
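From the application side, this abstraction means the same JDBC code runs unchanged against in-memory, on-disk, and zip-file databases; only the URL prefix differs. A sketch (the paths are examples, and the zip URL assumes an existing read-only database inside the zip):

    String[] urls = {
        "jdbc:h2:mem:demo",             // in-memory
        "jdbc:h2:~/demo",               // on-disk
        "jdbc:h2:zip:~/demo.zip!/demo"  // read-only, inside a zip file
    };
    for (String url : urls) {
        try (java.sql.Connection conn =
                java.sql.DriverManager.getConnection(url, "sa", "")) {
            // the layers above the filesystem abstraction see no difference
        }
    }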
-build_1000_h1=Build
-build_1001_a=\ Portability
-build_1002_a=\ Environment
-build_1003_a=\ Building the Software
-build_1004_a=\ Build Targets
-build_1005_a=\ Using Maven 2
-build_1006_a=\ Using Eclipse
-build_1007_a=\ Translating
-build_1008_a=\ Providing Patches
-build_1009_a=\ Reporting Problems or Requests
-build_1010_a=\ Automated Build
-build_1011_a=\ Generating Railroad Diagrams
-build_1012_h2=Portability
-build_1013_p=\ This database is written in Java and therefore works on many platforms. It can also be compiled to a native executable using GCJ.
-build_1014_h2=Environment
-build_1015_p=\ To run this database, a Java Runtime Environment (JRE) version 1.6 or higher is required.
-build_1016_p=\ To create the database executables, the following software stack was used. However, it is not required to install this software to use the database.
-build_1017_li=Mac OS X and Windows
-build_1018_a=Sun JDK Version 1.6 and 1.7
-build_1019_a=Eclipse
-build_1020_li=Eclipse Plugins\: Subclipse, Eclipse Checkstyle Plug-in, EclEmma Java Code Coverage
-build_1021_a=Emma Java Code Coverage
-build_1022_a=Mozilla Firefox
-build_1023_a=OpenOffice
-build_1024_a=NSIS
-build_1025_li=\ (Nullsoft Scriptable Install System)
-build_1026_a=Maven
-build_1027_h2=Building the Software
-build_1028_p=\ You need to install a JDK, for example the Sun JDK version 1.6 or 1.7. Ensure that the Java binary directory is included in the PATH
environment variable, and that the environment variable JAVA_HOME
points to your Java installation. On the command line, go to the directory h2
and execute the following command\:
-build_1029_p=\ For Linux and OS X, use ./build.sh
instead of build
.
-build_1030_p=\ You will get a list of targets. If you want to build the jar
file, execute (Windows)\:
-build_1031_p=\ To run the build tool in shell mode, use the command line option -
as in ./build.sh -
.
-build_1032_h3=Switching the Source Code
-build_1033_p=\ The source code uses Java 1.6 features. To switch the source code to the installed version of Java, run\:
-build_1034_h2=Build Targets
-build_1035_p=\ The build system can generate smaller jar files as well. The following targets are currently supported\:
-build_1036_code=jarClient
-build_1037_li=\ creates the file h2client.jar
. This only contains the JDBC client.
-build_1038_code=jarSmall
-build_1039_li=\ creates the file h2small.jar
. This only contains the embedded database. Debug information is disabled.
-build_1040_code=jarJaqu
-build_1041_li=\ creates the file h2jaqu.jar
. This only contains the JaQu (Java Query) implementation. All other jar files do not include JaQu.
-build_1042_code=javadocImpl
-build_1043_li=\ creates the Javadocs of the implementation.
-build_1044_p=\ To create the file h2client.jar
, go to the directory h2
and execute the following command\:
-build_1045_h3=Using Lucene 2 / 3
-build_1046_p=\ Both Apache Lucene 2 and Lucene 3 are supported. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. To use a different version of Lucene when compiling, it needs to be specified as follows\:
-build_1047_h2=Using Maven 2
-build_1048_h3=Using a Central Repository
-build_1049_p=\ You can include the database in your Maven 2 project as a dependency. Example\:
-build_1050_p=\ New versions of this database are first uploaded to http\://hsql.sourceforge.net/m2-repo/ and then automatically synchronized with the main Maven repository; however, after a new release it may take a few hours before they are available there.
-build_1051_h3=Maven Plugin to Start and Stop the TCP Server
-build_1052_p=\ A Maven plugin to start and stop the H2 TCP server is available from Laird Nelson at GitHub. To start the H2 server, use\:
-build_1053_p=\ To stop the H2 server, use\:
-build_1054_h3=Using Snapshot Version
-build_1055_p=\ To build a h2-*-SNAPSHOT.jar
file and upload it to the local Maven 2 repository, execute the following command\:
-build_1056_p=\ Afterwards, you can include the database in your Maven 2 project as a dependency\:
-build_1057_h2=Using Eclipse
-build_1058_p=\ To create an Eclipse project for H2, use the following steps\:
-build_1059_li=Install Subversion and Eclipse.
-build_1060_li=Get the H2 source code from the Subversion repository\:
-build_1061_code=svn checkout http\://h2database.googlecode.com/svn/trunk h2database-read-only
-build_1062_li=Download all dependencies (Windows)\:
-build_1063_code=build.bat download
-build_1064_li=In Eclipse, create a new Java project from existing source code\: File, New, Project, Java Project, Create project from existing source
.
-build_1065_li=Select the h2
folder, click Next
and Finish
.
-build_1066_li=To resolve com.sun.javadoc
import statements, you may need to manually add the file <java.home>/../lib/tools.jar
to the build path.
-build_1067_h2=Translating
-build_1068_p=\ The translation of this software is split into the following parts\:
-build_1069_li=H2 Console\: src/main/org/h2/server/web/res/_text_*.prop
-build_1070_li=Error messages\: src/main/org/h2/res/_messages_*.prop
-build_1071_p=\ To translate the H2 Console, start it and select Preferences / Translate. After you are done, send the translated *.prop
file to the Google Group. The web site is currently translated using Google.
-build_1072_h2=Providing Patches
-build_1073_p=\ If you would like to provide patches, please consider the following guidelines to simplify merging them\:
-build_1074_li=Only use Java 6 features (do not use Java 7) (see Environment).
-build_1075_li=Follow the coding style used in the project, and use Checkstyle (see above) to verify. For example, do not use tabs (use spaces instead). The checkstyle configuration is in src/installer/checkstyle.xml
.
-build_1076_li=\ A template of the Eclipse settings is in src/installer/eclipse.settings/*
. If you want to use them, you need to copy them to the .settings
directory. The formatting options (eclipseCodeStyle
) are also included.
-build_1077_li=Please provide test cases and integrate them into the test suite. For Java level tests, see src/test/org/h2/test/TestAll.java
. For SQL level tests, see src/test/org/h2/test/test.in.txt
or testSimple.in.txt
.
-build_1078_li=\ The test cases should cover at least 90% of the changed and new code; use a code coverage tool to verify that (see above), or use the build target coverage
.
-build_1079_li=Verify that you did not break other features\: run the test cases by executing build test
.
-build_1080_li=Provide end user documentation if required (src/docsrc/html/*
).
-build_1081_li=Document grammar changes in src/docsrc/help/help.csv
-build_1082_li=Provide a change log entry (src/docsrc/html/changelog.html
).
-build_1083_li=Verify the spelling using build spellcheck
. If required add the new words to src/tools/org/h2/build/doc/dictionary.txt
.
-build_1084_li=Run src/installer/buildRelease
to find and fix formatting errors.
-build_1085_li=Verify the formatting using build docs
and build javadoc
.
-build_1086_li=Submit patches as .patch
files (compressed if big). To create a patch using Eclipse, use Team / Create Patch.
-build_1087_p=\ For legal reasons, patches need to be public in the form of an email to the group, or in the form of an issue report or attachment. Significant contributions need to include the following statement\:
-build_1088_p=\ "I wrote the code, it's mine, and I'm contributing it to H2 for distribution multiple-licensed under the MPL 2.0, and the EPL 1.0 (http\://h2database.com/html/license.html)."
-build_1089_h2=Reporting Problems or Requests
-build_1090_p=\ Please consider the following checklist if you have a question, want to report a problem, or if you have a feature request\:
-build_1091_li=For bug reports, please provide a short, self-contained, correct (compilable) example of the problem.
-build_1092_li=Feature requests are always welcome, even if the feature is already on the roadmap. Your mail will help prioritize feature requests. If you urgently need a feature, consider providing a patch.
-build_1093_li=Before posting problems, check the FAQ and do a Google search.
-build_1094_li=If you get an unexpected exception, please try the Error Analyzer tool. If this doesn't help, please report the problem, including the complete error message and stack trace, and the root cause stack trace(s).
-build_1095_li=When sending source code, please use a public web clipboard such as Pastebin, Cl1p, or Mystic Paste to avoid formatting problems. Please keep test cases as simple and short as possible, but such that the problem can still be reproduced. As a template, use\: HelloWorld.java. Methods that simply call other methods should be avoided, as well as unnecessary exception handling. Please use the JDBC API and no external tools or libraries. The test should include all required initialization code, and should be started with the main method.
-build_1096_li=For large attachments, use a public temporary storage such as Rapidshare.
-build_1097_li=Google Group versus issue tracking\: Use the Google Group for questions or if you are not sure it's a bug. If you are sure it's a bug, you can create an issue, but you don't need to (sending an email to the group is enough). Please note that only a few people monitor the issue tracking system.
-build_1098_li=For out-of-memory problems, please analyze the problem yourself first, for example using the command line option -XX\:+HeapDumpOnOutOfMemoryError
(to create a heap dump file on out of memory) and a memory analysis tool such as the Eclipse Memory Analyzer (MAT).
-build_1099_li=It may take a few days to get an answer. Please do not double post.
-build_1100_h2=Automated Build
-build_1101_p=\ This build process is automated and runs regularly. The build process includes running the tests and code coverage, using the command line ./build.sh clean jar coverage -Dh2.ftpPassword\=... uploadBuild
. The last results are available here\:
-build_1102_a=Test Output
-build_1103_a=Code Coverage Summary
-build_1104_a=Code Coverage Details (download, 1.3 MB)
-build_1105_a=Build Newsfeed
-build_1106_a=Latest Jar File (download, 1 MB)
-build_1107_h2=Generating Railroad Diagrams
-build_1108_p=\ The railroad diagrams of the SQL grammar are HTML, formatted as nested tables. The diagrams are generated as follows\:
-build_1109_li=The BNF parser (org.h2.bnf.Bnf
) reads and parses the BNF from the file help.csv
.
-build_1110_li=The page parser (org.h2.server.web.PageParser
) reads the template HTML file and fills in the diagrams.
-build_1111_li=The rail images (one straight, four junctions, two turns) are generated using a simple Java application.
-build_1112_p=\ To generate railroad diagrams for other grammars, see the package org.h2.jcr
. This package is used to generate the SQL-2 railroad diagrams for the JCR 2.0 specification.
-changelog_1000_h1=Change Log
-changelog_1001_h2=Next Version (unreleased)
-changelog_1002_li=-
-changelog_1003_h2=Version 1.4.187 Beta (2015-04-10)
-changelog_1004_li=MVStore\: concurrent changes to the same row could result in the exception "The transaction log might be corrupt for key ...". This could only be reproduced with 3 or more threads.
-changelog_1005_li=Results with CLOB or BLOB data are no longer reused.
-changelog_1006_li=References to BLOB and CLOB objects now have a timeout. The configuration setting is LOB_TIMEOUT (default 5 minutes). This should avoid growing the database file if there are many queries that return BLOB or CLOB objects, and the database is not closed for a long time.
-changelog_1007_li=MVStore\: when committing a session that removed LOB values, changes were flushed unnecessarily.
-changelog_1008_li=Issue 610\: possible integer overflow in WriteBuffer.grow().
-changelog_1009_li=Issue 609\: the spatial index did not support NULL (ClassCastException).
-changelog_1010_li=MVStore\: in some cases, CLOB/BLOB data blocks were removed incorrectly when opening a database.
-changelog_1011_li=MVStore\: updates that affected many rows were slow in some cases if there was a secondary index.
-changelog_1012_li=Using "runscript" with autocommit disabled could result in a lock timeout on the internal table "SYS".
-changelog_1013_li=Issue 603\: there was a memory leak when using H2 in a web application. Apache Tomcat logged an error message\: "The web application ... created a ThreadLocal with key of type [org.h2.util.DateTimeUtils$1]".
-changelog_1014_li=When using the MVStore, running a SQL script generated by the Recover tool from a PageStore file failed with a strange error message (NullPointerException); now a clear error message is shown.
-changelog_1015_li=Issue 605\: with version 1.4.186, opening a database could result in an endless loop in LobStorageMap.init.
-changelog_1016_li=Queries that use the same table alias multiple times now work. Before, the select expression list was expanded incorrectly. Example\: "select * from a as x, b as x".
-changelog_1017_li=The MySQL compatibility feature "insert ... on duplicate key update" did not work with a non-default schema.
-changelog_1018_li=Issue 599\: the condition "in(x, y)" could not be used in the select list when using "group by".
-changelog_1019_li=The LIRS cache could grow larger than the allocated memory.
-changelog_1020_li=A new file system implementation that re-opens the file if it was closed due to the application calling Thread.interrupt(). File name prefix "retry\:". Please note it is strongly recommended to avoid calling Thread.interrupt; this is a problem for various libraries, including Apache Lucene.
-changelog_1021_li=MVStore\: use RandomAccessFile file system if the file name starts with "file\:".
-changelog_1022_li=Allow DATEADD to take a long value for count when manipulating milliseconds.
-changelog_1023_li=When using MV_STORE\=TRUE and the SET CACHE_SIZE setting, the cache size was incorrectly set, so that it was effectively 1024 times smaller than it should be.
-changelog_1024_li=Concurrent CREATE TABLE... IF NOT EXISTS in the presence of MULTI_THREAD\=TRUE could throw an exception.
-changelog_1025_li=Fix bug in MVStore when creating lots of temporary tables, where we could run out of transaction IDs.
-changelog_1026_li=Add support for PostgreSQL STRING_AGG function. Patch by Fred Aquiles.
-changelog_1027_li=Fix bug in "jdbc\:h2\:nioMemFS" isRoot() function. Also, the page size was increased to 64 KB.
-changelog_1028_h2=Version 1.4.186 Beta (2015-03-02)
-changelog_1029_li=The Servlet API 3.0.1 is now used, instead of 2.4.
-changelog_1030_li=MVStore\: old chunks are no longer removed in append-only mode.
-changelog_1031_li=MVStore\: the cache for page references could grow far too big, resulting in out of memory in some cases.
-changelog_1032_li=MVStore\: orphaned lob objects were not correctly removed in some cases, making the database grow unnecessarily.
-changelog_1033_li=MVStore\: the maximum cache size was artificially limited to 2 GB (due to an integer overflow).
-changelog_1034_li=MVStore / TransactionStore\: concurrent updates could result in a "Too many open transactions" exception.
-changelog_1035_li=StringUtils.toUpperEnglish now has a small cache. This should speed up reading from a ResultSet when using the column name.
-changelog_1036_li=MVStore\: up to 65535 open transactions are now supported. Previously, the limit was at most 65535 transactions between the oldest open and the newest open transaction (which was quite a strange limit).
-changelog_1037_li=The default limit for in-place LOB objects was changed from 128 to 256 bytes. This is because each read creates a reference to a LOB, and maintaining the references is a big overhead. With the higher limit, fewer references are needed.
-changelog_1038_li=Tables without columns didn't work. (The use case for such tables is testing.)
-changelog_1039_li=The LIRS cache now resizes the table automatically in all cases and no longer needs the averageMemory configuration.
-changelog_1040_li=Creating a linked table from an MVStore database to a non-MVStore database created a second (non-MVStore) database file.
-changelog_1041_li=In version 1.4.184, a bug was introduced that broke queries that have both joins and wildcards, for example\: select * from dual join(select x from dual) on 1\=1
-changelog_1042_li=Issue 598\: parser fails on timestamp "24\:00\:00.1234" - prevent the creation of out-of-range time values.
-changelog_1043_li=Allow declaring triggers as source code (like functions). Patch by Sylvain Cuaz.
-changelog_1044_li=Make the planner use indexes for sorting when doing a GROUP BY where all of the GROUP BY columns are not mentioned in the select. Patch by Frederico (zepfred).
-changelog_1045_li=PostgreSQL compatibility\: generate_series (as an alias for system_range). Patch by litailang.
-changelog_1046_li=Fix missing "column" type in right-hand parameter in ConditionIn. Patch by Arnaud Thimel.
-changelog_1047_h2=Version 1.4.185 Beta (2015-01-16)
-changelog_1048_li=In version 1.4.184, "group by" ignored the table name, and could pick a select column by mistake. Example\: select 0 as x from system_range(1, 2) d group by d.x;
-changelog_1049_li=New connection setting "REUSE_SPACE" (default\: true). If disabled, all changes are appended to the database file, and existing content is never overwritten. This allows rolling back to a previous state of the database by truncating the database file.
-changelog_1050_li=Issue 587\: MVStore\: concurrent compaction and store operations could result in an IllegalStateException.
-changelog_1051_li=Issue 594\: Profiler.copyInThread does not work properly.
-changelog_1052_li=Script tool\: Now, SCRIPT ... TO is always used (for higher speed and lower disk space usage).
-changelog_1053_li=Script tool\: Fix parsing of BLOCKSIZE parameter, original patch by Ken Jorissen.
-changelog_1054_li=Fix bug in PageStore\#commit method - when the ignoreBigLog flag was set, the logic that cleared the flag could never be reached, resulting in performance degradation. Reported by Alexander Nesterov.
-changelog_1055_li=Issue 552\: Implement BIT_AND and BIT_OR aggregate functions.
-changelog_1056_h2=Version 1.4.184 Beta (2014-12-19)
-changelog_1057_li=In version 1.4.183, indexes were not used if the table contains columns with a default value generated by a sequence. This includes tables with identity and auto-increment columns. This bug was introduced by supporting "rownum" in views and derived tables.
-changelog_1058_li=MVStore\: imported BLOB and CLOB data sometimes disappeared. This was caused by a bug in the ObjectDataType comparison.
-changelog_1059_li=Reading from a StreamStore now throws an IOException if the underlying data doesn't exist.
-changelog_1060_li=MVStore\: if there is an exception while saving, the store is now in all cases immediately closed.
-changelog_1061_li=MVStore\: the dump tool could go into an endless loop for some files.
-changelog_1062_li=MVStore\: recovery for a database with many CLOB or BLOB entries is now much faster.
-changelog_1063_li=Group by with a quoted select column name alias didn't work. Example\: select 1 "a" from dual group by "a"
-changelog_1064_li=Auto-server mode\: the host name is now stored in the .lock.db file.
-changelog_1065_h2=Version 1.4.183 Beta (2014-12-13)
-changelog_1066_li=MVStore\: the default auto-commit buffer size is now about twice as big. This should reduce the database file size after inserting a lot of data.
-changelog_1067_li=The built-in functions "power" and "radians" now always return a double.
-changelog_1068_li=Using "row_number" or "rownum" in views or derived tables had unexpected results if the outer query contained constraints for the given view. Example\: select b.nr, b.id from (select row_number() over() as nr, a.id as id from (select id from test order by name) as a) as b where b.id \= 1
-changelog_1069_li=MVStore\: the Recover tool can now deal with more types of corruption in the file.
-changelog_1070_li=MVStore\: the TransactionStore now first needs to be initialized before it can be used.
-changelog_1071_li=Views and derived tables with equality and range conditions on the same columns did not work properly. Example\: select x from (select x from (select 1 as x) where x > 0 and x < 2) where x \= 1
-changelog_1072_li=The database URL setting PAGE_SIZE is now also used for the MVStore.
-changelog_1073_li=MVStore\: the default page split size for persistent stores is now 4096 (previously 16 KB). This should reduce the database file size for most situations (in some cases, less than half the size of the previous version).
-changelog_1074_li=With query literals disabled, auto-analyze of a table with CLOB or BLOB did not work.
-changelog_1075_li=MVStore\: use a mark and sweep GC algorithm instead of reference counting, to ensure used chunks are never overwritten, even if the reference counting algorithm does not work properly.
-changelog_1076_li=In the multi-threaded mode, updating the column selectivity ("analyze") in the background sometimes did not work.
-changelog_1077_li=In the multi-threaded mode, database metadata operations sometimes did not work if the schema was changed at the same time (for example, if tables were dropped).
-changelog_1078_li=Some CLOB and BLOB values could no longer be read when the original row was removed (even when using the MVCC mode).
-changelog_1079_li=The MVStoreTool could throw an IllegalArgumentException.
-changelog_1080_li=Improved performance for some date / time / timestamp conversion operations. Thanks to Sergey Evdokimov for reporting the problem.
-changelog_1081_li=H2 Console\: the built-in web server did not work properly if an unknown file was requested.
-changelog_1082_li=MVStore\: the jar file is renamed to "h2-mvstore-*.jar" and is deployed to Maven separately.
-changelog_1083_li=MVStore\: support for concurrent reads and writes is now enabled by default.
-changelog_1084_li=Server mode\: the transfer buffer size has been changed from 16 KB to 64 KB, after it was found that this improves performance on Linux quite a lot.
-changelog_1085_li=H2 Console and server mode\: SSL is now disabled and TLS is used to protect against the POODLE SSLv3 vulnerability. The system property to disable secure anonymous connections is now "h2.enableAnonymousTLS". The default certificate is still self-signed, so you need to manually install another one if you want to avoid man-in-the-middle attacks.
-changelog_1086_li=MVStore\: the R-tree did not correctly measure the memory usage.
-changelog_1087_li=MVStore\: compacting a store with an R-tree did not always work.
-changelog_1088_li=Issue 581\: When running in LOCK_MODE\=0, JdbcDatabaseMetaData\#supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED) should return false
-changelog_1089_li=Fix bug which could generate deadlocks when multiple connections accessed the same table.
-changelog_1090_li=Some places in the code were not respecting the value set in the "SET MAX_MEMORY_ROWS x" command
-changelog_1091_li=Fix bug which could generate a NegativeArraySizeException when performing large (>40M) row union operations
-changelog_1092_li=Fix "USE schema" command for MySQL compatibility, patch by mfulton
-changelog_1093_li=Parse and ignore the ROW_FORMAT\=DYNAMIC MySQL syntax, patch by mfulton
-changelog_1094_h2=Version 1.4.182 Beta (2014-10-17)
-changelog_1095_li=MVStore\: improved error messages and logging; improved behavior if there is an error when serializing objects.
-changelog_1096_li=OSGi\: the MVStore packages are now exported.
-changelog_1097_li=With the MVStore option, when using multiple threads that concurrently create indexes or tables, it was relatively easy to get a lock timeout on the "SYS" table.
-changelog_1098_li=When using the multi-threaded option, the exception "Unexpected code path" could be thrown, especially if the option "analyze_auto" was set to a low value.
-changelog_1099_li=In the server mode, when reading from a CLOB or BLOB, if the connection was closed, a NullPointerException could be thrown instead of an exception saying the connection is closed.
-changelog_1100_li=DatabaseMetaData.getProcedures and getProcedureColumns could throw an exception if a user defined class is not available.
-changelog_1101_li=Issue 584\: the error message for a wrong sequence definition was wrong.
-changelog_1102_li=CSV tool\: the rowSeparator option is no longer supported, as the same can be achieved with the lineSeparator.
-changelog_1103_li=Descending indexes on MVStore tables did not work properly.
-changelog_1104_li=Issue 579\: Conditions on the "_rowid_" pseudo-column didn't use an index when using the MVStore.
-changelog_1105_li=Fixed documentation that "offset" and "fetch" are also keywords since version 1.4.x.
-changelog_1106_li=The Long.MIN_VALUE could not be parsed for auto-increment (identity) columns.
-changelog_1107_li=Issue 573\: Add implementation for Methods "isWrapperFor()" and "unwrap()" in other JDBC classes.
-changelog_1108_li=Issue 572\: MySQL compatibility for "order by" in update statements.
-changelog_1109_li=The change in JDBC escape processing in version 1.4.181 affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax "{t 'time'}", or "{ts 'timestamp'}", or "{d 'date'}", then both the client and the server need to be upgraded to version 1.4.181 or later.
-changelog_1110_h2=Version 1.4.181 Beta (2014-08-06)
-changelog_1111_li=Improved MySQL compatibility by supporting "use schema". Thanks a lot to Karl Pietrzak for the patch\!
-changelog_1112_li=Writing to the trace file is now faster, especially with the debug level.
-changelog_1113_li=The database option "defrag_always\=true" did not work with the MVStore.
-changelog_1114_li=The JDBC escape syntax {ts 'value'} did not interpret the value as a timestamp. The same for {d 'value'} (for date) and {t 'value'} (for time). Thanks to Lukas Eder for reporting the issue. The following problem was detected after version 1.4.181 was released\: The change in JDBC escape processing affects both the parser (which is running on the server) and the JDBC API (which is running on the client). If you (or a tool you use) use the syntax {t 'time'}, or {ts 'timestamp'}, or {d 'date'}, then both the client and the server need to be upgraded to version 1.4.181 or later.
-changelog_1115_li=File system abstraction\: support replacing existing files using move (currently not for Windows).
-changelog_1116_li=The statement "shutdown defrag" now compresses the database (with the MVStore). This command can greatly reduce the file size, and is relatively fast, but is not incremental.
-changelog_1117_li=The MVStore now automatically compacts the store in the background if there is no read or write activity, which should (after some time; sometimes about one minute) reduce the file size. This is still a work in progress; feedback is welcome\!
-changelog_1118_li=Change default value of PAGE_SIZE from 2048 to 4096 to more closely match most file systems block size (PageStore only; the MVStore already used 4096).
-changelog_1119_li=Auto-scale MAX_MEMORY_ROWS and CACHE_SIZE settings by the amount of available RAM. Gives a better out-of-the-box experience for people with more powerful machines.
-changelog_1120_li=Handle tabs like 4 spaces in web console, patch by Martin Grajcar.
-changelog_1121_li=Issue 573\: Add implementation for Methods "isWrapperFor()" and "unwrap()" in JdbcConnection.java, patch by BigMichi1.
-changelog_1122_h2=Version 1.4.180 Beta (2014-07-13)
-changelog_1123_li=MVStore\: the store is now compacted automatically up to some point, to avoid very large file sizes. This area is still work in progress.
-changelog_1124_li=Sequences of temporary tables (auto-increment or identity columns) were persisted unnecessarily in the database file, and were not removed when re-opening the database.
-changelog_1125_li=MVStore\: an IndexOutOfBoundsException could sometimes occur in MVMap.openVersion when concurrently accessing the store.
-changelog_1126_li=The LIRS cache now re-sizes the internal hash map if needed.
-changelog_1127_li=Optionally persist session history in the H2 console. (patch from Martin Grajcar)
-changelog_1128_li=Add client-info property to get the number of servers currently in the cluster and which servers are available. (patch from Nikolaj Fogh)
-changelog_1129_li=Fix bug in changing encrypted DB password that kept the file handle open when the wrong password was supplied. (test case from Jens Hohmuth).
-changelog_1130_li=Issue 567\: H2 hangs for a long time then (sometimes) recovers. Introduce a queue when doing table locking to prevent session starvation.
-changelog_1131_h2=Version 1.4.179 Beta (2014-06-23)
-changelog_1132_li=The license was changed to MPL 2.0 (from 1.0) and EPL 1.0.
-changelog_1133_li=Issue 565\: MVStore\: concurrently adding LOB objects (with MULTI_THREADED option) resulted in a NullPointerException.
-changelog_1134_li=MVStore\: reduced dependencies to other H2 classes.
-changelog_1135_li=There was a way to prevent a database from being re-opened, by creating a column constraint that references a table with a higher id, for example with "check" constraints that contain queries. This is now detected, and creating the table is prohibited. In future versions of H2, most likely creating references to other tables will no longer be supported because of such problems.
-changelog_1136_li=MVStore\: descending indexes with "nulls first" did not work as expected (null was ordered last).
-changelog_1137_li=Large result sets now always create temporary tables instead of temporary files.
-changelog_1138_li=When using the PageStore, opening a database failed in some cases with a NullPointerException if temporary tables were used (explicitly, or implicitly when using large result sets).
-changelog_1139_li=If a database file in the PageStore file format exists, this file and this mode are now used, even if the database URL does not contain "MV_STORE\=FALSE". If an MVStore file exists, it is used.
-changelog_1140_li=Databases created with version 1.3.175 and earlier that contained foreign keys in combination with multi-column indexes could not be opened in some cases. This was due to a bugfix in version 1.3.176\: Referential integrity constraints sometimes used the wrong index.
-changelog_1141_li=MVStore\: the ObjectDataType comparison method was incorrect if one key was Serializable and the other was of a common class.
-changelog_1142_li=Recursive queries with many result rows (more than the setting "max_memory_rows") did not work correctly.
-changelog_1143_li=The license has changed to MPL 2.0 + EPL 1.0.
-changelog_1144_li=MVStore\: temporary tables from result sets could survive re-opening a database, which could result in a ClassCastException.
-changelog_1145_li=Issue 566\: MVStore\: unique indexes that were created later on did not work correctly if there were over 5000 rows in the table. Existing databases need to be re-created (at least the broken indexes need to be re-built).
-changelog_1146_li=MVStore\: creating secondary indexes on large tables resulted in missing rows in the index.
-changelog_1147_li=Metadata\: the password of linked tables is now only visible for admin users.
-changelog_1148_li=For Windows, database URLs of the form "jdbc\:h2\:/test" were considered relative and did not work unless the system property "h2.implicitRelativePath" was used.
-changelog_1149_li=Windows\: using a base directory of "C\:/" and similar did not work as expected.
-changelog_1150_li=Follow JDBC specification on Procedures MetaData, use P0 as return type of procedure.
-changelog_1151_li=Issue 531\: IDENTITY ignored for added column.
-changelog_1152_li=FileSystem\: improve exception throwing compatibility with JDK
-changelog_1153_li=Spatial Index\: adjust costs so we do not use the spatial index if the query does not contain an intersects operator.
-changelog_1154_li=Fix multi-threaded deadlock when using a View that includes a TableFunction.
-changelog_1155_li=Fix bug in dividing very-small BigDecimal numbers.
-changelog_1156_h2=Version 1.4.178 Beta (2014-05-02)
-changelog_1157_li=Issue 559\: Make dependency on org.osgi.service.jdbc optional.
-changelog_1158_li=Improve error message when the user specifies an unsupported combination of database settings.
-changelog_1159_li=MVStore\: in the multi-threaded mode, NullPointerException and other exceptions could occur.
-changelog_1160_li=MVStore\: some database files could not be compacted due to a bug in the bookkeeping of the fill rate. Also, database files were compacted quite slowly. This has been improved, but more changes in this area are expected.
-changelog_1161_li=MVStore\: support for volatile maps (that don't store changes).
-changelog_1162_li=MVStore mode\: in-memory databases now also use the MVStore.
-changelog_1163_li=In server mode, appending ";autocommit\=false" to the database URL was working, but the return value of Connection.getAutoCommit() was wrong.
-changelog_1164_li=Issue 561\: OSGi\: the import package declaration of org.h2 excluded version 1.4.
-changelog_1165_li=Issue 558\: with the MVStore, a NullPointerException could occur when using LOBs at session commit (LobStorageMap.removeLob).
-changelog_1166_li=Remove the "h2.MAX_MEMORY_ROWS_DISTINCT" system property to reduce confusion. We already have the MAX_MEMORY_ROWS setting which does a very similar thing, and is better documented.
-changelog_1167_li=Issue 554\: Web Console in an IFrame was not fully supported.
-changelog_1168_h2=Version 1.4.177 Beta (2014-04-12)
-changelog_1169_li=By default, the MV_STORE option is enabled, so it is using the new MVStore storage. The MVCC setting is by default set to the same value as the MV_STORE setting, so it is also enabled by default. For testing, both settings can be disabled by appending ";MV_STORE\=FALSE" and/or ";MVCC\=FALSE" to the database URL.
-changelog_1170_li=The file locking method 'serialized' is no longer supported. This mode might return in a future version, however this is not clear right now. A new implementation and new tests would be needed.
-changelog_1171_li=Enable the new storage format for dates (system property "h2.storeLocalTime"). For the MVStore mode, this is always enabled, but with version 1.4 this is even enabled in the PageStore mode.
-changelog_1172_li=Implicit relative paths are disabled (system property "h2.implicitRelativePath"), so that the database URL jdbc\:h2\:test now needs to be written as jdbc\:h2\:./test.
-changelog_1173_li="select ... fetch first 1 row only" is supported with the regular mode. This was disabled so far because "fetch" and "offset" are now keywords. See also Mode.supportOffsetFetch.
-changelog_1174_li=Byte arrays are now sorted in unsigned mode (x'99' is larger than x'09'). (System property "h2.sortBinaryUnsigned", Mode.binaryUnsigned, setting "binary_collation").
-changelog_1175_li=Csv.getInstance will be removed in future versions of 1.4. Use the public constructor instead.
-changelog_1176_li=Remove support for the limited old-style outer join syntax using "(+)". Use "outer join" instead. System property "h2.oldStyleOuterJoin".
-changelog_1177_li=Support the data type "DATETIME2" as an alias for "DATETIME", for MS SQL Server compatibility.
-changelog_1178_li=Add Oracle-compatible TRANSLATE function, patch by Eric Chatellier.
-changelog_1179_h2=Version 1.3.176 (2014-04-05)
-changelog_1180_li=The file locking method 'serialized' is no longer documented, as it will not be available in version 1.4.
-changelog_1181_li=The static method Csv.getInstance() was removed. Use the public constructor instead.
-changelog_1182_li=The default user name for the Script, RunScript, Shell, and CreateCluster tools is no longer "sa" but an empty string.
-changelog_1183_li=The stack trace of the exception "The object is already closed" is no longer logged by default.
-changelog_1184_li=If a value of a result set was itself a result set, the result could only be read once.
-changelog_1185_li=Column constraints are also visible in views (patch from Nicolas Fortin for H2GIS).
-changelog_1186_li=Granting an additional right to a role that already had a right for that table was not working.
-changelog_1187_li=Spatial index\: a few bugs have been fixed (using spatial constraints in views, transferring geometry objects over TCP/IP, the returned geometry object is copied when needed).
-changelog_1188_li=Issue 551\: the datatype documentation was incorrect (found by Bernd Eckenfels).
-changelog_1189_li=Issue 368\: ON DUPLICATE KEY UPDATE did not work for multi-row inserts. Test case from Angus Macdonald.
-changelog_1190_li=OSGi\: the package javax.tools is now imported (as an optional import).
-changelog_1191_li=H2 Console\: auto-complete is now disabled by default, but there is a hot-key (Ctrl+Space).
-changelog_1192_li=H2 Console\: auto-complete did not work with multi-line statements.
-changelog_1193_li=CLOB and BLOB data was not immediately removed after a rollback.
-changelog_1194_li=There is a new Aggregate API that supports the internal H2 data types (GEOMETRY for example). Thanks a lot to Nicolas Fortin for the patch\!
-changelog_1195_li=Referential integrity constraints sometimes used the wrong index, such that updating a row in the referenced table incorrectly failed with a constraint violation.
-changelog_1196_li=The Polish translation was completed and corrected by Wojtek Jurczyk. Thanks a lot\!
-changelog_1197_li=Issue 545\: Unnecessary duplicate code was removed.
-changelog_1198_li=The profiler tool can now process files with full thread dumps.
-changelog_1199_li=MVStore\: the file format was changed slightly.
-changelog_1200_li=MVStore mode\: the CLOB and BLOB storage was re-implemented and is now much faster than with the PageStore (which is still the default storage).
-changelog_1201_li=MVStore mode\: creating indexes is now much faster (in many cases faster than with the default PageStore).
-changelog_1202_li=Various bugs in the MVStore storage have been fixed, including a bug in the R-tree implementation. The database could get corrupt if there were transient IO exceptions while storing.
-changelog_1203_li=The method org.h2.expression.Function.getCost could throw a NullPointerException.
-changelog_1204_li=Storing LOBs in separate files (outside of the main database file) is no longer supported for new databases.
-changelog_1205_li=Lucene 2 is no longer supported.
-changelog_1206_li=Fix bug in calculating default MIN and MAX values for SEQUENCE.
-changelog_1207_li=Fix bug in performing IN queries with multiple values when IGNORECASE\=TRUE
-changelog_1208_li=Add entry-point to org.h2.tools.Shell so it can be called from inside an application. patch by Thomas Gillet.
-changelog_1209_li=Fix bug that prevented the PgServer from being stopped and started multiple times.
-changelog_1210_li=Support some more DDL syntax for MySQL, patch from Peter Jentsch.
-changelog_1211_li=Issue 548\: TO_CHAR does not format MM and DD correctly when the month or day of the month is 1 digit, patch from "the.tucc"
-changelog_1212_li=Fix bug in varargs support in ALIASes, patch from Nicolas Fortin
-cheatSheet_1000_h1=H2 Database Engine Cheat Sheet
-cheatSheet_1001_h2=Using H2
-cheatSheet_1002_a=H2
-cheatSheet_1003_li=\ is open source, free to use and distribute.
-cheatSheet_1004_a=Download
-cheatSheet_1005_li=\: jar, installer (Windows), zip.
-cheatSheet_1006_li=To start the H2 Console tool, double click the jar file, or run java -jar h2*.jar
, h2.bat
, or h2.sh
.
-cheatSheet_1007_a=A new database is automatically created
-cheatSheet_1008_a=by default
-cheatSheet_1009_li=.
-cheatSheet_1010_a=Closing the last connection closes the database
-cheatSheet_1011_li=.
-cheatSheet_1012_h2=Documentation
-cheatSheet_1013_p=\ Reference\: SQL grammar, functions, data types, tools, API
-cheatSheet_1014_a=Features
-cheatSheet_1015_p=\: fulltext search, encryption, read-only (zip/jar), CSV, auto-reconnect, triggers, user functions
-cheatSheet_1016_a=Database URLs
-cheatSheet_1017_a=Embedded
-cheatSheet_1018_code=jdbc\:h2\:~/test
-cheatSheet_1019_p=\ 'test' in the user home directory
-cheatSheet_1020_code=jdbc\:h2\:/data/test
-cheatSheet_1021_p=\ 'test' in the directory /data
-cheatSheet_1022_code=jdbc\:h2\:test
-cheatSheet_1023_p=\ in the current(\!) working directory
-cheatSheet_1024_a=In-Memory
-cheatSheet_1025_code=jdbc\:h2\:mem\:test
-cheatSheet_1026_p=\ multiple connections in one process
-cheatSheet_1027_code=jdbc\:h2\:mem\:
-cheatSheet_1028_p=\ unnamed private; one connection
-cheatSheet_1029_a=Server Mode
-cheatSheet_1030_code=jdbc\:h2\:tcp\://localhost/~/test
-cheatSheet_1031_p=\ user home dir
-cheatSheet_1032_code=jdbc\:h2\:tcp\://localhost//data/test
-cheatSheet_1033_p=\ absolute dir
-cheatSheet_1034_a=Server start
-cheatSheet_1035_p=\:java -cp *.jar org.h2.tools.Server
-cheatSheet_1036_a=Settings
-cheatSheet_1037_code=jdbc\:h2\:..;MODE\=MySQL
-cheatSheet_1038_a=compatibility (or HSQLDB,...)
-cheatSheet_1039_code=jdbc\:h2\:..;TRACE_LEVEL_FILE\=3
-cheatSheet_1040_a=log to *.trace.db
-cheatSheet_1041_a=Using the JDBC API
-cheatSheet_1042_a=Connection Pool
-cheatSheet_1043_a=Maven 2
-cheatSheet_1044_a=Hibernate
-cheatSheet_1045_p=\ hibernate.cfg.xml (or use the HSQLDialect)\:
-cheatSheet_1046_a=TopLink and Glassfish
-cheatSheet_1047_p=\ Datasource class\: org.h2.jdbcx.JdbcDataSource
-cheatSheet_1048_code=oracle.toplink.essentials.platform.
-cheatSheet_1049_code=database.H2Platform
-download_1000_h1=Downloads
-download_1001_h3=Version 1.4.187 (2015-04-10), Beta
-download_1002_a=Windows Installer
-download_1003_a=Platform-Independent Zip
-download_1004_h3=Version 1.3.176 (2014-04-05), Last Stable
-download_1005_a=Windows Installer
-download_1006_a=Platform-Independent Zip
-download_1007_h3=Download Mirror and Older Versions
-download_1008_a=Platform-Independent Zip
-download_1009_h3=Jar File
-download_1010_a=Maven.org
-download_1011_a=Sourceforge.net
-download_1012_a=Latest Automated Build (not released)
-download_1013_h3=Maven (Binary, Javadoc, and Source)
-download_1014_a=Binary
-download_1015_a=Javadoc
-download_1016_a=Sources
-download_1017_h3=Database Upgrade Helper File
-download_1018_a=Upgrade database from 1.1 to the current version
-download_1019_h3=Subversion Source Repository
-download_1020_a=Google Code
-download_1021_p=\ For details about changes, see the Change Log.
-download_1022_h3=News and Project Information
-download_1023_a=Atom Feed
-download_1024_a=RSS Feed
-download_1025_a=DOAP File
-download_1026_p=\ (what is this)
-faq_1000_h1=Frequently Asked Questions
-faq_1001_a=\ I Have a Problem or Feature Request
-faq_1002_a=\ Are there Known Bugs? When is the Next Release?
-faq_1003_a=\ Is this Database Engine Open Source?
-faq_1004_a=\ Is Commercial Support Available?
-faq_1005_a=\ How to Create a New Database?
-faq_1006_a=\ How to Connect to a Database?
-faq_1007_a=\ Where are the Database Files Stored?
-faq_1008_a=\ What is the Size Limit (Maximum Size) of a Database?
-faq_1009_a=\ Is it Reliable?
-faq_1010_a=\ Why is Opening my Database Slow?
-faq_1011_a=\ My Query is Slow
-faq_1012_a=\ H2 is Very Slow
-faq_1013_a=\ Column Names are Incorrect?
-faq_1014_a=\ Float is Double?
-faq_1015_a=\ Is the GCJ Version Stable? Faster?
-faq_1016_a=\ How to Translate this Project?
-faq_1017_a=\ How to Contribute to this Project?
-faq_1018_h3=I Have a Problem or Feature Request
-faq_1019_p=\ Please read the support checklist.
-faq_1020_h3=Are there Known Bugs? When is the Next Release?
-faq_1021_p=\ Usually, bugs get fixed as they are found. There is a release every few weeks. Here is the list of known and confirmed issues\:
-faq_1022_li=When opening a database file in a timezone that has different daylight saving rules\: the time part of dates where the daylight saving doesn't match will differ. This is not a problem within regions that use the same rules (such as, within USA, or within Europe), even if the timezone itself is different. As a workaround, export the database to a SQL script using the old timezone, and create a new database in the new timezone. This problem does not occur when using the system property "h2.storeLocalTime" (however such database files are not compatible with older versions of H2).
-faq_1023_li=Apache Harmony\: there seems to be a bug in Harmony that affects H2. See HARMONY-6505.
-faq_1024_li=Tomcat and Glassfish 3 set most static fields (final or non-final) to null
when unloading a web application. This can cause a NullPointerException
in H2 versions 1.1.107 and older, and may still not work in newer versions. Please report it if you run into this issue. In Tomcat >\= 6.0 this behavior can be disabled by setting the system property org.apache.catalina.loader.WebappClassLoader.ENABLE_CLEAR_REFERENCES\=false
, however Tomcat may then run out of memory. A known workaround is to put the h2*.jar
file in a shared lib
directory (common/lib
).
-faq_1025_li=Some problems have been found with right outer join. Internally, it is converted to left outer join, which does not always produce the same results as other databases when used in combination with other joins. This problem is fixed in H2 version 1.3.
-faq_1026_li=When using Install4j before 4.1.4 on Linux and enabling pack200
, the h2*.jar
becomes corrupted by the install process, causing application failure. A workaround is to add an empty file h2*.jar.nopack
next to the h2*.jar
file. This problem is solved in Install4j 4.1.4.
-faq_1027_p=\ For a complete list, see Open Issues.
-faq_1028_h3=Is this Database Engine Open Source?
-faq_1029_p=\ Yes. It is free to use and distribute, and the source code is included. See also under license.
-faq_1030_h3=Is Commercial Support Available?
-faq_1031_p=\ Yes, commercial support is available, see Commercial Support.
-faq_1032_h3=How to Create a New Database?
-faq_1033_p=\ By default, a new database is automatically created if it does not yet exist. See Creating New Databases.
-faq_1034_h3=How to Connect to a Database?
-faq_1035_p=\ The database driver is org.h2.Driver
, and the database URL starts with jdbc\:h2\:
. To connect to a database using JDBC, use the following code\:
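A minimal sketch of that connection code (the URL, user name, and password are examples):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class Test {
        public static void main(String[] args) throws Exception {
            Class.forName("org.h2.Driver");
            Connection conn =
                DriverManager.getConnection("jdbc:h2:~/test", "sa", "");
            // add application code here
            conn.close();
        }
    }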
-faq_1036_h3=Where are the Database Files Stored?
-faq_1037_p=\ When using database URLs like jdbc\:h2\:~/test
, the database is stored in the user directory. For Windows, this is usually C\:\\Documents and Settings\\<userName>
or C\:\\Users\\<userName>
. If the base directory is not set (as in jdbc\:h2\:test
), the database files are stored in the directory where the application is started (the current working directory). When using the H2 Console application from the start menu, this is <Installation Directory>/bin
. The base directory can be set in the database URL. A fixed or relative path can be used. When using the URL jdbc\:h2\:file\:data/sample
, the database is stored in the directory data
(relative to the current working directory). The directory is created automatically if it does not yet exist. It is also possible to use the fully qualified directory name (and for Windows, drive name). Example\: jdbc\:h2\:file\:C\:/data/test
-faq_1038_h3=What is the Size Limit (Maximum Size) of a Database?
-faq_1039_p=\ See Limits and Limitations.
-faq_1040_h3=Is it Reliable?
-faq_1041_p=\ That is not easy to say. It is still quite a new product. A lot of tests have been written, and the code coverage of these tests is higher than 80% for each package. Randomized stress tests are run regularly. But there are probably still bugs that have not yet been found (as with most software). Some features are known to be dangerous; they are only supported for situations where performance is more important than reliability. Those dangerous features are\:
-faq_1042_li=Disabling the transaction log or FileDescriptor.sync() using LOG\=0 or LOG\=1.
-faq_1043_li=Using the transaction isolation level READ_UNCOMMITTED
(LOCK_MODE 0
) while at the same time using multiple connections.
-faq_1044_li=Disabling database file protection using (setting FILE_LOCK
to NO
in the database URL).
-faq_1045_li=Disabling referential integrity using SET REFERENTIAL_INTEGRITY FALSE
.
-faq_1046_p=\ In addition to that, running out of memory should be avoided. In older versions, OutOfMemory errors while using the database could corrupt a database.
-faq_1047_p=\ This database is well tested using automated test cases. The tests run every night and run for more than one hour. But not all areas of this database are equally well tested. When using one of the following features for production, please ensure your use case is well tested (if possible with automated test cases). The areas that are not well tested are\:
-faq_1048_li=Platforms other than Windows XP, Linux, Mac OS X, or JVMs other than Sun 1.6 or 1.7
-faq_1049_li=The features AUTO_SERVER
and AUTO_RECONNECT
.
-faq_1050_li=Cluster mode, 2-phase commit, savepoints.
-faq_1051_li=24/7 operation.
-faq_1052_li=Fulltext search.
-faq_1053_li=Operations on LOBs over 2 GB.
-faq_1054_li=The optimizer may not always select the best plan.
-faq_1055_li=Using the ICU4J collator.
-faq_1056_p=\ Areas considered experimental are\:
-faq_1057_li=The PostgreSQL server
-faq_1058_li=Clustering (there are cases where transaction isolation can be broken due to timing issues, for example one session overtaking another session).
-faq_1059_li=Multi-threading within the engine using SET MULTI_THREADED\=1
.
-faq_1060_li=Compatibility modes for other databases (only some features are implemented).
-faq_1061_li=The soft reference cache (CACHE_TYPE\=SOFT_LRU
). It might not improve performance, and out of memory issues have been reported.
-faq_1062_p=\ Some users have reported that after a power failure, the database cannot be opened sometimes. In this case, use a backup of the database or the Recover tool. Please report such problems. The plan is that the database automatically recovers in all situations.
-faq_1063_h3=Why is Opening my Database Slow?
-faq_1064_p=\ To find out what the problem is, use the H2 Console and click on "Test Connection" instead of "Login". After the "Login Successful" appears, click on it (it's a link). This will list the top stack traces. Then either analyze this yourself, or post those stack traces in the Google Group.
-faq_1065_p=\ Other possible reasons are\: the database is very big (many GB), or contains linked tables that are slow to open.
-faq_1066_h3=My Query is Slow
-faq_1067_p=\ A slow SELECT (or DELETE, UPDATE, MERGE) statement can have multiple causes. Follow this checklist\:
-faq_1068_li=Run ANALYZE
(see documentation for details).
-faq_1069_li=Run the query with EXPLAIN
and check if indexes are used (see documentation for details).
-faq_1070_li=If required, create additional indexes and try again using ANALYZE
and EXPLAIN
.
-faq_1071_li=If it doesn't help please report the problem.
-faq_1072_h3=H2 is Very Slow
-faq_1073_p=\ By default, H2 closes the database when the last connection is closed. If your application closes the only connection after each operation, the database is opened and closed a lot, which is quite slow. There are multiple ways to solve this problem, see Database Performance Tuning.
-faq_1074_h3=Column Names are Incorrect?
-faq_1075_p=\ For the query SELECT ID AS X FROM TEST
the method ResultSetMetaData.getColumnName()
returns ID
, I expect it to return X
. What's wrong?
-faq_1076_p=\ This is not a bug. According to the JDBC specification, the method ResultSetMetaData.getColumnName() should return the name of the column and not the alias name. If you need the alias name, use ResultSetMetaData.getColumnLabel(). Some other databases don't work like this yet (they don't follow the JDBC specification). If you need compatibility with those databases, use the Compatibility Mode, or append ;ALIAS_COLUMN_NAME\=TRUE to the database URL.
-faq_1077_p=\ This also applies to DatabaseMetaData calls that return a result set. The columns in the JDBC API are column labels, not column names.
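A short sketch of the difference (the table TEST and its column ID are example names):

    import java.sql.*;

    public class LabelDemo {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", "");
            Statement stat = conn.createStatement();
            stat.execute("CREATE TABLE TEST(ID INT)");
            ResultSet rs = stat.executeQuery("SELECT ID AS X FROM TEST");
            ResultSetMetaData meta = rs.getMetaData();
            System.out.println(meta.getColumnName(1));  // prints ID (the column name)
            System.out.println(meta.getColumnLabel(1)); // prints X (the alias)
            conn.close();
        }
    }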
-faq_1078_h3=Float is Double?
-faq_1079_p=\ For a table defined as CREATE TABLE TEST(X FLOAT)
the method ResultSet.getObject()
returns a java.lang.Double
, I expect it to return a java.lang.Float
. What's wrong?
-faq_1080_p=\ This is not a bug. According to the JDBC specification, the JDBC data type FLOAT
is equivalent to DOUBLE
, and both are mapped to java.lang.Double
. See also Mapping SQL and Java Types - 8.3.10 FLOAT.
-faq_1081_h3=Is the GCJ Version Stable? Faster?
-faq_1082_p=\ The GCJ version is not as stable as the Java version. When running the regression test with the GCJ version, sometimes the application just stops at what seems to be a random point without error message. Currently, the GCJ version is also slower than when using the Sun VM. However, the startup of the GCJ version is faster than when using a VM.
-faq_1083_h3=How to Translate this Project?
-faq_1084_p=\ For more information, see Build/Translating.
-faq_1085_h3=How to Contribute to this Project?
-faq_1086_p=\ There are various ways to help develop an open source project like H2. The first step could be to translate the error messages and the GUI to your native language. Then, you could provide patches. Please start with small patches. That could be adding a test case to improve the code coverage (the target code coverage for this project is 90%, higher is better). You will have to develop, build and run the tests. Once you are familiar with the code, you could implement missing features from the feature request list. I suggest starting with very small features that are easy to implement. Keep in mind to provide test cases as well.
-features_1000_h1=Features
-features_1001_a=\ Feature List
-features_1002_a=\ Comparison to Other Database Engines
-features_1003_a=\ H2 in Use
-features_1004_a=\ Connection Modes
-features_1005_a=\ Database URL Overview
-features_1006_a=\ Connecting to an Embedded (Local) Database
-features_1007_a=\ In-Memory Databases
-features_1008_a=\ Database Files Encryption
-features_1009_a=\ Database File Locking
-features_1010_a=\ Opening a Database Only if it Already Exists
-features_1011_a=\ Closing a Database
-features_1012_a=\ Ignore Unknown Settings
-features_1013_a=\ Changing Other Settings when Opening a Connection
-features_1014_a=\ Custom File Access Mode
-features_1015_a=\ Multiple Connections
-features_1016_a=\ Database File Layout
-features_1017_a=\ Logging and Recovery
-features_1018_a=\ Compatibility
-features_1019_a=\ Auto-Reconnect
-features_1020_a=\ Automatic Mixed Mode
-features_1021_a=\ Page Size
-features_1022_a=\ Using the Trace Options
-features_1023_a=\ Using Other Logging APIs
-features_1024_a=\ Read Only Databases
-features_1025_a=\ Read Only Databases in Zip or Jar File
-features_1026_a=\ Computed Columns / Function Based Index
-features_1027_a=\ Multi-Dimensional Indexes
-features_1028_a=\ User-Defined Functions and Stored Procedures
-features_1029_a=\ Pluggable or User-Defined Tables
-features_1030_a=\ Triggers
-features_1031_a=\ Compacting a Database
-features_1032_a=\ Cache Settings
-features_1033_h2=Feature List
-features_1034_h3=Main Features
-features_1035_li=Very fast database engine
-features_1036_li=Open source
-features_1037_li=Written in Java
-features_1038_li=Supports standard SQL, JDBC API
-features_1039_li=Embedded and Server mode, Clustering support
-features_1040_li=Strong security features
-features_1041_li=The PostgreSQL ODBC driver can be used
-features_1042_li=Multi version concurrency
-features_1043_h3=Additional Features
-features_1044_li=Disk based or in-memory databases and tables, read-only database support, temporary tables
-features_1045_li=Transaction support (read committed), 2-phase-commit
-features_1046_li=Multiple connections, table level locking
-features_1047_li=Cost based optimizer, using a genetic algorithm for complex queries, zero-administration
-features_1048_li=Scrollable and updatable result set support, large result set, external result sorting, functions can return a result set
-features_1049_li=Encrypted database (AES), SHA-256 password encryption, encryption functions, SSL
-features_1050_h3=SQL Support
-features_1051_li=Support for multiple schemas, information schema
-features_1052_li=Referential integrity / foreign key constraints with cascade, check constraints
-features_1053_li=Inner and outer joins, subqueries, read only views and inline views
-features_1054_li=Triggers and Java functions / stored procedures
-features_1055_li=Many built-in functions, including XML and lossless data compression
-features_1056_li=Wide range of data types including large objects (BLOB/CLOB) and arrays
-features_1057_li=Sequence and autoincrement columns, computed columns (can be used for function based indexes)
-features_1058_code=ORDER BY, GROUP BY, HAVING, UNION, LIMIT, TOP
-features_1059_li=Collation support, including support for the ICU4J library
-features_1060_li=Support for users and roles
-features_1061_li=Compatibility modes for IBM DB2, Apache Derby, HSQLDB, MS SQL Server, MySQL, Oracle, and PostgreSQL.
-features_1062_h3=Security Features
-features_1063_li=Includes a solution for the SQL injection problem
-features_1064_li=User password authentication uses SHA-256 and salt
-features_1065_li=For server mode connections, user passwords are never transmitted in plain text over the network (even when using insecure connections; this only applies to the TCP server and not to the H2 Console however; it also doesn't apply if you set the password in the database URL)
-features_1066_li=All database files (including script files that can be used to backup data) can be encrypted using the AES-128 encryption algorithm
-features_1067_li=The remote JDBC driver supports TCP/IP connections over TLS
-features_1068_li=The built-in web server supports connections over TLS
-features_1069_li=Passwords can be sent to the database using char arrays instead of Strings
-features_1070_h3=Other Features and Tools
-features_1071_li=Small footprint (smaller than 1.5 MB), low memory requirements
-features_1072_li=Multiple index types (b-tree, tree, hash)
-features_1073_li=Support for multi-dimensional indexes
-features_1074_li=CSV (comma separated values) file support
-features_1075_li=Support for linked tables, and a built-in virtual 'range' table
-features_1076_li=Supports the EXPLAIN PLAN
statement; sophisticated trace options
-features_1077_li=Database closing can be delayed or disabled to improve the performance
-features_1078_li=Web-based Console application (translated to many languages) with autocomplete
-features_1079_li=The database can generate SQL script files
-features_1080_li=Contains a recovery tool that can dump the contents of the database
-features_1081_li=Support for variables (for example to calculate running totals)
-features_1082_li=Automatic re-compilation of prepared statements
-features_1083_li=Uses a small number of database files
-features_1084_li=Uses a checksum for each record and log entry for data integrity
-features_1085_li=Well tested (high code coverage, randomized stress tests)
-features_1086_h2=Comparison to Other Database Engines
-features_1087_p=\ This comparison is based on H2 1.3, Apache Derby version 10.8, HSQLDB 2.2, MySQL 5.5, PostgreSQL 9.0.
-features_1088_th=Feature
-features_1089_th=H2
-features_1090_th=Derby
-features_1091_th=HSQLDB
-features_1092_th=MySQL
-features_1093_th=PostgreSQL
-features_1094_td=Pure Java
-features_1095_td=Yes
-features_1096_td=Yes
-features_1097_td=Yes
-features_1098_td=No
-features_1099_td=No
-features_1100_td=Embedded Mode (Java)
-features_1101_td=Yes
-features_1102_td=Yes
-features_1103_td=Yes
-features_1104_td=No
-features_1105_td=No
-features_1106_td=In-Memory Mode
-features_1107_td=Yes
-features_1108_td=Yes
-features_1109_td=Yes
-features_1110_td=No
-features_1111_td=No
-features_1112_td=Explain Plan
-features_1113_td=Yes
-features_1114_td=Yes *12
-features_1115_td=Yes
-features_1116_td=Yes
-features_1117_td=Yes
-features_1118_td=Built-in Clustering / Replication
-features_1119_td=Yes
-features_1120_td=Yes
-features_1121_td=No
-features_1122_td=Yes
-features_1123_td=Yes
-features_1124_td=Encrypted Database
-features_1125_td=Yes
-features_1126_td=Yes *10
-features_1127_td=Yes *10
-features_1128_td=No
-features_1129_td=No
-features_1130_td=Linked Tables
-features_1131_td=Yes
-features_1132_td=No
-features_1133_td=Partially *1
-features_1134_td=Partially *2
-features_1135_td=No
-features_1136_td=ODBC Driver
-features_1137_td=Yes
-features_1138_td=No
-features_1139_td=No
-features_1140_td=Yes
-features_1141_td=Yes
-features_1142_td=Fulltext Search
-features_1143_td=Yes
-features_1144_td=Yes
-features_1145_td=No
-features_1146_td=Yes
-features_1147_td=Yes
-features_1148_td=Domains (User-Defined Types)
-features_1149_td=Yes
-features_1150_td=No
-features_1151_td=Yes
-features_1152_td=Yes
-features_1153_td=Yes
-features_1154_td=Files per Database
-features_1155_td=Few
-features_1156_td=Many
-features_1157_td=Few
-features_1158_td=Many
-features_1159_td=Many
-features_1160_td=Row Level Locking
-features_1161_td=Yes *9
-features_1162_td=Yes
-features_1163_td=Yes *9
-features_1164_td=Yes
-features_1165_td=Yes
-features_1166_td=Multi Version Concurrency
-features_1167_td=Yes
-features_1168_td=No
-features_1169_td=Yes
-features_1170_td=Yes
-features_1171_td=Yes
-features_1172_td=Multi-Threaded Statement Processing
-features_1173_td=No *11
-features_1174_td=Yes
-features_1175_td=Yes
-features_1176_td=Yes
-features_1177_td=Yes
-features_1178_td=Role Based Security
-features_1179_td=Yes
-features_1180_td=Yes *3
-features_1181_td=Yes
-features_1182_td=Yes
-features_1183_td=Yes
-features_1184_td=Updatable Result Sets
-features_1185_td=Yes
-features_1186_td=Yes *7
-features_1187_td=Yes
-features_1188_td=Yes
-features_1189_td=Yes
-features_1190_td=Sequences
-features_1191_td=Yes
-features_1192_td=Yes
-features_1193_td=Yes
-features_1194_td=No
-features_1195_td=Yes
-features_1196_td=Limit and Offset
-features_1197_td=Yes
-features_1198_td=Yes *13
-features_1199_td=Yes
-features_1200_td=Yes
-features_1201_td=Yes
-features_1202_td=Window Functions
-features_1203_td=No *15
-features_1204_td=No *15
-features_1205_td=No
-features_1206_td=No
-features_1207_td=Yes
-features_1208_td=Temporary Tables
-features_1209_td=Yes
-features_1210_td=Yes *4
-features_1211_td=Yes
-features_1212_td=Yes
-features_1213_td=Yes
-features_1214_td=Information Schema
-features_1215_td=Yes
-features_1216_td=No *8
-features_1217_td=Yes
-features_1218_td=Yes
-features_1219_td=Yes
-features_1220_td=Computed Columns
-features_1221_td=Yes
-features_1222_td=Yes
-features_1223_td=Yes
-features_1224_td=No
-features_1225_td=Yes *6
-features_1226_td=Case Insensitive Columns
-features_1227_td=Yes
-features_1228_td=Yes *14
-features_1229_td=Yes
-features_1230_td=Yes
-features_1231_td=Yes *6
-features_1232_td=Custom Aggregate Functions
-features_1233_td=Yes
-features_1234_td=No
-features_1235_td=Yes
-features_1236_td=Yes
-features_1237_td=Yes
-features_1238_td=CLOB/BLOB Compression
-features_1239_td=Yes
-features_1240_td=No
-features_1241_td=No
-features_1242_td=No
-features_1243_td=Yes
-features_1244_td=Footprint (jar/dll size)
-features_1245_td=~1.5 MB *5
-features_1246_td=~3 MB
-features_1247_td=~1.5 MB
-features_1248_td=~4 MB
-features_1249_td=~6 MB
-features_1250_p=\ *1 HSQLDB supports text tables.
-features_1251_p=\ *2 MySQL supports linked MySQL tables under the name 'federated tables'.
-features_1252_p=\ *3 Derby supports role-based security and password checking as an option.
-features_1253_p=\ *4 Derby only supports global temporary tables.
-features_1254_p=\ *5 The default H2 jar file contains debug information, jar files for other databases do not.
-features_1255_p=\ *6 PostgreSQL supports functional indexes.
-features_1256_p=\ *7 Derby only supports updatable result sets if the query is not sorted.
-features_1257_p=\ *8 Derby doesn't support standard compliant information schema tables.
-features_1258_p=\ *9 When using MVCC (multi version concurrency).
-features_1259_p=\ *10 Derby and HSQLDB don't hide data patterns well.
-features_1260_p=\ *11 The MULTI_THREADED option is not enabled by default, and not yet supported when using MVCC.
-features_1261_p=\ *12 Derby doesn't support the EXPLAIN
statement, but it supports runtime statistics and retrieving statement execution plans.
-features_1262_p=\ *13 Derby doesn't support the syntax LIMIT .. [OFFSET ..]
, however it supports FETCH FIRST .. ROW[S] ONLY
.
-features_1263_p=\ *14 Using collations. *15 Derby and H2 support ROW_NUMBER() OVER()
.
-features_1264_h3=DaffodilDb and One$Db
-features_1265_p=\ It looks like the development of this database has stopped. The last release was February 2006.
-features_1266_h3=McKoi
-features_1267_p=\ It looks like the development of this database has stopped. The last release was August 2004.
-features_1268_h2=H2 in Use
-features_1269_p=\ For a list of applications that work with or use H2, see\: Links.
-features_1270_h2=Connection Modes
-features_1271_p=\ The following connection modes are supported\:
-features_1272_li=Embedded mode (local connections using JDBC)
-features_1273_li=Server mode (remote connections using JDBC or ODBC over TCP/IP)
-features_1274_li=Mixed mode (local and remote connections at the same time)
-features_1275_h3=Embedded Mode
-features_1276_p=\ In embedded mode, an application opens a database from within the same JVM using JDBC. This is the fastest and easiest connection mode. The disadvantage is that a database may only be open in one virtual machine (and class loader) at any time. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of databases open concurrently, or on the number of open connections.
-features_1277_h3=Server Mode
-features_1278_p=\ When using the server mode (sometimes called remote mode or client/server mode), an application opens a database remotely using the JDBC or ODBC API. A server needs to be started within the same or another virtual machine, or on another computer. Many applications can connect to the same database at the same time, by connecting to this server. Internally, the server process opens the database(s) in embedded mode.
-features_1279_p=\ The server mode is slower than the embedded mode, because all data is transferred over TCP/IP. As in all modes, both persistent and in-memory databases are supported. There is no limit on the number of databases open concurrently per server, or on the number of open connections.
-features_1280_h3=Mixed Mode
-features_1281_p=\ The mixed mode is a combination of the embedded and the server mode. The first application that connects to a database does that in embedded mode, but also starts a server so that other applications (running in different processes or virtual machines) can concurrently access the same data. The local connections are as fast as if the database is used in just the embedded mode, while the remote connections are a bit slower.
-features_1282_p=\ The server can be started and stopped from within the application (using the server API), or automatically (automatic mixed mode). When using the automatic mixed mode, all clients that want to connect to the database (no matter if it's a local or remote connection) can do so using the exact same database URL.
-features_1283_h2=Database URL Overview
-features_1284_p=\ This database supports multiple connection modes and connection settings. This is achieved using different database URLs. Settings in the URLs are not case sensitive.
-features_1285_th=Topic
-features_1286_th=URL Format and Examples
-features_1287_a=Embedded (local) connection
-features_1288_td=\ jdbc\:h2\:[file\:][<path>]<databaseName>
-features_1289_td=\ jdbc\:h2\:~/test
-features_1290_td=\ jdbc\:h2\:file\:/data/sample
-features_1291_td=\ jdbc\:h2\:file\:C\:/data/sample (Windows only)
-features_1292_a=In-memory (private)
-features_1293_td=jdbc\:h2\:mem\:
-features_1294_a=In-memory (named)
-features_1295_td=\ jdbc\:h2\:mem\:<databaseName>
-features_1296_td=\ jdbc\:h2\:mem\:test_mem
-features_1297_a=Server mode (remote connections)
-features_1298_a=\ using TCP/IP
-features_1299_td=\ jdbc\:h2\:tcp\://<server>[\:<port>]/[<path>]<databaseName>
-features_1300_td=\ jdbc\:h2\:tcp\://localhost/~/test
-features_1301_td=\ jdbc\:h2\:tcp\://dbserv\:8084/~/sample
-features_1302_td=\ jdbc\:h2\:tcp\://localhost/mem\:test
-features_1303_a=Server mode (remote connections)
-features_1304_a=\ using TLS
-features_1305_td=\ jdbc\:h2\:ssl\://<server>[\:<port>]/<databaseName>
-features_1306_td=\ jdbc\:h2\:ssl\://localhost\:8085/~/sample;
-features_1307_a=Using encrypted files
-features_1308_td=\ jdbc\:h2\:<url>;CIPHER\=AES
-features_1309_td=\ jdbc\:h2\:ssl\://localhost/~/test;CIPHER\=AES
-features_1310_td=\ jdbc\:h2\:file\:~/secure;CIPHER\=AES
-features_1311_a=File locking methods
-features_1312_td=\ jdbc\:h2\:<url>;FILE_LOCK\={FILE|SOCKET|NO}
-features_1313_td=\ jdbc\:h2\:file\:~/private;CIPHER\=AES;FILE_LOCK\=SOCKET
-features_1314_a=Only open if it already exists
-features_1315_td=\ jdbc\:h2\:<url>;IFEXISTS\=TRUE
-features_1316_td=\ jdbc\:h2\:file\:~/sample;IFEXISTS\=TRUE
-features_1317_a=Don't close the database when the VM exits
-features_1318_td=\ jdbc\:h2\:<url>;DB_CLOSE_ON_EXIT\=FALSE
-features_1319_a=Execute SQL on connection
-features_1320_td=\ jdbc\:h2\:<url>;INIT\=RUNSCRIPT FROM '~/create.sql'
-features_1321_td=\ jdbc\:h2\:file\:~/sample;INIT\=RUNSCRIPT FROM '~/create.sql'\\;RUNSCRIPT FROM '~/populate.sql'
-features_1322_a=User name and/or password
-features_1323_td=\ jdbc\:h2\:<url>[;USER\=<username>][;PASSWORD\=<value>]
-features_1324_td=\ jdbc\:h2\:file\:~/sample;USER\=sa;PASSWORD\=123
-features_1325_a=Debug trace settings
-features_1326_td=\ jdbc\:h2\:<url>;TRACE_LEVEL_FILE\=<level 0..3>
-features_1327_td=\ jdbc\:h2\:file\:~/sample;TRACE_LEVEL_FILE\=3
-features_1328_a=Ignore unknown settings
-features_1329_td=\ jdbc\:h2\:<url>;IGNORE_UNKNOWN_SETTINGS\=TRUE
-features_1330_a=Custom file access mode
-features_1331_td=\ jdbc\:h2\:<url>;ACCESS_MODE_DATA\=rws
-features_1332_a=Database in a zip file
-features_1333_td=\ jdbc\:h2\:zip\:<zipFileName>\!/<databaseName>
-features_1334_td=\ jdbc\:h2\:zip\:~/db.zip\!/test
-features_1335_a=Compatibility mode
-features_1336_td=\ jdbc\:h2\:<url>;MODE\=<databaseType>
-features_1337_td=\ jdbc\:h2\:~/test;MODE\=MYSQL
-features_1338_a=Auto-reconnect
-features_1339_td=\ jdbc\:h2\:<url>;AUTO_RECONNECT\=TRUE
-features_1340_td=\ jdbc\:h2\:tcp\://localhost/~/test;AUTO_RECONNECT\=TRUE
-features_1341_a=Automatic mixed mode
-features_1342_td=\ jdbc\:h2\:<url>;AUTO_SERVER\=TRUE
-features_1343_td=\ jdbc\:h2\:~/test;AUTO_SERVER\=TRUE
-features_1344_a=Page size
-features_1345_td=\ jdbc\:h2\:<url>;PAGE_SIZE\=512
-features_1346_a=Changing other settings
-features_1347_td=\ jdbc\:h2\:<url>;<setting>\=<value>[;<setting>\=<value>...]
-features_1348_td=\ jdbc\:h2\:file\:~/sample;TRACE_LEVEL_SYSTEM_OUT\=3
-features_1349_h2=Connecting to an Embedded (Local) Database
-features_1350_p=\ The database URL for connecting to a local database is jdbc\:h2\:[file\:][<path>]<databaseName>
. The prefix file\:
is optional. If no path or only a relative path is used, then the current working directory is used as a starting point. The case sensitivity of the path and database name depends on the operating system, however it is recommended to use lowercase letters only. The database name must be at least three characters long (a limitation of File.createTempFile
). The database name must not contain a semicolon. To point to the user home directory, use ~/
, as in\: jdbc\:h2\:~/test
.
-features_1351_h2=In-Memory Databases
-features_1352_p=\ For certain use cases (for example\: rapid prototyping, testing, high performance operations, read-only databases), it may not be required to persist data, or persist changes to the data. This database supports the in-memory mode, where the data is not persisted.
-features_1353_p=\ In some cases, only one connection to an in-memory database is required. This means the database to be opened is private. In this case, the database URL is jdbc\:h2\:mem\: and opening two connections within the same virtual machine means opening two different (private) databases.
-features_1354_p=\ Sometimes multiple connections to the same in-memory database are required. In this case, the database URL must include a name. Example\: jdbc\:h2\:mem\:db1
. Accessing the same database using this URL only works within the same virtual machine and class loader environment.
-features_1355_p=\ To access an in-memory database from another process or from another computer, you need to start a TCP server in the same process as the in-memory database was created. The other processes then need to access the database over TCP/IP or TLS, using a database URL such as\: jdbc\:h2\:tcp\://localhost/mem\:db1
.
-features_1356_p=\ By default, closing the last connection to a database closes the database. For an in-memory database, this means the content is lost. To keep the database open, add ;DB_CLOSE_DELAY\=-1
to the database URL. To keep the content of an in-memory database as long as the virtual machine is alive, use jdbc\:h2\:mem\:test;DB_CLOSE_DELAY\=-1
.
-features_1357_h2=Database Files Encryption
-features_1358_p=\ The database files can be encrypted using the AES encryption algorithm. To use file encryption, you need to specify the encryption algorithm (the 'cipher') and the file password (in addition to the user password) when connecting to the database.
-features_1359_h3=Creating a New Database with File Encryption
-features_1360_p=\ By default, a new database is automatically created if it does not exist yet. To create an encrypted database, connect to it as if it already existed.
-features_1361_h3=Connecting to an Encrypted Database
-features_1362_p=\ The encryption algorithm is set in the database URL, and the file password is specified in the password field, before the user password. A single space separates the file password and the user password; the file password itself may not contain spaces. File passwords and user passwords are case sensitive. Here is an example to connect to a password-encrypted database\:
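A minimal sketch, assuming the file password filepwd and the user password userpwd:

    // the file password comes first in the password field,
    // separated from the user password by a single space
    Connection conn = DriverManager.getConnection(
            "jdbc:h2:~/test;CIPHER=AES", "sa", "filepwd userpwd");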
-features_1363_h3=Encrypting or Decrypting a Database
-features_1364_p=\ To encrypt an existing database, use the ChangeFileEncryption
tool. This tool can also decrypt an encrypted database, or change the file encryption key. The tool is available from within the H2 Console in the tools section, or you can run it from the command line. The following command line will encrypt the database test
in the user home directory with the file password filepwd
and the encryption algorithm AES\:
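A sketch of that command line (the jar file name and the options shown follow the tool's documented usage and may differ between versions):

    java -cp h2*.jar org.h2.tools.ChangeFileEncryption -dir ~ -db test -cipher AES -encrypt filepwd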
-features_1365_h2=Database File Locking
-features_1366_p=\ Whenever a database is opened, a lock file is created to signal other processes that the database is in use. If the database is closed, or if the process that opened the database terminates, this lock file is deleted.
-features_1367_p=\ The following file locking methods are implemented\:
-features_1368_li=The default method is FILE
and uses a watchdog thread to protect the database file. The watchdog reads the lock file each second.
-features_1369_li=The second method is SOCKET
and opens a server socket. The socket method does not require reading the lock file every second. The socket method should only be used if the database files are only accessed by one (and always the same) computer.
-features_1370_li=The third method is FS
. This will use native file locking using FileChannel.lock
.
-features_1371_li=It is also possible to open the database without file locking; in this case it is up to the application to protect the database files. Failing to do so will result in a corrupted database. Using the method NO
forces the database to not create a lock file at all. Please note that this is unsafe as another process is able to open the same database, possibly leading to data corruption.
-features_1372_p=\ To open the database with a different file locking method, use the parameter FILE_LOCK
. The following code opens the database with the 'socket' locking method\:
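For example, following the URL format shown in the overview table:

    jdbc:h2:~/test;FILE_LOCK=SOCKET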
-features_1373_p=\ For more information about the algorithms, see Advanced / File Locking Protocols.
-features_1374_h2=Opening a Database Only if it Already Exists
-features_1375_p=\ By default, when an application calls DriverManager.getConnection(url, ...)
and the database specified in the URL does not yet exist, a new (empty) database is created. In some situations, it is better to restrict creating new databases, and only allow opening existing databases. To do this, add ;IFEXISTS\=TRUE
to the database URL. In this case, if the database does not already exist, an exception is thrown when trying to connect. The connection only succeeds when the database already exists. The complete URL may look like this\:
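For example:

    jdbc:h2:~/test;IFEXISTS=TRUE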
-features_1376_h2=Closing a Database
-features_1377_h3=Delayed Database Closing
-features_1378_p=\ Usually, a database is closed when the last connection to it is closed. In some situations this slows down the application, for example when it is not possible to keep at least one connection open. The automatic closing of a database can be delayed or disabled with the SQL statement SET DB_CLOSE_DELAY <seconds>
. The parameter <seconds> specifies the number of seconds to keep a database open after the last connection to it was closed. The following statement will keep a database open for 10 seconds after the last connection was closed\:
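That statement, written out:

    SET DB_CLOSE_DELAY 10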
-features_1379_p=\ The value -1 means the database is not closed automatically. The value 0 is the default and means the database is closed when the last connection is closed. This setting is persistent and can be set by an administrator only. It is possible to set the value in the database URL\: jdbc\:h2\:~/test;DB_CLOSE_DELAY\=10
.
-features_1380_h3=Don't Close a Database when the VM Exits
-features_1381_p=\ By default, a database is closed when the last connection is closed. However, if it is never closed, the database is closed when the virtual machine exits normally, using a shutdown hook. In some situations, the database should not be closed in this case, for example because the database is still used at virtual machine shutdown (to record the shutdown process in the database). For those cases, the automatic closing of the database can be disabled in the database URL. The first connection (the one that is opening the database) needs to set the option in the database URL (it is not possible to change the setting afterwards). The database URL to disable database closing on exit is\:
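That URL, written out with an example database name:

    jdbc:h2:~/test;DB_CLOSE_ON_EXIT=FALSE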
-features_1382_h2=Execute SQL on Connection
-features_1383_p=\ Sometimes, particularly for in-memory databases, it is useful to be able to execute DDL or DML commands automatically when a client connects to a database. This functionality is enabled via the INIT property. Note that multiple commands may be passed to INIT, but the semicolon delimiter must be escaped, as in the example below.
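A sketch of such a URL as a Java string literal (the script names are examples); note the doubled backslash before the semicolon that separates the two commands:

    String url = "jdbc:h2:mem:test;INIT=RUNSCRIPT FROM '~/create.sql'\\;"
            + "RUNSCRIPT FROM '~/populate.sql'";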
-features_1384_p=\ Please note the double backslash is only required in a Java or properties file. In a GUI, or in an XML file, only one backslash is required\:
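The same URL in single-backslash form:

    jdbc:h2:mem:test;INIT=RUNSCRIPT FROM '~/create.sql'\;RUNSCRIPT FROM '~/populate.sql'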
-features_1385_p=\ Backslashes within the init script (for example within a runscript statement, to specify the folder names in Windows) need to be escaped as well (using a second backslash). It might be simpler to avoid backslashes in folder names for this reason; use forward slashes instead.
-features_1386_h2=Ignore Unknown Settings
-features_1387_p=\ Some applications (for example OpenOffice.org Base) pass some additional parameters when connecting to the database. Why those parameters are passed is unknown. The parameters PREFERDOSLIKELINEENDS
and IGNOREDRIVERPRIVILEGES
are such examples; they are simply ignored to improve the compatibility with OpenOffice.org. If an application passes other parameters when connecting to the database, usually the database throws an exception saying the parameter is not supported. It is possible to ignore such parameters by adding ;IGNORE_UNKNOWN_SETTINGS\=TRUE
to the database URL.
-features_1388_h2=Changing Other Settings when Opening a Connection
-features_1389_p=\ In addition to the settings already described, other database settings can be passed in the database URL. Adding ;setting\=value
at the end of a database URL is the same as executing the statement SET setting value
just after connecting. For a list of supported settings, see SQL Grammar or the DbSettings javadoc.
-features_1390_h2=Custom File Access Mode
-features_1391_p=\ Usually, the database opens the database file with the access mode rw
, meaning read-write (except for read only databases, where the mode r
is used). To open a database in read-only mode if the database file is not read-only, use ACCESS_MODE_DATA\=r
. Also supported are rws
and rwd
. This setting must be specified in the database URL\:
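For example:

    jdbc:h2:~/test;ACCESS_MODE_DATA=rws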
-features_1392_p=\ For more information see Durability Problems. On many operating systems the access mode rws
does not guarantee that the data is written to the disk.
-features_1393_h2=Multiple Connections
-features_1394_h3=Opening Multiple Databases at the Same Time
-features_1395_p=\ An application can open multiple databases at the same time, including multiple connections to the same database. The number of open databases is only limited by the memory available.
-features_1396_h3=Multiple Connections to the Same Database\: Client/Server
-features_1397_p=\ If you want to access the same database at the same time from different processes or computers, you need to use the client / server mode. In this case, one process acts as the server, and the other processes (that could reside on other computers as well) connect to the server via TCP/IP (or TLS over TCP/IP for improved security).
-features_1398_h3=Multithreading Support
-features_1399_p=\ This database is multithreading-safe. That means, if an application is multi-threaded, it does not need to worry about synchronizing access to the database. Internally, most requests to the same database are synchronized. That means an application can use multiple threads that access the same database at the same time, however if one thread executes a long running query, the other threads need to wait.
-features_1400_p=\ An application should normally use one connection per thread. This database synchronizes access to the same connection, but other databases may not do this.
-features_1401_h3=Locking, Lock-Timeout, Deadlocks
-features_1402_p=\ Please note that MVCC is enabled by default in version 1.4.x when using the MVStore. In this case, table level locking is not used. If multi-version concurrency is not used, the database uses table level locks to give each connection a consistent state of the data. There are two kinds of locks\: read locks (shared locks) and write locks (exclusive locks). All locks are released when the transaction commits or rolls back. When using the default transaction isolation level 'read committed', read locks are already released after each statement.
-features_1403_p=\ If a connection wants to read from a table, and there is no write lock on the table, then a read lock is added to the table. If there is a write lock, then this connection waits for the other connection to release the lock. If a connection cannot get a lock for a specified time, then a lock timeout exception is thrown.
-features_1404_p=\ Usually, SELECT
statements will generate read locks. This includes subqueries. Statements that modify data use write locks. It is also possible to lock a table exclusively without modifying data, using the statement SELECT ... FOR UPDATE
. The statements COMMIT
and ROLLBACK
release all open locks. The commands SAVEPOINT
and ROLLBACK TO SAVEPOINT
don't affect locks. The locks are also released when the autocommit mode changes, and for connections with autocommit set to true (this is the default), locks are released after each statement. The following statements generate locks\:
-features_1405_th=Type of Lock
-features_1406_th=SQL Statement
-features_1407_td=Read
-features_1408_td=SELECT * FROM TEST;
-features_1409_td=\ CALL SELECT MAX(ID) FROM TEST;
-features_1410_td=\ SCRIPT;
-features_1411_td=Write
-features_1412_td=SELECT * FROM TEST WHERE 1\=0 FOR UPDATE;
-features_1413_td=Write
-features_1414_td=INSERT INTO TEST VALUES(1, 'Hello');
-features_1415_td=\ INSERT INTO TEST SELECT * FROM TEST;
-features_1416_td=\ UPDATE TEST SET NAME\='Hi';
-features_1417_td=\ DELETE FROM TEST;
-features_1418_td=Write
-features_1419_td=ALTER TABLE TEST ...;
-features_1420_td=\ CREATE INDEX ... ON TEST ...;
-features_1421_td=\ DROP INDEX ...;
-features_1422_p=\ The number of milliseconds until a lock timeout exception is thrown can be set separately for each connection using the SQL command SET LOCK_TIMEOUT <milliseconds>
. The initial lock timeout (that is the timeout used for new connections) can be set using the SQL command SET DEFAULT_LOCK_TIMEOUT <milliseconds>
. The default lock timeout is persistent.
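For example, to let the current connection wait up to five seconds for locks:

    SET LOCK_TIMEOUT 5000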
-features_1423_h3=Avoiding Deadlocks
-features_1424_p=\ To avoid deadlocks, ensure that all transactions lock the tables in the same order (for example in alphabetical order), and avoid upgrading read locks to write locks. Both can be achieved by explicitly locking tables using SELECT ... FOR UPDATE
.
-features_1425_h2=Database File Layout
-features_1426_p=\ The following files are created for persistent databases\:
-features_1427_th=File Name
-features_1428_th=Description
-features_1429_th=Number of Files
-features_1430_td=\ test.h2.db
-features_1431_td=\ Database file.
-features_1432_td=\ Contains the transaction log, indexes, and data for all tables.
-features_1433_td=\ Format\: <database>.h2.db
-features_1434_td=\ 1 per database
-features_1435_td=\ test.lock.db
-features_1436_td=\ Database lock file.
-features_1437_td=\ Automatically (re-)created while the database is in use.
-features_1438_td=\ Format\: <database>.lock.db
-features_1439_td=\ 1 per database (only if in use)
-features_1440_td=\ test.trace.db
-features_1441_td=\ Trace file (if the trace option is enabled).
-features_1442_td=\ Contains trace information.
-features_1443_td=\ Format\: <database>.trace.db
-features_1444_td=\ Renamed to <database>.trace.db.old if it is too big.
-features_1445_td=\ 0 or 1 per database
-features_1446_td=\ test.lobs.db/*
-features_1447_td=\ Directory containing one file for each
-features_1448_td=\ BLOB or CLOB value larger than a certain size.
-features_1449_td=\ Format\: <id>.t<tableId>.lob.db
-features_1450_td=\ 1 per large object
-features_1451_td=\ test.123.temp.db
-features_1452_td=\ Temporary file.
-features_1453_td=\ Contains a temporary blob or a large result set.
-features_1454_td=\ Format\: <database>.<id>.temp.db
-features_1455_td=\ 1 per object
-features_1456_h3=Moving and Renaming Database Files
-features_1457_p=\ Database name and location are not stored inside the database files.
-features_1458_p=\ While a database is closed, the files can be moved to another directory, and they can be renamed as well (as long as all files of the same database start with the same name and the respective extensions are unchanged).
-features_1459_p=\ As there is no platform specific data in the files, they can be moved to other operating systems without problems.
-features_1460_h3=Backup
-features_1461_p=\ When the database is closed, it is possible to backup the database files.
-features_1462_p=\ To backup data while the database is running, the SQL commands SCRIPT
and BACKUP
can be used.
-features_1463_h2=Logging and Recovery
-features_1464_p=\ Whenever data is modified in the database and those changes are committed, the changes are written to the transaction log (except for in-memory objects). The changes to the main data area itself are usually written later on, to optimize disk access. If there is a power failure, the main data area is not up-to-date, but because the changes are in the transaction log, the next time the database is opened, the changes are re-applied automatically.
-features_1465_h2=Compatibility
-features_1466_p=\ All database engines behave a little bit differently. Where possible, H2 supports the ANSI SQL standard, and tries to be compatible with other databases. There are still a few differences, however\:
-features_1467_p=\ In MySQL text columns are case insensitive by default, while in H2 they are case sensitive. However H2 supports case insensitive columns as well. To create the tables with case insensitive texts, append IGNORECASE\=TRUE
to the database URL (example\: jdbc\:h2\:~/test;IGNORECASE\=TRUE
).
-features_1468_h3=Compatibility Modes
-features_1469_p=\ For certain features, this database can emulate the behavior of specific databases. However, only a small subset of the differences between databases are implemented in this way. Here is the list of currently supported modes and the differences to the regular mode\:
-features_1470_h3=DB2 Compatibility Mode
-features_1471_p=\ To use the IBM DB2 mode, use the database URL jdbc\:h2\:~/test;MODE\=DB2
or the SQL statement SET MODE DB2
.
-features_1472_li=For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-features_1473_li=Support for the syntax [OFFSET .. ROW] [FETCH ... ONLY]
as an alternative for LIMIT .. OFFSET
.
-features_1474_li=Concatenating NULL
with another value results in the other value.
-features_1475_li=Support the pseudo-table SYSIBM.SYSDUMMY1.
-features_1476_h3=Derby Compatibility Mode
-features_1477_p=\ To use the Apache Derby mode, use the database URL jdbc\:h2\:~/test;MODE\=Derby
or the SQL statement SET MODE Derby
.
-features_1478_li=For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-features_1479_li=For unique indexes, NULL
is distinct. That means only one row with NULL
in one of the columns is allowed.
-features_1480_li=Concatenating NULL
with another value results in the other value.
-features_1481_li=Support the pseudo-table SYSIBM.SYSDUMMY1.
-features_1482_h3=HSQLDB Compatibility Mode
-features_1483_p=\ To use the HSQLDB mode, use the database URL jdbc\:h2\:~/test;MODE\=HSQLDB
or the SQL statement SET MODE HSQLDB
.
-features_1484_li=For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-features_1485_li=When converting the scale of decimal data, the number is only converted if the new scale is smaller than the current scale. Usually, the scale is converted and 0s are added if required.
-features_1486_li=For unique indexes, NULL
is distinct. That means only one row with NULL
in one of the columns is allowed.
-features_1487_li=Text can be concatenated using '+'.
-features_1488_h3=MS SQL Server Compatibility Mode
-features_1489_p=\ To use the MS SQL Server mode, use the database URL jdbc\:h2\:~/test;MODE\=MSSQLServer
or the SQL statement SET MODE MSSQLServer
.
-features_1490_li=For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-features_1491_li=Identifiers may be quoted using square brackets as in [Test]
.
-features_1492_li=For unique indexes, NULL
is distinct. That means only one row with NULL
in one of the columns is allowed.
-features_1493_li=Concatenating NULL
with another value results in the other value.
-features_1494_li=Text can be concatenated using '+'.
-features_1495_h3=MySQL Compatibility Mode
-features_1496_p=\ To use the MySQL mode, use the database URL jdbc\:h2\:~/test;MODE\=MySQL
or the SQL statement SET MODE MySQL
.
-features_1497_li=When inserting data, if a column is defined to be NOT NULL
and NULL
is inserted, then a 0 (or empty string, or the current timestamp for timestamp columns) value is used. Usually, this operation is not allowed and an exception is thrown.
-features_1498_li=Creating indexes in the CREATE TABLE
statement is allowed using INDEX(..)
or KEY(..)
. Example\: create table test(id int primary key, name varchar(255), key idx_name(name));
-features_1499_li=Meta data calls return identifiers in lower case.
-features_1500_li=When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded.
-features_1501_li=Concatenating NULL
with another value results in the other value.
-features_1502_p=\ Text comparison in MySQL is case insensitive by default, while in H2 it is case sensitive (as in most other databases). H2 does support case insensitive text comparison, but it needs to be set separately, using SET IGNORECASE TRUE
. This affects comparison using \=, LIKE, REGEXP
.
-features_1503_h3=Oracle Compatibility Mode
-features_1504_p=\ To use the Oracle mode, use the database URL jdbc\:h2\:~/test;MODE\=Oracle
or the SQL statement SET MODE Oracle
.
-features_1505_li=For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-features_1506_li=When using unique indexes, multiple rows with NULL
in all columns are allowed, however it is not allowed to have multiple rows with the same values otherwise.
-features_1507_li=Concatenating NULL
with another value results in the other value.
-features_1508_li=Empty strings are treated like NULL
values.
-features_1509_h3=PostgreSQL Compatibility Mode
-features_1510_p=\ To use the PostgreSQL mode, use the database URL jdbc\:h2\:~/test;MODE\=PostgreSQL
or the SQL statement SET MODE PostgreSQL
.
-features_1511_li=For aliased columns, ResultSetMetaData.getColumnName()
returns the alias name and getTableName()
returns null
.
-features_1512_li=When converting a floating point number to an integer, the fractional digits are not truncated, but the value is rounded.
-features_1513_li=The system columns CTID
and OID
are supported.
-features_1514_li=LOG(x) is base 10 in this mode.
-features_1515_h2=Auto-Reconnect
-features_1516_p=\ The auto-reconnect feature causes the JDBC driver to reconnect to the database if the connection is lost. The automatic re-connect only occurs when auto-commit is enabled; if auto-commit is disabled, an exception is thrown. To enable this mode, append ;AUTO_RECONNECT\=TRUE
to the database URL.
-features_1517_p=\ Re-connecting will open a new session. After an automatic re-connect, variables and local temporary table definitions (excluding data) are re-created. The system table INFORMATION_SCHEMA.SESSION_STATE contains all client side state that is re-created.
-features_1518_p=\ If another connection uses the database in exclusive mode (enabled using SET EXCLUSIVE 1
or SET EXCLUSIVE 2
), then this connection will try to re-connect until the exclusive mode ends.
-features_1519_h2=Automatic Mixed Mode
-features_1520_p=\ Multiple processes can access the same database without having to start the server manually. To do that, append ;AUTO_SERVER\=TRUE
to the database URL. You can use the same database URL independent of whether the database is already open or not. This feature doesn't work with in-memory databases. Example database URL\:
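For example (the database name is illustrative):

    jdbc:h2:~/test;AUTO_SERVER=TRUE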
-features_1521_p=\ Use the same URL for all connections to this database. Internally, when using this mode, the first connection to the database is made in embedded mode, and additionally a server is started internally (as a daemon thread). If the database is already open in another process, the server mode is used automatically. The IP address and port of the server are stored in the file .lock.db
, that's why in-memory databases can't be supported.
-features_1522_p=\ The application that opens the first connection to the database uses the embedded mode, which is faster than the server mode. Therefore the main application should open the database first if possible. The first connection automatically starts a server on a random port. This server allows remote connections, however only to this database (to ensure that, the client reads the .lock.db file and sends the random key that is stored there to the server). When the first connection is closed, the server stops. If other (remote) connections are still open, one of them will then start a server (auto-reconnect is enabled automatically).
-features_1523_p=\ All processes need to have access to the database files. If the first connection is closed (the connection that started the server), open transactions of other connections will be rolled back (this may not be a problem if you don't disable autocommit). Explicit client/server connections (using jdbc\:h2\:tcp\://
or ssl\://
) are not supported. This mode is not supported for in-memory databases.
-features_1524_p=\ Here is an example how to use this mode. Application 1 and 2 are not necessarily started on the same computer, but they need to have access to the database files. Application 1 and 2 are typically two different processes (however they could run within the same process).
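A sketch of the scenario (URL and credentials are example values):

    // application 1 opens the database first: it runs in embedded mode
    // and automatically starts the server as a daemon thread
    Connection conn1 = DriverManager.getConnection(
            "jdbc:h2:/data/test;AUTO_SERVER=TRUE", "sa", "");

    // application 2 (possibly another process on another computer with
    // access to the same files) uses the exact same URL; because the
    // database is already open, it is transparently served over TCP
    Connection conn2 = DriverManager.getConnection(
            "jdbc:h2:/data/test;AUTO_SERVER=TRUE", "sa", "");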
-features_1525_p=\ When using this feature, by default the server uses any free TCP port. The port can be set manually using AUTO_SERVER_PORT\=9090
.
-features_1526_h2=Page Size
-features_1527_p=\ The page size for new databases is 2 KB (2048), unless the page size is set explicitly in the database URL using PAGE_SIZE\=
when the database is created. The page size of existing databases can not be changed, so this property needs to be set when the database is created.
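For example, to create a database with 512 byte pages:

    jdbc:h2:~/test;PAGE_SIZE=512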
-features_1528_h2=Using the Trace Options
-features_1529_p=\ To find problems in an application, it is sometimes good to see what database operations were executed. This database offers the following trace features\:
-features_1530_li=Trace to System.out
and/or to a file
-features_1531_li=Support for trace levels OFF, ERROR, INFO, DEBUG
-features_1532_li=The maximum size of the trace file can be set
-features_1533_li=It is possible to generate Java source code from the trace file
-features_1534_li=Trace can be enabled at runtime by manually creating a file
-features_1535_h3=Trace Options
-features_1536_p=\ The simplest way to enable the trace option is setting it in the database URL. There are two settings, one for System.out
(TRACE_LEVEL_SYSTEM_OUT
) tracing, and one for file tracing (TRACE_LEVEL_FILE
). The trace levels are 0 for OFF
, 1 for ERROR
(the default), 2 for INFO
, and 3 for DEBUG
. A database URL with both levels set to DEBUG
is\:
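For example:

    jdbc:h2:~/test;TRACE_LEVEL_FILE=3;TRACE_LEVEL_SYSTEM_OUT=3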
-features_1537_p=\ The trace level can be changed at runtime by executing the SQL command SET TRACE_LEVEL_SYSTEM_OUT level
(for System.out
tracing) or SET TRACE_LEVEL_FILE level
(for file tracing). Example\:
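For example, to switch System.out tracing to DEBUG at runtime:

    SET TRACE_LEVEL_SYSTEM_OUT 3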
-features_1538_h3=Setting the Maximum Size of the Trace File
-features_1539_p=\ When using a high trace level, the trace file can get very big quickly. The default size limit is 16 MB; if the trace file exceeds this limit, it is renamed to .old
and a new file is created. If another such file exists, it is deleted. To limit the size to a certain number of megabytes, use SET TRACE_MAX_FILE_SIZE mb
. Example\:
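For example, to limit the trace file to about 1 MB:

    SET TRACE_MAX_FILE_SIZE 1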
-features_1540_h3=Java Code Generation
-features_1541_p=\ When setting the trace level to INFO
or DEBUG
, Java source code is generated as well. This simplifies reproducing problems. The trace file looks like this\:
-features_1542_p=\ To filter the Java source code, use the ConvertTraceFile
tool as follows\:
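A sketch of that command line (file names are examples; the options shown follow the tool's documented usage):

    java -cp h2*.jar org.h2.tools.ConvertTraceFile -traceFile "~/test.trace.db" -javaClass "Test" -script "test.sql"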
-features_1543_p=\ The generated file Test.java
will contain the Java source code. The generated source code may be too large to compile (the size of a Java method is limited). If this is the case, the source code needs to be split into multiple methods. The password is not listed in the trace file and therefore not included in the source code.
-features_1544_h2=Using Other Logging APIs
-features_1545_p=\ By default, this database uses its own native 'trace' facility. This facility is called 'trace' and not 'log' within this database to avoid confusion with the transaction log. Trace messages can be written to both file and System.out
. In most cases, this is sufficient, however sometimes it is better to use the same facility as the application, for example Log4j. To do that, this database supports SLF4J.
-features_1546_a=SLF4J
-features_1547_p=\ is a simple facade for various logging APIs and allows plugging in the desired implementation at deployment time. SLF4J supports implementations such as Logback, Log4j, Jakarta Commons Logging (JCL), Java logging, x4juli, and Simple Log.
-features_1548_p=\ To enable SLF4J, set the file trace level to 4 in the database URL\:
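For example:

    jdbc:h2:~/test;TRACE_LEVEL_FILE=4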
-features_1549_p=\ Changing the log mechanism is not possible after the database is open, that means executing the SQL statement SET TRACE_LEVEL_FILE 4
when the database is already open will not have the desired effect. To use SLF4J, all required jar files need to be in the classpath. The logger name is h2database
. If it does not work, check the file <database>.trace.db
for error messages.
-features_1550_h2=Read Only Databases
-features_1551_p=\ If the database files are read-only, then the database is read-only as well. It is not possible to create new tables, add or modify data in this database. Only SELECT
and CALL
statements are allowed. To create a read-only database, close the database. Then, make the database file read-only. When you open the database now, it is read-only. There are two ways an application can find out whether a database is read-only\: by calling Connection.isReadOnly()
or by executing the SQL statement CALL READONLY()
.
-features_1552_p=\ Using the Custom Access Mode r
the database can also be opened in read-only mode, even if the database file is not read only.
-features_1553_h2=Read Only Databases in Zip or Jar File
-features_1554_p=\ To create a read-only database in a zip file, first create a regular persistent database, and then create a backup. The database must not have pending changes, that means you need to close all connections to the database first. To speed up opening the read-only database and running queries, the database should be closed using SHUTDOWN DEFRAG
. If you are using a database named test
, an easy way to create a zip file is using the Backup
tool. You can start the tool from the command line, or from within the H2 Console (Tools - Backup). Please note that the database must be closed when the backup is created. Therefore, the SQL statement BACKUP TO
can not be used.
-features_1555_p=\ When the zip file is created, you can open the database in the zip file using the following database URL\:
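For example, matching the zip URL format from the overview table:

    jdbc:h2:zip:~/db.zip!/test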
-features_1556_p=\ Databases in zip files are read-only. The performance for some queries will be slower than when using a regular database, because random access in zip files is not supported (only streaming). How much this affects the performance depends on the queries and the data. The database is not read into memory; therefore large databases are supported as well. The same indexes are used as when using a regular database.
-features_1557_p=\ If the database is larger than a few megabytes, performance is much better if the database file is split into multiple smaller files, because random access in compressed files is not possible. See also the sample application ReadOnlyDatabaseInZip.
-features_1558_h3=Opening a Corrupted Database
-features_1559_p=\ If a database cannot be opened because the boot info (the SQL script that is run at startup) is corrupted, then the database can be opened by specifying a database event listener. The exceptions are logged, but opening the database will continue.
-features_1560_h2=Computed Columns / Function Based Index
-features_1561_p=\ A computed column is a column whose value is calculated before storing. The formula is evaluated when the row is inserted, and re-evaluated every time the row is updated. One use case is to automatically update the last-modification time\:
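A sketch of such a table, assuming NOW() as the formula (table and column names are illustrative):

    CREATE TABLE TEST(ID INT, NAME VARCHAR,
        LAST_MOD TIMESTAMP AS NOW());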
-features_1562_p=\ Function indexes are not directly supported by this database, but they can be emulated by using computed columns. For example, if an index on the upper-case version of a column is required, create a computed column with the upper-case version of the original column, and create an index for this column\:
-features_1563_p=\ When inserting data, it is not required (and not allowed) to specify a value for the upper-case version of the column, because the value is generated. But you can use the column when querying the table\:
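A sketch of the emulation described above (table and column names are illustrative):

    CREATE TABLE ADDRESS(ID INT PRIMARY KEY, NAME VARCHAR,
        UPPER_NAME VARCHAR AS UPPER(NAME));
    CREATE INDEX IDX_UPPER_NAME ON ADDRESS(UPPER_NAME);
    -- the computed column can be used when querying (but not when inserting):
    SELECT * FROM ADDRESS WHERE UPPER_NAME = 'SMITH';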
-features_1564_h2=Multi-Dimensional Indexes
-features_1565_p=\ A tool is provided to execute efficient multi-dimension (spatial) range queries. This database does not support a specialized spatial index (R-Tree or similar). Instead, the B-Tree index is used. For each record, the multi-dimensional key is converted (mapped) to a single dimensional (scalar) value. This value specifies the location on a space-filling curve.
-features_1566_p=\ Currently, Z-order (also called N-order or Morton-order) is used; a Hilbert curve could also be used, but the implementation is more complex. The algorithm to convert the multi-dimensional value is called bit-interleaving. The scalar value is indexed using a B-Tree index (usually using a computed column).
-features_1567_p=\ The method can result in a drastic performance improvement over just using an index on the first column. Depending on the data and the number of dimensions, the improvement is usually more than a factor of 5. The tool generates a SQL query from a specified multi-dimensional range. The method used is not database dependent, and the tool can easily be ported to other databases. For an example of how to use the tool, please have a look at the sample code provided in TestMultiDimension.java
.
-features_1568_h2=User-Defined Functions and Stored Procedures
-features_1569_p=\ In addition to the built-in functions, this database supports user-defined Java functions. In this database, Java functions can be used as stored procedures as well. A function must be declared (registered) before it can be used. A function can be defined using source code, or as a reference to a compiled class that is available in the classpath. By default, the function aliases are stored in the current schema.
-features_1570_h3=Referencing a Compiled Method
-features_1571_p=\ When referencing a method, the class must already be compiled and included in the classpath where the database is running. Only static Java methods are supported; both the class and the method must be public. Example Java class\:
-features_1572_p=\ The Java function must be registered in the database by calling CREATE ALIAS ... FOR
\:
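A sketch combining the class and its registration, loosely based on the sample application referenced below (class and alias names are illustrative):

```java
import java.math.BigInteger;
import java.sql.*;

public class Function {
    // Must be a public static method of a public class on the classpath:
    public static boolean isPrime(int value) {
        return new BigInteger(String.valueOf(value)).isProbablePrime(100);
    }

    public static void main(String... args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
                Statement stat = conn.createStatement()) {
            stat.execute("CREATE ALIAS IS_PRIME FOR \"Function.isPrime\"");
            try (ResultSet rs = stat.executeQuery("SELECT IS_PRIME(17)")) {
                rs.next();
                System.out.println(rs.getBoolean(1)); // true
            }
        }
    }
}
```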
-features_1573_p=\ For a complete sample application, see src/test/org/h2/samples/Function.java
.
-features_1574_h3=Declaring Functions as Source Code
-features_1575_p=\ When defining a function alias with source code, the database tries to compile the source code using the Sun Java compiler (the class com.sun.tools.javac.Main
) if the tools.jar
is in the classpath. If not, javac
is run as a separate process. Only the source code is stored in the database; the class is compiled each time the database is re-opened. Source code is usually passed as dollar-quoted text to avoid escaping problems; however, single quotes can be used as well. Example\:
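A minimal sketch of the nextPrime example referenced below, declared as dollar-quoted source (based on the H2 samples; treat the alias name as illustrative):

```java
import java.sql.*;

public class SourceAlias {
    public static void main(String... args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
                Statement stat = conn.createStatement()) {
            // The source is compiled each time the database is opened;
            // java.math is imported by default, so BigInteger resolves:
            stat.execute("CREATE ALIAS NEXT_PRIME AS $$ "
                    + "String nextPrime(String value) { "
                    + "return new BigInteger(value).nextProbablePrime().toString(); "
                    + "} $$");
            try (ResultSet rs = stat.executeQuery("CALL NEXT_PRIME('10')")) {
                rs.next();
                System.out.println(rs.getString(1)); // 11
            }
        }
    }
}
```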
-features_1576_p=\ By default, the three packages java.util, java.math, java.sql
are imported. The method name (nextPrime
in the example above) is ignored. Method overloading is not supported when declaring functions as source code; that means only one method may be declared for an alias. If different import statements are required, they must be declared at the beginning and separated with the tag @CODE
\:
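A sketch of an alias with its own imports, separated by @CODE (adapted from memory of the documentation's ipAddress example; treat the details as illustrative):

```java
import java.sql.*;

public class ImportedAlias {
    public static void main(String... args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
                Statement stat = conn.createStatement()) {
            // Extra imports come first, then @CODE, then the method:
            stat.execute("CREATE ALIAS IP_ADDRESS AS $$ "
                    + "import java.net.*; "
                    + "@CODE "
                    + "String ipAddress() throws Exception { "
                    + "return InetAddress.getLocalHost().getHostAddress(); } $$");
            try (ResultSet rs = stat.executeQuery("CALL IP_ADDRESS()")) {
                rs.next();
                System.out.println(rs.getString(1));
            }
        }
    }
}
```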
-features_1577_p=\ The following template is used to create a complete Java class\:
-features_1578_h3=Method Overloading
-features_1579_p=\ Multiple methods may be bound to a SQL function if the class is already compiled and included in the classpath. Each Java method must have a different number of arguments. Method overloading is not supported when declaring functions as source code.
-features_1580_h3=Function Data Type Mapping
-features_1581_p=\ Functions that accept non-nullable parameters such as int
will not be called if one of those parameters is NULL
. Instead, the result of the function is NULL
. If the function should be called when a parameter is NULL
, you need to use java.lang.Integer
instead.
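For example (a sketch with invented names):

```java
public class NullHandling {
    // With a primitive int parameter, the engine would not call the function
    // for a NULL argument; the result would simply be NULL. With Integer,
    // the function is called and can handle the NULL itself:
    public static String describe(Integer value) {
        return value == null ? "unknown" : value.toString();
    }
}
```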
-features_1582_p=\ SQL types are mapped to Java classes and vice-versa as in the JDBC API. For details, see Data Types. There are a few special cases\: java.lang.Object
is mapped to OTHER
(a serialized object). Therefore, java.lang.Object
cannot be used to match all SQL types (matching all SQL types is not supported). The second special case is Object[]
\: arrays of any class are mapped to ARRAY
. Objects of type org.h2.value.Value
(the internal value class) are passed through without conversion.
-features_1583_h3=Functions That Require a Connection
-features_1584_p=\ If the first parameter of a Java function is a java.sql.Connection
, then the connection to the database is provided. This connection does not need to be closed before returning. When calling the method from within the SQL statement, this connection parameter does not need to be (and cannot be) specified.
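A sketch of such a function (class, method, and alias names are illustrative):

```java
import java.sql.*;

public class ConnFunction {
    // The first Connection parameter is supplied by the engine and is
    // not (and cannot be) specified in the SQL call:
    public static int rowCount(Connection conn, String table)
            throws SQLException {
        try (ResultSet rs = conn.createStatement()
                .executeQuery("SELECT COUNT(*) FROM " + table)) {
            rs.next();
            return rs.getInt(1);
        }
    }
}
// SQL (illustrative): CREATE ALIAS ROW_COUNT FOR "ConnFunction.rowCount";
//                     CALL ROW_COUNT('TEST');
```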
-features_1585_h3=Functions Throwing an Exception
-features_1586_p=\ If a function throws an exception, then the current statement is rolled back and the exception is thrown to the application. SQLExceptions are re-thrown directly to the calling application; all other exceptions are first converted to a SQLException.
-features_1587_h3=Functions Returning a Result Set
-features_1588_p=\ Functions may return a result set. Such a function can be called with the CALL
statement\:
-features_1589_h3=Using SimpleResultSet
-features_1590_p=\ A function can create a result set using the SimpleResultSet
tool\:
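A minimal sketch (the addColumn and addRow calls are the SimpleResultSet API; the class and alias names are illustrative):

```java
import java.sql.ResultSet;
import java.sql.Types;
import org.h2.tools.SimpleResultSet;

public class ResultSetFunction {
    public static ResultSet simple() {
        SimpleResultSet rs = new SimpleResultSet();
        // Column name, SQL type, precision, scale:
        rs.addColumn("ID", Types.INTEGER, 10, 0);
        rs.addColumn("NAME", Types.VARCHAR, 255, 0);
        rs.addRow(0, "Hello");
        rs.addRow(1, "World");
        return rs;
    }
}
// SQL (illustrative): CREATE ALIAS SIMPLE FOR "ResultSetFunction.simple";
//                     CALL SIMPLE();
```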
-features_1591_h3=Using a Function as a Table
-features_1592_p=\ A function that returns a result set can be used like a table. However, in this case the function is called at least twice\: first, while parsing the statement, to collect the column names (with parameters set to null where not known at compile time); and then, while executing the statement, to get the data (possibly multiple times if this is a join). If the function is called just to get the column list, the URL of the connection passed to the function is jdbc\:columnlist\:connection
. Otherwise, the URL of the connection is jdbc\:default\:connection
.
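A sketch of a table function that checks the connection URL to distinguish the two cases (names are illustrative):

```java
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Types;
import org.h2.tools.SimpleResultSet;

public class TableFunction {
    public static ResultSet myTable(Connection conn) throws SQLException {
        SimpleResultSet rs = new SimpleResultSet();
        rs.addColumn("ID", Types.INTEGER, 10, 0);
        // While parsing, only the column list is requested:
        String url = conn.getMetaData().getURL();
        if ("jdbc:columnlist:connection".equals(url)) {
            return rs;
        }
        rs.addRow(1);
        rs.addRow(2);
        return rs;
    }
}
// SQL (illustrative): CREATE ALIAS MY_TABLE FOR "TableFunction.myTable";
//                     SELECT * FROM MY_TABLE();
```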
-features_1593_h2=Pluggable or User-Defined Tables
-features_1594_p=\ For situations where you need to expose other data sources to the SQL engine as a table, there are "pluggable tables". For some examples, have a look at the code in org.h2.test.db.TestTableEngines
.
-features_1595_p=\ In order to create your own TableEngine, you need to implement the org.h2.api.TableEngine
interface, e.g. something like this\:
-features_1596_p=\ and then create the table from SQL like this\:
-features_1597_p=\ It is also possible to pass in parameters to the table engine, like so\:
-features_1598_p=\ In that case, the parameters are passed down in the tableEngineParams field of the CreateTableData object.
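A structural sketch only (the TableEngine interface and the tableEngineParams field are as described above; a real engine would return its own org.h2.table.Table implementation, which is too large to show here, and the SQL shown in the comment is illustrative):

```java
import org.h2.api.TableEngine;
import org.h2.command.ddl.CreateTableData;
import org.h2.table.Table;

public class MyTableEngine implements TableEngine {
    @Override
    public Table createTable(CreateTableData data) {
        // data.tableEngineParams carries the parameters passed via WITH, if any.
        // A real engine would construct and return its Table implementation here:
        throw new UnsupportedOperationException("sketch only");
    }
}
// SQL (illustrative):
//   CREATE TABLE T(ID INT) ENGINE "MyTableEngine" WITH "param1", "param2";
```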
-features_1599_h2=Triggers
-features_1600_p=\ This database supports Java triggers that are called before or after a row is updated, inserted or deleted. Triggers can be used for complex consistency checks, or to update related data in the database. It is also possible to use triggers to simulate materialized views. For a complete sample application, see src/test/org/h2/samples/TriggerSample.java
. A Java trigger must implement the interface org.h2.api.Trigger
. The trigger class must be available in the classpath of the database engine (when using the server mode, it must be in the classpath of the server).
-features_1601_p=\ The connection can be used to query or update data in other tables. The trigger then needs to be defined in the database\:
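A sketch of the definition, assuming an open java.sql.Statement stat and the trigger class on the database's classpath (trigger name, table, and class are illustrative; the class is the one from TriggerSample):

```java
// Fires the Java trigger after each inserted row:
stat.execute("CREATE TRIGGER INV_INS AFTER INSERT ON INVOICE "
        + "FOR EACH ROW CALL \"org.h2.samples.TriggerSample$MyTrigger\"");
```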
-features_1602_p=\ The trigger can be used to veto a change by throwing a SQLException
.
-features_1603_p=\ As an alternative to implementing the Trigger
interface, an application can extend the abstract class org.h2.tools.TriggerAdapter
. This allows using the ResultSet
interface within trigger implementations. In this case, only the fire
method needs to be implemented\:
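A minimal sketch of such a trigger (the AUDIT table and column names are invented; the fire signature is the one from TriggerAdapter):

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import org.h2.tools.TriggerAdapter;

public class AuditTrigger extends TriggerAdapter {
    @Override
    public void fire(Connection conn, ResultSet oldRow, ResultSet newRow)
            throws SQLException {
        // oldRow is null for inserts, newRow is null for deletes:
        if (newRow != null) {
            try (PreparedStatement prep = conn.prepareStatement(
                    "INSERT INTO AUDIT(CHANGED_NAME) VALUES(?)")) {
                prep.setString(1, newRow.getString("NAME"));
                prep.execute();
            }
        }
    }
}
```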
-features_1604_h2=Compacting a Database
-features_1605_p=\ Empty space in the database file is re-used automatically. When closing the database, the database is automatically compacted for up to 200 milliseconds by default. To compact more, use the SQL statement SHUTDOWN COMPACT. However, re-creating the database may further reduce the database size, because this will re-build the indexes. Here is a sample function to do this\:
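A minimal sketch using the SCRIPT / RUNSCRIPT statements mentioned below (file and database names are illustrative):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import org.h2.tools.DeleteDbFiles;

public class CompactDb {
    public static void main(String... args) throws Exception {
        // 1. Export the old database to a script:
        try (Connection conn =
                DriverManager.getConnection("jdbc:h2:~/test", "sa", "")) {
            conn.createStatement().execute("SCRIPT TO 'backup.sql'");
        }
        // 2. Delete the old database files:
        DeleteDbFiles.execute("~", "test", true);
        // 3. Re-create the database from the script, re-building all indexes:
        try (Connection conn =
                DriverManager.getConnection("jdbc:h2:~/test", "sa", "")) {
            conn.createStatement().execute("RUNSCRIPT FROM 'backup.sql'");
        }
    }
}
```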
-features_1606_p=\ See also the sample application org.h2.samples.Compact
. The commands SCRIPT / RUNSCRIPT
can be used as well to create a backup of a database and re-build the database from the script.
-features_1607_h2=Cache Settings
-features_1608_p=\ The database keeps most frequently used data in the main memory. The amount of memory used for caching can be changed using the setting CACHE_SIZE
. This setting can be set in the database connection URL (jdbc\:h2\:~/test;CACHE_SIZE\=131072
), or it can be changed at runtime using SET CACHE_SIZE size
. The size of the cache, as represented by CACHE_SIZE
, is measured in KB, with each KB being 1024 bytes. This setting has no effect for in-memory databases. For persistent databases, the setting is stored in the database and re-used when the database is opened the next time. However, when opening an existing database, the cache size is set to at most half the amount of memory available to the virtual machine (Runtime.getRuntime().maxMemory()), even if the cache size setting stored in the database is larger; the setting stored in the database is kept nonetheless. Setting the cache size in the database URL or explicitly using SET CACHE_SIZE overrides this value (even if larger than the physical memory). To get the currently used maximum cache size, use the query SELECT * FROM INFORMATION_SCHEMA.SETTINGS WHERE NAME \= 'info.CACHE_MAX_SIZE'
-features_1609_p=\ An experimental scan-resistant cache algorithm "Two Queue" (2Q) is available. To enable it, append ;CACHE_TYPE\=TQ
to the database URL. The cache might not actually improve performance. If you plan to use it, please run your own test cases first.
-features_1610_p=\ Also included is an experimental second-level soft-reference cache. Rows in this cache are only garbage collected on low memory. By default, the second-level cache is disabled. To enable it, use the prefix SOFT_
. Example\: jdbc\:h2\:~/test;CACHE_TYPE\=SOFT_LRU
. The cache might not actually improve performance. If you plan to use it, please run your own test cases first.
-features_1611_p=\ To get information about page reads and writes, and the current caching algorithm in use, call SELECT * FROM INFORMATION_SCHEMA.SETTINGS
. The number of pages read / written is listed.
-fragments_1000_div=\ &\#x25b2;
-fragments_1001_label=Search\:
-fragments_1002_label=Highlight keyword(s)
-fragments_1003_a=Home
-fragments_1004_a=Download
-fragments_1005_a=Cheat Sheet
-fragments_1006_b=Documentation
-fragments_1007_a=Quickstart
-fragments_1008_a=Installation
-fragments_1009_a=Tutorial
-fragments_1010_a=Features
-fragments_1011_a=Performance
-fragments_1012_a=Advanced
-fragments_1013_b=Reference
-fragments_1014_a=SQL Grammar
-fragments_1015_a=Functions
-fragments_1016_a=Data Types
-fragments_1017_a=Javadoc
-fragments_1018_a=PDF (1 MB)
-fragments_1019_b=Support
-fragments_1020_a=FAQ
-fragments_1021_a=Error Analyzer
-fragments_1022_a=Google Group (English)
-fragments_1023_a=Google Group (Japanese)
-fragments_1024_a=Google Group (Chinese)
-fragments_1025_b=Appendix
-fragments_1026_a=History & Roadmap
-fragments_1027_a=License
-fragments_1028_a=Build
-fragments_1029_a=Links
-fragments_1030_a=JaQu
-fragments_1031_a=MVStore
-fragments_1032_a=Architecture
-fragments_1033_td=
-frame_1000_h1=H2 Database Engine
-frame_1001_p=\ Welcome to H2, the free SQL database. The main features of H2 are\:
-frame_1002_li=It is free to use for everybody, source code is included
-frame_1003_li=Written in Java, but also available as native executable
-frame_1004_li=JDBC and (partial) ODBC API
-frame_1005_li=Embedded and client/server modes
-frame_1006_li=Clustering is supported
-frame_1007_li=A web client is included
-frame_1008_h2=No Javascript
-frame_1009_p=\ If you are not automatically redirected to the main page, then JavaScript is currently disabled or your browser does not support JavaScript. Some features (for example the integrated search) require JavaScript.
-frame_1010_p=\ Please enable JavaScript, or go ahead without it\: H2 Database Engine
-history_1000_h1=History and Roadmap
-history_1001_a=\ Change Log
-history_1002_a=\ Roadmap
-history_1003_a=\ History of this Database Engine
-history_1004_a=\ Why Java
-history_1005_a=\ Supporters
-history_1006_h2=Change Log
-history_1007_p=\ The up-to-date change log is available at http\://www.h2database.com/html/changelog.html
-history_1008_h2=Roadmap
-history_1009_p=\ The current roadmap is available at http\://www.h2database.com/html/roadmap.html
-history_1010_h2=History of this Database Engine
-history_1011_p=\ The development of H2 was started in May 2004, but it was first published on December 14th, 2005. The main author of H2, Thomas Mueller, is also the original developer of Hypersonic SQL. In 2001, he joined PointBase Inc., where he wrote PointBase Micro, a commercial Java SQL database. At that point, he had to discontinue Hypersonic SQL. The HSQLDB Group was formed to continue working on the Hypersonic SQL codebase. The name H2 stands for Hypersonic 2; however, H2 does not share code with Hypersonic SQL or HSQLDB. H2 was built from scratch.
-history_1012_h2=Why Java
-history_1013_p=\ The main reasons to use a Java database are\:
-history_1014_li=Very simple to integrate in Java applications
-history_1015_li=Support for many different platforms
-history_1016_li=More secure than native applications (no buffer overflows)
-history_1017_li=User defined functions (or triggers) run very fast
-history_1018_li=Unicode support
-history_1019_p=\ Some think Java is too slow for low-level operations, but this is no longer true. Garbage collection, for example, is now faster than manual memory management.
-history_1020_p=\ Developing Java code is faster than developing C or C++ code. When using Java, most time can be spent on improving the algorithms instead of porting the code to different platforms or doing memory management. Features such as Unicode and network libraries are already built in. In Java, writing secure code is easier because buffer overflows cannot occur. Features such as reflection can be used for randomized testing.
-history_1021_p=\ Java is future proof\: a lot of companies support Java. Java is now open source.
-history_1022_p=\ To increase the portability and ease of use, this software depends on very few libraries. Features that are not available in open source Java implementations (such as Swing) are not used, or only used for optional features.
-history_1023_h2=Supporters
-history_1024_p=\ Many thanks to those who reported bugs, gave valuable feedback, spread the word, and translated this project. Also many thanks to the donors. To become a donor, use PayPal (at the very bottom of the main web page).
-history_1025_a=xso; xBase Software Ontwikkeling, Netherlands
-history_1026_a=Cognitect, USA
-history_1027_a=Code 42 Software, Inc., Minneapolis
-history_1028_li=Martin Wildam, Austria
-history_1029_a=Code Lutin, France
-history_1030_a=NetSuxxess GmbH, Germany
-history_1031_a=Poker Copilot, Steve McLeod, Germany
-history_1032_a=SkyCash, Poland
-history_1033_a=Lumber-mill, Inc., Japan
-history_1034_a=StockMarketEye, USA
-history_1035_a=Eckenfelder GmbH & Co.KG, Germany
-history_1036_li=Anthony Goubard, Netherlands
-history_1037_li=Richard Hickey, USA
-history_1038_li=Alessio Jacopo D'Adamo, Italy
-history_1039_li=Ashwin Jayaprakash, USA
-history_1040_li=Donald Bleyl, USA
-history_1041_li=Frank Berger, Germany
-history_1042_li=Florent Ramiere, France
-history_1043_li=Jun Iyama, Japan
-history_1044_li=Antonio Casqueiro, Portugal
-history_1045_li=Oliver Computing LLC, USA
-history_1046_li=Harpal Grover Consulting Inc., USA
-history_1047_li=Elisabetta Berlini, Italy
-history_1048_li=William Gilbert, USA
-history_1049_li=Antonio Dieguez Rojas, Chile
-history_1050_a=Ontology Works, USA
-history_1051_li=Pete Haidinyak, USA
-history_1052_li=William Osmond, USA
-history_1053_li=Joachim Ansorg, Germany
-history_1054_li=Oliver Soerensen, Germany
-history_1055_li=Christos Vasilakis, Greece
-history_1056_li=Fyodor Kupolov, Denmark
-history_1057_li=Jakob Jenkov, Denmark
-history_1058_li=Stéphane Chartrand, Switzerland
-history_1059_li=Glenn Kidd, USA
-history_1060_li=Gustav Trede, Sweden
-history_1061_li=Joonas Pulakka, Finland
-history_1062_li=Bjorn Darri Sigurdsson, Iceland
-history_1063_li=Iyama Jun, Japan
-history_1064_li=Gray Watson, USA
-history_1065_li=Erik Dick, Germany
-history_1066_li=Pengxiang Shao, China
-history_1067_li=Bilingual Marketing Group, USA
-history_1068_li=Philippe Marschall, Switzerland
-history_1069_li=Knut Staring, Norway
-history_1070_li=Theis Borg, Denmark
-history_1071_li=Mark De Mendonca Duske, USA
-history_1072_li=Joel A. Garringer, USA
-history_1073_li=Olivier Chafik, France
-history_1074_li=Rene Schwietzke, Germany
-history_1075_li=Jalpesh Patadia, USA
-history_1076_li=Takanori Kawashima, Japan
-history_1077_li=Terrence JC Huang, China
-history_1078_a=JiaDong Huang, Australia
-history_1079_li=Laurent van Roy, Belgium
-history_1080_li=Qian Chen, China
-history_1081_li=Clinton Hyde, USA
-history_1082_li=Kritchai Phromros, Thailand
-history_1083_li=Alan Thompson, USA
-history_1084_li=Ladislav Jech, Czech Republic
-history_1085_li=Dimitrijs Fedotovs, Latvia
-history_1086_li=Richard Manley-Reeve, United Kingdom
-installation_1000_h1=Installation
-installation_1001_a=\ Requirements
-installation_1002_a=\ Supported Platforms
-installation_1003_a=\ Installing the Software
-installation_1004_a=\ Directory Structure
-installation_1005_h2=Requirements
-installation_1006_p=\ To run this database, the following software stack is known to work. Other software most likely also works, but is not tested as much.
-installation_1007_h3=Database Engine
-installation_1008_li=Windows XP or Vista, Mac OS X, or Linux
-installation_1009_li=Sun Java 6 or newer
-installation_1010_li=Recommended Windows file system\: NTFS (FAT32 only supports files up to 4 GB)
-installation_1011_h3=H2 Console
-installation_1012_li=Mozilla Firefox
-installation_1013_h2=Supported Platforms
-installation_1014_p=\ As this database is written in Java, it can run on many different platforms. It is tested with Java 6 and 7. Currently, the database is developed and tested on Windows 8 and Mac OS X using Java 6, but it also works on many other operating systems and with other Java runtime environments. All major operating systems (Windows XP, Windows Vista, Windows 7, Mac OS, Ubuntu,...) are supported.
-installation_1015_h2=Installing the Software
-installation_1016_p=\ To install the software, run the installer or unzip it to a directory of your choice.
-installation_1017_h2=Directory Structure
-installation_1018_p=\ After installing, you should get the following directory structure\:
-installation_1019_th=Directory
-installation_1020_th=Contents
-installation_1021_td=bin
-installation_1022_td=JAR and batch files
-installation_1023_td=docs
-installation_1024_td=Documentation
-installation_1025_td=docs/html
-installation_1026_td=HTML pages
-installation_1027_td=docs/javadoc
-installation_1028_td=Javadoc files
-installation_1029_td=ext
-installation_1030_td=External dependencies (downloaded when building)
-installation_1031_td=service
-installation_1032_td=Tools to run the database as a Windows Service
-installation_1033_td=src
-installation_1034_td=Source files
-installation_1035_td=src/docsrc
-installation_1036_td=Documentation sources
-installation_1037_td=src/installer
-installation_1038_td=Installer, shell, and release build script
-installation_1039_td=src/main
-installation_1040_td=Database engine source code
-installation_1041_td=src/test
-installation_1042_td=Test source code
-installation_1043_td=src/tools
-installation_1044_td=Tools and database adapters source code
-jaqu_1000_h1=JaQu
-jaqu_1001_a=\ What is JaQu
-jaqu_1002_a=\ Differences to Other Data Access Tools
-jaqu_1003_a=\ Current State
-jaqu_1004_a=\ Building the JaQu Library
-jaqu_1005_a=\ Requirements
-jaqu_1006_a=\ Example Code
-jaqu_1007_a=\ Configuration
-jaqu_1008_a=\ Natural Syntax
-jaqu_1009_a=\ Other Ideas
-jaqu_1010_a=\ Similar Projects
-jaqu_1011_h2=What is JaQu
-jaqu_1012_p=\ Note\: This project is currently in maintenance mode. A friendly fork of JaQu is available under the name iciql.
-jaqu_1013_p=\ JaQu stands for Java Query and allows accessing databases using pure Java. JaQu provides a fluent interface (or internal DSL). JaQu is something like LINQ for Java (LINQ stands for "language integrated query" and is a Microsoft .NET technology). The following JaQu code\:
-jaqu_1014_p=\ stands for the SQL statement\:
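A sketch of the fluent style, based on the JaQu samples (the Product class and its field names are illustrative):

```java
import java.util.List;
import org.h2.jaqu.Db;

public class JaQuExample {
    // Field-to-column mapping is by convention; this class is illustrative:
    public static class Product {
        public Integer unitsInStock;
        public String productName;
    }

    public static void main(String... args) {
        Db db = Db.open("jdbc:h2:mem:test", "sa", "sa");
        Product p = new Product();
        // The fluent query below stands for:
        //   SELECT * FROM PRODUCT P WHERE P.UNITS_IN_STOCK = 0
        List<Product> soldOut = db.from(p).where(p.unitsInStock).is(0).select();
        System.out.println(soldOut.size());
        db.close();
    }
}
```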
-jaqu_1015_h2=Differences to Other Data Access Tools
-jaqu_1016_p=\ Unlike SQL, JaQu can be easily integrated in Java applications. Because JaQu is pure Java, auto-complete in the IDE is supported. Type checking is performed by the compiler. JaQu fully protects against SQL injection.
-jaqu_1017_p=\ JaQu is meant as a replacement for JDBC and SQL, not so much as a replacement for tools like Hibernate. With JaQu, you don't write SQL statements as strings. JaQu is much smaller and simpler than other persistence frameworks such as Hibernate, but it also does not provide all the features of those frameworks. Unlike iBatis and Hibernate, no XML or annotation based configuration is required; instead, the configuration (if required at all) is done in pure Java, within the application.
-jaqu_1018_p=\ JaQu does not require or contain any data caching mechanism. Like JDBC and iBatis, JaQu provides full control over when and what SQL statements are executed (but without having to write SQL statements as strings).
-jaqu_1019_h3=Restrictions
-jaqu_1020_p=\ Primitive types (e.g. boolean, int, long, double
) are not supported. Use java.lang.Boolean, Integer, Long, Double
instead.
-jaqu_1021_h3=Why in Java?
-jaqu_1022_p=\ Most applications are written in Java. Mixing Java and another language (for example Scala or Groovy) in the same application is complicated\: you would need to split the application and database code, and write adapter / wrapper code.
-jaqu_1023_h2=Current State
-jaqu_1024_p=\ Currently, JaQu is only tested with the H2 database. The API may change in future versions. JaQu is not part of the h2 jar file; however, the source code is included in H2, under\:
-jaqu_1025_code=src/test/org/h2/test/jaqu/*
-jaqu_1026_li=\ (samples and tests)
-jaqu_1027_code=src/tools/org/h2/jaqu/*
-jaqu_1028_li=\ (framework)
-jaqu_1029_h2=Building the JaQu Library
-jaqu_1030_p=\ To create the JaQu jar file, run\: build jarJaqu
. This will create the file bin/h2jaqu.jar
.
-jaqu_1031_h2=Requirements
-jaqu_1032_p=\ JaQu requires Java 6. Annotations are not needed. Currently, JaQu is only tested with the H2 database engine; however, in theory it should work with any database that supports the JDBC API.
-jaqu_1033_h2=Example Code
-jaqu_1034_h2=Configuration
-jaqu_1035_p=\ JaQu does not require any configuration when using the default field to column mapping. To define table indices, or if you want to map a class to a table with a different name, or a field to a column with another name, create a function called define
in the data class. Example\:
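A sketch adapted from memory of the JaQu samples (the Define.* statics, the Table interface, and the field names should all be treated as illustrative):

```java
import static org.h2.jaqu.Define.*;

import org.h2.jaqu.Table;

public class Product implements Table {
    public Integer productId;
    public String productName;
    public String category;

    // Called once, the first time the class is used:
    public void define() {
        tableName("Product");
        primaryKey(productId);
        index(productName, category);
    }
}
```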
-jaqu_1036_p=\ The method define()
contains the mapping definition. It is called once when the class is used for the first time. Like annotations, the mapping is defined in the class itself. Unlike when using annotations, the compiler can check the syntax even for multi-column objects (multi-column indexes, multi-column primary keys and so on). Because the definition is written in Java, the configuration can be set at runtime, which is not possible using annotations. Unlike XML mapping configuration, the configuration is integrated in the class itself.
-jaqu_1037_h2=Natural Syntax
-jaqu_1038_p=The plan is to support a more natural (pure Java) syntax in conditions. To do that, the condition class is decompiled into a SQL condition. A proof-of-concept decompiler is included (but it doesn't fully work yet; patches are welcome). The planned syntax is\:
-jaqu_1039_h2=Other Ideas
-jaqu_1040_p=\ This project has just been started, and nothing is fixed yet. Some ideas are\:
-jaqu_1041_li=Support queries on collections (instead of using a database).
-jaqu_1042_li=Provide API level compatibility with JPA (so that JaQu can be used as an extension of JPA).
-jaqu_1043_li=Internally use a JPA implementation (for example Hibernate) instead of SQL directly.
-jaqu_1044_li=Use PreparedStatements and cache them.
-jaqu_1045_h2=Similar Projects
-jaqu_1046_a=iciql (a friendly fork of JaQu)
-jaqu_1047_a=Cement Framework
-jaqu_1048_a=Dreamsource ORM
-jaqu_1049_a=Empire-db
-jaqu_1050_a=JEQUEL\: Java Embedded QUEry Language
-jaqu_1051_a=Joist
-jaqu_1052_a=jOOQ
-jaqu_1053_a=JoSQL
-jaqu_1054_a=LIQUidFORM
-jaqu_1055_a=Quaere (Alias implementation)
-jaqu_1056_a=Quaere
-jaqu_1057_a=Querydsl
-jaqu_1058_a=Squill
-license_1000_h1=License
-license_1001_a=\ Summary and License FAQ
-license_1002_a=\ Mozilla Public License Version 2.0
-license_1003_a=\ Eclipse Public License - Version 1.0
-license_1004_a=\ Export Control Classification Number (ECCN)
-license_1005_h2=Summary and License FAQ
-license_1006_p=\ H2 is dual licensed and available under the MPL 2.0 (Mozilla Public License Version 2.0) or under the EPL 1.0 (Eclipse Public License). There is a license FAQ for both the MPL and the EPL.
-license_1007_li=You can use H2 for free.
-license_1008_li=You can integrate it into your applications (including in commercial applications) and distribute it.
-license_1009_li=Files containing only your code are not covered by this license (it is 'commercial friendly').
-license_1010_li=Modifications to the H2 source code must be published.
-license_1011_li=You don't need to provide the source code of H2 if you did not modify anything.
-license_1012_li=If you distribute a binary that includes H2, you need to add a disclaimer of liability - see the example below.
-license_1013_p=\ However, nobody is allowed to rename H2, modify it a little, and sell it as a database engine without telling the customers it is in fact H2. This happened to HSQLDB\: a company called 'bungisoft' copied HSQLDB, renamed it to 'RedBase', and tried to sell it, hiding the fact that it was just HSQLDB. It seems 'bungisoft' does not exist any more, but you can use the Wayback Machine and visit old web pages of http\://www.bungisoft.com
.
-license_1014_p=\ About porting the source code to another language (for example C\# or C++)\: converted source code (even if done manually) stays under the same copyright and license as the original code. The copyright of the ported source code does not (automatically) go to the person who ported the code.
-license_1015_p=\ If you distribute a binary that includes H2, you need to add the license and a disclaimer of liability (as you should do for your own code). You should add a disclaimer for each open source library you use. For example, add a file 3rdparty_license.txt
in the directory where the jar files are, and list all open source libraries, each one with its license and disclaimer. For H2, a simple solution is to copy the following text below. You may also include a copy of the complete license.
-license_1016_h2=Mozilla Public License Version 2.0
-license_1017_h3=1. Definitions
-license_1018_p=1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.
-license_1019_p=1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution.
-license_1020_p=1.3. "Contribution" means Covered Software of a particular Contributor.
-license_1021_p=1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.
-license_1022_p=1.5. "Incompatible With Secondary Licenses" means
-license_1023_p=a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or
-license_1024_p=b. that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.
-license_1025_p=1.6. "Executable Form" means any form of the work other than Source Code Form.
-license_1026_p=1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.
-license_1027_p=1.8. "License" means this document.
-license_1028_p=1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.
-license_1029_p=1.10. "Modifications" means any of the following\:
-license_1030_p=a. any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or
-license_1031_p=b. any new file in Source Code Form that contains any Covered Software.
-license_1032_p=1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.
-license_1033_p=1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.
-license_1034_p=1.13. "Source Code Form" means the form of the work preferred for making modifications.
-license_1035_p=1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
-license_1036_h3=2. License Grants and Conditions
-license_1037_h4=2.1. Grants
-license_1038_p=Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license\:
-license_1039_p=under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and
-license_1040_p=under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.
-license_1041_h4=2.2. Effective Date
-license_1042_p=The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.
-license_1043_h4=2.3. Limitations on Grant Scope
-license_1044_p=The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor\:
-license_1045_p=for any code that a Contributor has removed from Covered Software; or
-license_1046_p=for infringements caused by\: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or
-license_1047_p=under Patent Claims infringed by Covered Software in the absence of its Contributions.
-license_1048_p=This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).
-license_1049_h4=2.4. Subsequent Licenses
-license_1050_p=No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).
-license_1051_h4=2.5. Representation
-license_1052_p=Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.
-license_1053_h4=2.6. Fair Use
-license_1054_p=This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.
-license_1055_h4=2.7. Conditions
-license_1056_p=Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.
-license_1057_h3=3. Responsibilities
-license_1058_h4=3.1. Distribution of Source Form
-license_1059_p=All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form.
-license_1060_h4=3.2. Distribution of Executable Form
-license_1061_p=If You distribute Covered Software in Executable Form then\:
-license_1062_p=such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and
-license_1063_p=You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License.
-license_1064_h4=3.3. Distribution of a Larger Work
-license_1065_p=You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).
-license_1066_h4=3.4. Notices
-license_1067_p=You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.
-license_1068_h4=3.5. Application of Additional Terms
-license_1069_p=You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.
-license_1070_h3=4. Inability to Comply Due to Statute or Regulation
-license_1071_p=If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must\: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.
-license_1072_h3=5. Termination
-license_1073_p=5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.
-license_1074_p=5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.
-license_1075_p=5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.
-license_1076_h3=6. Disclaimer of Warranty
-license_1077_p=Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.
-license_1078_h3=7. Limitation of Liability
-license_1079_p=Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.
-license_1080_h3=8. Litigation
-license_1081_p=Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims.
-license_1082_h3=9. Miscellaneous
-license_1083_p=This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.
-license_1084_h3=10. Versions of the License
-license_1085_h4=10.1. New Versions
-license_1086_p=Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.
-license_1087_h4=10.2. Effect of New Versions
-license_1088_p=You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.
-license_1089_h4=10.3. Modified Versions
-license_1090_p=If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).
-license_1091_h4=10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
-license_1092_p=If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.
-license_1093_h3=Exhibit A - Source Code Form License Notice
-license_1094_p=If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.
-license_1095_p=You may add additional accurate notices of copyright ownership.
-license_1096_h3=Exhibit B - "Incompatible With Secondary Licenses" Notice
-license_1097_h2=Eclipse Public License - Version 1.0
-license_1098_p=\ THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT.
-license_1099_h3=1. DEFINITIONS
-license_1100_p=\ "Contribution" means\:
-license_1101_p=\ a) in the case of the initial Contributor, the initial code and documentation distributed under this Agreement, and
-license_1102_p=\ b) in the case of each subsequent Contributor\:
-license_1103_p=\ i) changes to the Program, and
-license_1104_p=\ ii) additions to the Program;
-license_1105_p=\ where such changes and/or additions to the Program originate from and are distributed by that particular Contributor. A Contribution 'originates' from a Contributor if it was added to the Program by such Contributor itself or anyone acting on such Contributor's behalf. Contributions do not include additions to the Program which\: (i) are separate modules of software distributed in conjunction with the Program under their own license agreement, and (ii) are not derivative works of the Program.
-license_1106_p=\ "Contributor" means any person or entity that distributes the Program.
-license_1107_p=\ "Licensed Patents " mean patent claims licensable by a Contributor which are necessarily infringed by the use or sale of its Contribution alone or when combined with the Program.
-license_1108_p=\ "Program" means the Contributions distributed in accordance with this Agreement.
-license_1109_p=\ "Recipient" means anyone who receives the Program under this Agreement, including all Contributors.
-license_1110_h3=2. GRANT OF RIGHTS
-license_1111_p=\ a) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, distribute and sublicense the Contribution of such Contributor, if any, and such derivative works, in source code and object code form.
-license_1112_p=\ b) Subject to the terms of this Agreement, each Contributor hereby grants Recipient a non-exclusive, worldwide, royalty-free patent license under Licensed Patents to make, use, sell, offer to sell, import and otherwise transfer the Contribution of such Contributor, if any, in source code and object code form. This patent license shall apply to the combination of the Contribution and the Program if, at the time the Contribution is added by the Contributor, such addition of the Contribution causes such combination to be covered by the Licensed Patents. The patent license shall not apply to any other combinations which include the Contribution. No hardware per se is licensed hereunder.
-license_1113_p=\ c) Recipient understands that although each Contributor grants the licenses to its Contributions set forth herein, no assurances are provided by any Contributor that the Program does not infringe the patent or other intellectual property rights of any other entity. Each Contributor disclaims any liability to Recipient for claims brought by any other entity based on infringement of intellectual property rights or otherwise. As a condition to exercising the rights and licenses granted hereunder, each Recipient hereby assumes sole responsibility to secure any other intellectual property rights needed, if any. For example, if a third party patent license is required to allow Recipient to distribute the Program, it is Recipient's responsibility to acquire that license before distributing the Program.
-license_1114_p=\ d) Each Contributor represents that to its knowledge it has sufficient copyright rights in its Contribution, if any, to grant the copyright license set forth in this Agreement.
-license_1115_h3=3. REQUIREMENTS
-license_1116_p=\ A Contributor may choose to distribute the Program in object code form under its own license agreement, provided that\:
-license_1117_p=\ a) it complies with the terms and conditions of this Agreement; and
-license_1118_p=\ b) its license agreement\:
-license_1119_p=\ i) effectively disclaims on behalf of all Contributors all warranties and conditions, express and implied, including warranties or conditions of title and non-infringement, and implied warranties or conditions of merchantability and fitness for a particular purpose;
-license_1120_p=\ ii) effectively excludes on behalf of all Contributors all liability for damages, including direct, indirect, special, incidental and consequential damages, such as lost profits;
-license_1121_p=\ iii) states that any provisions which differ from this Agreement are offered by that Contributor alone and not by any other party; and
-license_1122_p=\ iv) states that source code for the Program is available from such Contributor, and informs licensees how to obtain it in a reasonable manner on or through a medium customarily used for software exchange.
-license_1123_p=\ When the Program is made available in source code form\:
-license_1124_p=\ a) it must be made available under this Agreement; and
-license_1125_p=\ b) a copy of this Agreement must be included with each copy of the Program.
-license_1126_p=\ Contributors may not remove or alter any copyright notices contained within the Program.
-license_1127_p=\ Each Contributor must identify itself as the originator of its Contribution, if any, in a manner that reasonably allows subsequent Recipients to identify the originator of the Contribution.
-license_1128_h3=4. COMMERCIAL DISTRIBUTION
-license_1129_p=\ Commercial distributors of software may accept certain responsibilities with respect to end users, business partners and the like. While this license is intended to facilitate the commercial use of the Program, the Contributor who includes the Program in a commercial product offering should do so in a manner which does not create potential liability for other Contributors. Therefore, if a Contributor includes the Program in a commercial product offering, such Contributor ("Commercial Contributor") hereby agrees to defend and indemnify every other Contributor ("Indemnified Contributor") against any losses, damages and costs (collectively "Losses") arising from claims, lawsuits and other legal actions brought by a third party against the Indemnified Contributor to the extent caused by the acts or omissions of such Commercial Contributor in connection with its distribution of the Program in a commercial product offering. The obligations in this section do not apply to any claims or Losses relating to any actual or alleged intellectual property infringement. In order to qualify, an Indemnified Contributor must\: a) promptly notify the Commercial Contributor in writing of such claim, and b) allow the Commercial Contributor to control, and cooperate with the Commercial Contributor in, the defense and any related settlement negotiations. The Indemnified Contributor may participate in any such claim at its own expense.
-license_1130_p=\ For example, a Contributor might include the Program in a commercial product offering, Product X. That Contributor is then a Commercial Contributor. If that Commercial Contributor then makes performance claims, or offers warranties related to Product X, those performance claims and warranties are such Commercial Contributor's responsibility alone. Under this section, the Commercial Contributor would have to defend claims against the other Contributors related to those performance claims and warranties, and if a court requires any other Contributor to pay any damages as a result, the Commercial Contributor must pay those damages.
-license_1131_h3=5. NO WARRANTY
-license_1132_p=\ EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is solely responsible for determining the appropriateness of using and distributing the Program and assumes all risks associated with its exercise of rights under this Agreement, including but not limited to the risks and costs of program errors, compliance with applicable laws, damage to or loss of data, programs or equipment, and unavailability or interruption of operations.
-license_1133_h3=6. DISCLAIMER OF LIABILITY
-license_1134_p=\ EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
-license_1135_h3=7. GENERAL
-license_1136_p=\ If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement, and without further action by the parties hereto, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
-license_1137_p=\ If Recipient institutes patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Program itself (excluding combinations of the Program with other software or hardware) infringes such Recipient's patent(s), then such Recipient's rights granted under Section 2(b) shall terminate as of the date such litigation is filed.
-license_1138_p=\ All Recipient's rights under this Agreement shall terminate if it fails to comply with any of the material terms or conditions of this Agreement and does not cure such failure in a reasonable period of time after becoming aware of such noncompliance. If all Recipient's rights under this Agreement terminate, Recipient agrees to cease use and distribution of the Program as soon as reasonably practicable. However, Recipient's obligations under this Agreement and any licenses granted by Recipient relating to the Program shall continue and survive.
-license_1139_p=\ Everyone is permitted to copy and distribute copies of this Agreement, but in order to avoid inconsistency the Agreement is copyrighted and may only be modified in the following manner. The Agreement Steward reserves the right to publish new versions (including revisions) of this Agreement from time to time. No one other than the Agreement Steward has the right to modify this Agreement. The Eclipse Foundation is the initial Agreement Steward. The Eclipse Foundation may assign the responsibility to serve as the Agreement Steward to a suitable separate entity. Each new version of the Agreement will be given a distinguishing version number. The Program (including Contributions) may always be distributed subject to the version of the Agreement under which it was received. In addition, after a new version of the Agreement is published, Contributor may elect to distribute the Program (including its Contributions) under the new version. Except as expressly stated in Sections 2(a) and 2(b) above, Recipient receives no rights or licenses to the intellectual property of any Contributor under this Agreement, whether expressly, by implication, estoppel or otherwise. All rights in the Program not expressly granted under this Agreement are reserved.
-license_1140_p=\ This Agreement is governed by the laws of the State of New York and the intellectual property laws of the United States of America. No party to this Agreement will bring a legal action under this Agreement more than one year after the cause of action arose. Each party waives its rights to a jury trial in any resulting litigation.
-license_1141_h2=Export Control Classification Number (ECCN)
-license_1142_p=\ As far as we know, the U.S. Export Control Classification Number (ECCN) for this software is 5D002
. However, for legal reasons, we can make no warranty that this information is correct. For details, see also the Apache Software Foundation Export Classifications page.
-links_1000_h1=Links
-links_1001_p=\ If you want to add a link, please send it to the support email address or post it to the group.
-links_1002_a=\ Commercial Support
-links_1003_a=\ Quotes
-links_1004_a=\ Books
-links_1005_a=\ Extensions
-links_1006_a=\ Blog Articles, Videos
-links_1007_a=\ Database Frontends / Tools
-links_1008_a=\ Products and Projects
-links_1009_h2=Commercial Support
-links_1010_a=Commercial support for H2 is available
-links_1011_p=\ from Steve McLeod (steve dot mcleod at gmail dot com). Please note he is not one of the main developers of H2. He describes himself as follows\:
-links_1012_li=I'm a long time user of H2, routinely working with H2 databases several gigabytes in size.
-links_1013_li=I'm the creator of popular commercial desktop software that uses H2.
-links_1014_li=I'm a certified Java developer (SCJP).
-links_1015_li=I have a decade and more of IT consulting experience with large and small clients in Australia, the UK, and Germany.
-links_1016_li=I'm based in Germany, and willing to travel within Europe. I can work remotely with teams in the USA and other locations.
-links_1017_h2=Quotes
-links_1018_a=\ Quote
-links_1019_p=\: "This is by far the easiest and fastest database that I have ever used. Originally the web application that I am working on is using SQL server. But, in less than 15 minutes I had H2 up and working with little recoding of the SQL. Thanks..... "
-links_1020_h2=Books
-links_1021_a=\ Seam In Action
-links_1022_h2=Extensions
-links_1023_a=\ Grails H2 Database Plugin
-links_1024_a=\ h2osgi\: OSGi for the H2 Database
-links_1025_a=\ H2Sharp\: ADO.NET interface for the H2 database engine
-links_1026_a=\ A spatial extension of the H2 database.
-links_1027_h2=Blog Articles, Videos
-links_1028_a=\ Youtube\: Minecraft 1.7.3 / How to install Bukkit Server with xAuth and H2
-links_1029_a=\ Analyzing CSVs with H2 in under 10 minutes (2009-12-07)
-links_1030_a=\ Efficient sorting and iteration on large databases (2009-06-15)
-links_1031_a=\ Porting Flexive to the H2 Database (2008-12-05)
-links_1032_a=\ H2 Database with GlassFish (2008-11-24)
-links_1033_a=\ H2 Database - Performance Tracing (2008-04-30)
-links_1034_a=\ Open Source Databases Comparison (2007-09-11)
-links_1035_a=\ The Codist\: The Open Source Frameworks I Use (2007-07-23)
-links_1036_a=\ The Codist\: SQL Injections\: How Not To Get Stuck (2007-05-08)
-links_1037_a=\ David Coldrick's Weblog\: New Version of H2 Database Released (2007-01-06)
-links_1038_a=\ The Codist\: Write Your Own Database, Again (2006-11-13)
-links_1039_h2=Project Pages
-links_1040_a=\ Ohloh
-links_1041_a=\ Freshmeat Project Page
-links_1042_a=\ Wikipedia
-links_1043_a=\ Java Source Net
-links_1044_a=\ Linux Package Manager
-links_1045_h2=Database Frontends / Tools
-links_1046_a=\ Dataflyer
-links_1047_p=\ A tool to browse databases and export data.
-links_1048_a=\ DB Solo
-links_1049_p=\ SQL query tool.
-links_1050_a=\ DbVisualizer
-links_1051_p=\ Database tool.
-links_1052_a=\ Execute Query
-links_1053_p=\ Database utility written in Java.
-links_1054_a=\ Flyway
-links_1055_p=\ The agile database migration framework for Java.
-links_1056_a=\ [fleXive]
-links_1057_p=\ JavaEE 5 open source framework for the development of complex and evolving (web-)applications.
-links_1058_a=\ JDBC Console
-links_1059_p=\ This small webapp makes it possible to execute SQL against datasources bound in the container's JNDI. Based on the H2 Console.
-links_1060_a=\ HenPlus
-links_1061_p=\ HenPlus is a SQL shell written in Java.
-links_1062_a=\ JDBC lint
-links_1063_p=\ Helps write correct and efficient code when using the JDBC API.
-links_1064_a=\ OpenOffice
-links_1065_p=\ Base is OpenOffice.org's database application. It provides access to relational data sources.
-links_1066_a=\ RazorSQL
-links_1067_p=\ An SQL query tool, database browser, SQL editor, and database administration tool.
-links_1068_a=\ SQL Developer
-links_1069_p=\ Universal Database Frontend.
-links_1070_a=\ SQL Workbench/J
-links_1071_p=\ Free DBMS-independent SQL tool.
-links_1072_a=\ SQuirreL SQL Client
-links_1073_p=\ Graphical tool to view the structure of a database, browse the data, issue SQL commands etc.
-links_1074_a=\ SQuirreL DB Copy Plugin
-links_1075_p=\ Tool to copy data from one database to another.
-links_1076_h2=Products and Projects
-links_1077_a=\ AccuProcess
-links_1078_p=\ Visual business process modeling and simulation software for business users.
-links_1079_a=\ Adeptia BPM
-links_1080_p=\ A Business Process Management (BPM) suite to quickly and easily automate business processes and workflows.
-links_1081_a=\ Adeptia Integration
-links_1082_p=\ Process-centric, services-based application integration suite.
-links_1083_a=\ Aejaks
-links_1084_p=\ A server-side scripting environment to build AJAX enabled web applications.
-links_1085_a=\ Axiom Stack
-links_1086_p=\ A web framework that lets you write dynamic web applications with Zen-like simplicity.
-links_1087_a=\ Apache Cayenne
-links_1088_p=\ Open source persistence framework providing object-relational mapping (ORM) and remoting services.
-links_1089_a=\ Apache Jackrabbit
-links_1090_p=\ Open source implementation of the Java Content Repository API (JCR).
-links_1091_a=\ Apache OpenJPA
-links_1092_p=\ Open source implementation of the Java Persistence API (JPA).
-links_1093_a=\ AppFuse
-links_1094_p=\ Helps building web applications.
-links_1095_a=\ BGBlitz
-links_1096_p=\ The Swiss army knife of Backgammon.
-links_1097_a=\ Bonita
-links_1098_p=\ Open source workflow solution for handling long-running, user-oriented processes, providing out of the box workflow and business process management features.
-links_1099_a=\ Bookmarks Portlet
-links_1100_p=\ JSR 168 compliant bookmarks management portlet application.
-links_1101_a=\ Claros inTouch
-links_1102_p=\ Ajax communication suite with mail, addresses, notes, IM, and rss reader.
-links_1103_a=\ CrashPlan PRO Server
-links_1104_p=\ Easy and cross platform backup solution for business and service providers.
-links_1105_a=\ DataNucleus
-links_1106_p=\ Java persistent objects.
-links_1107_a=\ DbUnit
-links_1108_p=\ A JUnit extension (also usable with Ant) targeted for database-driven projects.
-links_1109_a=\ DiffKit
-links_1110_p=\ DiffKit is a tool for comparing two tables of data, field-by-field. DiffKit is like the Unix diff utility, but for tables instead of lines of text.
-links_1111_a=\ Dinamica Framework
-links_1112_p=\ Ajax/J2EE framework for RAD development (mainly oriented toward hispanic markets).
-links_1113_a=\ District Health Information Software 2 (DHIS)
-links_1114_p=\ The DHIS 2 is a tool for collection, validation, analysis, and presentation of aggregate statistical data, tailored (but not limited) to integrated health information management activities.
-links_1115_a=\ Ebean ORM Persistence Layer
-links_1116_p=\ Open source Java Object Relational Mapping tool.
-links_1117_a=\ Eclipse CDO
-links_1118_p=\ The CDO (Connected Data Objects) Model Repository is a distributed shared model framework for EMF models, and a fast server-based O/R mapping solution.
-links_1119_a=\ Fabric3
-links_1120_p=\ Fabric3 is a project implementing a federated service network based on the Service Component Architecture specification (http\://www.osoa.org).
-links_1121_a=\ FIT4Data
-links_1122_p=\ A testing framework for data management applications built on the Java implementation of FIT.
-links_1123_a=\ Flux
-links_1124_p=\ Java job scheduler, file transfer, workflow, and BPM.
-links_1125_a=\ GeoServer
-links_1126_p=\ GeoServer is a Java-based software server that allows users to view and edit geospatial data. Using open standards set forth by the Open Geospatial Consortium (OGC), GeoServer allows for great flexibility in map creation and data sharing.
-links_1127_a=\ GBIF Integrated Publishing Toolkit (IPT)
-links_1128_p=\ The GBIF IPT is an open source, Java based web application that connects and serves three types of biodiversity data\: taxon primary occurrence data, taxon checklists and general resource metadata.
-links_1129_a=\ GNU Gluco Control
-links_1130_p=\ Helps you to manage your diabetes.
-links_1131_a=\ Golden T Studios
-links_1132_p=\ Fun-to-play games with a simple interface.
-links_1133_a=\ GridGain
-links_1134_p=\ GridGain is an easy-to-use cloud application platform that enables development of highly scalable distributed Java and Scala applications that auto-scale on any grid or cloud infrastructure.
-links_1135_a=\ Group Session
-links_1136_p=\ Open source web groupware.
-links_1137_a=\ HA-JDBC
-links_1138_p=\ High-Availability JDBC\: A JDBC proxy that provides light-weight, transparent, fault tolerant clustering capability to any underlying JDBC driver.
-links_1139_a=\ Hibernate
-links_1140_p=\ Relational persistence for idiomatic Java (O-R mapping tool).
-links_1141_a=\ Hibicius
-links_1142_p=\ Online Banking Client for the HBCI protocol.
-links_1143_a=\ ImageMapper
-links_1144_p=\ ImageMapper frees users from having to use file browsers to view their images. They get fast access to images and easy cataloguing of them via a user-friendly interface.
-links_1145_a=\ JAMWiki
-links_1146_p=\ Java-based Wiki engine.
-links_1147_a=\ Jaspa
-links_1148_p=\ Java Spatial. Jaspa potentially brings around 200 spatial functions.
-links_1149_a=\ Java Simon
-links_1150_p=\ Simple Monitoring API.
-links_1151_a=\ JBoss jBPM
-links_1152_p=\ A platform for executable process languages ranging from business process management (BPM) over workflow to service orchestration.
-links_1153_a=\ JBoss Jopr
-links_1154_p=\ An enterprise management solution for JBoss middleware projects and other application technologies.
-links_1155_a=\ JGeocoder
-links_1156_p=\ Free Java geocoder. Geocoding is the process of estimating a latitude and longitude for a given location.
-links_1157_a=\ JGrass
-links_1158_p=\ Java Geographic Resources Analysis Support System. Free, multi platform, open source GIS based on the GIS framework of uDig.
-links_1159_a=\ Jena
-links_1160_p=\ Java framework for building Semantic Web applications.
-links_1161_a=\ JMatter
-links_1162_p=\ Framework for constructing workgroup business applications based on the Naked Objects Architectural Pattern.
-links_1163_a=\ jOOQ (Java Object Oriented Querying)
-links_1164_p=\ jOOQ is a fluent API for typesafe SQL query construction and execution
-links_1165_a=\ Liftweb
-links_1166_p=\ A Scala-based, secure, developer friendly web framework.
-links_1167_a=\ LiquiBase
-links_1168_p=\ A tool to manage database changes and refactorings.
-links_1169_a=\ Luntbuild
-links_1170_p=\ Build automation and management tool.
-links_1171_a=\ localdb
-links_1172_p=\ A tool that locates the full file path of the folder containing the database files.
-links_1173_a=\ Magnolia
-links_1174_p=\ Microarray Data Management and Export System for PFGRC (Pathogen Functional Genomics Resource Center) Microarrays.
-links_1175_a=\ MiniConnectionPoolManager
-links_1176_p=\ A lightweight standalone JDBC connection pool manager.
-links_1177_a=\ Mr. Persister
-links_1178_p=\ Simple, small and fast object relational mapping.
-links_1179_a=\ Myna Application Server
-links_1180_p=\ Java web app that provides dynamic web content and Java libraries access from JavaScript.
-links_1181_a=\ MyTunesRss
-links_1182_p=\ MyTunesRSS lets you listen to your music wherever you are.
-links_1183_a=\ NCGC CurveFit
-links_1184_p=\ From\: NIH Chemical Genomics Center, National Institutes of Health, USA. An open source application in the life sciences research field. This application handles chemical structures and biological responses of thousands of compounds, with the potential to handle a million+ compounds. It utilizes an embedded H2 database to enable flexible query/retrieval of all data, including advanced chemical substructure and similarity searching. The application highlights an automated curve fitting and classification algorithm that outperforms commercial packages in the field. Commercial alternatives are typically small desktop programs that handle a few dose response curves at a time. The couple of commercial packages that do handle several thousand curves are very expensive tools (>60k USD) that require manual curation of the analysis by the user, require an Oracle license, and lack advanced query/retrieval as well as the ability to handle chemical structures.
-links_1185_a=\ Nuxeo
-links_1186_p=\ Standards-based, open source platform for building ECM applications.
-links_1187_a=\ nWire
-links_1188_p=\ Eclipse plug-in which expedites Java development. Its main purpose is to help developers find code quickly and easily understand how it relates to the rest of the application, and thus understand the application structure.
-links_1189_a=\ Ontology Works
-links_1190_p=\ This company provides semantic technologies including deductive information repositories (the Ontology Works Knowledge Servers), semantic information fusion and semantic federation of legacy databases, ontology-based domain modeling, and management of the distributed enterprise.
-links_1191_a=\ Ontoprise OntoBroker
-links_1192_p=\ SemanticWeb-Middleware. It supports all W3C Semantic Web recommendations\: OWL, RDF, RDFS, SPARQL, and F-Logic.
-links_1193_a=\ Open Anzo
-links_1194_p=\ Semantic Application Server.
-links_1195_a=\ OpenGroove
-links_1196_p=\ OpenGroove is a groupware program that allows users to synchronize data.
-links_1197_a=\ OpenSocial Development Environment (OSDE)
-links_1198_p=\ Development tool for OpenSocial applications.
-links_1199_a=\ Orion
-links_1200_p=\ J2EE Application Server.
-links_1201_a=\ P5H2
-links_1202_p=\ A library for the Processing programming language and environment.
-links_1203_a=\ Phase-6
-links_1204_p=\ Computer-based learning software.
-links_1205_a=\ Pickle
-links_1206_p=\ Pickle is a Java library containing classes for persistence, concurrency, and logging.
-links_1207_a=\ Piman
-links_1208_p=\ Water treatment projects data management.
-links_1209_a=\ PolePosition
-links_1210_p=\ Open source database benchmark.
-links_1211_a=\ Poormans
-links_1212_p=\ Very basic CMS running as a SWT application and generating static html pages.
-links_1213_a=\ Railo
-links_1214_p=\ Railo is an alternative engine for the Cold Fusion Markup Language that compiles CFML code into Java bytecode and executes it on a servlet engine.
-links_1215_a=\ Razuna
-links_1216_p=\ Open source Digital Asset Management System with integrated Web Content Management.
-links_1217_a=\ RIFE
-links_1218_p=\ A full-stack web application framework with tools and APIs to implement most common web features.
-links_1219_a=\ Sava
-links_1220_p=\ Open-source web-based content management system.
-links_1221_a=\ Scriptella
-links_1222_p=\ ETL (Extract-Transform-Load) and script execution tool.
-links_1223_a=\ Sesar
-links_1224_p=\ Dependency Injection Container with Aspect Oriented Programming.
-links_1225_a=\ SemmleCode
-links_1226_p=\ Eclipse plugin to help you improve software quality.
-links_1227_a=\ SeQuaLite
-links_1228_p=\ A free, light-weight, java data access framework.
-links_1229_a=\ ShapeLogic
-links_1230_p=\ Toolkit for declarative programming, image processing and computer vision.
-links_1231_a=\ Shellbook
-links_1232_p=\ Desktop publishing application.
-links_1233_a=\ Signsoft intelliBO
-links_1234_p=\ Persistence middleware supporting the JDO specification.
-links_1235_a=\ SimpleORM
-links_1236_p=\ Simple Java Object Relational Mapping.
-links_1237_a=\ SymmetricDS
-links_1238_p=\ A web-enabled, database independent, data synchronization/replication software.
-links_1239_a=\ SmartFoxServer
-links_1240_p=\ Platform for developing multiuser applications and games with Macromedia Flash.
-links_1241_a=\ Social Bookmarks Friend Finder
-links_1242_p=\ A GUI application that allows you to find users with similar bookmarks to the user specified (for delicious.com).
-links_1243_a=\ sormula
-links_1244_p=\ Simple object relational mapping.
-links_1245_a=\ Springfuse
-links_1246_p=\ Code generation For Spring, Spring MVC & Hibernate.
-links_1247_a=\ SQLOrm
-links_1248_p=\ Java Object Relation Mapping.
-links_1249_a=\ StelsCSV and StelsXML
-links_1250_p=\ StelsCSV is a CSV JDBC type 4 driver that makes it possible to perform SQL queries and other JDBC operations on text files. StelsXML is an XML JDBC type 4 driver that does the same for XML files. Both use H2 as the SQL engine.
-links_1251_a=\ StorYBook
-links_1252_p=\ A summary-based tool for novelists and script writers. It helps to keep an overview of the various threads of a story.
-links_1253_a=\ StreamCruncher
-links_1254_p=\ Event (stream) processing kernel.
-links_1255_a=\ SUSE Manager, part of Linux Enterprise Server 11
-links_1256_p=\ The SUSE Manager eases the burden of compliance with regulatory requirements and corporate policies.
-links_1257_a=\ Tune Backup
-links_1258_p=\ Easy-to-use backup solution for your iTunes library.
-links_1259_a=\ weblica
-links_1260_p=\ Desktop CMS.
-links_1261_a=\ Web of Web
-links_1262_p=\ Collaborative and realtime interactive media platform for the web.
-links_1263_a=\ Werkzeugkasten
-links_1264_p=\ Minimum Java Toolset.
-links_1265_a=\ VPDA
-links_1266_p=\ View Providers Driven Applications is a Java based application framework for building applications composed of server components (view providers).
-links_1267_a=\ Volunteer database
-links_1268_p=\ A database front end to register volunteers, partnerships, and donations for a non-profit organization.
-mainWeb_1000_h1=H2 Database Engine
-mainWeb_1001_p=\ Welcome to H2, the Java SQL database. The main features of H2 are\:
-mainWeb_1002_li=Very fast, open source, JDBC API
-mainWeb_1003_li=Embedded and server modes; in-memory databases
-mainWeb_1004_li=Browser based Console application
-mainWeb_1005_li=Small footprint\: around 1.5 MB jar file size
-mainWeb_1006_h2=Download
-mainWeb_1007_td=\ Version 1.4.187 (2015-04-10), Beta
-mainWeb_1008_a=Windows Installer (5 MB)
-mainWeb_1009_a=All Platforms (zip, 8 MB)
-mainWeb_1010_a=All Downloads
-mainWeb_1011_td=
-mainWeb_1012_h2=Support
-mainWeb_1013_a=Stack Overflow (tag H2)
-mainWeb_1014_a=Google Group English
-mainWeb_1015_p=, Japanese
-mainWeb_1016_p=\ For non-technical issues, use\:
-mainWeb_1017_h2=Features
-mainWeb_1018_th=H2
-mainWeb_1019_a=Derby
-mainWeb_1020_a=HSQLDB
-mainWeb_1021_a=MySQL
-mainWeb_1022_a=PostgreSQL
-mainWeb_1023_td=Pure Java
-mainWeb_1024_td=Yes
-mainWeb_1025_td=Yes
-mainWeb_1026_td=Yes
-mainWeb_1027_td=No
-mainWeb_1028_td=No
-mainWeb_1029_td=Memory Mode
-mainWeb_1030_td=Yes
-mainWeb_1031_td=Yes
-mainWeb_1032_td=Yes
-mainWeb_1033_td=No
-mainWeb_1034_td=No
-mainWeb_1035_td=Encrypted Database
-mainWeb_1036_td=Yes
-mainWeb_1037_td=Yes
-mainWeb_1038_td=Yes
-mainWeb_1039_td=No
-mainWeb_1040_td=No
-mainWeb_1041_td=ODBC Driver
-mainWeb_1042_td=Yes
-mainWeb_1043_td=No
-mainWeb_1044_td=No
-mainWeb_1045_td=Yes
-mainWeb_1046_td=Yes
-mainWeb_1047_td=Fulltext Search
-mainWeb_1048_td=Yes
-mainWeb_1049_td=No
-mainWeb_1050_td=No
-mainWeb_1051_td=Yes
-mainWeb_1052_td=Yes
-mainWeb_1053_td=Multi Version Concurrency
-mainWeb_1054_td=Yes
-mainWeb_1055_td=No
-mainWeb_1056_td=Yes
-mainWeb_1057_td=Yes
-mainWeb_1058_td=Yes
-mainWeb_1059_td=Footprint (jar/dll size)
-mainWeb_1060_td=~1 MB
-mainWeb_1061_td=~2 MB
-mainWeb_1062_td=~1 MB
-mainWeb_1063_td=~4 MB
-mainWeb_1064_td=~6 MB
-mainWeb_1065_p=\ See also the detailed comparison.
-mainWeb_1066_h2=News
-mainWeb_1067_b=Newsfeeds\:
-mainWeb_1068_a=Full text (Atom)
-mainWeb_1069_p=\ or Header only (RSS).
-mainWeb_1070_b=Email Newsletter\:
-mainWeb_1071_p=\ Subscribe to H2 Database News (Google account required) to get informed about new releases. Your email address is only used in this context.
-mainWeb_1072_td=
-mainWeb_1073_h2=Contribute
-mainWeb_1074_p=\ You can contribute to the development of H2 by sending feedback and bug reports, or translate the H2 Console application (for details, start the H2 Console and select Options / Translate). To donate money, click on the PayPal button below. You will be listed as a supporter\:
-main_1000_h1=H2 Database Engine
-main_1001_p=\ Welcome to H2, the free Java SQL database engine.
-main_1002_a=Quickstart
-main_1003_p=\ Get a fast overview.
-main_1004_a=Tutorial
-main_1005_p=\ Go through the samples.
-main_1006_a=Features
-main_1007_p=\ See what this database can do and how to use these features.
-mvstore_1000_h1=MVStore
-mvstore_1001_a=\ Overview
-mvstore_1002_a=\ Example Code
-mvstore_1003_a=\ Store Builder
-mvstore_1004_a=\ R-Tree
-mvstore_1005_a=\ Features
-mvstore_1006_a=- Maps
-mvstore_1007_a=- Versions
-mvstore_1008_a=- Transactions
-mvstore_1009_a=- In-Memory Performance and Usage
-mvstore_1010_a=- Pluggable Data Types
-mvstore_1011_a=- BLOB Support
-mvstore_1012_a=- R-Tree and Pluggable Map Implementations
-mvstore_1013_a=- Concurrent Operations and Caching
-mvstore_1014_a=- Log Structured Storage
-mvstore_1015_a=- Off-Heap and Pluggable Storage
-mvstore_1016_a=- File System Abstraction, File Locking and Online Backup
-mvstore_1017_a=- Encrypted Files
-mvstore_1018_a=- Tools
-mvstore_1019_a=- Exception Handling
-mvstore_1020_a=- Storage Engine for H2
-mvstore_1021_a=\ File Format
-mvstore_1022_a=\ Similar Projects and Differences to Other Storage Engines
-mvstore_1023_a=\ Current State
-mvstore_1024_a=\ Requirements
-mvstore_1025_h2=Overview
-mvstore_1026_p=\ The MVStore is a persistent, log structured key-value store. It is planned to be the next storage subsystem of H2, but it can also be used directly within an application, without using JDBC or SQL.
-mvstore_1027_li=MVStore stands for "multi-version store".
-mvstore_1028_li=Each store contains a number of maps that can be accessed using the java.util.Map
interface.
-mvstore_1029_li=Both file-based persistence and in-memory operation are supported.
-mvstore_1030_li=It is intended to be fast, simple to use, and small.
-mvstore_1031_li=Concurrent read and write operations are supported.
-mvstore_1032_li=Transactions are supported (including concurrent transactions and 2-phase commit).
-mvstore_1033_li=The tool is very modular. It supports pluggable data types and serialization, pluggable storage (to a file, to off-heap memory), pluggable map implementations (B-tree, R-tree, concurrent B-tree currently), BLOB storage, and a file system abstraction to support encrypted files and zip files.
-mvstore_1034_h2=Example Code
-mvstore_1035_p=\ The following sample code shows how to use the tool\:
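The code sample itself is not carried in this translation file. As a hedged sketch, basic usage along these lines (MVStore and MVMap from the org.h2.mvstore package) is what the sentence above refers to:

```java
// minimal sketch, assuming the org.h2.mvstore API of the 1.4.x line
// open the store (a null fileName would open an in-memory store)
MVStore s = MVStore.open(fileName);
// create/get the map named "data"
MVMap<Integer, String> map = s.openMap("data");
// add and read some data
map.put(1, "Hello World");
System.out.println(map.get(1));
// close the store; this persists pending changes
s.close();
```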
-mvstore_1036_h2=Store Builder
-mvstore_1037_p=\ The MVStore.Builder
provides a fluent interface to build a store if configuration options are needed. Example usage\:
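As a hedged sketch (the file name and encryption key here are purely illustrative), such a builder chain might look like this, using the option names listed below:

```java
// hedged sketch of the fluent builder
MVStore s = new MVStore.Builder()
        .fileName(fileName)
        .encryptionKey("007".toCharArray())
        .compress()
        .open();
```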
-mvstore_1038_p=\ The list of available options is\:
-mvstore_1039_li=autoCommitBufferSize\: the size of the write buffer.
-mvstore_1040_li=autoCommitDisabled\: to disable auto-commit.
-mvstore_1041_li=backgroundExceptionHandler\: a handler for exceptions that could occur while writing in the background.
-mvstore_1042_li=cacheSize\: the cache size in MB.
-mvstore_1043_li=compress\: compress the data when storing using a fast algorithm (LZF).
-mvstore_1044_li=compressHigh\: compress the data when storing using a slower algorithm (Deflate).
-mvstore_1045_li=encryptionKey\: the key for file encryption.
-mvstore_1046_li=fileName\: the name of the file, for file based stores.
-mvstore_1047_li=fileStore\: the storage implementation to use.
-mvstore_1048_li=pageSplitSize\: the point where pages are split.
-mvstore_1049_li=readOnly\: open the file in read-only mode.
-mvstore_1050_h2=R-Tree
-mvstore_1051_p=\ The MVRTreeMap
is an R-tree implementation that supports fast spatial queries. It can be used as follows\:
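A hedged sketch of such usage (MVRTreeMap and SpatialKey are assumed from the org.h2.mvstore.rtree package):

```java
// create an in-memory store and open an R-tree map
MVStore s = MVStore.open(null);
MVRTreeMap<String> r = s.openMap("data",
        new MVRTreeMap.Builder<String>());
// SpatialKey: a unique id, then min x, max x, min y, max y
r.add(new SpatialKey(0, -3f, -2f, 2f, 3f), "left");
r.add(new SpatialKey(1, 3f, 4f, 4f, 5f), "right");
// iterate over the keys that intersect the given bounding box
Iterator<SpatialKey> it =
        r.findIntersectingKeys(new SpatialKey(0, 0f, 9f, 3f, 6f));
while (it.hasNext()) {
    SpatialKey k = it.next();
    System.out.println(k + ": " + r.get(k));
}
s.close();
```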
-mvstore_1052_p=\ The default number of dimensions is 2. To use a different number of dimensions, call new MVRTreeMap.Builder<String>().dimensions(3)
. The minimum number of dimensions is 1, the maximum is 32.
-mvstore_1053_h2=Features
-mvstore_1054_h3=Maps
-mvstore_1055_p=\ Each store contains a set of named maps. A map is sorted by key, and supports the common lookup operations, including access to the first and last key, iterating over some or all keys, and so on.
-mvstore_1056_p=\ Also supported, and very uncommon for maps, is fast index lookup\: the entries of the map can be efficiently accessed like a random-access list (get the entry at the given index), and the index of a key can be calculated efficiently. That also means getting the median of two keys is very fast, and a range of keys can be counted very quickly. The iterator supports fast skipping. This is possible because internally, each map is organized in the form of a counted B+-tree.
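A hedged sketch of this index lookup (getKey and getKeyIndex are assumed from the MVMap API of the 1.4.x line):

```java
MVStore s = MVStore.open(null); // in-memory store
MVMap<Integer, String> map = s.openMap("data");
for (int i = 0; i < 100; i++) {
    map.put(i * 2, "value-" + i);
}
// get the entry at a given index, like a random-access list
Integer key = map.getKey(10);
// and the index of a given key
long index = map.getKeyIndex(key);
s.close();
```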
-mvstore_1057_p=\ In database terms, a map can be used like a table, where the key of the map is the primary key of the table, and the value is the row. A map can also represent an index, where the key of the map is the key of the index, and the value of the map is the primary key of the table (for non-unique indexes, the key of the map must also contain the primary key).
-mvstore_1058_h3=Versions
-mvstore_1059_p=\ A version is a snapshot of all the data of all maps at a given point in time. Creating a snapshot is fast\: only those pages that are changed after a snapshot are copied. This behavior is also called COW (copy on write). Old versions are readable. Rollback to an old version is supported.
-mvstore_1060_p=\ The following sample code shows how to create a store, open a map, add some data, and access the current and an old version\:
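A hedged sketch of that sample (getCurrentVersion and openVersion are assumed from the 1.4.x org.h2.mvstore API):

```java
MVStore s = MVStore.open(fileName);
MVMap<Integer, String> map = s.openMap("data");
map.put(1, "Hello");
map.put(2, "World");
// remember the current version
long oldVersion = s.getCurrentVersion();
// from now on, the old version is read-only
s.commit();
// more changes, in the new version
map.put(1, "Hi");
map.remove(2);
// access the old version; this can be done concurrently
// with further modifications, and prints "Hello" and "World"
MVMap<Integer, String> oldMap = map.openVersion(oldVersion);
System.out.println(oldMap.get(1));
System.out.println(oldMap.get(2));
// the newest version prints "Hi" and null
System.out.println(map.get(1));
System.out.println(map.get(2));
s.close();
```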
-mvstore_1061_h3=Transactions
-mvstore_1062_p=\ To support multiple concurrent open transactions, a transaction utility is included, the TransactionStore
. The tool supports PostgreSQL style "read committed" transaction isolation with savepoints, two-phase commit, and other features typically available in a database. There is no limit on the size of a transaction (the log is written to disk for large or long running transactions).
-mvstore_1063_p=\ Internally, this utility stores the old versions of changed entries in a separate map, similar to a transaction log, except that entries of a closed transaction are removed, and the log is usually not stored for short transactions. For common use cases, the storage overhead of this utility is very small compared to the overhead of a regular transaction log.
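A rough sketch of using this utility (TransactionStore, Transaction, and TransactionMap are assumed from the org.h2.mvstore.db package; method names are assumptions):

```java
// wrap an existing MVStore "s" in the transaction utility
TransactionStore ts = new TransactionStore(s);
ts.init();
Transaction tx = ts.begin();
TransactionMap<Integer, String> m = tx.openMap("data");
m.put(1, "Hello");
// with "read committed", other transactions see the change
// only after this commit
tx.commit();
```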
-mvstore_1064_h3=In-Memory Performance and Usage
-mvstore_1065_p=\ Performance of in-memory operations is about 50% slower than that of java.util.TreeMap
.
-mvstore_1066_p=\ The memory overhead for large maps is slightly better than for the regular map implementations, but there is a higher overhead per map. For maps with fewer than about 25 entries, the regular map implementations need less memory.
-mvstore_1067_p=\ If no file name is specified, the store operates purely in memory. Except for persisting data, all features are supported in this mode (multi-versioning, index lookup, R-tree and so on). If a file name is specified, all operations occur in memory (with the same performance characteristics) until data is persisted.
-mvstore_1068_p=\ As in all map implementations, keys need to be immutable; that is, changing the key object after an entry has been added is not allowed. If a file name is specified, the value also may not be changed after adding an entry, because it might be serialized (which could happen at any time when autocommit is enabled).
-mvstore_1069_h3=Pluggable Data Types
-mvstore_1070_p=\ Serialization is pluggable. The default serialization currently supports many common data types, and uses Java serialization for other objects. The following classes are currently directly supported\: Boolean, Byte, Short, Character, Integer, Long, Float, Double, BigInteger, BigDecimal, String, UUID, Date
and arrays (both primitive arrays and object arrays). For serialized objects, the size estimate is adjusted using an exponential moving average.
-mvstore_1071_p=\ Parameterized data types are supported (for example one could build a string data type that limits the length).
-mvstore_1072_p=\ The storage engine itself does not have any length limits, so that keys, values, pages, and chunks can be very big (as big as fits in memory). Also, there is no inherent limit to the number of maps and chunks. Due to using a log structured storage, there is no special case handling for large keys or pages.
-mvstore_1073_h3=BLOB Support
-mvstore_1074_p=\ There is a mechanism that stores large binary objects by splitting them into smaller blocks. This makes it possible to store objects that don't fit in memory. Streaming as well as random access reads on such objects are supported. This tool is written on top of the store, using only the map interface.
-mvstore_1075_h3=R-Tree and Pluggable Map Implementations
-mvstore_1076_p=\ The map implementation is pluggable. In addition to the default MVMap
(multi-version map), there is a map that supports concurrent write operations, and a multi-version R-tree map implementation for spatial operations.
-mvstore_1077_h3=Concurrent Operations and Caching
-mvstore_1078_p=\ Concurrent reads and writes are supported. All such read operations can occur in parallel. Concurrent reads from the page cache, as well as concurrent reads from the file system are supported. Write operations first read the relevant pages from disk to memory (this can happen concurrently), and only then modify the data. The in-memory parts of write operations are synchronized. Writing changes to the file can occur concurrently to modifying the data, as writing operates on a snapshot.
-mvstore_1079_p=\ Caching is done on the page level. The page cache is a concurrent LIRS cache, which should be resistant against scan operations.
-mvstore_1080_p=\ For fully scalable concurrent write operations to a map (in-memory and to disk), the map could be split into multiple maps in different stores ('sharding'). The plan is to add such a mechanism later when needed.
-mvstore_1081_h3=Log Structured Storage
-mvstore_1082_p=\ Internally, changes are buffered in memory, and once enough changes have accumulated, they are written in one continuous disk write operation. Compared to traditional database storage engines, this should improve write performance for file systems and storage systems that do not efficiently support small random writes, such as Btrfs, as well as SSDs. (According to a test, write throughput of a common SSD increases with write block size, up to a block size of 2 MB, and then does not increase further.) By default, changes are automatically written once a certain number of pages have been modified, and once every second in a background thread, even if only a little data was changed. Changes can also be written explicitly by calling commit()
.
-mvstore_1083_p=\ When storing, all changed pages are serialized, optionally compressed using the LZF algorithm, and written sequentially to a free area of the file. Each such change set is called a chunk. All parent pages of the changed B-trees are stored in this chunk as well, so that each chunk also contains the root of each changed map (which is the entry point for reading this version of the data). There is no separate index\: all data is stored as a list of pages. Per store, there is one additional map that contains the metadata (the list of maps, where the root page of each map is stored, and the list of chunks).
-mvstore_1084_p=\ There are usually two write operations per chunk\: one to store the chunk data (the pages), and one to update the file header (so it points to the latest chunk). If the chunk is appended at the end of the file, the file header is only written at the end of the chunk. There is no transaction log, no undo log, and there are no in-place updates (however, unused chunks are overwritten by default).
-mvstore_1085_p=\ Old data is kept for at least 45 seconds (configurable), so that there are no explicit sync operations required to guarantee data consistency. An application can also sync explicitly when needed. To reuse disk space, the chunks with the lowest amount of live data are compacted (the live data is stored again in the next chunk). To improve data locality and disk space usage, the plan is to automatically defragment and compact data.
-mvstore_1086_p=\ Compared to traditional storage engines (which use a transaction log, undo log, and main storage area), the log structured storage is simpler, more flexible, and typically needs fewer disk operations per change, as data is only written once instead of two or three times, and because the B-tree pages are always full (they are stored next to each other) and can easily be compressed. However, disk space usage might temporarily be a bit higher than for a regular database, as disk space is not immediately re-used (there are no in-place updates).
-mvstore_1087_h3=Off-Heap and Pluggable Storage
-mvstore_1088_p=\ Storage is pluggable. Unless pure in-memory operation is used, the default storage is to a single file.
-mvstore_1089_p=\ An off-heap storage implementation is available. This storage keeps the data in off-heap memory, that is, outside of the regular garbage collected heap. This makes it possible to use very large in-memory stores without having to increase the JVM heap, which would significantly increase Java garbage collection pauses. Memory is allocated using ByteBuffer.allocateDirect
. One chunk is allocated at a time (each chunk is usually a few MB large), so that allocation cost is low. To use the off-heap storage, call\:
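A hedged sketch of that call (OffHeapStore is assumed from the org.h2.mvstore package):

```java
// keep chunks in off-heap memory instead of a file
OffHeapStore offHeap = new OffHeapStore();
MVStore s = new MVStore.Builder()
        .fileStore(offHeap)
        .open();
```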
-mvstore_1090_h3=File System Abstraction, File Locking and Online Backup
-mvstore_1091_p=\ The file system is pluggable. The same file system abstraction as in H2 is used. The file can be encrypted using an encrypting file system wrapper. Other file system implementations support reading from a compressed zip or jar file. The file system abstraction closely matches the Java 7 file system API.
-mvstore_1092_p=\ Each store may only be opened once within a JVM. When opening a store, the file is locked in exclusive mode, so that the file can only be changed from within one process. Files can be opened in read-only mode, in which case a shared lock is used.
-mvstore_1093_p=\ The persisted data can be backed up at any time, even during write operations (online backup). To do that, automatic disk space reuse needs to be first disabled, so that new data is always appended at the end of the file. Then, the file can be copied. The file handle is available to the application. It is recommended to use the utility class FileChannelInputStream
to do this. For encrypted databases, both the encrypted (raw) file content, as well as the clear text content, can be backed up.
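A heavily hedged sketch of such an online backup (setReuseSpace, getFileStore().getFile(), and the FileChannelInputStream constructor are assumptions based on the H2 code base):

```java
// disable disk space reuse, so new data is only appended
s.setReuseSpace(false);
try (InputStream in = new FileChannelInputStream(
            s.getFileStore().getFile(), false);
        OutputStream out = new FileOutputStream("backup.mv.db")) {
    byte[] buffer = new byte[8 * 1024];
    for (int len; (len = in.read(buffer)) >= 0;) {
        out.write(buffer, 0, len);
    }
} finally {
    // re-enable disk space reuse after the copy
    s.setReuseSpace(true);
}
```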
-mvstore_1094_h3=Encrypted Files
-mvstore_1095_p=\ File encryption ensures the data can only be read with the correct password. Data can be encrypted as follows\:
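As a hedged sketch (the password is illustrative), encryption is enabled through the store builder:

```java
// open a store with file encryption; per the notes below,
// the password char array is cleared by the store after use
char[] password = "007".toCharArray();
MVStore s = new MVStore.Builder()
        .fileName(fileName)
        .encryptionKey(password)
        .open();
```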
-mvstore_1096_p=\ The following algorithms and settings are used\:
-mvstore_1097_li=The password char array is cleared after use, to reduce the risk that the password is stolen even if the attacker has access to the main memory.
-mvstore_1098_li=The password is hashed according to the PBKDF2 standard, using the SHA-256 hash algorithm.
-mvstore_1099_li=The length of the salt is 64 bits, so that an attacker can not use a pre-calculated password hash table (rainbow table). It is generated using a cryptographically secure random number generator.
-mvstore_1100_li=To speed up opening encrypted stores on Android, the number of PBKDF2 iterations is 10. The higher the value, the better the protection against brute-force password cracking attacks, but the slower opening a file becomes.
-mvstore_1101_li=The file itself is encrypted using the standardized disk encryption mode XTS-AES. Only a little more than one AES-128 round per block is needed.
-mvstore_1102_h3=Tools
-mvstore_1103_p=\ There is a tool, the MVStoreTool
, to dump the contents of a file.
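A hedged one-liner (assuming MVStoreTool exposes a one-argument dump entry point):

```java
// dump a human-readable description of the file's contents
MVStoreTool.dump(fileName);
```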
-mvstore_1104_h3=Exception Handling
-mvstore_1105_p=\ This tool does not throw checked exceptions. Instead, unchecked exceptions are thrown if needed. The error message always contains the version of the tool. The following exceptions can occur\:
-mvstore_1106_code=IllegalStateException
-mvstore_1107_li=\ if a map was already closed or an IO exception occurred, for example if the file was locked, is already closed, could not be opened or closed, if reading or writing failed, if the file is corrupt, or if there is an internal error in the tool. For such exceptions, an error code is added so that the application can distinguish between different error cases (see the sketch after this list).
-mvstore_1108_code=IllegalArgumentException
-mvstore_1109_li=\ if a method was called with an illegal argument.
-mvstore_1110_code=UnsupportedOperationException
-mvstore_1111_li=\ if a method was called that is not supported, for example trying to modify a read-only map.
-mvstore_1112_code=ConcurrentModificationException
-mvstore_1113_li=\ if a map is modified concurrently.
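A hedged sketch of distinguishing error cases ("map" as in the earlier examples; DataUtils.getErrorCode is an assumed helper from org.h2.mvstore that extracts the code appended to the message):

```java
try {
    map.put(1, "Hello");
} catch (IllegalStateException e) {
    // assumed helper: extracts the numeric error code
    // appended to the exception message
    int errorCode = DataUtils.getErrorCode(e.getMessage());
    System.err.println("MVStore error " + errorCode + ": " + e.getMessage());
}
```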
-mvstore_1114_h3=Storage Engine for H2
-mvstore_1115_p=\ For H2 version 1.4 and newer, the MVStore is the default storage engine (supporting SQL, JDBC, transactions, MVCC, and so on). For older versions, append ;MV_STORE\=TRUE
to the database URL. Even though it can be used with the default table level locking, by default the MVCC mode is enabled when using the MVStore.
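A hedged sketch of such a URL (the path and credentials are illustrative):

```java
// enable the MVStore on an older H2 version via the database URL
// (for version 1.4 and newer it is already the default)
String url = "jdbc:h2:~/test;MV_STORE=TRUE";
Connection conn = DriverManager.getConnection(url, "sa", "");
```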
-mvstore_1116_h2=File Format
-mvstore_1117_p=\ The data is stored in one file. The file contains two file headers (for safety), and a number of chunks. The file headers are one block each; a block is 4096 bytes. Each chunk is at least one block, but typically 200 blocks or more. Data is stored in the chunks in the form of a log structured storage. There is one chunk for every version.
-mvstore_1118_p=\ Each chunk contains a number of B-tree pages. As an example, the following code\:
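A hedged reconstruction of such code (the key counts are chosen to match the chunk layout described next; the actual page split points depend on the page split size):

```java
MVStore s = MVStore.open(fileName);
MVMap<Integer, String> map = s.openMap("data");
// first version: 400 entries, split into two leaf pages
for (int i = 0; i < 400; i++) {
    map.put(i, "Hello");
}
s.commit();
// second version: modify keys that all live in the first leaf page
for (int i = 0; i < 100; i++) {
    map.put(i, "Hi");
}
s.close();
```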
-mvstore_1119_p=\ will result in the following two chunks (excluding metadata)\:
-mvstore_1120_b=Chunk 1\:
-mvstore_1121_p=\ - Page 1\: (root) node with 2 entries pointing to page 2 and 3
-mvstore_1122_p=\ - Page 2\: leaf with 140 entries (keys 0 - 139)
-mvstore_1123_p=\ - Page 3\: leaf with 260 entries (keys 140 - 399)
-mvstore_1124_b=Chunk 2\:
-mvstore_1125_p=\ - Page 4\: (root) node with 2 entries pointing to page 3 and 5
-mvstore_1126_p=\ - Page 5\: leaf with 140 entries (keys 0 - 139)
-mvstore_1127_p=\ That means each chunk contains the changes of one version\: the new version of the changed pages and the parent pages, recursively, up to the root page. Pages in subsequent chunks refer to pages in earlier chunks.
-mvstore_1128_h3=File Header
-mvstore_1129_p=\ There are two file headers, which normally contain the exact same data. But once in a while, the file headers are updated, and writing could partially fail, which could corrupt a header. That's why there is a second header. Only the file headers are updated in this way (called "in-place update"). The headers contain the following data\:
-mvstore_1130_p=\ The data is stored in the form of a key-value pair. Each value is stored as a hexadecimal number. The entries are\:
-mvstore_1131_li=H\: The entry "H\:2" stands for the H2 database.
-mvstore_1132_li=block\: The block number where one of the newest chunks starts (but not necessarily the newest).
-mvstore_1133_li=blockSize\: The block size of the file; currently always hex 1000, which is decimal 4096, to match the disk sector length of modern hard disks.
-mvstore_1134_li=chunk\: The chunk id, which is normally the same value as the version; however, the chunk id might roll over to 0, while the version doesn't.
-mvstore_1135_li=created\: The number of milliseconds since 1970 when the file was created.
-mvstore_1136_li=format\: The file format number. Currently 1.
-mvstore_1137_li=version\: The version number of the chunk.
-mvstore_1138_li=fletcher\: The Fletcher-32 checksum of the header.
-mvstore_1139_p=\ When opening the file, both headers are read and the checksum is verified. If both headers are valid, the one with the newer version is used. The chunk with the latest version is then detected (see below for details), and the rest of the metadata is read from there. If the chunk id, block and version are not stored in the file header, then the latest chunk lookup starts with the last chunk in the file.
-mvstore_1140_h3=Chunk Format
-mvstore_1141_p=\ There is one chunk per version. Each chunk consists of a header, the pages that were modified in this version, and a footer. The pages contain the actual data of the maps. The pages inside a chunk are stored right after the header, next to each other (unaligned). The size of a chunk is a multiple of the block size. The footer is stored in the last 128 bytes of the chunk.
-mvstore_1142_p=\ The footer makes it possible to verify that the chunk is completely written (a chunk is written as one write operation), and to find the start position of the very last chunk in the file. The chunk header and footer contain the following data\:
-mvstore_1143_p=\ The fields of the chunk header and footer are\:
-mvstore_1144_li=chunk\: The chunk id.
-mvstore_1145_li=block\: The first block of the chunk (multiply by the block size to get the position in the file).
-mvstore_1146_li=len\: The size of the chunk in number of blocks.
-mvstore_1147_li=map\: The id of the newest map; incremented when a new map is created.
-mvstore_1148_li=max\: The sum of all maximum page sizes (see page format).
-mvstore_1149_li=next\: The predicted start block of the next chunk.
-mvstore_1150_li=pages\: The number of pages in the chunk.
-mvstore_1151_li=root\: The position of the metadata root page (see page format).
-mvstore_1152_li=time\: The time the chunk was written, in milliseconds after the file was created.
-mvstore_1153_li=version\: The version this chunk represents.
-mvstore_1154_li=fletcher\: The checksum of the footer.
-mvstore_1155_p=\ Chunks are never updated in-place. Each chunk contains the pages that were changed in that version (there is one chunk per version, see above), plus all the parent nodes of those pages, recursively, up to the root page. If an entry in a map is changed, removed, or added, then the respective page is copied, modified, and stored in the next chunk, and the number of live pages in the old chunk is decremented. This mechanism is called copy-on-write, and is similar to how the Btrfs file system works. Chunks without live pages are marked as free, so the space can be re-used by more recent chunks. Because not all chunks are of the same size, there can be a number of free blocks in front of a chunk for some time (until a small chunk is written or the chunks are compacted). There is a delay of 45 seconds (by default) before a free chunk is overwritten, to ensure new versions are persisted first.
-mvstore_1156_p=\ How the newest chunk is located when opening a store\: The file header contains the position of a recent chunk, but not always the newest one. This is to reduce the number of file header updates. After opening the file, the file headers and the chunk footer of the very last chunk (at the end of the file) are read. From those candidates, the header of the most recent chunk is read. If it contains a "next" pointer (see above), that chunk's header and footer are read as well. If it turns out to be a newer valid chunk, this is repeated until the newest chunk is found. Before writing a chunk, the position of the next chunk is predicted based on the assumption that the next chunk will be of the same size as the current one. When the next chunk is written and the previous prediction turned out to be incorrect, the file header is updated as well. In any case, the file header is updated if the "next" chain gets longer than 20 hops.
-mvstore_1157_h3=Page Format
-mvstore_1158_p=\ Each map is a B-tree, and the map data is stored in (B-tree-) pages. There are leaf pages that contain the key-value pairs of the map, and internal nodes, which only contain keys and pointers to leaf pages. The root of a tree is either a leaf or an internal node. Unlike file header and chunk header and footer, the page data is not human readable. Instead, it is stored as byte arrays, with long (8 bytes), int (4 bytes), short (2 bytes), and variable size int and long (1 to 5 / 10 bytes). The page format is\:
-mvstore_1159_li=length (int)\: Length of the page in bytes.
-mvstore_1160_li=checksum (short)\: Checksum (chunk id xor offset within the chunk xor page length).
-mvstore_1161_li=mapId (variable size int)\: The id of the map this page belongs to.
-mvstore_1162_li=len (variable size int)\: The number of keys in the page.
-mvstore_1163_li=type (byte)\: The page type (0 for leaf page, 1 for internal node; plus 2 if the keys and values are compressed with the LZF algorithm, or plus 6 if the keys and values are compressed with the Deflate algorithm).
-mvstore_1164_li=children (array of long; internal nodes only)\: The position of the children.
-mvstore_1165_li=childCounts (array of variable size long; internal nodes only)\: The total number of entries for the given child page.
-mvstore_1166_li=keys (byte array)\: All keys, stored depending on the data type.
-mvstore_1167_li=values (byte array; leaf pages only)\: All values, stored depending on the data type.
-mvstore_1168_p=\ Even though this is not required by the file format, pages are stored in the following order\: For each map, the root page is stored first, then the internal nodes (if there are any), and then the leaf pages. This should speed up reads for media where sequential reads are faster than random access reads. The metadata map is stored at the end of a chunk.
-mvstore_1169_p=\ Pointers to pages are stored as a long, using a special format\: 26 bits for the chunk id, 32 bits for the offset within the chunk, 5 bits for the length code, 1 bit for the page type (leaf or internal node). The page type is encoded so that when clearing or removing a map, leaf pages don't have to be read (internal nodes do have to be read in order to know where all the pages are; but in a typical B-tree the vast majority of the pages are leaf pages). The absolute file position is not included so that chunks can be moved within the file without having to change page pointers; only the chunk metadata needs to be changed. The length code is a number from 0 to 31, where 0 means the maximum length of the page is 32 bytes, 1 means 48 bytes, 2\: 64, 3\: 96, 4\: 128, 5\: 192, and so on up to 31, which means longer than 1 MB. That way, reading a page only requires one read operation (except for very large pages). The sum of the maximum length of all pages is stored in the chunk metadata (field "max"), and when a page is marked as removed, the live maximum length is adjusted. This makes it possible to estimate the amount of free space within a block, in addition to the number of free pages.
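A hedged illustration of this pointer layout (the shift amounts follow from the bit widths given above; the method name is hypothetical):

```java
// hypothetical helper illustrating the 64-bit page pointer layout:
// 26 bits chunk id | 32 bits offset | 5 bits length code | 1 bit type
static long pagePosition(int chunkId, int offset, int lengthCode, int type) {
    return ((long) chunkId << 38)
            | ((offset & 0xffffffffL) << 6)
            | ((long) (lengthCode & 31) << 1)
            | (type & 1);
}
```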
-mvstore_1170_p=\ The total number of entries in child pages is kept to allow efficient range counting, lookup by index, and skip operations. The pages form a counted B-tree.
-mvstore_1171_p=\ Data compression\: The data after the page type are optionally compressed using the LZF algorithm.
-mvstore_1172_h3=Metadata Map
-mvstore_1173_p=\ In addition to the user maps, there is one metadata map that contains names and positions of user maps, and chunk metadata. The very last page of a chunk contains the root page of that metadata map. The exact position of this root page is stored in the chunk header. This page (directly or indirectly) points to the root pages of all other maps. The metadata map of a store with a map named "data", and one chunk, contains the following entries\:
-mvstore_1174_li=chunk.1\: The metadata of chunk 1. This is the same data as the chunk header, plus the number of live pages, and the maximum live length.
-mvstore_1175_li=map.1\: The metadata of map 1. The entries are\: name, createVersion, and type.
-mvstore_1176_li=name.data\: The map id of the map named "data". The value is "1".
-mvstore_1177_li=root.1\: The root position of map 1.
-mvstore_1178_li=setting.storeVersion\: The store version (a user defined value).
-mvstore_1179_h2=Similar Projects and Differences to Other Storage Engines
-mvstore_1180_p=\ Unlike similar storage engines like LevelDB and Kyoto Cabinet, the MVStore is written in Java and can easily be embedded in Java and Android applications.
-mvstore_1181_p=\ The MVStore is somewhat similar to the Berkeley DB Java Edition because it is also written in Java, and is also a log structured storage, but the H2 license is more liberal.
-mvstore_1182_p=\ Like SQLite 3, the MVStore keeps all data in one file. Unlike SQLite 3, the MVStore uses a log structured storage. The plan is to make the MVStore both easier to use and faster than SQLite 3. In a recent (very simple) test, the MVStore was about twice as fast as SQLite 3 on Android.
-mvstore_1183_p=\ The API of the MVStore is similar to MapDB (previously known as JDBM) from Jan Kotek, and some code is shared between MVStore and MapDB. However, unlike MapDB, the MVStore uses a log structured storage. The MVStore does not have a record size limit.
-mvstore_1184_h2=Current State
-mvstore_1185_p=\ The code is still experimental at this stage. The API as well as the behavior may partially change. Features may be added and removed (even though the main features will stay).
-mvstore_1186_h2=Requirements
-mvstore_1187_p=\ The MVStore is included in the latest H2 jar file.
-mvstore_1188_p=\ There are no special requirements to use it. The MVStore should run on any JVM as well as on Android.
-mvstore_1189_p=\ To build just the MVStore (without the database engine), run\:
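The concrete command is not carried in this file; judging from the build script used elsewhere in this repository, it is presumably something like the jarMVStore target:

```
./build.sh jarMVStore
```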
-mvstore_1190_p=\ This will create the file bin/h2mvstore-1.4.187.jar
(about 200 KB).
-performance_1000_h1=Performance
-performance_1001_a=\ Performance Comparison
-performance_1002_a=\ PolePosition Benchmark
-performance_1003_a=\ Database Performance Tuning
-performance_1004_a=\ Using the Built-In Profiler
-performance_1005_a=\ Application Profiling
-performance_1006_a=\ Database Profiling
-performance_1007_a=\ Statement Execution Plans
-performance_1008_a=\ How Data is Stored and How Indexes Work
-performance_1009_a=\ Fast Database Import
-performance_1010_h2=Performance Comparison
-performance_1011_p=\ In many cases H2 is faster than other (open source and commercial) database engines. Please note this is mostly a single connection benchmark run on one computer, with many very simple operations running against the database. This benchmark does not include very complex queries. The embedded mode of H2 is faster than the client-server mode because the per-statement overhead is greatly reduced.
-performance_1012_h3=Embedded
-performance_1013_th=Test Case
-performance_1014_th=Unit
-performance_1015_th=H2
-performance_1016_th=HSQLDB
-performance_1017_th=Derby
-performance_1018_td=Simple\: Init
-performance_1019_td=ms
-performance_1020_td=1019
-performance_1021_td=1907
-performance_1022_td=8280
-performance_1023_td=Simple\: Query (random)
-performance_1024_td=ms
-performance_1025_td=1304
-performance_1026_td=873
-performance_1027_td=1912
-performance_1028_td=Simple\: Query (sequential)
-performance_1029_td=ms
-performance_1030_td=835
-performance_1031_td=1839
-performance_1032_td=5415
-performance_1033_td=Simple\: Update (sequential)
-performance_1034_td=ms
-performance_1035_td=961
-performance_1036_td=2333
-performance_1037_td=21759
-performance_1038_td=Simple\: Delete (sequential)
-performance_1039_td=ms
-performance_1040_td=950
-performance_1041_td=1922
-performance_1042_td=32016
-performance_1043_td=Simple\: Memory Usage
-performance_1044_td=MB
-performance_1045_td=21
-performance_1046_td=10
-performance_1047_td=8
-performance_1048_td=BenchA\: Init
-performance_1049_td=ms
-performance_1050_td=919
-performance_1051_td=2133
-performance_1052_td=7528
-performance_1053_td=BenchA\: Transactions
-performance_1054_td=ms
-performance_1055_td=1219
-performance_1056_td=2297
-performance_1057_td=8541
-performance_1058_td=BenchA\: Memory Usage
-performance_1059_td=MB
-performance_1060_td=12
-performance_1061_td=15
-performance_1062_td=7
-performance_1063_td=BenchB\: Init
-performance_1064_td=ms
-performance_1065_td=905
-performance_1066_td=1993
-performance_1067_td=8049
-performance_1068_td=BenchB\: Transactions
-performance_1069_td=ms
-performance_1070_td=1091
-performance_1071_td=583
-performance_1072_td=1165
-performance_1073_td=BenchB\: Memory Usage
-performance_1074_td=MB
-performance_1075_td=17
-performance_1076_td=11
-performance_1077_td=8
-performance_1078_td=BenchC\: Init
-performance_1079_td=ms
-performance_1080_td=2491
-performance_1081_td=4003
-performance_1082_td=8064
-performance_1083_td=BenchC\: Transactions
-performance_1084_td=ms
-performance_1085_td=1979
-performance_1086_td=803
-performance_1087_td=2840
-performance_1088_td=BenchC\: Memory Usage
-performance_1089_td=MB
-performance_1090_td=19
-performance_1091_td=22
-performance_1092_td=9
-performance_1093_td=Executed statements
-performance_1094_td=\#
-performance_1095_td=1930995
-performance_1096_td=1930995
-performance_1097_td=1930995
-performance_1098_td=Total time
-performance_1099_td=ms
-performance_1100_td=13673
-performance_1101_td=20686
-performance_1102_td=105569
-performance_1103_td=Statements per second
-performance_1104_td=\#
-performance_1105_td=141226
-performance_1106_td=93347
-performance_1107_td=18291
-performance_1108_h3=Client-Server
-performance_1109_th=Test Case
-performance_1110_th=Unit
-performance_1111_th=H2 (Server)
-performance_1112_th=HSQLDB
-performance_1113_th=Derby
-performance_1114_th=PostgreSQL
-performance_1115_th=MySQL
-performance_1116_td=Simple\: Init
-performance_1117_td=ms
-performance_1118_td=16338
-performance_1119_td=17198
-performance_1120_td=27860
-performance_1121_td=30156
-performance_1122_td=29409
-performance_1123_td=Simple\: Query (random)
-performance_1124_td=ms
-performance_1125_td=3399
-performance_1126_td=2582
-performance_1127_td=6190
-performance_1128_td=3315
-performance_1129_td=3342
-performance_1130_td=Simple\: Query (sequential)
-performance_1131_td=ms
-performance_1132_td=21841
-performance_1133_td=18699
-performance_1134_td=42347
-performance_1135_td=30774
-performance_1136_td=32611
-performance_1137_td=Simple\: Update (sequential)
-performance_1138_td=ms
-performance_1139_td=6913
-performance_1140_td=7745
-performance_1141_td=28576
-performance_1142_td=32698
-performance_1143_td=11350
-performance_1144_td=Simple\: Delete (sequential)
-performance_1145_td=ms
-performance_1146_td=8051
-performance_1147_td=9751
-performance_1148_td=42202
-performance_1149_td=44480
-performance_1150_td=16555
-performance_1151_td=Simple\: Memory Usage
-performance_1152_td=MB
-performance_1153_td=22
-performance_1154_td=11
-performance_1155_td=9
-performance_1156_td=0
-performance_1157_td=1
-performance_1158_td=BenchA\: Init
-performance_1159_td=ms
-performance_1160_td=12996
-performance_1161_td=14720
-performance_1162_td=24722
-performance_1163_td=26375
-performance_1164_td=26060
-performance_1165_td=BenchA\: Transactions
-performance_1166_td=ms
-performance_1167_td=10134
-performance_1168_td=10250
-performance_1169_td=18452
-performance_1170_td=21453
-performance_1171_td=15877
-performance_1172_td=BenchA\: Memory Usage
-performance_1173_td=MB
-performance_1174_td=13
-performance_1175_td=15
-performance_1176_td=9
-performance_1177_td=0
-performance_1178_td=1
-performance_1179_td=BenchB\: Init
-performance_1180_td=ms
-performance_1181_td=15264
-performance_1182_td=16889
-performance_1183_td=28546
-performance_1184_td=31610
-performance_1185_td=29747
-performance_1186_td=BenchB\: Transactions
-performance_1187_td=ms
-performance_1188_td=3017
-performance_1189_td=3376
-performance_1190_td=1842
-performance_1191_td=2771
-performance_1192_td=1433
-performance_1193_td=BenchB\: Memory Usage
-performance_1194_td=MB
-performance_1195_td=17
-performance_1196_td=12
-performance_1197_td=11
-performance_1198_td=1
-performance_1199_td=1
-performance_1200_td=BenchC\: Init
-performance_1201_td=ms
-performance_1202_td=14020
-performance_1203_td=10407
-performance_1204_td=17655
-performance_1205_td=19520
-performance_1206_td=17532
-performance_1207_td=BenchC\: Transactions
-performance_1208_td=ms
-performance_1209_td=5076
-performance_1210_td=3160
-performance_1211_td=6411
-performance_1212_td=6063
-performance_1213_td=4530
-performance_1214_td=BenchC\: Memory Usage
-performance_1215_td=MB
-performance_1216_td=19
-performance_1217_td=21
-performance_1218_td=11
-performance_1219_td=1
-performance_1220_td=1
-performance_1221_td=Executed statements
-performance_1222_td=\#
-performance_1223_td=1930995
-performance_1224_td=1930995
-performance_1225_td=1930995
-performance_1226_td=1930995
-performance_1227_td=1930995
-performance_1228_td=Total time
-performance_1229_td=ms
-performance_1230_td=117049
-performance_1231_td=114777
-performance_1232_td=244803
-performance_1233_td=249215
-performance_1234_td=188446
-performance_1235_td=Statements per second
-performance_1236_td=\#
-performance_1237_td=16497
-performance_1238_td=16823
-performance_1239_td=7887
-performance_1240_td=7748
-performance_1241_td=10246
-performance_1242_h3=Benchmark Results and Comments
-performance_1243_h4=H2
-performance_1244_p=\ Version 1.4.177 (2014-04-12) was used for the test. For most operations, the performance of H2 is about the same as for HSQLDB. One situation where H2 is slow is large result sets, because they are buffered to disk if more than a certain number of records are returned. The advantage of buffering is\: there is no limit on the result set size.
-performance_1245_h4=HSQLDB
-performance_1246_p=\ Version 2.3.2 was used for the test. Cached tables are used in this test (hsqldb.default_table_type\=cached
), and the write delay is 1 second (SET WRITE_DELAY 1
).
-performance_1247_h4=Derby
-performance_1248_p=\ Version 10.10.1.1 was used for the test. Derby is clearly the slowest embedded database in this test. This seems to be a structural problem, because all operations are really slow. It will be hard for the developers of Derby to improve the performance to a reasonable level. A few problems have been identified\: leaving autocommit on is a problem for Derby. If it is switched off during the whole test, the results are about 20% better for Derby. Derby calls FileChannel.force(false)
, but only twice per log file (not on each commit). Disabling this call improves performance for Derby by about 2%. Unlike H2, Derby does not call FileDescriptor.sync()
on each checkpoint. Derby supports a testing mode (system property derby.system.durability\=test
) where durability is disabled. According to the documentation, this setting should be used for testing only, as the database may not recover after a crash. Enabling this setting improves performance by a factor of 2.6 (embedded mode) or 1.4 (server mode). Even if enabled, Derby is still less than half as fast as H2 in default mode.
-performance_1249_h4=PostgreSQL
-performance_1250_p=\ Version 9.1.5 was used for the test. The following options were changed in postgresql.conf\: fsync \= off, commit_delay \= 1000
. PostgreSQL is run in server mode. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
-performance_1251_h4=MySQL
-performance_1252_p=\ Version 5.1.65-log was used for the test. MySQL was run with the InnoDB backend. The setting innodb_flush_log_at_trx_commit
(found in the my.ini / my.cnf
file) was set to 0. Otherwise (and by default), MySQL is slow (around 140 statements per second in this test) because it tries to flush the data to disk for each commit. For small transactions (when autocommit is on) this is really slow. But many use cases use small or relatively small transactions. Unfortunately, this setting is not listed in the configuration wizard, and it is always overwritten when using the wizard. You need to change this setting manually in the file my.ini / my.cnf
, and then restart the service. The memory usage number is incorrect, because only the memory usage of the JDBC driver is measured.
-performance_1253_h4=Firebird
-performance_1254_p=\ Firebird 1.5 (default installation) was tested, but the results are not published currently. It is possible to run the performance test with the Firebird database, and any information on how to configure Firebird for higher performance is welcome.
-performance_1255_h4=Why Oracle / MS SQL Server / DB2 are Not Listed
-performance_1256_p=\ The license of these databases does not allow publishing benchmark results. This doesn't mean that they are fast. They are in fact quite slow, and need a lot of memory. But you will need to test this yourself. SQLite was not tested because the JDBC driver doesn't support transactions.
-performance_1257_h3=About this Benchmark
-performance_1258_h4=How to Run
-performance_1259_p=\ This test was executed as follows\:
-performance_1260_h4=Separate Process per Database
-performance_1261_p=\ For each database, a new process is started, to ensure the previous test does not impact the current test.
-performance_1262_h4=Number of Connections
-performance_1263_p=\ This is mostly a single-connection benchmark. BenchB uses multiple connections; the other tests use one connection.
-performance_1264_h4=Real-World Tests
-performance_1265_p=\ Good benchmarks emulate real-world use cases. This benchmark includes 4 test cases\: BenchSimple uses one table and many small updates / deletes. BenchA is similar to the TPC-A test, but single connection / single threaded (see also\: www.tpc.org). BenchB is similar to the TPC-B test, using multiple connections (one thread per connection). BenchC is similar to the TPC-C test, but single connection / single threaded.
-performance_1266_h4=Comparing Embedded with Server Databases
-performance_1267_p=\ This is mainly a benchmark for embedded databases (where the application runs in the same virtual machine as the database engine). However, MySQL and PostgreSQL are not Java databases and cannot be embedded into a Java application. For the Java databases, both embedded and server modes are tested.
-performance_1268_h4=Test Platform
-performance_1269_p=\ This test is run on Mac OS X 10.6. No virus scanner was used, and disk indexing was disabled. The JVM used is Sun JDK 1.6.
-performance_1270_h4=Multiple Runs
-performance_1271_p=\ When a Java benchmark is run first, the code is not fully compiled and therefore runs slower than when running multiple times. A benchmark should always run the same test multiple times and ignore the first run(s). This benchmark runs three times, but only the last run is measured.
-performance_1272_h4=Memory Usage
-performance_1273_p=\ It is not enough to measure the time taken; the memory usage is important as well. Performance can be improved by using a bigger cache, but the amount of memory is limited. HSQLDB tables are kept fully in memory by default; this benchmark uses 'disk based' tables for all databases. Unfortunately, it is not so easy to calculate the memory usage of PostgreSQL and MySQL, because they run in a different process than the test. This benchmark currently does not print memory usage of those databases.
-performance_1274_h4=Delayed Operations
-performance_1275_p=\ Some databases delay some operations (for example flushing the buffers) until after the benchmark is run. This benchmark waits between each database tested, and each database runs in a different process (sequentially).
-performance_1276_h4=Transaction Commit / Durability
-performance_1277_p=\ Durability means a transaction committed to the database will not be lost. Some databases (for example MySQL) try to enforce this by default by calling fsync()
to flush the buffers, but most hard drives don't actually flush all data. Calling the method slows down transaction commit a lot, but doesn't always make data durable. When comparing the results, it is important to keep this effect in mind. Many databases suggest 'batching' operations when possible. This benchmark switches off autocommit when loading the data, and calls commit after each 1000 inserts. However, many applications need 'short' transactions at runtime (a commit after each update). This benchmark commits after each update / delete in the simple benchmark, and after each business transaction in the other benchmarks. For databases that support delayed commits, a delay of one second is used.
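As a sketch of the loading strategy just described (the table name and row count are illustrative, not taken from the benchmark code), the JDBC pattern looks like this:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class BatchLoad {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:h2:mem:bench");
        conn.createStatement().execute(
                "CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
        // Load without committing each row individually
        conn.setAutoCommit(false);
        PreparedStatement prep = conn.prepareStatement(
                "INSERT INTO TEST VALUES(?, ?)");
        for (int i = 0; i < 100000; i++) {
            prep.setInt(1, i);
            prep.setString(2, "Row " + i);
            prep.execute();
            if (i % 1000 == 0) {
                conn.commit(); // commit after each 1000 inserts
            }
        }
        conn.commit();
        conn.close();
    }
}
```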
-performance_1278_h4=Using Prepared Statements
-performance_1279_p=\ Wherever possible, the test cases use prepared statements.
-performance_1280_h4=Currently Not Tested\: Startup Time
-performance_1281_p=\ The startup time of a database engine is important as well for embedded use. This time is not measured currently. Also, not tested is the time used to create a database and open an existing database. Here, one (wrapper) connection is opened at the start, and for each step a new connection is opened and then closed.
-performance_1282_h2=PolePosition Benchmark
-performance_1283_p=\ PolePosition is an open source benchmark. The algorithms are all quite simple. It was developed / sponsored by db4o. This test has not been run for some time, so please be aware that the results below are for older database versions (H2 version 1.1, HSQLDB 1.8, Java 1.4).
-performance_1284_th=Test Case
-performance_1285_th=Unit
-performance_1286_th=H2
-performance_1287_th=HSQLDB
-performance_1288_th=MySQL
-performance_1289_td=Melbourne write
-performance_1290_td=ms
-performance_1291_td=369
-performance_1292_td=249
-performance_1293_td=2022
-performance_1294_td=Melbourne read
-performance_1295_td=ms
-performance_1296_td=47
-performance_1297_td=49
-performance_1298_td=93
-performance_1299_td=Melbourne read_hot
-performance_1300_td=ms
-performance_1301_td=24
-performance_1302_td=43
-performance_1303_td=95
-performance_1304_td=Melbourne delete
-performance_1305_td=ms
-performance_1306_td=147
-performance_1307_td=133
-performance_1308_td=176
-performance_1309_td=Sepang write
-performance_1310_td=ms
-performance_1311_td=965
-performance_1312_td=1201
-performance_1313_td=3213
-performance_1314_td=Sepang read
-performance_1315_td=ms
-performance_1316_td=765
-performance_1317_td=948
-performance_1318_td=3455
-performance_1319_td=Sepang read_hot
-performance_1320_td=ms
-performance_1321_td=789
-performance_1322_td=859
-performance_1323_td=3563
-performance_1324_td=Sepang delete
-performance_1325_td=ms
-performance_1326_td=1384
-performance_1327_td=1596
-performance_1328_td=6214
-performance_1329_td=Bahrain write
-performance_1330_td=ms
-performance_1331_td=1186
-performance_1332_td=1387
-performance_1333_td=6904
-performance_1334_td=Bahrain query_indexed_string
-performance_1335_td=ms
-performance_1336_td=336
-performance_1337_td=170
-performance_1338_td=693
-performance_1339_td=Bahrain query_string
-performance_1340_td=ms
-performance_1341_td=18064
-performance_1342_td=39703
-performance_1343_td=41243
-performance_1344_td=Bahrain query_indexed_int
-performance_1345_td=ms
-performance_1346_td=104
-performance_1347_td=134
-performance_1348_td=678
-performance_1349_td=Bahrain update
-performance_1350_td=ms
-performance_1351_td=191
-performance_1352_td=87
-performance_1353_td=159
-performance_1354_td=Bahrain delete
-performance_1355_td=ms
-performance_1356_td=1215
-performance_1357_td=729
-performance_1358_td=6812
-performance_1359_td=Imola retrieve
-performance_1360_td=ms
-performance_1361_td=198
-performance_1362_td=194
-performance_1363_td=4036
-performance_1364_td=Barcelona write
-performance_1365_td=ms
-performance_1366_td=413
-performance_1367_td=832
-performance_1368_td=3191
-performance_1369_td=Barcelona read
-performance_1370_td=ms
-performance_1371_td=119
-performance_1372_td=160
-performance_1373_td=1177
-performance_1374_td=Barcelona query
-performance_1375_td=ms
-performance_1376_td=20
-performance_1377_td=5169
-performance_1378_td=101
-performance_1379_td=Barcelona delete
-performance_1380_td=ms
-performance_1381_td=388
-performance_1382_td=319
-performance_1383_td=3287
-performance_1384_td=Total
-performance_1385_td=ms
-performance_1386_td=26724
-performance_1387_td=53962
-performance_1388_td=87112
-performance_1389_p=\ There are a few problems with the PolePosition test\:
-performance_1390_li=\ HSQLDB uses in-memory tables by default while H2 uses persistent tables. The HSQLDB version included in PolePosition does not support changing this, so you need to replace poleposition-0.20/lib/hsqldb.jar
with a newer version (for example hsqldb-1.8.0.7.jar
), and then use the setting hsqldb.connecturl\=jdbc\:hsqldb\:file\:data/hsqldb/dbbench2;hsqldb.default_table_type\=cached;sql.enforce_size\=true
in the file Jdbc.properties
.
-performance_1391_li=HSQLDB keeps the database open between tests, while H2 closes the database (losing all the cache). To change that, use the database URL jdbc\:h2\:file\:data/h2/dbbench;DB_CLOSE_DELAY\=-1
-performance_1392_li=The amount of cache memory is quite important, especially for the PolePosition test. Unfortunately, the PolePosition test does not take this into account.
-performance_1393_h2=Database Performance Tuning
-performance_1394_h3=Keep Connections Open or Use a Connection Pool
-performance_1395_p=\ If your application opens and closes connections a lot (for example, for each request), you should consider using a connection pool. Opening a connection using DriverManager.getConnection
is especially slow if the database is closed. By default the database is closed if the last connection is closed.
-performance_1396_p=\ If you open and close connections a lot but don't want to use a connection pool, consider keeping a 'sentinel' connection open for as long as the application runs, or use delayed database closing. See also Closing a database.
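As an example, H2 ships a simple pool implementation; a minimal sketch using org.h2.jdbcx.JdbcConnectionPool:

```java
import java.sql.Connection;
import org.h2.jdbcx.JdbcConnectionPool;

public class PoolExample {
    public static void main(String[] args) throws Exception {
        // Create the pool once at application start-up
        JdbcConnectionPool pool = JdbcConnectionPool.create(
                "jdbc:h2:~/test", "sa", "");
        // Borrow and return connections instead of opening new ones
        try (Connection conn = pool.getConnection()) {
            conn.createStatement().execute("SELECT 1");
        }
        // Close all unused connections at shutdown
        pool.dispose();
    }
}
```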
-performance_1397_h3=Use a Modern JVM
-performance_1398_p=\ Newer JVMs are faster. Upgrading to the latest version of your JVM can provide a "free" boost to performance. Switching from the default Client JVM to the Server JVM using the -server
command-line option improves performance at the cost of a slight increase in start-up time.
-performance_1399_h3=Virus Scanners
-performance_1400_p=\ Some virus scanners scan files every time they are accessed. It is very important for performance that database files are not scanned for viruses. The database engine never interprets the data stored in the files as programs; that means even if somebody stored a virus in a database file, it would be harmless (as long as the virus does not run, it cannot spread). Some virus scanners allow excluding files by suffix. Ensure files ending with .db
are not scanned.
-performance_1401_h3=Using the Trace Options
-performance_1402_p=\ If the performance hot spots are in the database engine, in many cases the performance can be optimized by creating additional indexes, or changing the schema. Sometimes the application does not directly generate the SQL statements, for example if an O/R mapping tool is used. To view the SQL statements and JDBC API calls, you can use the trace options. For more information, see Using the Trace Options.
-performance_1403_h3=Index Usage
-performance_1404_p=\ This database uses indexes to improve the performance of SELECT, UPDATE, DELETE
. If a column is used in the WHERE
clause of a query, and if an index exists on this column, then the index can be used. Multi-column indexes are used if all columns, or the leading columns, of the index are used. Both equality lookup and range scans are supported. Indexes are used to order result sets, but only if the condition uses the same index or no index at all. The results are sorted in memory if required. Indexes are created automatically for primary key and unique constraints. Indexes are also created for foreign key constraints, if required. For other columns, indexes need to be created manually using the CREATE INDEX
statement.
-performance_1405_h3=How Data is Stored Internally
-performance_1406_p=\ For persistent databases, if a table is created with a single column primary key of type BIGINT, INT, SMALLINT, TINYINT
, then the data of the table is organized by this key. This is sometimes also called a "clustered index" or "index organized table".
-performance_1407_p=\ H2 internally stores table data and indexes in the form of b-trees. Each b-tree stores entries as a list of unique keys (one or more columns) and data (zero or more columns). The table data is always organized in the form of a "data b-tree" with a single column key of type long
. If a single column primary key of type BIGINT, INT, SMALLINT, TINYINT
is specified when creating the table (or just after creating the table, but before inserting any rows), then this column is used as the key of the data b-tree. If no primary key has been specified, if the primary key column is of another data type, or if the primary key contains more than one column, then a hidden auto-increment column of type BIGINT
is added to the table, which is used as the key for the data b-tree. All other columns of the table are stored within the data area of this data b-tree (except for large BLOB, CLOB
columns, which are stored externally).
-performance_1408_p=\ For each additional index, one new "index b-tree" is created. The key of this b-tree consists of the indexed columns, plus the key of the data b-tree. If a primary key is created after the table has been created, or if the primary key contains multiple columns, or if the primary key is not of the data types listed above, then the primary key is stored in a new index b-tree.
-performance_1409_h3=Optimizer
-performance_1410_p=\ This database uses a cost based optimizer. For simple queries and queries of medium complexity (less than 7 tables in the join), the expected cost (running time) of all possible plans is calculated, and the plan with the lowest cost is used. For more complex queries, the algorithm first tries all possible combinations for the first few tables, and the remaining tables are added using a greedy algorithm (this works well for most joins). Afterwards a genetic algorithm is used to test at most 2000 distinct plans. Only left-deep plans are evaluated.
-performance_1411_h3=Expression Optimization
-performance_1412_p=\ After the statement is parsed, all expressions are simplified automatically if possible. Operations are evaluated only once if all parameters are constant. Functions are also optimized, but only if the function is constant (always returns the same result for the same parameter values). If the WHERE
clause is always false, then the table is not accessed at all.
-performance_1413_h3=COUNT(*) Optimization
-performance_1414_p=\ If the query only counts all rows of a table, then the data is not accessed. However, this is only possible if no WHERE
clause is used, that means it only works for queries of the form SELECT COUNT(*) FROM table
.
-performance_1415_h3=Updating Optimizer Statistics / Column Selectivity
-performance_1416_p=\ When executing a query, at most one index per join can be used. If the same table is joined multiple times, for each join only one index is used (the same index could be used for both joins, or each join could use a different index). Example\: for the query SELECT * FROM TEST T1, TEST T2 WHERE T1.NAME\='A' AND T2.ID\=T1.ID
, two indexes can be used\: in this case, the index on NAME for T1 and the index on ID for T2.
-performance_1417_p=\ If a table has multiple indexes, sometimes more than one index could be used. Example\: if there is a table TEST(ID, NAME, FIRSTNAME)
and an index on each column, then two indexes could be used for the query SELECT * FROM TEST WHERE NAME\='A' AND FIRSTNAME\='B'
, the index on NAME or the index on FIRSTNAME. It is not possible to use both indexes at the same time. Which index is used depends on the selectivity of the column. The selectivity describes the 'uniqueness' of values in a column. A selectivity of 100 means each value appears only once, and a selectivity of 1 means the same value appears in many or most rows. For the query above, the index on NAME should be used if the table contains more distinct names than first names.
-performance_1418_p=\ The SQL statement ANALYZE
can be used to automatically estimate the selectivity of the columns in the tables. This command should be run from time to time to improve the query plans generated by the optimizer.
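For example (SAMPLE_SIZE is optional and limits how many rows per table are scanned):

```sql
-- Re-estimate the selectivity of all columns,
-- scanning up to 10000 rows per table
ANALYZE SAMPLE_SIZE 10000;
```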
-performance_1419_h3=In-Memory (Hash) Indexes
-performance_1420_p=\ Using in-memory indexes, especially in-memory hash indexes, can speed up queries and data manipulation.
-performance_1421_p=In-memory indexes are automatically used for in-memory databases, but can also be created for persistent databases using CREATE MEMORY TABLE
. In many cases, the rows themselves will also be kept in memory. Please note this may cause memory problems for large tables.
-performance_1422_p=\ In-memory hash indexes are backed by a hash table and are usually faster than regular indexes. However, hash indexes only support direct lookups (WHERE ID \= ?
) but not range scans (WHERE ID < ?
). To use hash indexes, use HASH as in\: CREATE UNIQUE HASH INDEX
and CREATE TABLE ...(ID INT PRIMARY KEY HASH,...)
.
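A short sketch combining both forms (table and index names are illustrative):

```sql
CREATE MEMORY TABLE TEST(ID INT PRIMARY KEY HASH, NAME VARCHAR);
CREATE UNIQUE HASH INDEX IDX_TEST_NAME ON TEST(NAME);
-- supported by the hash index: direct lookup
SELECT * FROM TEST WHERE ID = 10;
-- not supported by the hash index: range scan
SELECT * FROM TEST WHERE ID < 10;
```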
-performance_1423_h3=Use Prepared Statements
-performance_1424_p=\ If possible, use prepared statements with parameters.
-performance_1425_h3=Prepared Statements and IN(...)
-performance_1426_p=\ Avoid generating SQL statements with a variable size IN(...) list. Instead, use a prepared statement with arrays as in the following example\:
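A sketch of this technique using H2's TABLE function (the table TEST is hypothetical); the whole array is passed as one parameter, so the statement text does not depend on the number of values:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class InListExample {
    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
        conn.createStatement().execute(
                "CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
        // The array parameter is expanded by the TABLE function
        PreparedStatement prep = conn.prepareStatement(
                "SELECT * FROM TABLE(X INT=?) T INNER JOIN TEST ON T.X = TEST.ID");
        prep.setObject(1, new Object[] { 1, 2, 3 });
        ResultSet rs = prep.executeQuery();
        while (rs.next()) {
            System.out.println(rs.getString("NAME"));
        }
        conn.close();
    }
}
```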
-performance_1427_h3=Optimization Examples
-performance_1428_p=\ See src/test/org/h2/samples/optimizations.sql
for a few examples of queries that benefit from special optimizations built into the database.
-performance_1429_h3=Cache Size and Type
-performance_1430_p=\ By default the cache size of H2 is quite small. Consider using a larger cache size, or enable the second level soft reference cache. See also Cache Settings.
-performance_1431_h3=Data Types
-performance_1432_p=\ Each data type has different storage and performance characteristics\:
-performance_1433_li=The DECIMAL/NUMERIC
type is slower and requires more storage than the REAL
and DOUBLE
types.
-performance_1434_li=Text types are slower to read, write, and compare than numeric types and generally require more storage.
-performance_1435_li=See Large Objects for information on BINARY
vs. BLOB
and VARCHAR
vs. CLOB
performance.
-performance_1436_li=Parsing and formatting takes longer for the TIME
, DATE
, and TIMESTAMP
types than the numeric types.
-performance_1437_code=SMALLINT/TINYINT/BOOLEAN
-performance_1438_li=\ are not significantly smaller or faster to work with than INTEGER
in most modes.
-performance_1439_h3=Sorted Insert Optimization
-performance_1440_p=\ To reduce disk space usage and speed up table creation, an optimization for sorted inserts is available. When used, b-tree pages are split at the insertion point. To use this optimization, add SORTED
before the SELECT
statement\:
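A minimal sketch (table name and data are illustrative):

```sql
CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR);
-- the rows arrive in key order, so b-tree pages
-- are split at the insertion point
INSERT INTO TEST SORTED SELECT X, 'Hello' FROM SYSTEM_RANGE(1, 10000);
```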
-performance_1441_h2=Using the Built-In Profiler
-performance_1442_p=\ A very simple Java profiler is built-in. To use it, use the following template\:
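A sketch of the template, using the org.h2.util.Profiler class that ships with H2:

```java
import org.h2.util.Profiler;

public class ProfileIt {
    public static void main(String[] args) throws Exception {
        Profiler prof = new Profiler();
        prof.startCollecting();
        // ... the code to profile goes here ...
        prof.stopCollecting();
        // Print the most common stack traces (here: the top 3)
        System.out.println(prof.getTop(3));
    }
}
```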
-performance_1443_h2=Application Profiling
-performance_1444_h3=Analyze First
-performance_1445_p=\ Before trying to optimize performance, it is important to understand where the problem is (what part of the application is slow). Blind optimization or optimization based on guesses should be avoided, because usually it is not an efficient strategy. There are various ways to analyze an application. Sometimes two implementations can be compared using System.currentTimeMillis()
. But this does not work for complex applications with many modules, or for memory problems.
-performance_1446_p=\ A simple way to profile an application is to use the built-in profiling tool of Java. Example\:
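For example, using the HPROF agent of older JDKs (com.acme.Test is a placeholder for the application's main class):

```
java -Xrunhprof:cpu=samples,depth=16 com.acme.Test
```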
-performance_1447_p=\ Unfortunately, it is only possible to profile the application from start to end. Another solution is to create a number of full thread dumps. To do that, first run jps -l
to get the process id, and then run jstack <pid>
or kill -QUIT <pid>
(Linux) or press Ctrl+C (Windows).
-performance_1448_p=\ A simple profiling tool is included in H2. To use it, the application needs to be changed slightly, as shown in the profiler template above.
-performance_1449_p=\ The profiler is built into the H2 Console tool, to analyze databases that open slowly. To use it, run the H2 Console, and then click on 'Test Connection'. Afterwards, click on "Test successful" and you get the most common stack traces, which helps to find out why it took so long to connect. You will only get the stack traces if opening the database took more than a few seconds.
-performance_1450_h2=Database Profiling
-performance_1451_p=\ The ConvertTraceFile
tool generates SQL statement statistics at the end of the SQL script file. The format used is similar to the profiling data generated when using java -Xrunhprof
. For this to work, the trace level needs to be 2 or higher (TRACE_LEVEL_FILE\=2
). The easiest way to set the trace level is to append the setting to the database URL, for example\: jdbc\:h2\:~/test;TRACE_LEVEL_FILE\=2
or jdbc\:h2\:tcp\://localhost/~/test;TRACE_LEVEL_FILE\=2
. As an example, execute the following script using the H2 Console\:
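A sketch of such a script (the statements are illustrative; any workload will do):

```sql
DROP TABLE IF EXISTS TEST;
CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255));
INSERT INTO TEST SELECT X, 'Name ' || X FROM SYSTEM_RANGE(1, 1000);
SELECT COUNT(*) FROM TEST WHERE NAME LIKE '%5%';
```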
-performance_1452_p=\ After running the test case, convert the .trace.db
file using the ConvertTraceFile
tool. The trace file is located in the same directory as the database file.
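For example (file names are illustrative):

```
java -cp h2*.jar org.h2.tools.ConvertTraceFile
    -traceFile "~/test.trace.db" -script "~/test.sql"
```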
-performance_1453_p=\ The generated file test.sql
will contain the SQL statements as well as the following profiling data (results vary)\:
-performance_1454_h2=Statement Execution Plans
-performance_1455_p=\ The SQL statement EXPLAIN
displays the indexes and optimizations the database uses for a statement. The following statements support EXPLAIN
\: SELECT, UPDATE, DELETE, MERGE, INSERT
. The following query shows that the database uses the primary key index to search for rows\:
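A sketch; the /* ... */ comment is representative output, and the exact plan text varies between versions:

```sql
CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR);
EXPLAIN SELECT * FROM TEST WHERE ID = 1;
-- SELECT TEST.ID, TEST.NAME FROM PUBLIC.TEST
--     /* PUBLIC.PRIMARY_KEY_2: ID = 1 */ WHERE ID = 1
```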
-performance_1456_p=\ For joins, the tables in the execution plan are sorted in the order they are processed. The following query shows the database first processes the table INVOICE
(using the primary key). For each row, it will additionally check that the value of the column AMOUNT
is larger than zero, and for those rows the database will search in the table CUSTOMER
(using the primary key). The query plan contains some redundancy so that it is itself a valid statement.
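A sketch with a hypothetical schema matching the description above:

```sql
CREATE TABLE CUSTOMER(ID INT PRIMARY KEY, NAME VARCHAR);
CREATE TABLE INVOICE(ID INT PRIMARY KEY, CUSTOMER_ID INT, AMOUNT DECIMAL);
EXPLAIN SELECT I.ID, C.NAME FROM INVOICE I, CUSTOMER C
WHERE I.ID = 10 AND I.AMOUNT > 0 AND C.ID = I.CUSTOMER_ID;
-- INVOICE is processed first (primary key lookup, then the AMOUNT filter);
-- for each matching row, CUSTOMER is looked up by its primary key
```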
-performance_1457_h3=Displaying the Scan Count
-performance_1458_code=EXPLAIN ANALYZE
-performance_1459_p=\ additionally shows the scanned rows per table and pages read from disk per table or index. This will actually execute the query, unlike EXPLAIN
which only prepares it. The following query scanned 1000 rows, and to do that had to read 85 pages from the data area of the table. Running the query twice will not list the pages read from disk, because they are now in the cache. The tableScan
means this query doesn't use an index.
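A sketch; the scanCount and reads figures are representative, not exact:

```sql
EXPLAIN ANALYZE SELECT COUNT(*) FROM TEST WHERE NAME = 'Name 500';
-- SELECT COUNT(*) FROM PUBLIC.TEST
--     /* PUBLIC.TEST.tableScan */ /* scanCount: 1001 */
--     WHERE NAME = 'Name 500'
--     /* reads: 85 */
```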
-performance_1460_p=\ The cache will prevent the pages from being read twice. H2 reads all columns of the row unless only the columns in the index are read. The exception is large CLOB and BLOB values, which are not stored in the table.
-performance_1461_h3=Special Optimizations
-performance_1462_p=\ For certain queries, the database doesn't need to read all rows, or doesn't need to sort the result even if ORDER BY
is used.
-performance_1463_p=\ For queries of the form SELECT COUNT(*), MIN(ID), MAX(ID) FROM TEST
, the query plan includes the line /* direct lookup */
if the data can be read from an index.
-performance_1464_p=\ For queries of the form SELECT DISTINCT CUSTOMER_ID FROM INVOICE
, the query plan includes the line /* distinct */
if there is a non-unique or multi-column index on this column, and if this column has a low selectivity.
-performance_1465_p=\ For queries of the form SELECT * FROM TEST ORDER BY ID
, the query plan includes the line /* index sorted */
to indicate there is no separate sorting required.
-performance_1466_p=\ For queries of the form SELECT * FROM TEST GROUP BY ID ORDER BY ID
, the query plan includes the line /* group sorted */
to indicate there is no separate sorting required.
-performance_1467_h2=How Data is Stored and How Indexes Work
-performance_1468_p=\ Internally, each row in a table is identified by a unique number, the row id. The rows of a table are stored with the row id as the key. The row id is a number of type long. If a table has a single column primary key of type INT
or BIGINT
, then the value of this column is the row id, otherwise the database generates the row id automatically. There is a (non-standard) way to access the row id\: using the _ROWID_
pseudo-column\:
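A sketch producing the data shown below (the table name ADDRESS is assumed):

```sql
CREATE TABLE ADDRESS(FIRST_NAME VARCHAR, NAME VARCHAR,
    CITY VARCHAR, PHONE VARCHAR);
INSERT INTO ADDRESS VALUES('John', 'Miller', 'Berne', '123 456 789');
INSERT INTO ADDRESS VALUES('Philip', 'Jones', 'Berne', '123 012 345');
SELECT _ROWID_, * FROM ADDRESS;
```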
-performance_1469_p=\ The data is stored in the database as follows\:
-performance_1470_th=_ROWID_
-performance_1471_th=FIRST_NAME
-performance_1472_th=NAME
-performance_1473_th=CITY
-performance_1474_th=PHONE
-performance_1475_td=1
-performance_1476_td=John
-performance_1477_td=Miller
-performance_1478_td=Berne
-performance_1479_td=123 456 789
-performance_1480_td=2
-performance_1481_td=Philip
-performance_1482_td=Jones
-performance_1483_td=Berne
-performance_1484_td=123 012 345
-performance_1485_p=\ Access by row id is fast because the data is sorted by this key. Please note the row id is not available until after the row was added (that means it cannot be used in computed columns or constraints). If the query condition does not contain the row id (and if no other index can be used), then all rows of the table are scanned. A table scan iterates over all rows in the table, in the order of the row id. To find out what strategy the database uses to retrieve the data, use EXPLAIN SELECT
\:
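For example, without a usable index the plan shows a table scan (representative output):

```sql
EXPLAIN SELECT * FROM ADDRESS WHERE NAME = 'Miller';
-- SELECT ... FROM PUBLIC.ADDRESS
--     /* PUBLIC.ADDRESS.tableScan */ WHERE NAME = 'Miller'
```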
-performance_1486_h3=Indexes
-performance_1487_p=\ An index internally is basically just a table that contains the indexed column(s), plus the row id\:
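For example (the index name is illustrative):

```sql
CREATE INDEX IDX_CITY_NAME_FIRST_NAME
    ON ADDRESS(CITY, NAME, FIRST_NAME);
```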
-performance_1488_p=\ In the index, the data is sorted by the indexed columns. So this index contains the following data\:
-performance_1489_th=CITY
-performance_1490_th=NAME
-performance_1491_th=FIRST_NAME
-performance_1492_th=_ROWID_
-performance_1493_td=Berne
-performance_1494_td=Jones
-performance_1495_td=Philip
-performance_1496_td=2
-performance_1497_td=Berne
-performance_1498_td=Miller
-performance_1499_td=John
-performance_1500_td=1
-performance_1501_p=\ When the database uses an index to query the data, it searches the index for the given data, and (if required) reads the remaining columns in the main data table (retrieved using the row id). An index on city, name, and first name (multi-column index) makes it possible to quickly search for rows when the city, name, and first name are known. If only the city and name, or only the city is known, then this index is also used (so creating an additional index on just the city is not needed). This index is also used when reading all rows, sorted by the indexed columns. However, if only the first name is known, then this index is not used\:
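A sketch of the cases described above:

```sql
-- can use the index (the leading column CITY is known):
SELECT * FROM ADDRESS WHERE CITY = 'Berne';
SELECT * FROM ADDRESS WHERE CITY = 'Berne' AND NAME = 'Miller';
-- cannot use the index (FIRST_NAME is not a leading column):
SELECT * FROM ADDRESS WHERE FIRST_NAME = 'John';
```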
-performance_1502_p=\ If your application often queries the table for a phone number, then it makes sense to create an additional index on it\:
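For example (the index name is illustrative):

```sql
CREATE INDEX IDX_PHONE ON ADDRESS(PHONE);
```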
-performance_1503_p=\ This index contains the phone number, and the row id\:
-performance_1504_th=PHONE
-performance_1505_th=_ROWID_
-performance_1506_td=123 012 345
-performance_1507_td=2
-performance_1508_td=123 456 789
-performance_1509_td=1
-performance_1510_h3=Using Multiple Indexes
-performance_1511_p=\ Within a query, only one index per logical table is used. Using the condition PHONE \= '123 567 789' OR CITY \= 'Berne'
would use a table scan instead of first using the index on the phone number and then the index on the city. It makes sense to write two queries and combine them using UNION
. In this case, each individual query uses a different index\:
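A sketch; the first branch can use the index on PHONE, the second the index on CITY:

```sql
SELECT * FROM ADDRESS WHERE PHONE = '123 567 789'
UNION
SELECT * FROM ADDRESS WHERE CITY = 'Berne';
```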
-performance_1512_h2=Fast Database Import
-performance_1513_p=\ To speed up large imports, consider using the following options temporarily\:
-performance_1514_code=SET LOG 0
-performance_1515_li=\ (disabling the transaction log)
-performance_1516_code=SET CACHE_SIZE
-performance_1517_li=\ (a large cache is faster)
-performance_1518_code=SET LOCK_MODE 0
-performance_1519_li=\ (disable locking)
-performance_1520_code=SET UNDO_LOG 0
-performance_1521_li=\ (disable the session undo log)
-performance_1522_p=\ These options can be set in the database URL\: jdbc\:h2\:~/test;LOG\=0;CACHE_SIZE\=65536;LOCK_MODE\=0;UNDO_LOG\=0
. Most of these options are not recommended for regular use; that means you need to reset them after use.
-performance_1523_p=\ If you have to import a lot of rows, use a PreparedStatement or use CSV import. Please note that CREATE TABLE(...) ... AS SELECT ...
is faster than CREATE TABLE(...); INSERT INTO ... SELECT ...
.
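A sketch of the faster form, importing from a CSV file (the file name is illustrative):

```sql
CREATE TABLE TEST(ID INT, NAME VARCHAR)
    AS SELECT * FROM CSVREAD('data.csv');
```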
-quickstart_1000_h1=Quickstart
-quickstart_1001_a=\ Embedding H2 in an Application
-quickstart_1002_a=\ The H2 Console Application
-quickstart_1003_h2=Embedding H2 in an Application
-quickstart_1004_p=\ This database can be used in embedded mode, or in server mode. To use it in embedded mode, you need to do the following (a minimal example follows the list)\:
-quickstart_1005_li=Add the h2*.jar
to the classpath (H2 does not have any dependencies)
-quickstart_1006_li=Use the JDBC driver class\: org.h2.Driver
-quickstart_1007_li=The database URL jdbc\:h2\:~/test
opens the database test
in your user home directory
-quickstart_1008_li=A new database is automatically created
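A minimal embedded-mode sketch:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class HelloH2 {
    public static void main(String[] args) throws Exception {
        // Opens (and creates, if needed) the database 'test'
        // in the user home directory
        Connection conn = DriverManager.getConnection("jdbc:h2:~/test");
        ResultSet rs = conn.createStatement()
                .executeQuery("SELECT 'Hello, World'");
        while (rs.next()) {
            System.out.println(rs.getString(1));
        }
        conn.close();
    }
}
```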
-quickstart_1009_h2=The H2 Console Application
-quickstart_1010_p=\ The Console lets you access a SQL database using a browser interface.
-quickstart_1011_p=\ If you don't have Windows XP, or if something does not work as expected, please see the detailed description in the Tutorial.
-quickstart_1012_h3=Step-by-Step
-quickstart_1013_h4=Installation
-quickstart_1014_p=\ Install the software using the Windows Installer (if you have not done so already).
-quickstart_1015_h4=Start the Console
-quickstart_1016_p=\ Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]\:
-quickstart_1017_p=\ A new console window appears\:
-quickstart_1018_p=\ Also, a new browser page should open with the URL http\://localhost\:8082. You may get a security warning from the firewall. If you don't want other computers in the network to access the database on your machine, you can let the firewall block these connections. Only local connections are required at this time.
-quickstart_1019_h4=Login
-quickstart_1020_p=\ Select [Generic H2] and click [Connect]\:
-quickstart_1021_p=\ You are now logged in.
-quickstart_1022_h4=Sample
-quickstart_1023_p=\ Click on the [Sample SQL Script]\:
-quickstart_1024_p=\ The SQL commands appear in the command area.
-quickstart_1025_h4=Execute
-quickstart_1026_p=\ Click [Run]
-quickstart_1027_p=\ On the left side, a new entry TEST is added below the database icon. The operations and results of the statements are shown below the script.
-quickstart_1028_h4=Disconnect
-quickstart_1029_p=\ Click on [Disconnect]\:
-quickstart_1030_p=\ to close the connection.
-quickstart_1031_h4=End
-quickstart_1032_p=\ Close the console window. For more information, see the Tutorial.
-roadmap_1000_h1=Roadmap
-roadmap_1001_p=\ New (feature) requests will usually be added at the very end of the list. The priority is increased for important and popular requests. Of course, patches are always welcome, but are not always applied as is. See also Providing Patches.
-roadmap_1002_h2=Version 1.5.x\: Planned Changes
-roadmap_1003_li=Replace file password hash with file encryption key; validate encryption key when connecting.
-roadmap_1004_li=Remove "set binary collation" feature.
-roadmap_1005_li=Remove the encryption algorithm XTEA.
-roadmap_1006_li=Disallow referencing other tables in a table (via constraints for example).
-roadmap_1007_li=Remove PageStore features like compress_lob.
-roadmap_1008_h2=Version 1.4.x\: Planned Changes
-roadmap_1009_li=Change license to MPL 2.0.
-roadmap_1010_li=Automatic migration from 1.3 databases to 1.4.
-roadmap_1011_li=Option to disable the file name suffix somehow (issue 447).
-roadmap_1012_h2=Priority 1
-roadmap_1013_li=Bugfixes.
-roadmap_1014_li=More tests with MULTI_THREADED\=1 (and MULTI_THREADED with MVCC)\: Online backup (using the 'backup' statement).
-roadmap_1015_li=Server side cursors.
-roadmap_1016_h2=Priority 2
-roadmap_1017_li=Support hints for the optimizer (which index to use, enforce the join order).
-roadmap_1018_li=Full outer joins.
-roadmap_1019_li=Access rights\: remember the owner of an object. Create, alter and drop privileges. COMMENT\: allow owner of object to change it. Issue 208\: Access rights for schemas.
-roadmap_1020_li=Test multi-threaded in-memory db access.
-roadmap_1021_li=MySQL, MS SQL Server compatibility\: support case sensitive (mixed case) identifiers without quotes.
-roadmap_1022_li=Support GRANT SELECT, UPDATE ON [schemaName.] *.
-roadmap_1023_li=Migrate database tool (also from other database engines). For Oracle, maybe use DBMS_METADATA.GET_DDL / GET_DEPENDENT_DDL.
-roadmap_1024_li=Clustering\: support mixed clustering mode (one embedded, others in server mode).
-roadmap_1025_li=Clustering\: reads should be randomly distributed (optional) or to a designated database on RAM (parameter\: READ_FROM\=3).
-roadmap_1026_li=Window functions\: RANK() and DENSE_RANK(), partition using OVER(). select *, count(*) over() as fullCount from ... limit 4;
-roadmap_1027_li=PostgreSQL catalog\: use BEFORE SELECT triggers instead of views over metadata tables.
-roadmap_1028_li=Compatibility\: automatically load functions from a script depending on the mode - see FunctionsMySQL.java, issue 211.
-roadmap_1029_li=Test very large databases and LOBs (up to 256 GB).
-roadmap_1030_li=Store all temp files in the temp directory.
-roadmap_1031_li=Don't use temp files, especially not deleteOnExit (bug 4513817\: File.deleteOnExit consumes memory). Also to allow opening client / server (remote) connections when using LOBs.
-roadmap_1032_li=Make DDL (Data Definition) operations transactional.
-roadmap_1033_li=Deferred integrity checking (DEFERRABLE INITIALLY DEFERRED).
-roadmap_1034_li=Groovy Stored Procedures\: http\://groovy.codehaus.org/GSQL
-roadmap_1035_li=Add a migration guide (list differences between databases).
-roadmap_1036_li=Optimization\: automatic index creation suggestion using the trace file?
-roadmap_1037_li=Fulltext search Lucene\: analyzer configuration, mergeFactor.
-roadmap_1038_li=Compression performance\: don't allocate buffers, compress / expand in to out buffer.
-roadmap_1039_li=Rebuild index functionality to shrink index size and improve performance.
-roadmap_1040_li=Console\: add accesskey to most important commands (A, AREA, BUTTON, INPUT, LABEL, LEGEND, TEXTAREA).
-roadmap_1041_li=Test performance again with SQL Server, Oracle, DB2.
-roadmap_1042_li=Test with Spatial DB in a box / JTS\: http\://www.opengeospatial.org/standards/sfs - OpenGIS Implementation Specification.
-roadmap_1043_li=Write more tests and documentation for MVCC (Multi Version Concurrency Control).
-roadmap_1044_li=Find a tool to view large text file (larger than 100 MB), with find, page up and down (like less), truncate before / after.
-roadmap_1045_li=Implement, test, document XAConnection and so on.
-roadmap_1046_li=Pluggable data type (for streaming, hashing, compression, validation, conversion, encryption).
-roadmap_1047_li=CHECK\: find out what makes CHECK\=TRUE slow, move to CHECK2.
-roadmap_1048_li=Drop with invalidate views (so that source code is not lost). Check what other databases do exactly.
-roadmap_1049_li=Index usage for (ID, NAME)\=(1, 'Hi'); document.
-roadmap_1050_li=Set a connection read only (Connection.setReadOnly) or using a connection parameter.
-roadmap_1051_li=Access rights\: finer grained access control (grant access for specific functions).
-roadmap_1052_li=ROW_NUMBER() OVER([PARTITION BY columnName][ORDER BY columnName]).
-roadmap_1053_li=Version check\: docs / web console (using Javascript), and maybe in the library (using TCP/IP).
-roadmap_1054_li=Web server classloader\: override findResource / getResourceFrom.
-roadmap_1055_li=Cost for embedded temporary view is calculated wrongly if the result is constant.
-roadmap_1056_li=Count index range query (count(*) where id between 10 and 20).
-roadmap_1057_li=Performance\: update in-place.
-roadmap_1058_li=Clustering\: when a database is back alive, automatically synchronize with the master (requires readable transaction log).
-roadmap_1059_li=Database file name suffix\: a way to use no or a different suffix (for example using a slash).
-roadmap_1060_li=Eclipse plugin.
-roadmap_1061_li=Asynchronous queries to support publish/subscribe\: SELECT ... FOR READ WAIT [maxMillisToWait]. See also MS SQL Server "Query Notification".
-roadmap_1062_li=Fulltext search (native)\: reader / tokenizer / filter.
-roadmap_1063_li=Linked schema using CSV files\: one schema for a directory of files; support indexes for CSV files.
-roadmap_1064_li=iReport to support H2.
-roadmap_1065_li=Include SMTP (mail) client (alert on cluster failure, low disk space,...).
-roadmap_1066_li=Option for SCRIPT to only process one or a set of schemas or tables, and append to a file.
-roadmap_1067_li=JSON parser and functions.
-roadmap_1068_li=Copy database\: tool with config GUI and batch mode, extensible (example\: compare).
-roadmap_1069_li=Document, implement tool for long running transactions using user-defined compensation statements.
-roadmap_1070_li=Support SET TABLE DUAL READONLY.
-roadmap_1071_li=GCJ\: what is the state now?
-roadmap_1072_li=Events for\: database Startup, Connections, Login attempts, Disconnections, Prepare (after parsing), Web Server. See http\://docs.openlinksw.com/virtuoso/fn_dbev_startup.html
-roadmap_1073_li=Optimization\: simpler log compression.
-roadmap_1074_li=Support standard INFORMATION_SCHEMA tables, as defined in http\://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt - specially KEY_COLUMN_USAGE\: http\://dev.mysql.com/doc/refman/5.0/en/information-schema.html, http\://www.xcdsql.org/Misc/INFORMATION_SCHEMA%20With%20Rolenames.gif
-roadmap_1075_li=Compatibility\: in MySQL, HSQLDB, /0.0 is NULL; in PostgreSQL, Derby\: division by zero. HSQLDB\: 0.0e1 / 0.0e1 is NaN.
-roadmap_1076_li=Functional tables should accept parameters from other tables (see FunctionMultiReturn) SELECT * FROM TEST T, P2C(T.A, T.R).
-roadmap_1077_li=Custom class loader to reload functions on demand.
-roadmap_1078_li=Test http\://mysql-je.sourceforge.net/
-roadmap_1079_li=H2 Console\: the webclient could support more features like phpMyAdmin.
-roadmap_1080_li=Support Oracle functions\: TO_DATE, TO_NUMBER.
-roadmap_1081_li=Work on the Java to C converter.
-roadmap_1082_li=The HELP information schema can be directly exposed in the Console.
-roadmap_1083_li=Maybe use the 0x1234 notation for binary fields, see MS SQL Server.
-roadmap_1084_li=Support Oracle CONNECT BY in some way\: http\://www.adp-gmbh.ch/ora/sql/connect_by.html http\://philip.greenspun.com/sql/trees.html
-roadmap_1085_li=SQL Server 2005, Oracle\: support COUNT(*) OVER(). See http\://www.orafusion.com/art_anlytc.htm
-roadmap_1086_li=SQL 2003\: http\://www.wiscorp.com/sql_2003_standard.zip
-roadmap_1087_li=Version column (number/sequence and timestamp based).
-roadmap_1088_li=Optimize getGeneratedKey\: send last identity after each execute (server).
-roadmap_1089_li=Test and document UPDATE TEST SET (ID, NAME) \= (SELECT ID*10, NAME || '\!' FROM TEST T WHERE T.ID\=TEST.ID).
-roadmap_1090_li=Max memory rows / max undo log size\: use block count / row size not row count.
-roadmap_1091_li=Implement point-in-time recovery.
-roadmap_1092_li=Support PL/SQL (programming language / control flow statements).
-roadmap_1093_li=LIKE\: improved version for larger texts (currently using naive search).
-roadmap_1094_li=Throw an exception when the application calls getInt on a Long (optional).
-roadmap_1095_li=Default date format for input and output (local date constants).
-roadmap_1096_li=Document ROWNUM usage for reports\: SELECT ROWNUM, * FROM (subquery).
-roadmap_1097_li=File system that writes to two file systems (replication, replicating file system).
-roadmap_1098_li=Standalone tool to get relevant system properties and add it to the trace output.
-roadmap_1099_li=Support 'call proc(1\=value)' (PostgreSQL, Oracle).
-roadmap_1100_li=Console\: improve editing data (Tab, Shift-Tab, Enter, Up, Down, Shift+Del?).
-roadmap_1101_li=Console\: autocomplete Ctrl+Space inserts template.
-roadmap_1102_li=Option to encrypt .trace.db file.
-roadmap_1103_li=Auto-Update feature for database, .jar file.
-roadmap_1104_li=ResultSet SimpleResultSet.readFromURL(String url)\: id varchar, state varchar, released timestamp.
-roadmap_1105_li=Partial indexing (see PostgreSQL).
-roadmap_1106_li=Add GUI to build a custom version (embedded, fulltext,...) using build flags.
-roadmap_1107_li=http\://rubyforge.org/projects/hypersonic/
-roadmap_1108_li=Add a sample application that runs the H2 unit test and writes the result to a file (so it can be included in the user app).
-roadmap_1109_li=Table order\: ALTER TABLE TEST ORDER BY NAME DESC (MySQL compatibility).
-roadmap_1110_li=Backup tool should work with other databases as well.
-roadmap_1111_li=Console\: -ifExists doesn't work for the console. Add a flag to disable other dbs.
-roadmap_1112_li=Check if 'FSUTIL behavior set disablelastaccess 1' improves the performance (fsutil behavior query disablelastaccess).
-roadmap_1113_li=Java static code analysis\: http\://pmd.sourceforge.net/
-roadmap_1114_li=Java static code analysis\: http\://www.eclipse.org/tptp/
-roadmap_1115_li=Compatibility for CREATE SCHEMA AUTHORIZATION.
-roadmap_1116_li=Implement Clob / Blob truncate and the remaining functionality.
-roadmap_1117_li=Add multiple columns at the same time with ALTER TABLE .. ADD .. ADD ...
-roadmap_1118_li=File locking\: writing a system property to detect concurrent access from the same VM (different classloaders).
-roadmap_1119_li=Pure SQL triggers (example\: update parent table if the child table is changed).
-roadmap_1120_li=Add H2 to Gem (Ruby install system).
-roadmap_1121_li=Support linked JCR tables.
-roadmap_1122_li=Native fulltext search\: min word length; store word positions.
-roadmap_1123_li=Add an option to the SCRIPT command to generate only portable / standard SQL.
-roadmap_1124_li=Updatable views\: create 'instead of' triggers automatically if possible (simple cases first).
-roadmap_1125_li=Improve create index performance.
-roadmap_1126_li=Compact databases without having to close the database (vacuum).
-roadmap_1127_li=Implement more JDBC 4.0 features.
-roadmap_1128_li=Support TRANSFORM / PIVOT as in MS Access.
-roadmap_1129_li=SELECT * FROM (VALUES (...), (...), ....) AS alias(f1, ...).
-roadmap_1130_li=Support updatable views with join on primary keys (to extend a table).
-roadmap_1131_li=Public interface for functions (not public static).
-roadmap_1132_li=Support reading the transaction log.
-roadmap_1133_li=Feature matrix as in i-net software.
-roadmap_1134_li=Updatable result set on table without primary key or unique index.
-roadmap_1135_li=Compatibility with Derby and PostgreSQL\: VALUES(1), (2); SELECT * FROM (VALUES (1), (2)) AS myTable(c1). Issue 221.
-roadmap_1136_li=Allow execution time prepare for SELECT * FROM CSVREAD(?, 'columnNameString')
-roadmap_1137_li=Support data type INTERVAL
-roadmap_1138_li=Support nested transactions (possibly using savepoints internally).
-roadmap_1139_li=Add a benchmark for bigger databases, and one for many users.
-roadmap_1140_li=Compression in the result set over TCP/IP.
-roadmap_1141_li=Support curtimestamp (like curtime, curdate).
-roadmap_1142_li=Support ANALYZE {TABLE|INDEX} tableName COMPUTE|ESTIMATE|DELETE STATISTICS ptnOption options.
-roadmap_1143_li=Release locks (shared or exclusive) on demand
-roadmap_1144_li=Support OUTER UNION
-roadmap_1145_li=Support parameterized views (similar to CSVREAD, but using just SQL for the definition)
-roadmap_1146_li=A way (JDBC driver) to map a URL (jdbc\:h2map\:c1) to a connection object
-roadmap_1147_li=Support dynamic linked schema (automatically adding/updating/removing tables)
-roadmap_1148_li=Clustering\: adding a node should be very fast and without interrupting clients (very short lock)
-roadmap_1149_li=Compatibility\: \# is the start of a single line comment (MySQL) but date quote (Access). Mode specific
-roadmap_1150_li=Run benchmarks with Android, Java 7, java -server
-roadmap_1151_li=Optimizations\: faster hash function for strings.
-roadmap_1152_li=DatabaseEventListener\: callback for all operations (including expected time, RUNSCRIPT) and cancel functionality
-roadmap_1153_li=Benchmark\: add a graph to show how databases scale (performance/database size)
-roadmap_1154_li=Implement a SQLData interface to map your data over to a custom object
-roadmap_1155_li=In the MySQL and PostgreSQL mode, use lower case identifiers by default (DatabaseMetaData.storesLowerCaseIdentifiers \= true)
-roadmap_1156_li=Support multiple directories (on different hard drives) for the same database
-roadmap_1157_li=Server protocol\: use challenge response authentication, but client sends hash(user+password) encrypted with response
-roadmap_1158_li=Support EXEC[UTE] (doesn't return a result set, compatible to MS SQL Server)
-roadmap_1159_li=Support native XML data type - see http\://en.wikipedia.org/wiki/SQL/XML
-roadmap_1160_li=Support triggers with a string property or option\: SpringTrigger, OSGITrigger
-roadmap_1161_li=MySQL compatibility\: update test1 t1, test2 t2 set t1.id \= t2.id where t1.id \= t2.id;
-roadmap_1162_li=Ability to resize the cache array when resizing the cache
-roadmap_1163_li=Time based cache writing (one second after writing the log)
-roadmap_1164_li=Check state of H2 driver for DDLUtils\: http\://issues.apache.org/jira/browse/DDLUTILS-185
-roadmap_1165_li=Index usage for REGEXP LIKE.
-roadmap_1166_li=Compatibility\: add a role DBA (like ADMIN).
-roadmap_1167_li=Better support multiple processors for in-memory databases.
-roadmap_1168_li=Support N'text'
-roadmap_1169_li=Support compatibility for jdbc\:hsqldb\:res\:
-roadmap_1170_li=HSQLDB compatibility\: automatically convert to the next 'higher' data type. Example\: cast(2000000000 as int) + cast(2000000000 as int); (HSQLDB\: long; PostgreSQL\: integer out of range)
-roadmap_1171_li=Provide a Java SQL builder with standard and H2 syntax
-roadmap_1172_li=Trace\: write OS, file system, JVM,... when opening the database
-roadmap_1173_li=Support indexes for views (probably requires materialized views)
-roadmap_1174_li=Document SET SEARCH_PATH, BEGIN, EXECUTE, parameters
-roadmap_1175_li=Server\: use one listener (detect if the request comes from a PG or TCP client)
-roadmap_1176_li=Optimize SELECT MIN(ID), MAX(ID), COUNT(*) FROM TEST WHERE ID BETWEEN 100 AND 200
-roadmap_1177_li=Sequence\: PostgreSQL compatibility (rename, create) http\://www.postgresql.org/docs/8.2/static/sql-altersequence.html
-roadmap_1178_li=DISTINCT\: support large result sets by sorting on all columns (additionally) and then removing duplicates.
-roadmap_1179_li=Support a special trigger on all tables to allow building a transaction log reader.
-roadmap_1180_li=File system with a background writer thread; test if this is faster
-roadmap_1181_li=Better document the source code (high level documentation).
-roadmap_1182_li=Support select * from dual a left join dual b on b.x\=(select max(x) from dual)
-roadmap_1183_li=Optimization\: don't lock when the database is read-only
-roadmap_1184_li=Issue 146\: Support merge join.
-roadmap_1185_li=Integrate spatial functions from http\://geosysin.iict.ch/irstv-trac/wiki/H2spatial/Download
-roadmap_1186_li=Cluster\: hot deploy (adding a node at runtime).
-roadmap_1187_li=Support DatabaseMetaData.insertsAreDetected\: updatable result sets should detect inserts.
-roadmap_1188_li=Oracle\: support DECODE method (convert to CASE WHEN).
-roadmap_1189_li=Native search\: support "phrase search", wildcard search (* and ?), case-insensitive search, boolean operators, and grouping
-roadmap_1190_li=Improve documentation of access rights.
-roadmap_1191_li=Support opening a database that is in the classpath, maybe using a new file system. Workaround\: detect jar file using getClass().getProtectionDomain().getCodeSource().getLocation().
-roadmap_1192_li=Support ENUM data type (see MySQL, PostgreSQL, MS SQL Server, maybe others).
-roadmap_1193_li=Remember the user defined data type (domain) of a column.
-roadmap_1194_li=MVCC\: support multi-threaded kernel with multi-version concurrency.
-roadmap_1195_li=Auto-server\: add option to define the port range or list.
-roadmap_1196_li=Support Jackcess (MS Access databases)
-roadmap_1197_li=Built-in methods to write large objects (BLOB and CLOB)\: FILE_WRITE('test.txt', 'Hello World')
-roadmap_1198_li=Improve time to open large databases (see mail 'init time for distributed setup')
-roadmap_1199_li=Move Maven 2 repository from hsql.sf.net to h2database.sf.net
-roadmap_1200_li=Java 1.5 tool\: JdbcUtils.closeSilently(s1, s2,...)
-roadmap_1201_li=Optimize A\=? OR B\=? to UNION if the cost is lower.
-roadmap_1202_li=Javadoc\: document design patterns used
-roadmap_1203_li=Support custom collators, for example for natural sort (for text that contains numbers).
-roadmap_1204_li=Write an article about SQLInjection (h2/src/docsrc/html/images/SQLInjection.txt)
-roadmap_1205_li=Convert SQL-injection-2.txt to html document, include SQLInjection.java sample
-roadmap_1206_li=Support OUT parameters in user-defined procedures.
-roadmap_1207_li=Web site design\: http\://www.igniterealtime.org/projects/openfire/index.jsp
-roadmap_1208_li=HSQLDB compatibility\: Openfire server uses\: CREATE SCHEMA PUBLIC AUTHORIZATION DBA; CREATE USER SA PASSWORD ""; GRANT DBA TO SA; SET SCHEMA PUBLIC
-roadmap_1209_li=Translation\: use ?? in help.csv
-roadmap_1210_li=Translated .pdf
-roadmap_1211_li=Recovery tool\: bad blocks should be converted to INSERT INTO SYSTEM_ERRORS(...), and things should go into the .trace.db file
-roadmap_1212_li=Issue 357\: support getGeneratedKeys to return multiple rows when used with batch updates. This is supported by MySQL, but not Derby. Both PostgreSQL and HSQLDB don't support getGeneratedKeys. Also support it when using INSERT ... SELECT.
-roadmap_1213_li=RECOVER\=2 to backup the database, run recovery, open the database
-roadmap_1214_li=Recovery should work with encrypted databases
-roadmap_1215_li=Corruption\: new error code, add help
-roadmap_1216_li=Space reuse\: after init, scan all storages and free those that don't belong to a live database object
-roadmap_1217_li=Access rights\: add missing features (users should be 'owner' of objects; missing rights for sequences; dropping objects)
-roadmap_1218_li=Support NOCACHE table option (Oracle).
-roadmap_1219_li=Support table partitioning.
-roadmap_1220_li=Add regular javadocs (using the default doclet, but another css) to the homepage.
-roadmap_1221_li=The database should be kept open for a longer time when using the server mode.
-roadmap_1222_li=Javadocs\: for each tool, add a copy & paste sample in the class level.
-roadmap_1223_li=Javadocs\: add @author tags.
-roadmap_1224_li=Fluent API for tools\: Server.createTcpServer().setPort(9081).setPassword(password).start();
-roadmap_1225_li=MySQL compatibility\: real SQL statement for DESCRIBE TEST
-roadmap_1226_li=Use a default delay of 1 second before closing a database.
-roadmap_1227_li=Write (log) to system table before adding to internal data structures.
-roadmap_1228_li=Support direct lookup for MIN and MAX when using WHERE (see todo.txt / Direct Lookup).
-roadmap_1229_li=Support other array types (String[], double[]) in PreparedStatement.setObject(int, Object) (with test case).
-roadmap_1230_li=MVCC should not be memory bound (uncommitted data is kept in memory in the delta index; maybe using a regular b-tree index solves the problem).
-roadmap_1231_li=Oracle compatibility\: support NLS_DATE_FORMAT.
-roadmap_1232_li=Support for Thread.interrupt to cancel running statements.
-roadmap_1233_li=Cluster\: add feature to make sure cluster nodes can not get out of sync (for example by stopping one process).
-roadmap_1234_li=H2 Console\: support CLOB/BLOB download using a link.
-roadmap_1235_li=Support flashback queries as in Oracle.
-roadmap_1236_li=Import / Export of fixed width text files.
-roadmap_1237_li=HSQLDB compatibility\: automatic data type for SUM if the value is too big (by default use the same type as the data).
-roadmap_1238_li=Improve the optimizer to select the right index for special cases\: where id between 2 and 4 and booleanColumn
-roadmap_1239_li=Linked tables\: make hidden columns available (Oracle\: rowid and ora_rowscn columns).
-roadmap_1240_li=H2 Console\: in-place autocomplete.
-roadmap_1241_li=Support large databases\: split database files to multiple directories / disks (similar to tablespaces).
-roadmap_1242_li=H2 Console\: support configuration option for fixed width (monospace) font.
-roadmap_1243_li=Native fulltext search\: support analyzers (specially for Chinese, Japanese).
-roadmap_1244_li=Automatically compact databases from time to time (as a background process).
-roadmap_1245_li=Test Eclipse DTP.
-roadmap_1246_li=H2 Console\: autocomplete\: keep the previous setting
-roadmap_1247_li=executeBatch\: option to stop at the first failed statement.
-roadmap_1248_li=Implement OLAP features as described here\: http\://www.devx.com/getHelpOn/10MinuteSolution/16573/0/page/5
-roadmap_1249_li=Support Oracle ROWID (unique identifier for each row).
-roadmap_1250_li=MySQL compatibility\: alter table add index i(c), add constraint c foreign key(c) references t(c);
-roadmap_1251_li=Server mode\: improve performance for batch updates.
-roadmap_1252_li=Applets\: support read-only databases in a zip file (accessed as a resource).
-roadmap_1253_li=Long running queries / errors / trace system table.
-roadmap_1254_li=H2 Console should support JaQu directly.
-roadmap_1255_li=Better document FTL_SEARCH, FTL_SEARCH_DATA.
-roadmap_1256_li=Sequences\: CURRVAL should be session specific. Compatibility with PostgreSQL.
-roadmap_1257_li=Index creation using deterministic functions.
-roadmap_1258_li=ANALYZE\: for unique indexes that allow null, count the number of null.
-roadmap_1259_li=MySQL compatibility\: multi-table delete\: DELETE .. FROM .. [,...] USING - See http\://dev.mysql.com/doc/refman/5.0/en/delete.html
-roadmap_1260_li=AUTO_SERVER\: support changing IP addresses (disable a network while the database is open).
-roadmap_1261_li=Avoid using java.util.Calendar internally because it's slow, complicated, and buggy.
-roadmap_1262_li=Support TRUNCATE .. CASCADE like PostgreSQL.
-roadmap_1263_li=Fulltext search\: lazy result generation using SimpleRowSource.
-roadmap_1264_li=Fulltext search\: support alternative syntax\: WHERE FTL_CONTAINS(name, 'hello').
-roadmap_1265_li=MySQL compatibility\: support REPLACE, see http\://dev.mysql.com/doc/refman/6.0/en/replace.html and issue 73.
-roadmap_1266_li=MySQL compatibility\: support INSERT INTO table SET column1 \= value1, column2 \= value2
-roadmap_1267_li=Docs\: add a one line description for each functions and SQL statements at the top (in the link section).
-roadmap_1268_li=Javadoc search\: weight for titles should be higher ('random' should list Functions as the best match).
-roadmap_1269_li=Replace information_schema tables with regular tables that are automatically re-built when needed. Use indexes.
-roadmap_1270_li=Issue 50\: Oracle compatibility\: support calling 0-parameters functions without parenthesis. Make constants obsolete.
-roadmap_1271_li=MySQL, HSQLDB compatibility\: support where 'a'\=1 (not supported by Derby, PostgreSQL)
-roadmap_1272_li=Support a data type "timestamp with timezone" using java.util.Calendar.
-roadmap_1273_li=Finer granularity for SLF4J trace - See http\://code.google.com/p/h2database/issues/detail?id\=62
-roadmap_1274_li=Add database creation date and time to the database.
-roadmap_1275_li=Support ASSERTION.
-roadmap_1276_li=MySQL compatibility\: support comparing 1\='a'
-roadmap_1277_li=Support PostgreSQL lock modes\: http\://www.postgresql.org/docs/8.3/static/explicit-locking.html
-roadmap_1278_li=PostgreSQL compatibility\: test DbVisualizer and Squirrel SQL using a new PostgreSQL JDBC driver.
-roadmap_1279_li=RunScript should be able to read from standard input (or a quiet mode for Shell).
-roadmap_1280_li=Natural join\: support select x from dual natural join dual.
-roadmap_1281_li=Support using system properties in database URLs (may be a security problem).
-roadmap_1282_li=Natural join\: somehow support this\: select a.x, b.x, x from dual a natural join dual b
-roadmap_1283_li=Use the Java service provider mechanism to register file systems and function libraries.
-roadmap_1284_li=MySQL compatibility\: for auto_increment columns, convert 0 to next value (as when inserting NULL).
-roadmap_1285_li=Optimization for multi-column IN\: use an index if possible. Example\: (A, B) IN((1, 2), (2, 3)).
-roadmap_1286_li=Optimization for EXISTS\: convert to inner join or IN(..) if possible.
-roadmap_1287_li=Functions\: support hashcode(value); cryptographic and fast
-roadmap_1288_li=Serialized file lock\: support long running queries.
-roadmap_1289_li=Network\: use 127.0.0.1 if other addresses don't work.
-roadmap_1290_li=Pluggable network protocol (currently Socket/ServerSocket over TCP/IP) - see also TransportServer with master slave replication.
-roadmap_1291_li=Support reading JCR data\: one table per node type; query table; cache option
-roadmap_1292_li=OSGi\: create a sample application, test, document.
-roadmap_1293_li=help.csv\: use complete examples for functions; run as test case.
-roadmap_1294_li=Functions to calculate the memory and disk space usage of a table, a row, or a value.
-roadmap_1295_li=Re-implement PooledConnection; use a lightweight connection object.
-roadmap_1296_li=Doclet\: convert tests in javadocs to a java class.
-roadmap_1297_li=Doclet\: format fields like methods, but support sorting by name and value.
-roadmap_1298_li=Doclet\: shrink the html files.
-roadmap_1299_li=MySQL compatibility\: support SET NAMES 'latin1' - See also http\://code.google.com/p/h2database/issues/detail?id\=56
-roadmap_1300_li=Allow to scan index backwards starting with a value (to better support ORDER BY DESC).
-roadmap_1301_li=Java Service Wrapper\: try http\://yajsw.sourceforge.net/
-roadmap_1302_li=Batch parameter for INSERT, UPDATE, and DELETE, and commit after each batch. See also MySQL DELETE.
-roadmap_1303_li=MySQL compatibility\: support ALTER TABLE .. MODIFY COLUMN.
-roadmap_1304_li=Use a lazy and auto-close input stream (open resource when reading, close on eof).
-roadmap_1305_li=Connection pool\: 'reset session' command (delete temp tables, rollback, auto-commit true).
-roadmap_1306_li=Improve SQL documentation, see http\://www.w3schools.com/sql/
-roadmap_1307_li=MySQL compatibility\: DatabaseMetaData.stores*() methods should return the same values. Test with SquirrelSQL.
-roadmap_1308_li=MS SQL Server compatibility\: support DATEPART syntax.
-roadmap_1309_li=Sybase/DB2/Oracle compatibility\: support out parameters in stored procedures - See http\://code.google.com/p/h2database/issues/detail?id\=83
-roadmap_1310_li=Support INTERVAL data type (see Oracle and others).
-roadmap_1311_li=Combine Server and Console tool (only keep Server).
-roadmap_1312_li=Store the Lucene index in the database itself.
-roadmap_1313_li=Support standard MERGE statement\: http\://en.wikipedia.org/wiki/Merge_%28SQL%29
-roadmap_1314_li=Oracle compatibility\: support DECODE(x, ...).
-roadmap_1315_li=MVCC\: compare concurrent update behavior with PostgreSQL and Oracle.
-roadmap_1316_li=HSQLDB compatibility\: CREATE FUNCTION (maybe using a Function interface).
-roadmap_1317_li=HSQLDB compatibility\: support CALL "java.lang.Math.sqrt"(2.0)
-roadmap_1318_li=Support comma as the decimal separator in the CSV tool.
-roadmap_1319_li=Compatibility\: Java functions with SQLJ Part1 http\://www.acm.org/sigmod/record/issues/9912/standards.pdf.gz
-roadmap_1320_li=Compatibility\: Java functions with SQL/PSM (Persistent Stored Modules) - need to find the documentation.
-roadmap_1321_li=CACHE_SIZE\: automatically use a fraction of Runtime.maxMemory - maybe automatically the second level cache.
-roadmap_1322_li=Support date/time/timestamp as documented in http\://en.wikipedia.org/wiki/ISO_8601
-roadmap_1323_li=PostgreSQL compatibility\: when in PG mode, treat BYTEA data like PG.
-roadmap_1324_li=Support \=ANY(array) as in PostgreSQL. See also http\://www.postgresql.org/docs/8.0/interactive/arrays.html
-roadmap_1325_li=IBM DB2 compatibility\: support PREVIOUS VALUE FOR sequence.
-roadmap_1326_li=Compatibility\: use different LIKE ESCAPE characters depending on the mode (disable for Derby, HSQLDB, DB2, Oracle, MSSQLServer).
-roadmap_1327_li=Oracle compatibility\: support CREATE SYNONYM table FOR schema.table.
-roadmap_1328_li=FTP\: document the server, including -ftpTask option to execute / kill remote processes
-roadmap_1329_li=FTP\: problems with multithreading?
-roadmap_1330_li=FTP\: implement SFTP / FTPS
-roadmap_1331_li=FTP\: access to a database (.csv for a table, a directory for a schema, a file for a lob, a script.sql file).
-roadmap_1332_li=More secure default configuration if remote access is enabled.
-roadmap_1333_li=Improve database file locking (maybe use native file locking). The current approach seems to be problematic if the file system is on a remote share (see Google Group 'Lock file modification time is in the future').
-roadmap_1334_li=Document internal features such as BELONGS_TO_TABLE, NULL_TO_DEFAULT, SEQUENCE.
-roadmap_1335_li=Issue 107\: Prefer using the ORDER BY index if LIMIT is used.
-roadmap_1336_li=An index on (id, name) should be used for a query\: select * from t where s\=? order by i
-roadmap_1337_li=Support reading sequences using DatabaseMetaData.getTables(null, null, null, new String[]{"SEQUENCE"}). See PostgreSQL.
-roadmap_1338_li=Add option to enable TCP_NODELAY using Socket.setTcpNoDelay(true).
-roadmap_1339_li=Maybe disallow \= within database names (jdbc\:h2\:mem\:MODE\=DB2 means database name MODE\=DB2).
-roadmap_1340_li=Fast alter table add column.
-roadmap_1341_li=Improve concurrency for in-memory database operations.
-roadmap_1342_li=Issue 122\: Support for connection aliases for remote tcp connections.
-roadmap_1343_li=Fast scrambling (strong encryption doesn't help if the password is included in the application).
-roadmap_1344_li=H2 Console\: support -webPassword to require a password to access preferences or shutdown.
-roadmap_1345_li=Issue 126\: The index name should be "IDX_" plus the constraint name unless there is a conflict, in which case append a number.
-roadmap_1346_li=Issue 127\: Support activation/deactivation of triggers
-roadmap_1347_li=Issue 130\: Custom log event listeners
-roadmap_1348_li=Issue 131\: IBM DB2 compatibility\: sysibm.sysdummy1
-roadmap_1349_li=Issue 132\: Use Java enum trigger type.
-roadmap_1350_li=Issue 134\: IBM DB2 compatibility\: session global variables.
-roadmap_1351_li=Cluster\: support load balance with values for each server / auto detect.
-roadmap_1352_li=FTL_SET_OPTION(keyString, valueString) with key stopWords at first.
-roadmap_1353_li=Pluggable access control mechanism.
-roadmap_1354_li=Fulltext search (Lucene)\: support streaming CLOB data.
-roadmap_1355_li=Document/example how to create and read an encrypted script file.
-roadmap_1356_li=Check state of http\://issues.apache.org/jira/browse/OPENJPA-1367 (H2 does support cross joins).
-roadmap_1357_li=Fulltext search (Lucene)\: only prefix column names with _ if they already start with _. Instead of DATA / QUERY / modified use _DATA, _QUERY, _MODIFIED if possible.
-roadmap_1358_li=Support a way to create or read compressed encrypted script files using an API.
-roadmap_1359_li=Scripting language support (Javascript).
-roadmap_1360_li=The network client should better detect if the server is not an H2 server and fail early.
-roadmap_1361_li=H2 Console\: support CLOB/BLOB upload.
-roadmap_1362_li=Database file lock\: detect hibernate / standby / very slow threads (compare system time).
-roadmap_1363_li=Automatic detection of redundant indexes.
-roadmap_1364_li=Maybe reject join without "on" (except natural join).
-roadmap_1365_li=Implement GiST (Generalized Search Tree for Secondary Storage).
-roadmap_1366_li=Function to read a number of bytes/characters from a BLOB or CLOB.
-roadmap_1367_li=Issue 156\: Support SELECT ? UNION SELECT ?.
-roadmap_1368_li=Automatic mixed mode\: support a port range list (to avoid firewall problems).
-roadmap_1369_li=Support the pseudo column rowid, oid, _rowid_.
-roadmap_1370_li=H2 Console / large result sets\: stream early instead of keeping a whole result in-memory
-roadmap_1371_li=Support TRUNCATE for linked tables.
-roadmap_1372_li=UNION\: evaluate INTERSECT before UNION (like most other databases except Oracle).
-roadmap_1373_li=Delay creating the information schema, and share metadata columns.
-roadmap_1374_li=TCP Server\: use a nonce (number used once) to protect unencrypted channels against replay attacks.
-roadmap_1375_li=Simplify running scripts and recovery\: CREATE FORCE USER (overwrites an existing user).
-roadmap_1376_li=Support CREATE DATABASE LINK (a custom JDBC driver is already supported).
-roadmap_1377_li=Support large GROUP BY operations. Issue 216.
-roadmap_1378_li=Issue 163\: Allow to create foreign keys on metadata types.
-roadmap_1379_li=Logback\: write a native DBAppender.
-roadmap_1380_li=Cache size\: don't use more cache than what is available.
-roadmap_1381_li=Allow to defragment at runtime (similar to SHUTDOWN DEFRAG) in a background thread.
-roadmap_1382_li=Tree index\: Instead of an AVL tree, use a general balanced tree or a scapegoat tree.
-roadmap_1383_li=User defined functions\: allow to store the bytecode (of just the class, or the jar file of the extension) in the database.
-roadmap_1384_li=Compatibility\: ResultSet.getObject() on a CLOB (TEXT) should return String for PostgreSQL and MySQL.
-roadmap_1385_li=Optimizer\: WHERE X\=? AND Y IN(?), it always uses the index on Y. Should be cost based.
-roadmap_1386_li=Common Table Expression (CTE) / recursive queries\: support parameters. Issue 314.
-roadmap_1387_li=Oracle compatibility\: support INSERT ALL.
-roadmap_1388_li=Issue 178\: Optimizer\: index usage when both ascending and descending indexes are available.
-roadmap_1389_li=Issue 179\: Related subqueries in HAVING clause.
-roadmap_1390_li=IBM DB2 compatibility\: NOT NULL WITH DEFAULT. Similar to MySQL Mode.convertInsertNullToZero.
-roadmap_1391_li=Creating primary key\: always create a constraint.
-roadmap_1392_li=Maybe use a different page layout\: keep the data at the head of the page, and ignore the tail (don't store / read it). This may increase write / read performance depending on the file system.
-roadmap_1393_li=Indexes of temporary tables are currently kept in-memory. Is this how it should be?
-roadmap_1394_li=The Shell tool should support the same built-in commands as the H2 Console.
-roadmap_1395_li=Maybe use PhantomReference instead of finalize.
-roadmap_1396_li=Database file name suffix\: should only have one dot by default. Example\: .h2db
-roadmap_1397_li=Issue 196\: Function based indexes
-roadmap_1398_li=ALTER TABLE ... ADD COLUMN IF NOT EXISTS columnName.
-roadmap_1399_li=Fix the disk space leak (killing the process at the exact right moment will increase the disk space usage; this space is not re-used). See TestDiskSpaceLeak.java
-roadmap_1400_li=ROWNUM\: Oracle compatibility when used within a subquery. Issue 198.
-roadmap_1401_li=Allow to access the database over HTTP (possibly using port 80) and a servlet in a REST way.
-roadmap_1402_li=ODBC\: encrypted databases are not supported because the ;CIPHER\= can not be set.
-roadmap_1403_li=Support CLOB and BLOB update, especially conn.createBlob().setBinaryStream(1);
-roadmap_1404_li=Optimizer\: index usage when both ascending and descending indexes are available. Issue 178.
-roadmap_1405_li=Issue 306\: Support schema specific domains.
-roadmap_1406_li=Triggers\: support user defined execution order. Oracle\: CREATE OR REPLACE TRIGGER TEST_2 BEFORE INSERT ON TEST FOR EACH ROW FOLLOWS TEST_1. SQL specifies that multiple triggers should be fired in time-of-creation order. PostgreSQL uses name order, which was judged to be more convenient. Derby\: triggers are fired in the order in which they were created.
-roadmap_1407_li=PostgreSQL compatibility\: combine "users" and "roles". See\: http\://www.postgresql.org/docs/8.1/interactive/user-manag.html
-roadmap_1408_li=Improve documentation of system properties\: only list the property names, default values, and description.
-roadmap_1409_li=Support running totals / cumulative sum using SUM(..) OVER(..).
-roadmap_1410_li=Improve object memory size calculation. Use constants for known VMs, or use reflection to call java.lang.instrument.Instrumentation.getObjectSize(Object objectToSize)
-roadmap_1411_li=Triggers\: NOT NULL checks should be done after running triggers (Oracle behavior, maybe others).
-roadmap_1412_li=Common Table Expression (CTE) / recursive queries\: support INSERT INTO ... SELECT ... Issue 219.
-roadmap_1413_li=Common Table Expression (CTE) / recursive queries\: support non-recursive queries. Issue 217.
-roadmap_1414_li=Common Table Expression (CTE) / recursive queries\: avoid endless loop. Issue 218.
-roadmap_1415_li=Common Table Expression (CTE) / recursive queries\: support multiple named queries. Issue 220.
-roadmap_1416_li=Common Table Expression (CTE) / recursive queries\: identifier scope may be incorrect. Issue 222.
-roadmap_1417_li=Log long running transactions (similar to long running statements).
-roadmap_1418_li=Parameter data type is data type of other operand. Issue 205.
-roadmap_1419_li=Some combinations of nested join with right outer join are not supported.
-roadmap_1420_li=DatabaseEventListener.openConnection(id) and closeConnection(id).
-roadmap_1421_li=Listener or authentication module for new connections, or a way to restrict the number of different connections to a tcp server, or to prevent logging in with the same username and password from different IPs. Possibly using the DatabaseEventListener API, or a new API.
-roadmap_1422_li=Compatibility for data type CHAR (Derby, HSQLDB). Issue 212.
-roadmap_1423_li=Compatibility with MySQL TIMESTAMPDIFF. Issue 209.
-roadmap_1424_li=Optimizer\: use a histogram of the data, especially for non-normal distributions.
-roadmap_1425_li=Trigger\: allow declaring as source code (like functions).
-roadmap_1426_li=User defined aggregate\: allow declaring as source code (like functions).
-roadmap_1427_li=The error "table not found" is sometimes caused by using the wrong database. Add "(this database is empty)" to the exception message if applicable.
-roadmap_1428_li=MySQL + PostgreSQL compatibility\: support string literal escape with \\n.
-roadmap_1429_li=PostgreSQL compatibility\: support string literal escape with double \\\\.
-roadmap_1430_li=Document the TCP server "management_db". Maybe include the IP address of the client.
-roadmap_1431_li=Use javax.tools.JavaCompilerTool instead of com.sun.tools.javac.Main
-roadmap_1432_li=If a database object was not found in the current schema, but one with the same name exists in another schema, include that in the error message.
-roadmap_1433_li=Optimization to use an index for OR when using multiple keys\: where (key1 \= ? and key2 \= ?) OR (key1 \= ? and key2 \= ?)
-roadmap_1434_li=Issue 302\: Support optimizing queries with both inner and outer joins, as in\: select * from test a inner join test b on a.id\=b.id inner join o on o.id\=a.id where b.x\=1 (the optimizer should swap a and b here). See also TestNestedJoins, tag "swapInnerJoinTables".
-roadmap_1435_li=JaQu should support a DataSource and a way to create a Db object using a Connection (for multi-threaded usage with a connection pool).
-roadmap_1436_li=Move table to a different schema (rename table to a different schema), possibly using ALTER TABLE ... SET SCHEMA ...;
-roadmap_1437_li=nioMapped file system\: automatically fall back to regular (non mapped) IO if there is a problem (out of memory exception for example).
-roadmap_1438_li=Column as parameter of function table. Issue 228.
-roadmap_1439_li=Connection pool\: detect ;AUTOCOMMIT\=FALSE in the database URL, and if set, disable autocommit for all connections.
-roadmap_1440_li=Compatibility with MS Access\: support "&" to concatenate text.
-roadmap_1441_li=The BACKUP statement should not synchronize on the database, and therefore should not block other users.
-roadmap_1442_li=Document the database file format.
-roadmap_1443_li=Support reading LOBs.
-roadmap_1444_li=Require appending DANGEROUS\=TRUE when using certain dangerous settings such as LOG\=0, LOG\=1, LOCK_MODE\=0, disabling FILE_LOCK,...
-roadmap_1445_li=Support UDT (user defined types) similar to how Apache Derby supports it\: check constraint, allow to use it in Java functions as parameters (return values already seem to work).
-roadmap_1446_li=Encrypted file system (use cipher text stealing so file length doesn't need to decrypt; 4 KB header per file, optional compatibility with current encrypted database files).
-roadmap_1447_li=Issue 229\: SELECT with simple OR tests uses tableScan when it could use indexes.
-roadmap_1448_li=GROUP BY queries should use a temporary table if there are too many rows.
-roadmap_1449_li=BLOB\: support random access when reading.
-roadmap_1450_li=CLOB\: support random access when reading (this is harder than for BLOB as data is stored in UTF-8 form).
-roadmap_1451_li=Compatibility\: support SELECT INTO (as an alias for CREATE TABLE ... AS SELECT ...).
-roadmap_1452_li=Compatibility with MySQL\: support SELECT INTO OUTFILE (cannot be an existing file) as an alias for CSVWRITE(...).
-roadmap_1453_li=Compatibility with MySQL\: support non-strict mode (sql_mode \= "") any data that is too large for the column will just be truncated or set to the default value.
-roadmap_1454_li=The full condition should be sent to the linked table, not just the indexed condition. Example\: TestLinkedTableFullCondition
-roadmap_1455_li=Compatibility with IBM DB2\: CREATE PROCEDURE.
-roadmap_1456_li=Compatibility with IBM DB2\: SQL cursors.
-roadmap_1457_li=Single-column primary key values are always stored explicitly. This is not required.
-roadmap_1458_li=Compatibility with MySQL\: support CREATE TABLE TEST(NAME VARCHAR(255) CHARACTER SET UTF8).
-roadmap_1459_li=CALL is incompatible with other databases because it returns a result set, so that CallableStatement.execute() returns true.
-roadmap_1460_li=Optimization for large lists for column IN(1, 2, 3, 4,...) - currently a list is used, could potentially use a hash set (maybe only for a part of the values - the ones that can be evaluated).
-roadmap_1461_li=Compatibility for ARRAY data type (Oracle\: VARRAY(n) of VARCHAR(m); HSQLDB\: VARCHAR(n) ARRAY; Postgres\: VARCHAR(n)[]).
-roadmap_1462_li=PostgreSQL compatible array literal syntax\: ARRAY[['a', 'b'], ['c', 'd']]
-roadmap_1463_li=PostgreSQL compatibility\: UPDATE with FROM.
-roadmap_1464_li=Issue 297\: Oracle compatibility for "at time zone".
-roadmap_1465_li=IBM DB2 compatibility\: IDENTITY_VAL_LOCAL().
-roadmap_1466_li=Support SQL/XML.
-roadmap_1467_li=Support concurrent opening of databases.
-roadmap_1468_li=Improved error message and diagnostics in case of network configuration problems.
-roadmap_1469_li=TRUNCATE should reset the identity columns as in MySQL and MS SQL Server (and possibly other databases).
-roadmap_1470_li=Adding a primary key should make the columns 'not null' unless there is a row with null (compatibility with MySQL, PostgreSQL, HSQLDB; not Derby).
-roadmap_1471_li=ARRAY data type\: support Integer[] and so on in Java functions (currently only Object[] is supported).
-roadmap_1472_li=MySQL compatibility\: LOCK TABLES a READ, b READ - see also http\://dev.mysql.com/doc/refman/5.0/en/lock-tables.html
-roadmap_1473_li=The HTML to PDF converter should use http\://code.google.com/p/wkhtmltopdf/
-roadmap_1474_li=Issue 303\: automatically convert "X NOT IN(SELECT...)" to "NOT EXISTS(...)".
-roadmap_1475_li=MySQL compatibility\: update test1 t1, test2 t2 set t1.name\=t2.name where t1.id\=t2.id.
-roadmap_1476_li=Issue 283\: Improve performance of H2 on Android.
-roadmap_1477_li=Support INSERT INTO / UPDATE / MERGE ... RETURNING to retrieve the generated key(s).
-roadmap_1478_li=Column compression option - see http\://groups.google.com/group/h2-database/browse_thread/thread/3e223504e52671fa/243da82244343f5d
-roadmap_1479_li=PostgreSQL compatibility\: ALTER TABLE ADD combined with adding a foreign key constraint, as in ALTER TABLE FOO ADD COLUMN PARENT BIGINT REFERENCES FOO(ID).
-roadmap_1480_li=MS SQL Server compatibility\: support @@ROWCOUNT.
-roadmap_1481_li=PostgreSQL compatibility\: LOG(x) is LOG10(x) and not LN(x).
-roadmap_1482_li=Issue 311\: Serialized lock mode\: executeQuery of write operations fails.
-roadmap_1483_li=PostgreSQL compatibility\: support PgAdmin III (especially the function current_setting).
-roadmap_1484_li=MySQL compatibility\: support TIMESTAMPADD.
-roadmap_1485_li=Support SELECT ... FOR UPDATE with joins (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-roadmap_1486_li=Support SELECT ... FOR UPDATE OF [field-list] (supported by PostgreSQL, MySQL, and HSQLDB; but not Derby).
-roadmap_1487_li=Support SELECT ... FOR UPDATE OF [table-list] (supported by PostgreSQL, HSQLDB, Sybase).
-roadmap_1488_li=TRANSACTION_ID() for in-memory databases.
-roadmap_1489_li=TRANSACTION_ID() should be long (same as HSQLDB and PostgreSQL).
-roadmap_1490_li=Support [INNER | OUTER] JOIN USING(column [,...]).
-roadmap_1491_li=Support NATURAL [ { LEFT | RIGHT } [ OUTER ] | INNER ] JOIN (Derby, Oracle)
-roadmap_1492_li=GROUP BY columnNumber (similar to ORDER BY columnNumber) (MySQL, PostgreSQL, SQLite; not by HSQLDB and Derby).
-roadmap_1493_li=Sybase / MS SQL Server compatibility\: CONVERT(..) parameters are swapped.
-roadmap_1494_li=Index conditions\: WHERE AGE>1 should not scan through all rows with AGE\=1.
-roadmap_1495_li=PHP support\: H2 should support PDO, or test with PostgreSQL PDO.
-roadmap_1496_li=Outer joins\: if no column of the outer join table is referenced, the outer join table could be removed from the query.
-roadmap_1497_li=Cluster\: allow using auto-increment and identity columns by ensuring they are executed in lock-step.
-roadmap_1498_li=MySQL compatibility\: index names only need to be unique for the given table.
-roadmap_1499_li=Issue 352\: constraints\: distinguish between 'no action' and 'restrict'. Currently, only restrict is supported, and 'no action' is internally mapped to 'restrict'. The database meta data returns 'restrict' in all cases.
-roadmap_1500_li=Oracle compatibility\: support MEDIAN aggregate function.
-roadmap_1501_li=Issue 348\: Oracle compatibility\: division should return a decimal result.
-roadmap_1502_li=Read rows on demand\: instead of reading the whole row, only read up to the column that is requested. Keep a pointer to the data area and the column id that is already read.
-roadmap_1503_li=Long running transactions\: log session id when detected.
-roadmap_1504_li=Optimization\: "select id from test" should use the index on id even without "order by".
-roadmap_1505_li=Issue 362\: LIMIT support for UPDATE statements (MySQL compatibility).
-roadmap_1506_li=Sybase SQL Anywhere compatibility\: SELECT TOP ... START AT ...
-roadmap_1507_li=Use Java 6 SQLException subclasses.
-roadmap_1508_li=Issue 390\: RUNSCRIPT FROM '...' CONTINUE_ON_ERROR
-roadmap_1509_li=Use Java 6 exceptions\: SQLDataException, SQLSyntaxErrorException, SQLTimeoutException,..
-roadmap_1510_li=Support index-only selects (i.e. without needing to load the actual table data)
-roadmap_1511_h2=Not Planned
-roadmap_1512_li=HSQLDB (did) support this\: select id i from test where i<0 (other databases don't). Supporting it may break compatibility.
-roadmap_1513_li=String.intern (so that Strings can be compared with \=\=) will not be used because some VMs have problems when used extensively.
-roadmap_1514_li=In prepared statements, identifier names (table names and so on) can not be parameterized. Adding such a feature would complicate the source code without providing reasonable speedup, and would slow down regular prepared statements.
-sourceError_1000_h1=Error Analyzer
-sourceError_1001_a=Home
-sourceError_1002_a=Input
-sourceError_1003_h2= Details Source Code
-sourceError_1004_p=Paste the error message and stack trace below and click on 'Details' or 'Source Code'\:
-sourceError_1005_b=Error Code\:
-sourceError_1006_b=Product Version\:
-sourceError_1007_b=Message\:
-sourceError_1008_b=More Information\:
-sourceError_1009_b=Stack Trace\:
-sourceError_1010_b=Source File\:
-sourceError_1011_p=\ Inline
-tutorial_1000_h1=Tutorial
-tutorial_1001_a=\ Starting and Using the H2 Console
-tutorial_1002_a=\ Special H2 Console Syntax
-tutorial_1003_a=\ Settings of the H2 Console
-tutorial_1004_a=\ Connecting to a Database using JDBC
-tutorial_1005_a=\ Creating New Databases
-tutorial_1006_a=\ Using the Server
-tutorial_1007_a=\ Using Hibernate
-tutorial_1008_a=\ Using TopLink and Glassfish
-tutorial_1009_a=\ Using EclipseLink
-tutorial_1010_a=\ Using Apache ActiveMQ
-tutorial_1011_a=\ Using H2 within NetBeans
-tutorial_1012_a=\ Using H2 with jOOQ
-tutorial_1013_a=\ Using Databases in Web Applications
-tutorial_1014_a=\ Android
-tutorial_1015_a=\ CSV (Comma Separated Values) Support
-tutorial_1016_a=\ Upgrade, Backup, and Restore
-tutorial_1017_a=\ Command Line Tools
-tutorial_1018_a=\ The Shell Tool
-tutorial_1019_a=\ Using OpenOffice Base
-tutorial_1020_a=\ Java Web Start / JNLP
-tutorial_1021_a=\ Using a Connection Pool
-tutorial_1022_a=\ Fulltext Search
-tutorial_1023_a=\ User-Defined Variables
-tutorial_1024_a=\ Date and Time
-tutorial_1025_a=\ Using Spring
-tutorial_1026_a=\ OSGi
-tutorial_1027_a=\ Java Management Extension (JMX)
-tutorial_1028_h2=Starting and Using the H2 Console
-tutorial_1029_p=\ The H2 Console application lets you access a database using a browser. This can be an H2 database, or another database that supports the JDBC API.
-tutorial_1030_p=\ This is a client/server application, so both a server and a client (a browser) are required to run it.
-tutorial_1031_p=\ Depending on your platform and environment, there are multiple ways to start the H2 Console\:
-tutorial_1032_th=OS
-tutorial_1033_th=Start
-tutorial_1034_td=Windows
-tutorial_1035_td=\ Click [Start], [All Programs], [H2], and [H2 Console (Command Line)]
-tutorial_1036_td=\ An icon will be added to the system tray\:
-tutorial_1037_td=\ If you don't get the window and the system tray icon, then maybe Java is not installed correctly (in this case, try another way to start the application). A browser window should open and point to the login page at http\://localhost\:8082.
-tutorial_1038_td=Windows
-tutorial_1039_td=\ Open a file browser, navigate to h2/bin, and double click on h2.bat.
-tutorial_1040_td=\ A console window appears. If there is a problem, you will see an error message in this window. A browser window will open and point to the login page (URL\: http\://localhost\:8082).
-tutorial_1041_td=Any
-tutorial_1042_td=\ Double click on the h2*.jar file. This only works if the .jar suffix is associated with Java.
-tutorial_1043_td=Any
-tutorial_1044_td=\ Open a console window, navigate to the directory h2/bin, and type\:
-tutorial_1045_h3=Firewall
-tutorial_1046_p=\ If you start the server, you may get a security warning from the firewall (if you have installed one). If you don't want other computers in the network to access the application on your machine, you can let the firewall block those connections. The connection from the local machine will still work. Only if you want other computers to access the database on this computer do you need to allow remote connections in the firewall.
-tutorial_1047_p=\ It has been reported that when using Kaspersky 7.0 with the firewall enabled, the H2 Console is very slow when connecting over the IP address. A workaround is to connect using 'localhost'.
-tutorial_1048_p=\ A small firewall is already built into the server\: other computers may not connect to the server by default. To change this, go to 'Preferences' and select 'Allow connections from other computers'.
-tutorial_1049_h3=Testing Java
-tutorial_1050_p=\ To find out which version of Java is installed, open a command prompt and type\:
-tutorial_1051_p=\ If you get an error message, you may need to add the Java binary directory to the path environment variable.
-tutorial_1052_h3=Error Message 'Port may be in use'
-tutorial_1053_p=\ You can only start one instance of the H2 Console, otherwise you will get the following error message\: "The Web server could not be started. Possible cause\: another server is already running...". It is possible to start multiple console applications on the same computer (using different ports), but this is usually not required as the console supports multiple concurrent connections.
-tutorial_1054_h3=Using another Port
-tutorial_1055_p=\ If the default port of the H2 Console is already in use by another application, then a different port needs to be configured. The settings are stored in a properties file. For details, see Settings of the H2 Console. The relevant entry is webPort.
-tutorial_1056_p=\ If no port is specified for the TCP and PG servers, each service will try to listen on its default port. If the default port is already in use, a random port is used.
-tutorial_1057_h3=Connecting to the Server using a Browser
-tutorial_1058_p=\ If the server started successfully, you can connect to it using a web browser. Javascript needs to be enabled. If you started the server on the same computer as the browser, open the URL http\://localhost\:8082. If you want to connect to the application from another computer, you need to provide the IP address of the server, for example\: http\://192.168.0.2\:8082. If you enabled TLS on the server side, the URL needs to start with https\://.
-tutorial_1059_h3=Multiple Concurrent Sessions
-tutorial_1060_p=\ Multiple concurrent browser sessions are supported. As the database objects reside on the server, the amount of concurrent work is limited by the memory available to the server application.
-tutorial_1061_h3=Login
-tutorial_1062_p=\ At the login page, you need to provide connection information to connect to a database. Set the JDBC driver class of your database, the JDBC URL, user name, and password. When you are done, click [Connect].
-tutorial_1063_p=\ You can save and reuse previously saved settings. The settings are stored in a properties file (see Settings of the H2 Console).
-tutorial_1064_h3=Error Messages
-tutorial_1065_p=\ Error messages are shown in red. You can show/hide the stack trace of the exception by clicking on the message.
-tutorial_1066_h3=Adding Database Drivers
-tutorial_1067_p=\ To register additional JDBC drivers (MySQL, PostgreSQL, HSQLDB,...), add the jar file names to the environment variables H2DRIVERS or CLASSPATH. Example (Windows)\: to add the HSQLDB JDBC driver C\:\\Programs\\hsqldb\\lib\\hsqldb.jar, set the environment variable H2DRIVERS to C\:\\Programs\\hsqldb\\lib\\hsqldb.jar.
-tutorial_1068_p=\ Multiple drivers can be set; entries need to be separated by ; (Windows) or \: (other operating systems). Spaces in the path names are supported. The settings must not be quoted.
-tutorial_1069_h3=Using the H2 Console
-tutorial_1070_p=\ The H2 Console application has three main panels\: the toolbar on top, the tree on the left, and the query/result panel on the right. The database objects (for example, tables) are listed on the left. Type a SQL command in the query panel and click [Run]. The result appears just below the command.
-tutorial_1071_h3=Inserting Table Names or Column Names
-tutorial_1072_p=\ To insert table and column names into the script, click on the item in the tree. If you click on a table while the query is empty, then SELECT * FROM ... is added. While typing a query, the table that was used is expanded in the tree. For example if you type SELECT * FROM TEST T WHERE T. then the table TEST is expanded.
-tutorial_1073_h3=Disconnecting and Stopping the Application
-tutorial_1074_p=\ To log out of the database, click [Disconnect] in the toolbar panel. However, the server is still running and ready to accept new sessions.
-tutorial_1075_p=\ To stop the server, right click on the system tray icon and select [Exit]. If you don't have the system tray icon, navigate to [Preferences] and click [Shutdown], press [Ctrl]+[C] in the console where the server was started (Windows), or close the console window.
-tutorial_1076_h2=Special H2 Console Syntax
-tutorial_1077_p=\ The H2 Console supports a few built-in commands. Those are interpreted within the H2 Console, so they work with any database. Built-in commands need to be at the beginning of a statement (before any remarks), otherwise they are not parsed correctly. If in doubt, add ; before the command.
-tutorial_1078_th=Command(s)
-tutorial_1079_th=Description
-tutorial_1080_td=\ @autocommit_true;
-tutorial_1081_td=\ @autocommit_false;
-tutorial_1082_td=\ Enable or disable autocommit.
-tutorial_1083_td=\ @cancel;
-tutorial_1084_td=\ Cancel the currently running statement.
-tutorial_1085_td=\ @columns null null TEST;
-tutorial_1086_td=\ @index_info null null TEST;
-tutorial_1087_td=\ @tables;
-tutorial_1088_td=\ @tables null null TEST;
-tutorial_1089_td=\ Call the corresponding DatabaseMetaData.get method. Patterns are case sensitive (usually identifiers are uppercase). For information about the parameters, see the Javadoc documentation. Missing parameters at the end of the line are set to null. The complete list of metadata commands is\: @attributes, @best_row_identifier, @catalogs, @columns, @column_privileges, @cross_references, @exported_keys, @imported_keys, @index_info, @primary_keys, @procedures, @procedure_columns, @schemas, @super_tables, @super_types, @tables, @table_privileges, @table_types, @type_info, @udts, @version_columns
-tutorial_1090_td=\ @edit select * from test;
-tutorial_1091_td=\ Use an updatable result set.
-tutorial_1092_td=\ @generated insert into test() values();
-tutorial_1093_td=\ Show the result of Statement.getGeneratedKeys().
-tutorial_1094_td=\ @history;
-tutorial_1095_td=\ List the command history.
-tutorial_1096_td=\ @info;
-tutorial_1097_td=\ Display the result of various Connection and DatabaseMetaData methods.
-tutorial_1098_td=\ @list select * from test;
-tutorial_1099_td=\ Show the result set in list format (each column on its own line, with row numbers).
-tutorial_1100_td=\ @loop 1000 select ?, ?/*rnd*/;
-tutorial_1101_td=\ @loop 1000 @statement select ?;
-tutorial_1102_td=\ Run the statement this many times. Parameters (?) are set using a loop from 0 up to x - 1. Random values are used for each ?/*rnd*/. A Statement object is used instead of a PreparedStatement if @statement is used. Result sets are read until ResultSet.next() returns false. Timing information is printed.
-tutorial_1103_td=\ @maxrows 20;
-tutorial_1104_td=\ Set the maximum number of rows to display.
-tutorial_1105_td=\ @memory;
-tutorial_1106_td=\ Show the used and free memory. This will call System.gc().
-tutorial_1107_td=\ @meta select 1;
-tutorial_1108_td=\ List the ResultSetMetaData after running the query.
-tutorial_1109_td=\ @parameter_meta select ?;
-tutorial_1110_td=\ Show the result of the PreparedStatement.getParameterMetaData() calls. The statement is not executed.
-tutorial_1111_td=\ @prof_start;
-tutorial_1112_td=\ call hash('SHA256', '', 1000000);
-tutorial_1113_td=\ @prof_stop;
-tutorial_1114_td=\ Start/stop the built-in profiling tool. The top 3 stack traces of the statement(s) between start and stop are listed (if there are 3).
-tutorial_1115_td=\ @prof_start;
-tutorial_1116_td=\ @sleep 10;
-tutorial_1117_td=\ @prof_stop;
-tutorial_1118_td=\ Sleep for a number of seconds. Used to profile a long running query or operation that is running in another session (but in the same process).
-tutorial_1119_td=\ @transaction_isolation;
-tutorial_1120_td=\ @transaction_isolation 2;
-tutorial_1121_td=\ Display (without parameters) or change (with parameters 1, 2, 4, 8) the transaction isolation level.
-tutorial_1122_h2=Settings of the H2 Console
-tutorial_1123_p=\ The settings of the H2 Console are stored in a configuration file called .h2.server.properties in your user home directory. For Windows installations, the user home directory is usually C\:\\Documents and Settings\\[username] or C\:\\Users\\[username]. The configuration file contains the settings of the application and is automatically created when the H2 Console is first started. Supported settings are\:
-tutorial_1124_code=webAllowOthers
-tutorial_1125_li=\: allow other computers to connect.
-tutorial_1126_code=webPort
-tutorial_1127_li=\: the port of the H2 Console
-tutorial_1128_code=webSSL
-tutorial_1129_li=\: use encrypted TLS (HTTPS) connections.
-tutorial_1130_p=\ In addition to those settings, the properties of the most recently used connections are listed in the form <number>\=<name>|<driver>|<url>|<user> using the escape character \\. Example\: 1\=Generic H2 (Embedded)|org.h2.Driver|jdbc\\\:h2\\\:~/test|sa
-tutorial_1131_h2=Connecting to a Database using JDBC
-tutorial_1132_p=\ To connect to a database, a Java application first needs to load the database driver, and then get a connection. A simple way to do that is using the following code\:
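A minimal sketch of such code (the database path, user name, and password are placeholder values):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class Connect {
        public static void main(String[] args) throws Exception {
            // load the driver (only needed for older JDBC versions)
            Class.forName("org.h2.Driver");
            // open a connection; by default this creates the database if it does not exist
            Connection conn = DriverManager.getConnection(
                    "jdbc:h2:~/test", "sa", "");
            // ... use the connection ...
            conn.close();
        }
    }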
-tutorial_1133_p=\ This code first loads the driver (Class.forName(...)) and then opens a connection (using DriverManager.getConnection()). The driver name is "org.h2.Driver". The database URL always needs to start with jdbc\:h2\: to be recognized by this database. The second parameter in the getConnection() call is the user name (sa for System Administrator in this example). The third parameter is the password. In this database, user names are not case sensitive, but passwords are.
-tutorial_1134_h2=Creating New Databases
-tutorial_1135_p=\ By default, if the database specified in the URL does not yet exist, a new (empty) database is created automatically. The user that created the database automatically becomes the administrator of this database.
-tutorial_1136_p=\ Auto-creating new databases can be disabled, see Opening a Database Only if it Already Exists.
-tutorial_1137_h2=Using the Server
-tutorial_1138_p=\ H2 currently supports three servers\: a web server (for the H2 Console), a TCP server (for client/server connections) and a PG server (for PostgreSQL clients). Please note that only the web server supports browser connections. The servers can be started in different ways; one is using the Server tool. Starting the server doesn't open a database - databases are opened as soon as a client connects.
-tutorial_1139_h3=Starting the Server Tool from Command Line
-tutorial_1140_p=\ To start the Server tool from the command line with the default settings, run\:
-tutorial_1141_p=\ This will start the tool with the default options. To get the list of options and default values, run\:
-tutorial_1142_p=\ There are options available to use other ports, and to start only some of the servers.
-tutorial_1143_h3=Connecting to the TCP Server
-tutorial_1144_p=\ To remotely connect to a database using the TCP server, use the following driver and database URL\:
-tutorial_1145_li=JDBC driver class\: org.h2.Driver
-tutorial_1146_li=Database URL\: jdbc\:h2\:tcp\://localhost/~/test
-tutorial_1147_p=\ For details about the database URL, see also Features. Please note that you can't connect to this URL with a web browser. You can only connect using an H2 client (over JDBC).
-tutorial_1148_h3=Starting the TCP Server within an Application
-tutorial_1149_p=\ Servers can also be started and stopped from within an application. Sample code\:
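A minimal sketch using the org.h2.tools.Server tool (the port is an example value):

    import org.h2.tools.Server;

    // start a TCP server; clients can now connect over JDBC
    // (createTcpServer and start both throw SQLException)
    Server server = Server.createTcpServer("-tcpPort", "9092").start();
    // ... serve clients ...
    // stop the server when it is no longer needed
    server.stop();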
-tutorial_1150_h3=Stopping a TCP Server from Another Process
-tutorial_1151_p=\ The TCP server can be stopped from another process. To stop the server from the command line, run\:
-tutorial_1152_p=\ To stop the server from a user application, use the following code\:
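A sketch of stopping a remote TCP server, assuming it was started on port 9092 with -tcpPassword abc and with remote connections enabled:

    import org.h2.tools.Server;

    // force-stop the TCP server at the given URL; the password must match
    // the -tcpPassword that was used when the server was started
    Server.shutdownTcpServer("tcp://localhost:9092", "abc", true, false);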
-tutorial_1153_p=\ This function will only stop the TCP server. If other servers were started in the same process, they will continue to run. To avoid recovery when the databases are opened the next time, all connections to the databases should be closed before calling this method. To stop a remote server, remote connections must be enabled on the server. Shutting down a TCP server can be protected using the option -tcpPassword (the same password must be used to start and stop the TCP server).
-tutorial_1154_h2=Using Hibernate
-tutorial_1155_p=\ This database supports Hibernate version 3.1 and newer. You can use the HSQLDB Dialect, or the native H2 Dialect. Unfortunately the H2 Dialect included in some old versions of Hibernate was buggy. A patch for Hibernate has been submitted and is now applied. You can rename it to H2Dialect.java and include this as a patch in your application, or upgrade to a version of Hibernate where this is fixed.
-tutorial_1156_p=\ When using Hibernate, try to use the H2Dialect if possible. When using the H2Dialect, compatibility modes such as MODE\=MySQL are not supported. When using such a compatibility mode, use the Hibernate dialect for the corresponding database instead of the H2Dialect; but please note H2 does not support all features of all databases.
-tutorial_1157_h2=Using TopLink and Glassfish
-tutorial_1158_p=\ To use H2 with Glassfish (or Sun AS), set the Datasource Classname to org.h2.jdbcx.JdbcDataSource. You can set this in the GUI at Application Server - Resources - JDBC - Connection Pools, or by editing the file sun-resources.xml\: at element jdbc-connection-pool, set the attribute datasource-classname to org.h2.jdbcx.JdbcDataSource.
-tutorial_1159_p=\ The H2 database is compatible with HSQLDB and PostgreSQL. To take advantage of H2 specific features, use the H2Platform. The source code of this platform is included in H2 at src/tools/oracle/toplink/essentials/platform/database/DatabasePlatform.java.txt. You will need to copy this file to your application, and rename it to .java. To enable it, change the following setting in persistence.xml\:
-tutorial_1160_p=\ In old versions of Glassfish, the property name is toplink.platform.class.name.
-tutorial_1161_p=\ To use H2 within Glassfish, copy the h2*.jar to the directory glassfish/glassfish/lib.
-tutorial_1162_h2=Using EclipseLink
-tutorial_1163_p=\ To use H2 in EclipseLink, use the platform class org.eclipse.persistence.platform.database.H2Platform. If this platform is not available in your version of EclipseLink, you can use the OraclePlatform instead in many cases. See also H2Platform.
-tutorial_1164_h2=Using Apache ActiveMQ
-tutorial_1165_p=\ When using H2 as the backend database for Apache ActiveMQ, please use the TransactDatabaseLocker instead of the default locking mechanism. Otherwise the database file will grow without bounds. The problem is that the default locking mechanism uses an uncommitted UPDATE transaction, which keeps the transaction log from shrinking (causes the database file to grow). Instead of using an UPDATE statement, the TransactDatabaseLocker uses SELECT ... FOR UPDATE which is not problematic. To use it, in the ActiveMQ configuration element <jdbcPersistenceAdapter>, set the property databaseLocker\="org.apache.activemq.store.jdbc.adapter.TransactDatabaseLocker". However, using the MVCC mode will again result in the same problem. Therefore, please do not use the MVCC mode in this case. Another (more dangerous) solution is to set useDatabaseLock to false.
-tutorial_1166_h2=Using H2 within NetBeans
-tutorial_1167_p=\ The project H2 Database Engine Support For NetBeans allows you to start and stop the H2 server from within the IDE.
-tutorial_1168_p=\ There is a known issue when using the NetBeans SQL Execution Window\: before executing a query, another query in the form SELECT COUNT(*) FROM <query> is run. This is a problem for queries that modify state, such as SELECT SEQ.NEXTVAL. In this case, two sequence values are allocated instead of just one.
-tutorial_1169_h2=Using H2 with jOOQ
-tutorial_1170_p=\ jOOQ adds a thin layer on top of JDBC, allowing for type-safe SQL construction, including advanced SQL, stored procedures and advanced data types. jOOQ takes your database schema as a base for code generation. If this is your example schema\:
-tutorial_1171_p=\ then run the jOOQ code generator on the command line using this command\:
-tutorial_1172_p=\ ...where codegen.xml is on the classpath and contains this information
-tutorial_1173_p=\ Using the generated source, you can query the database as follows\:
-tutorial_1174_p=\ See more details on the jOOQ Homepage and in the jOOQ Tutorial.
-tutorial_1175_h2=Using Databases in Web Applications
-tutorial_1176_p=\ There are multiple ways to access a database from within web applications. Here are some examples if you use Tomcat or JBoss.
-tutorial_1177_h3=Embedded Mode
-tutorial_1178_p=\ The (currently) simplest solution is to use the database in the embedded mode, that means opening a connection in your application when it starts (a good solution is using a Servlet Listener, see below), or when a session starts. A database can be accessed from multiple sessions and applications at the same time, as long as they run in the same process. Most Servlet Containers (for example Tomcat) use just one process, so this is not a problem (unless you run Tomcat in clustered mode). Tomcat uses multiple threads and multiple classloaders. If multiple applications access the same database at the same time, you need to put the database jar in the shared/lib or server/lib directory. It is a good idea to open the database when the web application starts, and close it when the web application stops. If using multiple applications, only one (any) of them needs to do that. In the application, one idea is to use one connection per session, or even one connection per request (action). Those connections should be closed after use if possible (but it's not that bad if they don't get closed).
-tutorial_1179_h3=Server Mode
-tutorial_1180_p=\ The server mode is similar, but it allows you to run the server in another process.
-tutorial_1181_h3=Using a Servlet Listener to Start and Stop a Database
-tutorial_1182_p=\ Add the h2*.jar file to your web application, and add the following snippet to your web.xml file (between the context-param and the filter section)\:
-tutorial_1183_p=\ For details on how to access the database, see the file DbStarter.java. By default this tool opens an embedded connection using the database URL jdbc\:h2\:~/test, user name sa, and password sa. If you want to use this connection within your servlet, you can access it as follows\:
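A sketch, assuming DbStarter stores the connection in the servlet context under the attribute name "connection":

    // inside a servlet: retrieve the connection opened by DbStarter
    Connection conn = (Connection) getServletContext().getAttribute("connection");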
-tutorial_1184_code=DbStarter
-tutorial_1185_p=\ can also start the TCP server, however this is disabled by default. To enable it, use the parameter db.tcpServer in the file web.xml. Here is the complete list of options. These options need to be placed between the description tag and the listener / filter tags\:
-tutorial_1186_p=\ When the web application is stopped, the database connection will be closed automatically. If the TCP server is started within the DbStarter, it will also be stopped automatically.
-tutorial_1187_h3=Using the H2 Console Servlet
-tutorial_1188_p=\ The H2 Console is a standalone application and includes its own web server, but it can be used as a servlet as well. To do that, include the h2*.jar file in your application, and add the following configuration to your web.xml\:
-tutorial_1189_p=\ For details, see also src/tools/WEB-INF/web.xml.
-tutorial_1190_p=\ To create a web application with just the H2 Console, run the following command\:
-tutorial_1191_h2=Android
-tutorial_1192_p=\ You can use this database on an Android device (using the Dalvik VM) instead of or in addition to SQLite. So far, only very few tests and benchmarks were run, but it seems that performance is similar to SQLite, except for opening and closing a database, which is not yet optimized in H2 (H2 takes about 0.2 seconds, and SQLite about 0.02 seconds). Read operations seem to be a bit faster than SQLite, and write operations seem to be slower. Everything tested so far seems to work as expected. Fulltext search was not yet tested, however the native fulltext search should work.
-tutorial_1193_p=\ Reasons to use H2 instead of SQLite are\:
-tutorial_1194_li=Full Unicode support including UPPER() and LOWER().
-tutorial_1195_li=Streaming API for BLOB and CLOB data.
-tutorial_1196_li=Fulltext search.
-tutorial_1197_li=Multiple connections.
-tutorial_1198_li=User defined functions and triggers.
-tutorial_1199_li=Database file encryption.
-tutorial_1200_li=Reading and writing CSV files (this feature can be used outside the database as well).
-tutorial_1201_li=Referential integrity and check constraints.
-tutorial_1202_li=Better data type and SQL support.
-tutorial_1203_li=In-memory databases, read-only databases, linked tables.
-tutorial_1204_li=Better compatibility with other databases which simplifies porting applications.
-tutorial_1205_li=Possibly better performance (so far for read operations).
-tutorial_1206_li=Server mode (accessing a database on a different machine over TCP/IP).
-tutorial_1207_p=\ Currently only the JDBC API is supported (it is planned to support the Android database API in future releases). Both the regular H2 jar file and the smaller h2small-*.jar can be used. To create the smaller jar file, run the command ./build.sh jarSmall (Linux / Mac OS) or build.bat jarSmall (Windows).
-tutorial_1208_p=\ The database files need to be stored in a place that is accessible for the application. Example\:
-tutorial_1209_p=\ Limitations\: Using a connection pool is currently not supported, because the required javax.sql. classes are not available on Android.
-tutorial_1210_h2=CSV (Comma Separated Values) Support
-tutorial_1211_p=\ The CSV file support can be used inside the database using the functions CSVREAD and CSVWRITE, or it can be used outside the database as a standalone tool.
-tutorial_1212_h3=Reading a CSV File from Within a Database
-tutorial_1213_p=\ A CSV file can be read using the function CSVREAD. Example\:
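For instance, assuming an open Connection conn and a file test.csv whose first line contains the column names:

    // query a CSV file as if it were a table
    ResultSet rs = conn.createStatement().executeQuery(
            "SELECT * FROM CSVREAD('test.csv')");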
-tutorial_1214_p=\ Please note that for performance reasons, CSVREAD should not be used inside a join. Instead, import the data first (possibly into a temporary table), create the required indexes if necessary, and then query this table.
-tutorial_1215_h3=Importing Data from a CSV File
-tutorial_1216_p=\ A fast way to load or import data (sometimes called 'bulk load') from a CSV file is to combine table creation with import. Optionally, the column names and data types can be set when creating the table. Another option is to use INSERT INTO ... SELECT.
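A sketch of combining table creation with the import (the column definitions are example values):

    // bulk load: create the table directly from the CSV data
    conn.createStatement().execute(
            "CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR(255)) "
            + "AS SELECT * FROM CSVREAD('test.csv')");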
-tutorial_1217_h3=Writing a CSV File from Within a Database
-tutorial_1218_p=\ The built-in function CSVWRITE can be used to create a CSV file from a query. Example\:
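For instance, assuming an open Connection conn:

    // export the result of a query to a CSV file
    conn.createStatement().execute(
            "CALL CSVWRITE('test.csv', 'SELECT * FROM TEST')");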
-tutorial_1219_h3=Writing a CSV File from a Java Application
-tutorial_1220_p=\ The Csv tool can be used in a Java application even when not using a database at all. Example\:
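A sketch using the Csv tool together with a SimpleResultSet (file name and column definition are example values):

    import java.sql.Types;
    import org.h2.tools.Csv;
    import org.h2.tools.SimpleResultSet;

    // build an in-memory result set and write it out as a CSV file
    SimpleResultSet rs = new SimpleResultSet();
    rs.addColumn("NAME", Types.VARCHAR, 255, 0);
    rs.addRow("Hello");
    rs.addRow("World");
    new Csv().write("test.csv", rs, null);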
-tutorial_1221_h3=Reading a CSV File from a Java Application
-tutorial_1222_p=\ It is possible to read a CSV file without opening a database. Example\:
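A sketch (passing null column names means they are read from the first line of the file):

    import java.sql.ResultSet;
    import java.sql.ResultSetMetaData;
    import org.h2.tools.Csv;

    // read the CSV file and iterate over the rows
    ResultSet rs = new Csv().read("test.csv", null, null);
    ResultSetMetaData meta = rs.getMetaData();
    while (rs.next()) {
        for (int i = 1; i <= meta.getColumnCount(); i++) {
            System.out.println(meta.getColumnLabel(i) + ": " + rs.getString(i));
        }
    }
    rs.close();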
-tutorial_1223_h2=Upgrade, Backup, and Restore
-tutorial_1224_h3=Database Upgrade
-tutorial_1225_p=\ The recommended way to upgrade from one version of the database engine to the next version is to create a backup of the database (in the form of a SQL script) using the old engine, and then execute the SQL script using the new engine.
-tutorial_1226_h3=Backup using the Script Tool
-tutorial_1227_p=\ The recommended way to backup a database is to create a compressed SQL script file. This will result in a small, human readable, and database version independent backup. Creating the script will also verify the checksums of the database file. The Script tool is run as follows\:
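A sketch of invoking the Script tool from Java (URL, credentials, and file name are example values):

    import org.h2.tools.Script;

    // write a compressed, version-independent SQL script backup
    Script.main("-url", "jdbc:h2:~/test", "-user", "sa",
            "-script", "backup.zip", "-options", "compression", "zip");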
-tutorial_1228_p=\ It is also possible to use the SQL command SCRIPT to create the backup of the database. For more information about the options, see the SQL command SCRIPT. The backup can be done remotely, however the file will be created on the server side. The built-in FTP server could be used to retrieve the file from the server.
-tutorial_1229_h3=Restore from a Script
-tutorial_1230_p=\ To restore a database from a SQL script file, you can use the RunScript tool\:
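A sketch mirroring the Script example above (same example values):

    import org.h2.tools.RunScript;

    // re-create the database from the SQL script backup
    RunScript.main("-url", "jdbc:h2:~/test", "-user", "sa",
            "-script", "backup.zip", "-options", "compression", "zip");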
-tutorial_1231_p=\ For more information about the options, see the SQL command RUNSCRIPT. The restore can be done remotely, however the file needs to be on the server side. The built-in FTP server could be used to copy the file to the server. It is also possible to use the SQL command RUNSCRIPT to execute a SQL script. SQL script files may contain references to other script files, in the form of RUNSCRIPT commands. However, when using the server mode, the referenced script files need to be available on the server side.
-tutorial_1232_h3=Online Backup
-tutorial_1233_p=\ The BACKUP SQL statement and the Backup tool both create a zip file with the database file. However, the contents of this file are not human readable.
-tutorial_1234_p=\ The resulting backup is transactionally consistent, meaning the consistency and atomicity rules apply.
-tutorial_1235_p=\ The Backup tool (org.h2.tools.Backup) can not be used to create an online backup; the database must not be in use while running this program.
-tutorial_1236_p=\ Creating a backup by copying the database files while the database is running is not supported, except if the file system supports creating snapshots. With other file systems, it can't be guaranteed that the data is copied in the right order.
-tutorial_1237_h2=Command Line Tools
-tutorial_1238_p=\ This database comes with a number of command line tools. To get more information about a tool, start it with the parameter '-?', for example\:
-tutorial_1239_p=\ The command line tools are\:
-tutorial_1240_code=Backup
-tutorial_1241_li=\ creates a backup of a database.
-tutorial_1242_code=ChangeFileEncryption
-tutorial_1243_li=\ allows changing the file encryption password or algorithm of a database.
-tutorial_1244_code=Console
-tutorial_1245_li=\ starts the browser based H2 Console.
-tutorial_1246_code=ConvertTraceFile
-tutorial_1247_li=\ converts a .trace.db file to a Java application and SQL script.
-tutorial_1248_code=CreateCluster
-tutorial_1249_li=\ creates a cluster from a standalone database.
-tutorial_1250_code=DeleteDbFiles
-tutorial_1251_li=\ deletes all files belonging to a database.
-tutorial_1252_code=Recover
-tutorial_1253_li=\ helps recover a corrupted database.
-tutorial_1254_code=Restore
-tutorial_1255_li=\ restores a backup of a database.
-tutorial_1256_code=RunScript
-tutorial_1257_li=\ runs a SQL script against a database.
-tutorial_1258_code=Script
-tutorial_1259_li=\ allows converting a database to a SQL script for backup or migration.
-tutorial_1260_code=Server
-tutorial_1261_li=\ is used in the server mode to start an H2 server.
-tutorial_1262_code=Shell
-tutorial_1263_li=\ is a command line database tool.
-tutorial_1264_p=\ The tools can also be called from an application by calling the main or another public method. For details, see the Javadoc documentation.
-tutorial_1265_h2=The Shell Tool
-tutorial_1266_p=\ The Shell tool is a simple interactive command line tool. To start it, type\:
-tutorial_1267_p=\ You will be asked for a database URL, JDBC driver, user name, and password. The connection settings can also be set as command line parameters. After connecting, you will get the list of options. The built-in commands don't need to end with a semicolon, but SQL statements are only executed if the line ends with a semicolon ;. This allows entering multi-line statements\:
-tutorial_1268_p=\ By default, results are printed as a table. For results with many columns, consider using the list mode\:
-tutorial_1269_h2=Using OpenOffice Base
-tutorial_1270_p=\ OpenOffice.org Base supports database access over the JDBC API. To connect to an H2 database using OpenOffice Base, you first need to add the JDBC driver to OpenOffice. The steps to connect to an H2 database are\:
-tutorial_1271_li=Start OpenOffice Writer, go to [Tools], [Options]
-tutorial_1272_li=Make sure you have selected a Java runtime environment in OpenOffice.org / Java
-tutorial_1273_li=Click [Class Path...], [Add Archive...]
-tutorial_1274_li=Select your h2 jar file (location is up to you, could be wherever you choose)
-tutorial_1275_li=Click [OK] (as much as needed), stop OpenOffice (including the Quickstarter)
-tutorial_1276_li=Start OpenOffice Base
-tutorial_1277_li=Connect to an existing database; select [JDBC]; [Next]
-tutorial_1278_li=Example datasource URL\: jdbc\:h2\:~/test
-tutorial_1279_li=JDBC driver class\: org.h2.Driver
-tutorial_1280_p=\ Now you can access the database stored in the current user's home directory.
-tutorial_1281_p=\ To use H2 in NeoOffice (OpenOffice without X11)\:
-tutorial_1282_li=In NeoOffice, go to [NeoOffice], [Preferences]
-tutorial_1283_li=Look for the page under [NeoOffice], [Java]
-tutorial_1284_li=Click [Class Path], [Add Archive...]
-tutorial_1285_li=Select your h2 jar file (location is up to you, could be wherever you choose)
-tutorial_1286_li=Click [OK] (as much as needed), restart NeoOffice.
-tutorial_1287_p=\ Now, when creating a new database using the "Database Wizard"\:
-tutorial_1288_li=Click [File], [New], [Database].
-tutorial_1289_li=Select [Connect to existing database] and then select [JDBC]. Click [Next].
-tutorial_1290_li=Example datasource URL\: jdbc\:h2\:~/test
-tutorial_1291_li=JDBC driver class\: org.h2.Driver
-tutorial_1292_p=\ Another solution to use H2 in NeoOffice is\:
-tutorial_1293_li=Package the h2 jar within an extension package
-tutorial_1294_li=Install it as a Java extension in NeoOffice
-tutorial_1295_p=\ This can be done by creating it using the NetBeans OpenOffice plugin. See also Extensions Development.
-tutorial_1296_h2=Java Web Start / JNLP
-tutorial_1297_p=\ When using Java Web Start / JNLP (Java Network Launch Protocol), permission tags must be set in the .jnlp file, and the application .jar file must be signed. Otherwise, when trying to write to the file system, the following exception will occur\: java.security.AccessControlException\: access denied (java.io.FilePermission ... read). Example permission tags\:
-tutorial_1298_h2=Using a Connection Pool
-tutorial_1299_p=\ For H2, opening a connection is fast if the database is already open. Still, using a connection pool improves performance if you open and close connections a lot. A simple connection pool is included in H2. It is based on the Mini Connection Pool Manager from Christian d'Heureuse. There are other, more complex, open source connection pools available, for example the Apache Commons DBCP. For H2, it is about twice as fast to get a connection from the built-in connection pool as to get one using DriverManager.getConnection(). The built-in connection pool is used as follows\:
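A minimal sketch (the URL and credentials are placeholders):

    import java.sql.Connection;
    import org.h2.jdbcx.JdbcConnectionPool;

    public class ConnectionPoolExample {
        public static void main(String[] args) throws Exception {
            JdbcConnectionPool cp = JdbcConnectionPool.create(
                    "jdbc:h2:~/test", "sa", "sa");
            for (int i = 0; i < 10; i++) {
                Connection conn = cp.getConnection();
                conn.createStatement().execute("SELECT 1");
                conn.close(); // returns the connection to the pool
            }
            cp.dispose(); // closes all unused pooled connections
        }
    }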
-tutorial_1300_h2=Fulltext Search
-tutorial_1301_p=\ H2 includes two fulltext search implementations. One uses Apache Lucene; the other (the native implementation) stores the index data in special tables in the database.
-tutorial_1302_h3=Using the Native Fulltext Search
-tutorial_1303_p=\ To initialize, call\:
-tutorial_1304_p=\ You need to initialize it in each database where you want to use it. Afterwards, you can create a fulltext index for a table using\:
-tutorial_1305_p=\ PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional; if it is omitted, all columns are indexed. The index is updated in real time. To search the index, use the following query\:
-tutorial_1306_p=\ This will produce a result set that contains the query needed to retrieve the data\:
-tutorial_1307_p=\ To drop an index on a table\:
-tutorial_1308_p=\ To get the raw data, use FT_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in\: SELECT T.* FROM FT_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE\='TEST' AND T.ID\=FT.KEYS[0];
-tutorial_1309_p=\ You can also call the index from within a Java application\:
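A minimal end-to-end sketch using an in-memory database (the table and data are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import org.h2.fulltext.FullText;

    public class NativeFullTextExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:h2:mem:ft", "sa", "");
            Statement stat = conn.createStatement();
            stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
            stat.execute("INSERT INTO TEST VALUES(1, 'Hello World')");
            FullText.init(conn);
            stat.execute("CALL FT_CREATE_INDEX('PUBLIC', 'TEST', NULL)");
            // limit 0 means no limit, offset 0 starts at the first result
            ResultSet rs = FullText.search(conn, "Hello", 0, 0);
            while (rs.next()) {
                System.out.println(rs.getString("QUERY"));
            }
            conn.close();
        }
    }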
-tutorial_1310_h3=Using the Lucene Fulltext Search
-tutorial_1311_p=\ To use the Lucene full text search, you need the Lucene library in the classpath. Currently Apache Lucene version 2.x is used by default for H2 version 1.2.x, and Lucene version 3.x is used by default for H2 version 1.3.x. How to add it to the classpath depends on the application; if you use the H2 Console, you can add the Lucene jar file to the environment variables H2DRIVERS or CLASSPATH. To initialize the Lucene fulltext search in a database, call\:
-tutorial_1312_p=\ You need to initialize it in each database where you want to use it. Afterwards, you can create a full text index for a table using\:
-tutorial_1313_p=\ PUBLIC is the schema name, TEST is the table name. The list of column names (comma separated) is optional; if it is omitted, all columns are indexed. The index is updated in real time. To search the index, use the following query\:
-tutorial_1314_p=\ This will produce a result set that contains the query needed to retrieve the data\:
-tutorial_1315_p=\ To drop an index on a table (be warned that this will re-index all of the full-text indices for the entire database)\:
-tutorial_1316_p=\ To get the raw data, use FTL_SEARCH_DATA('Hello', 0, 0);. The result contains the columns SCHEMA (the schema name), TABLE (the table name), COLUMNS (an array of column names), and KEYS (an array of objects). To join a table, use a join as in\: SELECT T.* FROM FTL_SEARCH_DATA('Hello', 0, 0) FT, TEST T WHERE FT.TABLE\='TEST' AND T.ID\=FT.KEYS[0];
-tutorial_1317_p=\ You can also call the index from within a Java application\:
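The Lucene variant follows the same pattern with the FullTextLucene class and the FTL_ functions; a sketch, assuming the Lucene jar is on the classpath and an illustrative table:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;
    import org.h2.fulltext.FullTextLucene;

    public class LuceneFullTextExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:h2:mem:ftl", "sa", "");
            Statement stat = conn.createStatement();
            stat.execute("CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR)");
            stat.execute("INSERT INTO TEST VALUES(1, 'Hello World')");
            FullTextLucene.init(conn);
            stat.execute("CALL FTL_CREATE_INDEX('PUBLIC', 'TEST', NULL)");
            ResultSet rs = FullTextLucene.search(conn, "Hello", 0, 0);
            while (rs.next()) {
                System.out.println(rs.getString("QUERY"));
            }
            conn.close();
        }
    }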
-tutorial_1318_p=\ The Lucene fulltext search supports searching in specific columns only. Column names must be uppercase (except if the original columns are double quoted). For column names starting with an underscore (_), another underscore needs to be added. Example\:
-tutorial_1319_p=\ The Lucene fulltext search implementation is not synchronized internally. If you update the database and query the fulltext search concurrently (directly using the Java API of H2 or Lucene itself), you need to ensure operations are properly synchronized. If this is not the case, you may get exceptions such as org.apache.lucene.store.AlreadyClosedException\: this IndexReader is closed.
-tutorial_1320_h2=User-Defined Variables
-tutorial_1321_p=\ This database supports user-defined variables. Variables start with @ and can be used wherever expressions or parameters are allowed. Variables are not persisted and are session scoped, that is, they are only visible within the session in which they are defined. A value is usually assigned using the SET command\:
-tutorial_1322_p=\ The value can also be changed using the SET() method. This is useful in queries\:
-tutorial_1323_p=\ Variables that are not set evaluate to NULL. The data type of a user-defined variable is the data type of the value assigned to it, that means it is not necessary (or possible) to declare variable names before using them. There are no restrictions on the assigned values; large objects (LOBs) are supported as well. Rolling back a transaction does not affect the value of a user-defined variable.
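A short JDBC sketch of both assignment forms, computing a running total with the SET() method (the variable and alias names are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class VariablesExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", "");
            // assign a value with the SET command
            conn.createStatement().execute("SET @TOTAL = 0");
            // change the value inside a query with the SET() method
            ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT X, SET(@TOTAL, @TOTAL + X) RUNNING_TOTAL"
                    + " FROM SYSTEM_RANGE(1, 5)");
            while (rs.next()) {
                System.out.println(rs.getInt(1) + " -> " + rs.getInt(2));
            }
            conn.close();
        }
    }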
-tutorial_1324_h2=Date and Time
-tutorial_1325_p=\ Date, time and timestamp values support ISO 8601 formatting, including time zone\:
-tutorial_1326_p=\ If the time zone is not set, the value is parsed using the current time zone setting of the system. Date and time information is stored in H2 database files without time zone information. If the database is opened using another system time zone, the date and time will be the same. That means if you store the value '2000-01-01 12\:00\:00' in one time zone, then close the database and open it again in a different time zone, you will still get '2000-01-01 12\:00\:00'. Please note that changing the time zone after the H2 driver is loaded is not supported.
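A small sketch parsing and reading back a timestamp literal (the value is illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class DateTimeExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:h2:mem:", "sa", "");
            // Without a time zone suffix, the literal is parsed using the
            // current system time zone and stored without zone information.
            ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT TIMESTAMP '2000-01-01 12:00:00'");
            rs.next();
            System.out.println(rs.getTimestamp(1));
            conn.close();
        }
    }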
-tutorial_1327_h2=Using Spring
-tutorial_1328_h3=Using the TCP Server
-tutorial_1329_p=\ Use the following configuration to start and stop the H2 TCP server using the Spring Framework\:
-tutorial_1330_p=\ The destroy-method will help prevent exceptions on hot-redeployment or when restarting the server.
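For reference, a plain Java sketch of the lifecycle such a bean definition wires up (the port is an assumption; in the Spring configuration, createTcpServer is the factory method, start the init-method, and stop the destroy-method):

    import org.h2.tools.Server;

    public class TcpServerLifecycle {
        public static void main(String[] args) throws Exception {
            Server server = Server.createTcpServer("-tcpPort", "9092");
            server.start(); // what init-method="start" invokes
            // ... application runs ...
            server.stop();  // what destroy-method="stop" invokes
        }
    }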
-tutorial_1331_h3=Error Code Incompatibility
-tutorial_1332_p=\ There is an incompatibility with the Spring JdbcTemplate and H2 version 1.3.154 and newer, because of a change in the error code. This will cause the JdbcTemplate to not detect a duplicate key condition, and so a DataIntegrityViolationException is thrown instead of DuplicateKeyException. See also the issue SPR-8235. The workaround is to add the following XML file to the root of the classpath\:
-tutorial_1333_h2=OSGi
-tutorial_1334_p=\ The standard H2 jar can be dropped in as a bundle in an OSGi container. H2 implements the JDBC Service defined in OSGi Service Platform Release 4 Version 4.2 Enterprise Specification. The H2 Data Source Factory service is registered with the following properties\: OSGI_JDBC_DRIVER_CLASS\=org.h2.Driver and OSGI_JDBC_DRIVER_NAME\=H2. The OSGI_JDBC_DRIVER_VERSION property reflects the version of the driver as is.
-tutorial_1335_p=\ The following standard configuration properties are supported\: JDBC_USER, JDBC_PASSWORD, JDBC_DESCRIPTION, JDBC_DATASOURCE_NAME, JDBC_NETWORK_PROTOCOL, JDBC_URL, JDBC_SERVER_NAME, JDBC_PORT_NUMBER. Any other standard property will be rejected. Non-standard properties will be passed on to H2 in the connection URL.
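A hedged sketch of consuming the registered service through the standard OSGi DataSourceFactory API (how the dsf reference is obtained depends on the container; the URL and credentials are placeholders):

    import java.util.Properties;
    import javax.sql.DataSource;
    import org.osgi.service.jdbc.DataSourceFactory;

    public class OsgiDataSourceExample {
        // dsf would come from the OSGi service registry, filtered by
        // OSGI_JDBC_DRIVER_CLASS=org.h2.Driver
        public static DataSource createDataSource(DataSourceFactory dsf) throws Exception {
            Properties props = new Properties();
            props.setProperty(DataSourceFactory.JDBC_URL, "jdbc:h2:~/test");
            props.setProperty(DataSourceFactory.JDBC_USER, "sa");
            props.setProperty(DataSourceFactory.JDBC_PASSWORD, "");
            return dsf.createDataSource(props);
        }
    }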
-tutorial_1336_h2=Java Management Extension (JMX)
-tutorial_1337_p=\ Management over JMX is supported, but not enabled by default. To enable JMX, append ;JMX\=TRUE to the database URL when opening the database. Various tools support JMX; one such tool is jconsole. When opening jconsole, connect to the process where the database is open (when using the server mode, you need to connect to the server process). Then go to the MBeans section. Under org.h2 you will find one entry per database. The object name of the entry is the database short name, plus the path (each colon is replaced with an underscore character).
-tutorial_1338_p=\ The following attributes and operations are supported\:
-tutorial_1339_code=CacheSize
-tutorial_1340_li=\: the cache size currently in use in KB.
-tutorial_1341_code=CacheSizeMax
-tutorial_1342_li=\ (read/write)\: the maximum cache size in KB.
-tutorial_1343_code=Exclusive
-tutorial_1344_li=\: whether this database is open in exclusive mode or not.
-tutorial_1345_code=FileReadCount
-tutorial_1346_li=\: the number of file read operations since the database was opened.
-tutorial_1347_code=FileSize
-tutorial_1348_li=\: the file size in KB.
-tutorial_1349_code=FileWriteCount
-tutorial_1350_li=\: the number of file write operations since the database was opened.
-tutorial_1351_code=FileWriteCountTotal
-tutorial_1352_li=\: the number of file write operations since the database was created.
-tutorial_1353_code=LogMode
-tutorial_1354_li=\ (read/write)\: the current transaction log mode. See SET LOG for details.
-tutorial_1355_code=Mode
-tutorial_1356_li=\: the compatibility mode (REGULAR if no compatibility mode is used).
-tutorial_1357_code=MultiThreaded
-tutorial_1358_li=\: true if the multi-threaded mode is enabled.
-tutorial_1359_code=Mvcc
-tutorial_1360_li=\: true if MVCC is enabled.
-tutorial_1361_code=ReadOnly
-tutorial_1362_li=\: true if the database is read-only.
-tutorial_1363_code=TraceLevel
-tutorial_1364_li=\ (read/write)\: the file trace level.
-tutorial_1365_code=Version
-tutorial_1366_li=\: the database version in use.
-tutorial_1367_code=listSettings
-tutorial_1368_li=\: list the database settings.
-tutorial_1369_code=listSessions
-tutorial_1370_li=\: list the open sessions, including currently executing statement (if any) and locked tables (if any).
-tutorial_1371_p=\ To enable JMX, you may need to set the system properties com.sun.management.jmxremote and com.sun.management.jmxremote.port as required by the JVM.
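A minimal sketch of opening a database with JMX enabled so that its MBean appears under the org.h2 domain (the URL is illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class JmxExample {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection(
                    "jdbc:h2:~/test;JMX=TRUE", "sa", "");
            // Keep the process alive and attach jconsole to inspect
            // the database MBean.
            Thread.sleep(60000);
            conn.close();
        }
    }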
diff --git a/h2/src/docsrc/textbase/_messages_en.prop b/h2/src/docsrc/textbase/_messages_en.prop
deleted file mode 100644
index 21ee868385..0000000000
--- a/h2/src/docsrc/textbase/_messages_en.prop
+++ /dev/null
@@ -1,171 +0,0 @@
-.translator=Thomas Mueller
-02000=No data is available
-07001=Invalid parameter count for {0}, expected count: {1}
-08000=Error opening database: {0}
-21S02=Column count does not match
-22001=Value too long for column {0}: {1}
-22003=Numeric value out of range: {0}
-22007=Cannot parse {0} constant {1}
-22012=Division by zero: {0}
-22018=Data conversion error converting {0}
-22025=Error in LIKE ESCAPE: {0}
-23502=NULL not allowed for column {0}
-23503=Referential integrity constraint violation: {0}
-23505=Unique index or primary key violation: {0}
-23506=Referential integrity constraint violation: {0}
-23507=No default value is set for column {0}
-23513=Check constraint violation: {0}
-23514=Check constraint invalid: {0}
-28000=Wrong user name or password
-40001=Deadlock detected. The current transaction was rolled back. Details: {0}
-42000=Syntax error in SQL statement {0}
-42001=Syntax error in SQL statement {0}; expected {1}
-42S01=Table {0} already exists
-42S02=Table {0} not found
-42S11=Index {0} already exists
-42S12=Index {0} not found
-42S21=Duplicate column name {0}
-42S22=Column {0} not found
-42S32=Setting {0} not found
-57014=Statement was canceled or the session timed out
-90000=Function {0} must return a result set
-90001=Method is not allowed for a query. Use execute or executeQuery instead of executeUpdate
-90002=Method is only allowed for a query. Use execute or executeUpdate instead of executeQuery
-90003=Hexadecimal string with odd number of characters: {0}
-90004=Hexadecimal string contains non-hex character: {0}
-90006=Sequence {0} has run out of numbers
-90007=The object is already closed
-90008=Invalid value {0} for parameter {1}
-90009=Unable to create or alter sequence {0} because of invalid attributes (start value {1}, min value {2}, max value {3}, increment {4})
-90010=Invalid TO_CHAR format {0}
-90011=A file path that is implicitly relative to the current working directory is not allowed in the database URL {0}. Use an absolute path, ~/name, ./name, or the baseDir setting instead.
-90012=Parameter {0} is not set
-90013=Database {0} not found
-90014=Error parsing {0}
-90015=SUM or AVG on wrong data type for {0}
-90016=Column {0} must be in the GROUP BY list
-90017=Attempt to define a second primary key
-90018=The connection was not closed by the application and is garbage collected
-90019=Cannot drop the current user
-90020=Database may be already in use: {0}. Possible solutions: close all other connection(s); use the server mode
-90021=This combination of database settings is not supported: {0}
-90022=Function {0} not found
-90023=Column {0} must not be nullable
-90024=Error while renaming file {0} to {1}
-90025=Cannot delete file {0}
-90026=Serialization failed, cause: {0}
-90027=Deserialization failed, cause: {0}
-90028=IO Exception: {0}
-90029=Currently not on an updatable row
-90030=File corrupted while reading record: {0}. Possible solution: use the recovery tool
-90031=IO Exception: {0}; {1}
-90032=User {0} not found
-90033=User {0} already exists
-90034=Log file error: {0}, cause: {1}
-90035=Sequence {0} already exists
-90036=Sequence {0} not found
-90037=View {0} not found
-90038=View {0} already exists
-90039=This CLOB or BLOB reference timed out: {0}
-90040=Admin rights are required for this operation
-90041=Trigger {0} already exists
-90042=Trigger {0} not found
-90043=Error creating or initializing trigger {0} object, class {1}, cause: {2}; see root cause for details
-90044=Error executing trigger {0}, class {1}, cause: {2}; see root cause for details
-90045=Constraint {0} already exists
-90046=URL format error; must be {0} but is {1}
-90047=Version mismatch, driver version is {0} but server version is {1}
-90048=Unsupported database file version or invalid file header in file {0}
-90049=Encryption error in file {0}
-90050=Wrong password format, must be: file password user password
-90052=Subquery is not a single column query
-90053=Scalar subquery contains more than one row
-90054=Invalid use of aggregate function {0}
-90055=Unsupported cipher {0}
-90057=Constraint {0} not found
-90058=Commit or rollback is not allowed within a trigger
-90059=Ambiguous column name {0}
-90060=Unsupported file lock method {0}
-90061=Exception opening port {0} (port may be in use), cause: {1}
-90062=Error while creating file {0}
-90063=Savepoint is invalid: {0}
-90064=Savepoint is unnamed
-90065=Savepoint is named
-90066=Duplicate property {0}
-90067=Connection is broken: {0}
-90068=Order by expression {0} must be in the result list in this case
-90069=Role {0} already exists
-90070=Role {0} not found
-90071=User or role {0} not found
-90072=Roles and rights cannot be mixed
-90073=Matching Java methods must have different parameter counts: {0} and {1}
-90074=Role {0} already granted
-90075=Column is part of the index {0}
-90076=Function alias {0} already exists
-90077=Function alias {0} not found
-90078=Schema {0} already exists
-90079=Schema {0} not found
-90080=Schema name must match
-90081=Column {0} contains null values
-90082=Sequence {0} belongs to a table
-90083=Column may be referenced by {0}
-90084=Cannot drop last column {0}
-90085=Index {0} belongs to constraint {1}
-90086=Class {0} not found
-90087=Method {0} not found
-90088=Unknown mode {0}
-90089=Collation cannot be changed because there is a data table: {0}
-90090=Schema {0} cannot be dropped
-90091=Role {0} cannot be dropped
-90093=Clustering error - database currently runs in standalone mode
-90094=Clustering error - database currently runs in cluster mode, server list: {0}
-90095=String format error: {0}
-90096=Not enough rights for object {0}
-90097=The database is read only
-90098=The database has been closed
-90099=Error setting database event listener {0}, cause: {1}
-90101=Wrong XID format: {0}
-90102=Unsupported compression options: {0}
-90103=Unsupported compression algorithm: {0}
-90104=Compression error
-90105=Exception calling user-defined function: {0}
-90106=Cannot truncate {0}
-90107=Cannot drop {0} because {1} depends on it
-90108=Out of memory.
-90109=View {0} is invalid: {1}
-90111=Error accessing linked table with SQL statement {0}, cause: {1}
-90112=Row not found when trying to delete from index {0}
-90113=Unsupported connection setting {0}
-90114=Constant {0} already exists
-90115=Constant {0} not found
-90116=Literals of this kind are not allowed
-90117=Remote connections to this server are not allowed, see -tcpAllowOthers
-90118=Cannot drop table {0}
-90119=User data type {0} already exists
-90120=User data type {0} not found
-90121=Database is already closed (to disable automatic closing at VM shutdown, add ";DB_CLOSE_ON_EXIT=FALSE" to the db URL)
-90122=Operation not supported for table {0} when there are views on the table: {1}
-90123=Cannot mix indexed and non-indexed parameters
-90124=File not found: {0}
-90125=Invalid class, expected {0} but got {1}
-90126=Database is not persistent
-90127=The result set is not updatable. The query must select all columns from a unique key. Only one table may be selected.
-90128=The result set is not scrollable and can not be reset. You may need to use conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ..).
-90129=Transaction {0} not found
-90130=This method is not allowed for a prepared statement; use a regular statement instead.
-90131=Concurrent update in table {0}: another transaction has updated or deleted the same row
-90132=Aggregate {0} not found
-90133=Cannot change the setting {0} when the database is already open
-90134=Access to the class {0} is denied
-90135=The database is open in exclusive mode; can not open additional connections
-90136=Unsupported outer join condition: {0}
-90137=Can only assign to a variable, not to: {0}
-90138=Invalid database name: {0}
-90139=The public static Java method was not found: {0}
-90140=The result set is readonly. You may need to use conn.createStatement(.., ResultSet.CONCUR_UPDATABLE).
-90141=Serializer cannot be changed because there is a data table: {0}
-90142=Step size must not be zero
-HY000=General error: {0}
-HY004=Unknown data type: {0}
-HYC00=Feature not supported: {0}
-HYT00=Timeout trying to lock table {0}
diff --git a/h2/src/docsrc/textbase/_text_en.prop b/h2/src/docsrc/textbase/_text_en.prop
deleted file mode 100644
index 9eab3466be..0000000000
--- a/h2/src/docsrc/textbase/_text_en.prop
+++ /dev/null
@@ -1,160 +0,0 @@
-.translator=Thomas Mueller
-a.help=Help
-a.language=English
-a.lynxNotSupported=Sorry, Lynx not supported yet
-a.password=Password
-a.remoteConnectionsDisabled=Sorry, remote connections ('webAllowOthers') are disabled on this server.
-a.title=H2 Console
-a.tools=Tools
-a.user=User Name
-admin.executing=Executing
-admin.ip=IP
-admin.lastAccess=Last Access
-admin.lastQuery=Last Query
-admin.no=no
-admin.notConnected=not connected
-admin.url=URL
-admin.yes=yes
-adminAllow=Allowed clients
-adminConnection=Connection security
-adminHttp=Use unencrypted HTTP connections
-adminHttps=Use encrypted SSL (HTTPS) connections
-adminLocal=Only allow local connections
-adminLogin=Administration Login
-adminLoginCancel=Cancel
-adminLoginOk=OK
-adminLogout=Logout
-adminOthers=Allow connections from other computers
-adminPort=Port number
-adminPortWeb=Web server port number
-adminRestart=Changes take effect after restarting the server.
-adminSave=Save
-adminSessions=Active Sessions
-adminShutdown=Shutdown
-adminTitle=H2 Console Preferences
-adminTranslateHelp=Translate or improve the translation of the H2 Console.
-adminTranslateStart=Translate
-helpAction=Action
-helpAddAnotherRow=Add another row
-helpAddDrivers=Adding Database Drivers
-helpAddDriversText=Additional database drivers can be registered by adding the Jar file location of the driver to the environment variables H2DRIVERS or CLASSPATH. Example (Windows): to add the database driver library C:/Programs/hsqldb/lib/hsqldb.jar, set the environment variable H2DRIVERS to C:/Programs/hsqldb/lib/hsqldb.jar.
-helpAddRow=Add a new row
-helpCommandHistory=Shows the Command History
-helpCreateTable=Create a new table
-helpDeleteRow=Remove a row
-helpDisconnect=Disconnects from the database
-helpDisplayThis=Displays this Help Page
-helpDropTable=Delete the table if it exists
-helpExecuteCurrent=Executes the current SQL statement
-helpExecuteSelected=Executes the SQL statement defined by the text selection
-helpIcon=Icon
-helpImportantCommands=Important Commands
-helpOperations=Operations
-helpQuery=Query the table
-helpSampleSQL=Sample SQL Script
-helpStatements=SQL statements
-helpUpdate=Change data in a row
-helpWithColumnsIdName=with ID and NAME columns
-key.alt=Alt
-key.ctrl=Ctrl
-key.enter=Enter
-key.shift=Shift
-key.space=Space
-login.connect=Connect
-login.driverClass=Driver Class
-login.driverNotFound=Database driver not found. See the Help for how to add drivers.
-login.goAdmin=Preferences
-login.jdbcUrl=JDBC URL
-login.language=Language
-login.login=Login
-login.remove=Remove
-login.save=Save
-login.savedSetting=Saved Settings
-login.settingName=Setting Name
-login.testConnection=Test Connection
-login.testSuccessful=Test successful
-login.welcome=H2 Console
-result.1row=1 row
-result.autoCommitOff=Auto commit is now OFF
-result.autoCommitOn=Auto commit is now ON
-result.bytes=bytes
-result.characters=characters
-result.maxrowsSet=Max rowcount is set
-result.noRows=no rows
-result.noRunningStatement=There is currently no running statement
-result.rows=rows
-result.statementWasCanceled=The statement was canceled
-result.updateCount=Update count
-resultEdit.action=Action
-resultEdit.add=Add
-resultEdit.cancel=Cancel
-resultEdit.delete=Delete
-resultEdit.edit=Edit
-resultEdit.editResult=Edit
-resultEdit.save=Save
-toolbar.all=All
-toolbar.autoCommit=Auto commit
-toolbar.autoComplete=Auto complete
-toolbar.autoComplete.full=Full
-toolbar.autoComplete.normal=Normal
-toolbar.autoComplete.off=Off
-toolbar.cancelStatement=Cancel the current statement
-toolbar.clear=Clear
-toolbar.commit=Commit
-toolbar.disconnect=Disconnect
-toolbar.history=Command history
-toolbar.maxRows=Max rows
-toolbar.refresh=Refresh
-toolbar.rollback=Rollback
-toolbar.run=Run
-toolbar.runSelected=Run Selected
-toolbar.sqlStatement=SQL statement
-tools.backup=Backup
-tools.backup.help=Creates a backup of a database.
-tools.changeFileEncryption=ChangeFileEncryption
-tools.changeFileEncryption.help=Allows changing the database file encryption password and algorithm.
-tools.cipher=Cipher (AES or XTEA)
-tools.commandLine=Command line
-tools.convertTraceFile=ConvertTraceFile
-tools.convertTraceFile.help=Converts a .trace.db file to a Java application and SQL script.
-tools.createCluster=CreateCluster
-tools.createCluster.help=Creates a cluster from a standalone database.
-tools.databaseName=Database name
-tools.decryptionPassword=Decryption password
-tools.deleteDbFiles=DeleteDbFiles
-tools.deleteDbFiles.help=Deletes all files belonging to a database.
-tools.directory=Directory
-tools.encryptionPassword=Encryption password
-tools.javaDirectoryClassName=Java directory and class name
-tools.recover=Recover
-tools.recover.help=Helps recovering a corrupted database.
-tools.restore=Restore
-tools.restore.help=Restores a database backup.
-tools.result=Result
-tools.run=Run
-tools.runScript=RunScript
-tools.runScript.help=Runs a SQL script.
-tools.script=Script
-tools.script.help=Allows converting a database to a SQL script for backup or migration.
-tools.scriptFileName=Script file name
-tools.serverList=Server list
-tools.sourceDatabaseName=Source database name
-tools.sourceDatabaseURL=Source database URL
-tools.sourceDirectory=Source directory
-tools.sourceFileName=Source file name
-tools.sourceScriptFileName=Source script file name
-tools.targetDatabaseName=Target database name
-tools.targetDatabaseURL=Target database URL
-tools.targetDirectory=Target directory
-tools.targetFileName=Target file name
-tools.targetScriptFileName=Target script file name
-tools.traceFileName=Trace file name
-tree.admin=Admin
-tree.current=Current value
-tree.hashed=Hashed
-tree.increment=Increment
-tree.indexes=Indexes
-tree.nonUnique=Non unique
-tree.sequences=Sequences
-tree.unique=Unique
-tree.users=Users
diff --git a/h2/src/installer/buildRelease.bat b/h2/src/installer/buildRelease.bat
index 144888313d..5a82084ff2 100644
--- a/h2/src/installer/buildRelease.bat
+++ b/h2/src/installer/buildRelease.bat
@@ -11,9 +11,8 @@ mkdir ..\h2web
rmdir /s /q bin 2>nul
rmdir /s /q temp 2>nul
-call java16 >nul 2>nul
call build -quiet compile
-call build -quiet spellcheck javadocImpl jarClient
+call build -quiet spellcheck javadocImpl
call build -quiet clean compile installer mavenDeployCentral
rem call build -quiet compile benchmark
diff --git a/h2/src/installer/buildRelease.sh b/h2/src/installer/buildRelease.sh
old mode 100644
new mode 100755
index 042a55d174..8782e23845
--- a/h2/src/installer/buildRelease.sh
+++ b/h2/src/installer/buildRelease.sh
@@ -8,7 +8,7 @@ rm -rf bin
rm -rf temp
./build.sh -quiet compile
-./build.sh -quiet spellcheck javadocImpl jarClient
+./build.sh -quiet spellcheck javadocImpl
./build.sh -quiet clean compile installer mavenDeployCentral
# ./build.sh -quiet compile benchmark
diff --git a/h2/src/installer/checkstyle.xml b/h2/src/installer/checkstyle.xml
index 1a1681070d..a9a3e4b465 100644
--- a/h2/src/installer/checkstyle.xml
+++ b/h2/src/installer/checkstyle.xml
@@ -39,7 +39,7 @@
-
+
@@ -55,11 +55,6 @@
-
-
-
-
-
diff --git a/h2/src/installer/favicon.ico b/h2/src/installer/favicon.ico
index 6e0f78aeb1..fd5e73a416 100644
Binary files a/h2/src/installer/favicon.ico and b/h2/src/installer/favicon.ico differ
diff --git a/h2/src/installer/h2.bat b/h2/src/installer/h2.bat
index 98cae20eaf..0a7c7212d7 100644
--- a/h2/src/installer/h2.bat
+++ b/h2/src/installer/h2.bat
@@ -1,2 +1,2 @@
-@java -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %*
+@java -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %*
@if errorlevel 1 pause
\ No newline at end of file
diff --git a/h2/src/installer/h2.nsi b/h2/src/installer/h2.nsi
index d1fa6c380e..ffaf509fd9 100644
--- a/h2/src/installer/h2.nsi
+++ b/h2/src/installer/h2.nsi
@@ -1,3 +1,4 @@
+ Unicode True
!include "MUI.nsh"
SetCompressor /SOLID lzma
diff --git a/h2/src/installer/h2.sh b/h2/src/installer/h2.sh
old mode 100644
new mode 100755
diff --git a/h2/src/installer/h2w.bat b/h2/src/installer/h2w.bat
index cb55e87dc2..c7d8d26a5c 100644
--- a/h2/src/installer/h2w.bat
+++ b/h2/src/installer/h2w.bat
@@ -1,2 +1,2 @@
-@start javaw -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %*
+@start javaw -cp "h2.jar;%H2DRIVERS%;%CLASSPATH%" org.h2.tools.Console %*
@if errorlevel 1 pause
\ No newline at end of file
diff --git a/h2/src/installer/mvstore/MANIFEST.MF b/h2/src/installer/mvstore/MANIFEST.MF
index 415624c9ab..a470ceb294 100644
--- a/h2/src/installer/mvstore/MANIFEST.MF
+++ b/h2/src/installer/mvstore/MANIFEST.MF
@@ -1,18 +1,23 @@
Manifest-Version: 1.0
Implementation-Title: H2 MVStore
-Implementation-URL: http://www.h2database.com
+Implementation-URL: https://h2database.com
Implementation-Version: ${version}
Build-Jdk: ${buildJdk}
Created-By: ${createdBy}
+Automatic-Module-Name: com.h2database.mvstore
Bundle-Description: The MVStore is a persistent, log structured key-value store.
-Bundle-DocURL: http://h2database.com/html/mvstore.html
+Bundle-DocURL: https://h2database.com/html/mvstore.html
Bundle-ManifestVersion: 2
Bundle-Name: H2 MVStore
-Bundle-SymbolicName: org.h2.mvstore
+Bundle-SymbolicName: com.h2database.mvstore
Bundle-Vendor: H2 Group
Bundle-Version: ${version}
-Bundle-License: http://www.h2database.com/html/license.html
+Bundle-License: https://h2database.com/html/license.html
Bundle-Category: utility
+Multi-Release: true
+Import-Package: javax.crypto,
+ javax.crypto.spec
Export-Package: org.h2.mvstore;version="${version}",
+ org.h2.mvstore.tx;version="${version}",
org.h2.mvstore.type;version="${version}",
org.h2.mvstore.rtree;version="${version}"
diff --git a/h2/src/installer/openoffice.txt b/h2/src/installer/openoffice.txt
index 238831aa91..dcd6d32cee 100644
--- a/h2/src/installer/openoffice.txt
+++ b/h2/src/installer/openoffice.txt
@@ -29,6 +29,10 @@ sub H2Pdf
HeadingStyle.BreakType = 3 ' Insert Page Break Before
HeadingStyle.ParaKeepTogether = false
+ For i = 1 to 4
+ ParagraphStyles.getByName("Heading " + i).OutlineLevel = i
+ Next
+
images = document.GraphicObjects
For i = 0 to images.getCount() - 1
image = images.getByIndex(i)
@@ -88,11 +92,15 @@ sub H2Pdf
dim linkStart(0) As New com.sun.star.beans.PropertyValue
dim linkEnd(0) As New com.sun.star.beans.PropertyValue
- For i = 1 To 4
+ for i = 1 To 4
oLevel = toc.LevelFormat.getByIndex(i)
- x = DimArray(5)
- x = Array(linkStart, oLevel(0), oLevel(1), oLevel(2), oLevel(3), linkEnd)
- old = oLevel(0)
+ bound = UBound(oLevel)
+ x = DimArray(bound + 2)
+ x(0) = linkStart
+ for j = 0 to bound
+ x(j + 1) = oLevel(j)
+ next
+ x(bound + 2) = linkEnd
linkStart(0).Name = "TokenType"
linkStart(0).Value = "TokenHyperlinkStart"
linkStart(0).Handle = -1
diff --git a/h2/src/installer/pom-mvstore-template.xml b/h2/src/installer/pom-mvstore-template.xml
index 491445284a..2a2b2cede1 100644
--- a/h2/src/installer/pom-mvstore-template.xml
+++ b/h2/src/installer/pom-mvstore-template.xml
@@ -5,18 +5,23 @@
@version@
jar
H2 MVStore
- http://www.h2database.com/html/mvstore.html
+ https://h2database.com/html/mvstore.html
H2 MVStore
- MPL 2.0, and EPL 1.0
- http://h2database.com/html/license.html
+ MPL 2.0
+ https://www.mozilla.org/en-US/MPL/2.0/
+ repo
+
+
+ EPL 1.0
+ https://opensource.org/licenses/eclipse-1.0.php
repo
- scm:svn:http://h2database.googlecode.com/svn/trunk
- http://h2database.googlecode.com/svn/trunk
+ scm:git:https://github.com/h2database/h2database
+ https://github.com/h2database/h2database
diff --git a/h2/src/installer/pom-template.xml b/h2/src/installer/pom-template.xml
index 1a79eda213..132a1a8f91 100644
--- a/h2/src/installer/pom-template.xml
+++ b/h2/src/installer/pom-template.xml
@@ -5,18 +5,23 @@
@version@
jar
H2 Database Engine
- http://www.h2database.com
+ https://h2database.com
H2 Database Engine
- MPL 2.0, and EPL 1.0
- http://h2database.com/html/license.html
+ MPL 2.0
+ https://www.mozilla.org/en-US/MPL/2.0/
+ repo
+
+
+ EPL 1.0
+ https://opensource.org/licenses/eclipse-1.0.php
repo
- scm:svn:http://h2database.googlecode.com/svn/trunk
- http://h2database.googlecode.com/svn/trunk
+ scm:git:https://github.com/h2database/h2database
+ https://github.com/h2database/h2database
diff --git a/h2/src/installer/release.txt b/h2/src/installer/release.txt
index 518eb63d7a..54bc01212d 100644
--- a/h2/src/installer/release.txt
+++ b/h2/src/installer/release.txt
@@ -1,33 +1,138 @@
-Check dictionary.txt
-svn up
-./build.sh spellcheck
-./build.sh javadocImpl
-./build.sh docs
-./build.sh jarMVStore (should be about 200 KB)
-Update Constants.java - change version and build number
-Update changelog.html - add new version, remove oldest
-Update newsfeed.sql - add new version, remove oldest
-Minor version change: change sourceError.html and source.html
-If a beta, change download.html: Version ${version} (${versionDate}), Beta
-If a beta, change mainWeb.html: Version ${version} (${versionDate}), Beta
-Benchmark: use latest versions of other dbs, change version(s) in performance.html
-Run ./buildRelease.sh / buildRelease.bat
-
-Scan for viruses
-Test installer, H2 Console (test new languages)
-Check docs, versions and links in main, downloads, build numbers
-Check the PDF file size
-
-Upload to SourceForge
-Upload to ftp://h2database.com
-Upload to ftp://h2database.com/m2-repo
-svn commit
-svn copy: /svn/trunk /svn/tags/version-1.1.x; Version 1.1.x (yyyy-mm-dd)
-Newsletter: prepare (always to BCC)
-Newsletter: send to h2-database-jp@googlegroups.com; h2-database@googlegroups.com; h2database-news@googlegroups.com; ...
-Add to http://twitter.com
-- tweet: add @geospatialnews for the new geometry type and disk spatial index
-Close bugs: http://code.google.com/p/h2database/issues/list
-Update statistics
+# Checklist for a release
+## Formatting, Spellchecking, Javadocs
+ git pull
+
+Do this until there are no errors.
+Fix typos, add new words to dictionary.txt:
+
+ ./build.sh clean compile spellcheck
+
+Add documentation for all public methods. Make methods private if possible:
+
+ ./build.sh clean compile javadocImpl
+
+Ensure lines are not overly long:
+
+ ./build.sh clean compile docs
+
+## MVStore Jar File Size Verification
+
+To ensure the MVStore jar file is not too large
+(that it does not reference the database code by accident),
+check that the file size is about 300 KB:
+
+ ./build.sh jarMVStore
+
+## Changing Version Numbers
+
+Update org.h2.engine.Constants.java:
+ change the version and build number:
+ set BUILD_DATE to today
+ increment BUILD_ID, the value must be even (for example, 202)
+ set VERSION_MAJOR / VERSION_MINOR to the new version number
+ if the last TCP_PROTOCOL_VERSION_## doesn't have a release date, set it to the current BUILD_DATE
+ check and, if necessary, update the links to the latest releases in previous release series and their checksums in download.html
+
+Update README.md:
+ set the version to the new version
+
+Update changelog.html:
+ * create a new "Next Version (unreleased)" with an empty list
+ * add a new version
+ * remove change log entries of the oldest version (keeping about 500 lines)
+
+Update newsfeed.sql:
+ * add new version, for example:
+ * (150, '1.4.200', '2019-10-14'),
+ * remove oldest entry in that list
+
+Update download-archive.html:
+ * add new version under Distribution section
+
+## Skipped
+
+* Minor version change: change sourceError.html and source.html
+* If a beta, change download.html: Version ${version} (${versionDate}), Beta
+* If a beta, change mainWeb.html: Version ${version} (${versionDate}), Beta
+
+The following can be skipped currently; benchmarks should probably be removed:
+* To update benchmark data: use latest versions of other dbs, change version(s) in performance.html
+
+## Build the Release
+
+In Build.java, comment "-Xdoclint:...", but don't commit that change.
+
+Run the following commands:
+Non-Windows:
+
+ cd src/installer
+ ./buildRelease.sh
+
+Windows:
+
+ cd src/installer
+ buildRelease.bat
+
+Scan for viruses.
+
+Test installer, H2 Console (test new languages).
+
+Check docs, versions and links in main, downloads, build numbers.
+
+Check the PDF file size.
+
+Upload ( = httpdocs and httpsdocs) to ftp://h2database.com//javadoc
+Upload ( = httpdocs and httpsdocs) to ftp://h2database.com//
+Upload ( = httpdocs and httpsdocs) to ftp://h2database.com//m2-repo
+
+Github: create a release.
+
+Newsletter: send (always to BCC!), the following:
+
+ h2-database@googlegroups.com; h2database-news@googlegroups.com; ...
+
+Create tweet at http://twitter.com
+
+## Sign files and publish files on Maven Central
+
+In Build.java, comment "-Xdoclint:none", but don't commit that change.
+
+ ./build.sh clean compile jar mavenDeployCentral
+ cd /data/h2database/m2-repo/com/h2database
+ # remove sha and md5 files:
+ find . -name "*.sha1" -delete
+ find . -name "*.md5" -delete
+ cd h2/1
+ # for each file separately (-javadoc.jar, -sources.jar, .jar, .pom):
+ gpg -u "Thomas Mueller Graf " -ab h2-<...>
+ jar -cvf bundle.jar h2-*
+ cd ../../h2-mvstore/1
+ # for each file separately (-javadoc.jar, -sources.jar, .jar, .pom):
+ gpg -u "Thomas Mueller Graf " -ab h2-mvstore<...>
+ jar -cvf bundle.jar h2-*
+ # http://central.sonatype.org/pages/ossrh-guide.html
+ # http://central.sonatype.org/pages/manual-staging-bundle-creation-and-deployment.html
+ # https://oss.sonatype.org/#welcome - Log In "t..."
+ # sometimes this doesn't work reliably and you will have to retry
+ # - Staging Upload
+ # - Upload Mode: Artifact Bundle, Select Bundle to Upload... - /data/h2database/.../h2/.../bundle.jar
+ # - Upload Bundle
+ # - Staging Repositories - Refresh - select comh2database-<...> - Release - Confirm
+ # - Staging Upload
+ # - Upload Mode: Artifact Bundle, Select Bundle to Upload... - /data/h2database/.../h2-mvstore/.../bundle.jar
+ # - Upload Bundle
+ # - Staging Repositories - Refresh - select comh2database-<...> - Release - Confirm
+
+Update statistics.
+
+Change version in pom.xml, commit, add version-*.*.*** tag.
+
+Update org.h2.engine.Constants.java:
+ increment BUILD_ID again, the value must be odd (for example, 203)
+Update h2/pom.xml.
+ set ...-SNAPSHOT to the next version (with this odd third number)
+Commit.
diff --git a/h2/src/installer/source-manifest.mf b/h2/src/installer/source-manifest.mf
index 63022f8fe7..bb3c215b5a 100644
--- a/h2/src/installer/source-manifest.mf
+++ b/h2/src/installer/source-manifest.mf
@@ -1,7 +1,7 @@
Manifest-Version: 1.0
Bundle-ManifestVersion: 2
Bundle-Name: H2 Database Engine Sources
-Bundle-SymbolicName: org.h2.source
+Bundle-SymbolicName: com.h2database.source
Bundle-Vendor: H2 Group
Bundle-Version: ${version}
-Eclipse-SourceBundle: org.h2;version="${version}"
\ No newline at end of file
+Eclipse-SourceBundle: com.h2database;version="${version}"
diff --git a/h2/src/installer/source-mvstore-manifest.mf b/h2/src/installer/source-mvstore-manifest.mf
new file mode 100644
index 0000000000..48c80436f9
--- /dev/null
+++ b/h2/src/installer/source-mvstore-manifest.mf
@@ -0,0 +1,7 @@
+Manifest-Version: 1.0
+Bundle-ManifestVersion: 2
+Bundle-Name: H2 MVStore Sources
+Bundle-SymbolicName: com.h2database.mvstore.source
+Bundle-Vendor: H2 Group
+Bundle-Version: ${version}
+Eclipse-SourceBundle: com.h2database.mvstore;version="${version}"
diff --git a/h2/src/java10/precompiled/org/h2/util/Utils10.class b/h2/src/java10/precompiled/org/h2/util/Utils10.class
new file mode 100644
index 0000000000..1ae38e89d7
Binary files /dev/null and b/h2/src/java10/precompiled/org/h2/util/Utils10.class differ
diff --git a/h2/src/java10/src/org/h2/util/Utils10.java b/h2/src/java10/src/org/h2/util/Utils10.java
new file mode 100644
index 0000000000..2ba397e893
--- /dev/null
+++ b/h2/src/java10/src/org/h2/util/Utils10.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.util;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.net.Socket;
+import java.nio.charset.Charset;
+
+import jdk.net.ExtendedSocketOptions;
+
+/**
+ * Utilities with specialized implementations for Java 10 and later versions.
+ *
+ * This class contains implementations for Java 10 and later versions.
+ */
+public final class Utils10 {
+
+ /**
+ * Converts the buffer's contents into a string by decoding the bytes using
+ * the specified {@link java.nio.charset.Charset charset}.
+ *
+ * @param baos
+ * the buffer to decode
+ * @param charset
+ * the charset to use
+ * @return the decoded string
+ */
+ public static String byteArrayOutputStreamToString(ByteArrayOutputStream baos, Charset charset) {
+ return baos.toString(charset);
+ }
+
+ /**
+ * Returns the value of TCP_QUICKACK option.
+ *
+ * @param socket
+ * the socket
+ * @return the current value of TCP_QUICKACK option
+ * @throws IOException
+ * on I/O exception
+ * @throws UnsupportedOperationException
+ * if TCP_QUICKACK is not supported
+ */
+ public static boolean getTcpQuickack(Socket socket) throws IOException {
+ return socket.getOption(ExtendedSocketOptions.TCP_QUICKACK);
+ }
+
+ /**
+ * Sets the value of TCP_QUICKACK option.
+ *
+ * @param socket
+ * the socket
+ * @param value
+ * the value to set
+ * @return whether operation was successful
+ */
+ public static boolean setTcpQuickack(Socket socket, boolean value) {
+ try {
+ socket.setOption(ExtendedSocketOptions.TCP_QUICKACK, value);
+ return true;
+ } catch (Throwable t) {
+ return false;
+ }
+ }
+
+ private Utils10() {
+ }
+
+}
diff --git a/h2/src/java10/src/org/h2/util/package.html b/h2/src/java10/src/org/h2/util/package.html
new file mode 100644
index 0000000000..5860dd0957
--- /dev/null
+++ b/h2/src/java10/src/org/h2/util/package.html
@@ -0,0 +1,14 @@
+
+
+
+
+Javadoc package documentation
+
+
+Internal utility classes reimplemented for Java 10 and later versions.
+
+
\ No newline at end of file
diff --git a/h2/src/java9/precompiled/org/h2/util/Bits.class b/h2/src/java9/precompiled/org/h2/util/Bits.class
new file mode 100644
index 0000000000..c5dabdfb86
Binary files /dev/null and b/h2/src/java9/precompiled/org/h2/util/Bits.class differ
diff --git a/h2/src/java9/src/org/h2/util/Bits.java b/h2/src/java9/src/org/h2/util/Bits.java
new file mode 100644
index 0000000000..fc323a8abf
--- /dev/null
+++ b/h2/src/java9/src/org/h2/util/Bits.java
@@ -0,0 +1,320 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.util;
+
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
+import java.nio.ByteOrder;
+import java.util.Arrays;
+import java.util.UUID;
+
+/**
+ * Manipulations with bytes and arrays. Specialized implementation for Java 9
+ * and later versions.
+ */
+public final class Bits {
+
+ /**
+ * VarHandle giving access to elements of a byte[] array viewed as if it
+ * were an int[] array on big-endian system.
+ */
+ private static final VarHandle INT_VH_BE = MethodHandles.byteArrayViewVarHandle(int[].class, ByteOrder.BIG_ENDIAN);
+
+ /**
+ * VarHandle giving access to elements of a byte[] array viewed as if it
+ * were an int[] array on little-endian system.
+ */
+ private static final VarHandle INT_VH_LE = MethodHandles.byteArrayViewVarHandle(int[].class,
+ ByteOrder.LITTLE_ENDIAN);
+
+ /**
+ * VarHandle giving access to elements of a byte[] array viewed as if it
+ * were a long[] array on big-endian system.
+ */
+ private static final VarHandle LONG_VH_BE = MethodHandles.byteArrayViewVarHandle(long[].class,
+ ByteOrder.BIG_ENDIAN);
+
+ /**
+ * VarHandle giving access to elements of a byte[] array viewed as if it
+ * were a long[] array on little-endian system.
+ */
+ private static final VarHandle LONG_VH_LE = MethodHandles.byteArrayViewVarHandle(long[].class,
+ ByteOrder.LITTLE_ENDIAN);
+
+ /**
+ * VarHandle giving access to elements of a byte[] array viewed as if it
+ * were a double[] array on big-endian system.
+ */
+ private static final VarHandle DOUBLE_VH_BE = MethodHandles.byteArrayViewVarHandle(double[].class,
+ ByteOrder.BIG_ENDIAN);
+
+ /**
+ * VarHandle giving access to elements of a byte[] array viewed as if it
+ * were a double[] array on little-endian system.
+ */
+ private static final VarHandle DOUBLE_VH_LE = MethodHandles.byteArrayViewVarHandle(double[].class,
+ ByteOrder.LITTLE_ENDIAN);
+
+ /**
+ * Compare the contents of two char arrays. If the content or length of the
+ * first array is smaller than the second array, -1 is returned. If the content
+ * or length of the second array is smaller than the first array, 1 is returned.
+ * If the contents and lengths are the same, 0 is returned.
+ *
+ * @param data1
+ * the first char array (must not be null)
+ * @param data2
+ * the second char array (must not be null)
+ * @return the result of the comparison (-1, 1 or 0)
+ */
+ public static int compareNotNull(char[] data1, char[] data2) {
+ return Integer.signum(Arrays.compare(data1, data2));
+ }
+
+ /**
+ * Compare the contents of two byte arrays. If the content or length of the
+ * first array is smaller than the second array, -1 is returned. If the content
+ * or length of the second array is smaller than the first array, 1 is returned.
+ * If the contents and lengths are the same, 0 is returned.
+ *
+ *
+ * This method interprets bytes as signed.
+ *
+ *
+ * @param data1
+ * the first byte array (must not be null)
+ * @param data2
+ * the second byte array (must not be null)
+ * @return the result of the comparison (-1, 1 or 0)
+ */
+ public static int compareNotNullSigned(byte[] data1, byte[] data2) {
+ return Integer.signum(Arrays.compare(data1, data2));
+ }
+
+ /**
+ * Compare the contents of two byte arrays. If the content or length of the
+ * first array is smaller than the second array, -1 is returned. If the content
+ * or length of the second array is smaller than the first array, 1 is returned.
+ * If the contents and lengths are the same, 0 is returned.
+ *
+ *
+ * This method interprets bytes as unsigned.
+ *
+ *
+ * @param data1
+ * the first byte array (must not be null)
+ * @param data2
+ * the second byte array (must not be null)
+ * @return the result of the comparison (-1, 1 or 0)
+ */
+ public static int compareNotNullUnsigned(byte[] data1, byte[] data2) {
+ return Integer.signum(Arrays.compareUnsigned(data1, data2));
+ }
+
+ /**
+ * Reads an int value from the byte array at the given position in big-endian
+ * order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @return the value
+ */
+ public static int readInt(byte[] buff, int pos) {
+ return (int) INT_VH_BE.get(buff, pos);
+ }
+
+ /**
+ * Reads an int value from the byte array at the given position in
+ * little-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @return the value
+ */
+ public static int readIntLE(byte[] buff, int pos) {
+ return (int) INT_VH_LE.get(buff, pos);
+ }
+
+ /**
+ * Reads a long value from the byte array at the given position in
+ * big-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @return the value
+ */
+ public static long readLong(byte[] buff, int pos) {
+ return (long) LONG_VH_BE.get(buff, pos);
+ }
+
+ /**
+ * Reads a long value from the byte array at the given position in
+ * little-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @return the value
+ */
+ public static long readLongLE(byte[] buff, int pos) {
+ return (long) LONG_VH_LE.get(buff, pos);
+ }
+
+ /**
+ * Reads a double value from the byte array at the given position in
+ * big-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @return the value
+ */
+ public static double readDouble(byte[] buff, int pos) {
+ return (double) DOUBLE_VH_BE.get(buff, pos);
+ }
+
+ /**
+ * Reads a double value from the byte array at the given position in
+ * little-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @return the value
+ */
+ public static double readDoubleLE(byte[] buff, int pos) {
+ return (double) DOUBLE_VH_LE.get(buff, pos);
+ }
+
+ /**
+ * Converts UUID value to byte array in big-endian order.
+ *
+ * @param msb
+ * most significant part of UUID
+ * @param lsb
+ * least significant part of UUID
+ * @return byte array representation
+ */
+ public static byte[] uuidToBytes(long msb, long lsb) {
+ byte[] buff = new byte[16];
+ LONG_VH_BE.set(buff, 0, msb);
+ LONG_VH_BE.set(buff, 8, lsb);
+ return buff;
+ }
+
+ /**
+ * Converts UUID value to byte array in big-endian order.
+ *
+ * @param uuid
+ * UUID value
+ * @return byte array representation
+ */
+ public static byte[] uuidToBytes(UUID uuid) {
+ return uuidToBytes(uuid.getMostSignificantBits(), uuid.getLeastSignificantBits());
+ }
+
+ /**
+ * Writes an int value to the byte array at the given position in big-endian
+ * order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @param x
+ * the value to write
+ */
+ public static void writeInt(byte[] buff, int pos, int x) {
+ INT_VH_BE.set(buff, pos, x);
+ }
+
+ /**
+ * Writes an int value to the byte array at the given position in
+ * little-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @param x
+ * the value to write
+ */
+ public static void writeIntLE(byte[] buff, int pos, int x) {
+ INT_VH_LE.set(buff, pos, x);
+ }
+
+ /**
+ * Writes a long value to the byte array at the given position in big-endian
+ * order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @param x
+ * the value to write
+ */
+ public static void writeLong(byte[] buff, int pos, long x) {
+ LONG_VH_BE.set(buff, pos, x);
+ }
+
+ /**
+ * Writes a long value to the byte array at the given position in
+ * little-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @param x
+ * the value to write
+ */
+ public static void writeLongLE(byte[] buff, int pos, long x) {
+ LONG_VH_LE.set(buff, pos, x);
+ }
+
+ /**
+ * Writes a double value to the byte array at the given position in
+ * big-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @param x
+ * the value to write
+ */
+ public static void writeDouble(byte[] buff, int pos, double x) {
+ DOUBLE_VH_BE.set(buff, pos, x);
+ }
+
+ /**
+ * Writes a double value to the byte array at the given position in
+ * little-endian order.
+ *
+ * @param buff
+ * the byte array
+ * @param pos
+ * the position
+ * @param x
+ * the value to write
+ */
+ public static void writeDoubleLE(byte[] buff, int pos, double x) {
+ DOUBLE_VH_LE.set(buff, pos, x);
+ }
+
+ private Bits() {
+ }
+}
diff --git a/h2/src/java9/src/org/h2/util/package.html b/h2/src/java9/src/org/h2/util/package.html
new file mode 100644
index 0000000000..9ef3d9ca4e
--- /dev/null
+++ b/h2/src/java9/src/org/h2/util/package.html
@@ -0,0 +1,14 @@
+
+
+
+
+Javadoc package documentation
+
+
+Internal utility classes reimplemented for Java 9 and later versions.
+
+
\ No newline at end of file
diff --git a/h2/src/main/META-INF/MANIFEST.MF b/h2/src/main/META-INF/MANIFEST.MF
index f954daf194..c4a0ae3b15 100644
--- a/h2/src/main/META-INF/MANIFEST.MF
+++ b/h2/src/main/META-INF/MANIFEST.MF
@@ -1,48 +1,60 @@
Manifest-Version: 1.0
-Implementation-Title: ${title}
-Implementation-URL: http://www.h2database.com
+Implementation-Title: H2 Database Engine
+Implementation-URL: https://h2database.com
Implementation-Version: ${version}
Build-Jdk: ${buildJdk}
Created-By: ${createdBy}
-${mainClassTag}
+Main-Class: org.h2.tools.Console
+Automatic-Module-Name: com.h2database
Bundle-Activator: org.h2.util.DbDriverActivator
Bundle-ManifestVersion: 2
Bundle-Name: H2 Database Engine
-Bundle-SymbolicName: org.h2
+Bundle-SymbolicName: com.h2database
Bundle-Vendor: H2 Group
Bundle-Version: ${version}
-Bundle-License: http://www.h2database.com/html/license.html
+Bundle-License: https://h2database.com/html/license.html
Bundle-Category: jdbc
-Import-Package: javax.management,
+Multi-Release: true
+Import-Package: javax.crypto,
+ javax.crypto.spec,
+ javax.management,
javax.naming;resolution:=optional,
+ javax.naming.directory;resolution:=optional,
javax.naming.spi;resolution:=optional,
javax.net,
javax.net.ssl,
+ javax.script;resolution:=optional,
+ javax.security.auth.callback;resolution:=optional,
+ javax.security.auth.login;resolution:=optional,
javax.servlet;resolution:=optional,
javax.servlet.http;resolution:=optional,
+ jakarta.servlet;resolution:=optional,
+ jakarta.servlet.http;resolution:=optional,
javax.sql,
javax.tools;resolution:=optional,
javax.transaction.xa;resolution:=optional,
- org.apache.lucene.analysis;version="[3.0.0,3.1.0)";resolution:=optional,
- org.apache.lucene.analysis.standard;version="[3.0.0,3.1.0)";resolution:=optional,
- org.apache.lucene.document;version="[3.0.0,3.1.0)";resolution:=optional,
- org.apache.lucene.index;version="[3.0.0,3.1.0)";resolution:=optional,
- org.apache.lucene.queryParser;version="[3.0.0,3.1.0)";resolution:=optional,
- org.apache.lucene.search;version="[3.0.0,3.1.0)";resolution:=optional,
- org.apache.lucene.store;version="[3.0.0,3.1.0)";resolution:=optional,
- org.apache.lucene.util;version="[3.0.0,3.1.0)";resolution:=optional,
- com.vividsolutions.jts.geom;version="1.13";resolution:=optional,
- com.vividsolutions.jts.io;version="1.13";resolution:=optional,
- org.h2;version="[${version},1.5.0)",
- org.h2.api;version="[${version},1.5.0)",
- org.h2.fulltext;version="[${version},1.5.0)",
- org.h2.jdbcx;version="[${version},1.5.0)",
- org.h2.tools;version="[${version},1.5.0)",
- org.h2.util;version="[${version},1.5.0)",
- org.h2.value;version="[${version},1.5.0)",
+ javax.xml.parsers;resolution:=optional,
+ javax.xml.stream;resolution:=optional,
+ javax.xml.transform;resolution:=optional,
+ javax.xml.transform.dom;resolution:=optional,
+ javax.xml.transform.sax;resolution:=optional,
+ javax.xml.transform.stax;resolution:=optional,
+ javax.xml.transform.stream;resolution:=optional,
+ org.w3c.dom;resolution:=optional,
+ org.xml.sax;resolution:=optional,
+ org.xml.sax.helpers;resolution:=optional,
+ org.apache.lucene.analysis;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.apache.lucene.analysis.standard;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.apache.lucene.document;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.apache.lucene.index;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.apache.lucene.queryparser;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.apache.lucene.search;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.apache.lucene.store;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.apache.lucene.util;version="[8.5.2,9.0.0)";resolution:=optional,
+ org.locationtech.jts.geom;version="1.17.0";resolution:=optional,
org.osgi.framework;version="1.5",
org.osgi.service.jdbc;version="1.0";resolution:=optional,
- org.slf4j;version="[1.6.0,1.7.0)";resolution:=optional
+ org.slf4j;version="[1.7.0,1.8.0)";resolution:=optional
Export-Package: org.h2;version="${version}",
org.h2.api;version="${version}",
org.h2.constant;version="${version}",
@@ -55,6 +67,9 @@ Export-Package: org.h2;version="${version}",
org.h2.bnf;version="${version}",
org.h2.bnf.context;version="${version}",
org.h2.mvstore;version="${version}",
+ org.h2.mvstore.tx;version="${version}",
org.h2.mvstore.type;version="${version}",
- org.h2.mvstore.rtree;version="${version}"
+ org.h2.mvstore.rtree;version="${version}",
+ org.h2.store.fs;version="${version}"
+Provide-Capability: osgi.service;objectClass:List=org.osgi.service.jdbc.DataSourceFactory
Premain-Class: org.h2.util.Profiler
diff --git a/h2/src/main/org/h2/Driver.java b/h2/src/main/org/h2/Driver.java
index ba33bbac03..a0660fc5fd 100644
--- a/h2/src/main/org/h2/Driver.java
+++ b/h2/src/main/org/h2/Driver.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2;
@@ -10,14 +10,11 @@
import java.sql.DriverPropertyInfo;
import java.sql.SQLException;
import java.util.Properties;
+import java.util.logging.Logger;
+import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.jdbc.JdbcConnection;
import org.h2.message.DbException;
-import org.h2.upgrade.DbUpgrade;
-
-/*## Java 1.7 ##
-import java.util.logging.Logger;
-//*/
/**
* The database driver. An application should not use this class directly. The
@@ -31,14 +28,14 @@
* "jdbc:h2:˜/test", "sa", "sa");
*
*/
-public class Driver implements java.sql.Driver {
+public class Driver implements java.sql.Driver, JdbcDriverBackwardsCompat {
private static final Driver INSTANCE = new Driver();
private static final String DEFAULT_URL = "jdbc:default:connection";
private static final ThreadLocal<Connection> DEFAULT_CONNECTION =
- new ThreadLocal();
+ new ThreadLocal<>();
- private static volatile boolean registered;
+ private static boolean registered;
static {
load();
@@ -52,26 +49,18 @@ public class Driver implements java.sql.Driver {
* @param url the database URL
* @param info the connection properties
* @return the new connection or null if the URL is not supported
+ * @throws SQLException on connection exception or if URL is {@code null}
*/
@Override
public Connection connect(String url, Properties info) throws SQLException {
- try {
- if (info == null) {
- info = new Properties();
- }
- if (!acceptsURL(url)) {
- return null;
- }
- if (url.equals(DEFAULT_URL)) {
- return DEFAULT_CONNECTION.get();
- }
- Connection c = DbUpgrade.connectOrUpgrade(url, info);
- if (c != null) {
- return c;
- }
- return new JdbcConnection(url, info);
- } catch (Exception e) {
- throw DbException.toSQLException(e);
+ if (url == null) {
+ throw DbException.getJdbcSQLException(ErrorCode.URL_FORMAT_ERROR_2, null, Constants.URL_FORMAT, null);
+ } else if (url.startsWith(Constants.START_URL)) {
+ return new JdbcConnection(url, info, null, null, false);
+ } else if (url.equals(DEFAULT_URL)) {
+ return DEFAULT_CONNECTION.get();
+ } else {
+ return null;
}
}
@@ -81,17 +70,19 @@ public Connection connect(String url, Properties info) throws SQLException {
*
* @param url the database URL
* @return if the driver understands the URL
+ * @throws SQLException if URL is {@code null}
*/
@Override
- public boolean acceptsURL(String url) {
- if (url != null) {
- if (url.startsWith(Constants.START_URL)) {
- return true;
- } else if (url.equals(DEFAULT_URL)) {
- return DEFAULT_CONNECTION.get() != null;
- }
+ public boolean acceptsURL(String url) throws SQLException {
+ if (url == null) {
+ throw DbException.getJdbcSQLException(ErrorCode.URL_FORMAT_ERROR_2, null, Constants.URL_FORMAT, null);
+ } else if (url.startsWith(Constants.START_URL)) {
+ return true;
+ } else if (url.equals(DEFAULT_URL)) {
+ return DEFAULT_CONNECTION.get() != null;
+ } else {
+ return false;
}
- return false;
}
/**
@@ -143,14 +134,14 @@ public boolean jdbcCompliant() {
/**
* [Not supported]
*/
-/*## Java 1.7 ##
+ @Override
public Logger getParentLogger() {
return null;
}
-//*/
/**
* INTERNAL
+ * @return instance of the driver registered with the DriverManager
*/
public static synchronized Driver load() {
try {
@@ -182,6 +173,7 @@ public static synchronized void unload() {
* INTERNAL
* Sets, on a per-thread basis, the default-connection for
* user-defined functions.
+ * @param c to set default to
*/
public static void setDefaultConnection(Connection c) {
if (c == null) {
@@ -193,6 +185,7 @@ public static void setDefaultConnection(Connection c) {
/**
* INTERNAL
+ * @param thread to set context class loader for
*/
public static void setThreadContextClassLoader(Thread thread) {
// Apache Tomcat: use the classloader of the driver to avoid the
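
The rewritten connect and acceptsURL above make a null URL an error (URL_FORMAT_ERROR_2) instead of silently ignoring it, and drop the old DbUpgrade path. Applications keep using the driver through DriverManager exactly as the class Javadoc shows; a minimal sketch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class DriverExample {
        public static void main(String[] args) throws SQLException {
            // URL, user and password as in the class Javadoc; embedded
            // databases are created on demand unless IFEXISTS is set.
            try (Connection conn = DriverManager.getConnection("jdbc:h2:~/test", "sa", "sa");
                    Statement stat = conn.createStatement();
                    ResultSet rs = stat.executeQuery("SELECT 1")) {
                rs.next();
                System.out.println(rs.getInt(1));
            }
        }
    }
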
diff --git a/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java b/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java
new file mode 100644
index 0000000000..4d033fd00c
--- /dev/null
+++ b/h2/src/main/org/h2/JdbcDriverBackwardsCompat.java
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2;
+
+/**
+ * Allows us to compile on older platforms, while still implementing the methods
+ * from the newer JDBC API.
+ */
+public interface JdbcDriverBackwardsCompat {
+
+ // compatibility interface
+
+}
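
The empty interface above lets the driver compile on platforms whose java.sql.Driver predates newer JDBC methods: such methods can be declared here with default bodies and overridden in Driver. A hypothetical illustration of the pattern (the method name is invented; the real interface is intentionally empty at this point):

    // Hypothetical sketch of the backwards-compatibility pattern.
    public interface NewApiBackwardsCompat {

        // A method introduced by a newer API version is declared with a
        // default body, so implementors compile even where the platform
        // interface lacks it.
        default String newerApiMethod() {
            throw new UnsupportedOperationException();
        }
    }
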
diff --git a/h2/src/main/org/h2/api/Aggregate.java b/h2/src/main/org/h2/api/Aggregate.java
index 8e6d30e6a8..6169d0cec4 100644
--- a/h2/src/main/org/h2/api/Aggregate.java
+++ b/h2/src/main/org/h2/api/Aggregate.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.api;
@@ -19,8 +19,11 @@ public interface Aggregate {
* A new object is created for each invocation.
*
* @param conn a connection to the database
+ * @throws SQLException on SQL exception
*/
- void init(Connection conn) throws SQLException;
+ default void init(Connection conn) throws SQLException {
+ // Do nothing by default
+ }
/**
* This method must return the H2 data type, {@link org.h2.value.Value},
@@ -40,13 +43,17 @@ public interface Aggregate {
* those are passed as array.
*
* @param value the value(s) for this row
+ * @throws SQLException on failure
*/
void add(Object value) throws SQLException;
/**
- * This method returns the computed aggregate value.
+ * This method returns the computed aggregate value. This method must
+ * preserve previously added values and must be able to reevaluate result if
+ * more values were added since its previous invocation.
*
* @return the aggregated value
+ * @throws SQLException on failure
*/
Object getResult() throws SQLException;
diff --git a/h2/src/main/org/h2/api/AggregateFunction.java b/h2/src/main/org/h2/api/AggregateFunction.java
index 7a547d4b80..916853edcd 100644
--- a/h2/src/main/org/h2/api/AggregateFunction.java
+++ b/h2/src/main/org/h2/api/AggregateFunction.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.api;
@@ -24,8 +24,11 @@ public interface AggregateFunction {
* A new object is created for each invocation.
*
* @param conn a connection to the database
+ * @throws SQLException on SQL exception
*/
- void init(Connection conn) throws SQLException;
+ default void init(Connection conn) throws SQLException {
+ // Do nothing by default
+ }
/**
* This method must return the SQL type of the method, given the SQL type of
@@ -34,6 +37,7 @@ public interface AggregateFunction {
*
* @param inputTypes the SQL type of the parameters, {@link java.sql.Types}
* @return the SQL type of the result
+ * @throws SQLException on failure
*/
int getType(int[] inputTypes) throws SQLException;
@@ -43,13 +47,17 @@ public interface AggregateFunction {
* those are passed as array.
*
* @param value the value(s) for this row
+ * @throws SQLException on failure
*/
void add(Object value) throws SQLException;
/**
- * This method returns the computed aggregate value.
+ * This method returns the computed aggregate value. This method must
+ * preserve previously added values and must be able to reevaluate result if
+ * more values were added since its previous invocation.
*
* @return the aggregated value
+ * @throws SQLException on failure
*/
Object getResult() throws SQLException;
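
With init(Connection) defaulting to a no-op, an implementation only has to provide getType, add, and getResult, and the sharpened getResult contract means accumulated state must survive between calls. A minimal sketch of a geometric-mean aggregate under these rules (the class name is ours; it assumes a single numeric argument per row):

    import java.sql.SQLException;
    import java.sql.Types;
    import org.h2.api.AggregateFunction;

    public class GeometricMean implements AggregateFunction {

        private double logSum;
        private long count;

        @Override
        public int getType(int[] inputTypes) throws SQLException {
            return Types.DOUBLE; // result type, whatever the input type was
        }

        @Override
        public void add(Object value) throws SQLException {
            if (value != null) { // NULLs are skipped in this sketch
                logSum += Math.log(((Number) value).doubleValue());
                count++;
            }
        }

        @Override
        public Object getResult() throws SQLException {
            // State is preserved, so the result can be re-evaluated if
            // more values are added after a previous invocation.
            return count == 0 ? null : Math.exp(logSum / count);
        }
    }

Registered with something like CREATE AGGREGATE GEO_MEAN FOR "com.acme.GeometricMean" (assuming the class sits in com.acme), it can then be used like a built-in aggregate.
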
diff --git a/h2/src/main/org/h2/api/CredentialsValidator.java b/h2/src/main/org/h2/api/CredentialsValidator.java
new file mode 100644
index 0000000000..79dae86059
--- /dev/null
+++ b/h2/src/main/org/h2/api/CredentialsValidator.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: Alessandro Ventura
+ */
+package org.h2.api;
+
+import org.h2.security.auth.AuthenticationInfo;
+import org.h2.security.auth.Configurable;
+
+/**
+ * A class that implements this interface can be used to validate credentials
+ * provided by a client.
+ *
+ * This feature is experimental and subject to change.
+ *
+ */
+public interface CredentialsValidator extends Configurable {
+
+ /**
+ * Validate user credential.
+ *
+ * @param authenticationInfo
+ * the authentication info
+ * @return true if credentials are valid, otherwise false
+ * @throws Exception
+ * if the credentials are invalid or an internal issue
+ * prevents the user from logging in
+ */
+ boolean validateCredentials(AuthenticationInfo authenticationInfo) throws Exception;
+
+}
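
A sketch of a validator for the experimental interface above. The configure method is inherited from Configurable; its ConfigProperties parameter type and the getUserName accessor on AuthenticationInfo are assumptions made for this illustration:

    import org.h2.api.CredentialsValidator;
    import org.h2.security.auth.AuthenticationInfo;
    import org.h2.security.auth.ConfigProperties;

    public class StaticUserValidator implements CredentialsValidator {

        private String allowedUser = "sa"; // illustrative default

        @Override
        public void configure(ConfigProperties configProperties) {
            // Validator-specific settings would be read here (assumed
            // signature of the method inherited from Configurable).
        }

        @Override
        public boolean validateCredentials(AuthenticationInfo authenticationInfo) throws Exception {
            // getUserName() is assumed for this sketch.
            return allowedUser.equals(authenticationInfo.getUserName());
        }
    }
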
diff --git a/h2/src/main/org/h2/api/DatabaseEventListener.java b/h2/src/main/org/h2/api/DatabaseEventListener.java
index 0fa85d0685..67f3c8eb9e 100644
--- a/h2/src/main/org/h2/api/DatabaseEventListener.java
+++ b/h2/src/main/org/h2/api/DatabaseEventListener.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.api;
@@ -12,7 +12,7 @@
* A class that implements this interface can get notified about exceptions
* and other events. A database event listener can be registered when
* connecting to a database. Example database URL:
- * jdbc:h2:test;DATABASE_EVENT_LISTENER='com.acme.DbListener'
+ * jdbc:h2:./test;DATABASE_EVENT_LISTENER='com.acme.DbListener'
*/
public interface DatabaseEventListener extends EventListener {
@@ -66,13 +66,15 @@ public interface DatabaseEventListener extends EventListener {
*
* @param url - the database URL
*/
- void init(String url);
+ default void init(String url) {
+ }
/**
- * This method is called after the database has been opened. It is save to
+ * This method is called after the database has been opened. It is safe to
* connect to the database and execute statements at this point.
*/
- void opened();
+ default void opened() {
+ }
/**
* This method is called if an exception occurred.
@@ -80,7 +82,8 @@ public interface DatabaseEventListener extends EventListener {
* @param e the exception
* @param sql the SQL statement
*/
- void exceptionThrown(SQLException e, String sql);
+ default void exceptionThrown(SQLException e, String sql) {
+ }
/**
* This method is called for long running events, such as recovering,
@@ -93,15 +96,17 @@ public interface DatabaseEventListener extends EventListener {
* @param state the state
* @param name the object name
* @param x the current position
- * @param max the highest possible value (might be 0)
+ * @param max the highest possible value or 0 if unknown
*/
- void setProgress(int state, String name, int x, int max);
+ default void setProgress(int state, String name, long x, long max) {
+ }
/**
- * This method is called before the database is closed normally. It is save
+ * This method is called before the database is closed normally. It is safe
* to connect to the database and execute statements at this point, however
* the connection must be closed before the method returns.
*/
- void closingDatabase();
+ default void closingDatabase() {
+ }
}
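
Because every callback now has an empty default body, a listener only overrides what it needs and is registered through the URL shown in the Javadoc above (DATABASE_EVENT_LISTENER='com.acme.DbListener'). A minimal sketch:

    import java.sql.SQLException;
    import org.h2.api.DatabaseEventListener;

    public class DbListener implements DatabaseEventListener {

        // Only the callbacks of interest are overridden; everything else
        // falls back to the empty default implementations.
        @Override
        public void exceptionThrown(SQLException e, String sql) {
            System.err.println("Failed statement: " + sql + " (" + e.getMessage() + ')');
        }

        @Override
        public void setProgress(int state, String name, long x, long max) {
            System.out.println(name + ": " + x + '/' + (max == 0 ? "?" : Long.toString(max)));
        }
    }
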
diff --git a/h2/src/main/org/h2/api/ErrorCode.java b/h2/src/main/org/h2/api/ErrorCode.java
index 71cec6a1da..bb74ebef80 100644
--- a/h2/src/main/org/h2/api/ErrorCode.java
+++ b/h2/src/main/org/h2/api/ErrorCode.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.api;
@@ -106,6 +106,12 @@ public class ErrorCode {
*/
public static final int NUMERIC_VALUE_OUT_OF_RANGE_1 = 22003;
+ /**
+ * The error with code 22004 is thrown when a value is out of
+ * range when converting to another column's data type.
+ */
+ public static final int NUMERIC_VALUE_OUT_OF_RANGE_2 = 22004;
+
/**
* The error with code 22007 is thrown when
* a text can not be converted to a date, time, or timestamp constant.
@@ -127,6 +133,15 @@ public class ErrorCode {
*/
public static final int DIVISION_BY_ZERO_1 = 22012;
+ /**
+ * The error with code 22013 is thrown when preceding or
+ * following size in a window function is null or negative. Example:
+ *
+ * FIRST_VALUE(N) OVER(ORDER BY N ROWS -1 PRECEDING)
+ *
+ */
+ public static final int INVALID_PRECEDING_OR_FOLLOWING_1 = 22013;
+
/**
* The error with code 22018 is thrown when
* trying to convert a value to a data type where the conversion is
@@ -159,6 +174,55 @@ public class ErrorCode {
*/
public static final int LIKE_ESCAPE_ERROR_1 = 22025;
+ /**
+ * The error with code 22030 is thrown when
+ * an attempt is made to INSERT or UPDATE an ENUM-typed cell,
+ * but the value is not one of the values enumerated by the
+ * type.
+ *
+ * Example:
+ *
+ * CREATE TABLE TEST(CASE ENUM('sensitive','insensitive'));
+ * INSERT INTO TEST VALUES('snake');
+ *
+ */
+ public static final int ENUM_VALUE_NOT_PERMITTED = 22030;
+
+ /**
+ * The error with code 22032 is thrown when an
+ * attempt is made to add or modify an ENUM-typed column so
+ * that one or more of its enumerators would be empty.
+ *
+ * Example:
+ *
+ * CREATE TABLE TEST(CASE ENUM(' '));
+ *
+ */
+ public static final int ENUM_EMPTY = 22032;
+
+ /**
+ * The error with code 22033 is thrown when an
+ * attempt is made to add or modify an ENUM-typed column so
+ * that it would have duplicate values.
+ *
+ * Example:
+ *
+ * CREATE TABLE TEST(CASE ENUM('sensitive', 'sensitive'));
+ *
+ */
+ public static final int ENUM_DUPLICATE = 22033;
+
+ /**
+ * The error with code 22034 is thrown when an
+ * attempt is made to read non-existing element of an array.
+ *
+ * Example:
+ *
+ * VALUES ARRAY[1, 2][3]
+ *
+ */
+ public static final int ARRAY_ELEMENT_ERROR_2 = 22034;
+
// 23: constraint violation
/**
@@ -228,7 +292,7 @@ public class ErrorCode {
* The error with code 23513 is thrown when
* a check constraint is violated. Example:
*
- * CREATE TABLE TEST(ID INT CHECK ID>0);
+ * CREATE TABLE TEST(ID INT CHECK (ID>0));
* INSERT INTO TEST VALUES(0);
*
*/
@@ -236,7 +300,7 @@ public class ErrorCode {
/**
* The error with code 23514 is thrown when
- * evaluation of a check constraint resulted in a error.
+ * evaluation of a check constraint resulted in an error.
*/
public static final int CHECK_CONSTRAINT_INVALID = 23514;
@@ -264,7 +328,7 @@ public class ErrorCode {
* sessions are also possible. To solve deadlock problems, an application
* should lock tables always in the same order, such as always lock table A
* before locking table B. For details, see <a
+ * href="https://en.wikipedia.org/wiki/Deadlock">Wikipedia Deadlock</a>.
*/
public static final int DEADLOCK_1 = 40001;
@@ -314,6 +378,30 @@ public class ErrorCode {
*/
public static final int TABLE_OR_VIEW_NOT_FOUND_1 = 42102;
+ /**
+ * The error with code 42103 is thrown when
+ * trying to query, modify or drop a table or view that does not exist
+ * in this schema and database but similar names were found. A common cause
+ * is that the names are written in different case.
+ * Example:
+ *
+ * SELECT * FROM ABC;
+ *
+ */
+ public static final int TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2 = 42103;
+
+ /**
+ * The error with code 42104 is thrown when
+ * trying to query, modify or drop a table or view that does not exist
+ * in this schema and database but it is empty anyway. A common cause is
+ * that the wrong database was opened.
+ * Example:
+ *
+ * SELECT * FROM ABC;
+ *
+ */
+ public static final int TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1 = 42104;
+
/**
* The error with code 42111 is thrown when
* trying to create an index if an index with the same name already exists.
@@ -358,6 +446,52 @@ public class ErrorCode {
*/
public static final int COLUMN_NOT_FOUND_1 = 42122;
+ /**
+ * The error with code 42131 is thrown when
+ * identical expressions should be used, but different
+ * expressions were found.
+ * Example:
+ *
+ * SELECT MODE(A ORDER BY B) FROM TEST;
+ *
+ */
+ public static final int IDENTICAL_EXPRESSIONS_SHOULD_BE_USED = 42131;
+
+ /**
+ * The error with code 42602 is thrown when
+ * an invalid identifier name is used.
+ * Example:
+ *
+ * statement.enquoteIdentifier("\"", true);
+ *
+ */
+ public static final int INVALID_NAME_1 = 42602;
+
+ /**
+ * The error with code 42622 is thrown when
+ * an identifier name is too long.
+ * Example:
+ *
+ * char[] c = new char[1000];
+ * Arrays.fill(c, 'A');
+ * statement.executeQuery("SELECT 1 " + new String(c));
+ *
+ */
+ public static final int NAME_TOO_LONG_2 = 42622;
+
+ // 54: program limit exceeded
+
+ /**
+ * The error with code 54011 is thrown when
+ * too many columns were specified in a table, select statement,
+ * or row value.
+ * Example:
+ *
+ * CREATE TABLE TEST(C1 INTEGER, C2 INTEGER, ..., C20000 INTEGER);
+ *
+ */
+ public static final int TOO_MANY_COLUMNS_1 = 54011;
+
// 0A: feature not supported
// HZ: remote database access
@@ -407,6 +541,18 @@ public class ErrorCode {
*/
public static final int LOCK_TIMEOUT_1 = 50200;
+ /**
+ * The error with code 57014 is thrown when
+ * a statement was canceled using Statement.cancel() or
+ * when the query timeout has been reached.
+ * Examples:
+ *
+ * stat.setQueryTimeout(1);
+ * stat.cancel();
+ *
+ */
+ public static final int STATEMENT_WAS_CANCELED = 57014;
+
/**
* The error with code 90000 is thrown when
* a function that does not return a result set was used in the FROM clause.
@@ -463,10 +609,9 @@ public class ErrorCode {
/**
* The error with code 90005 is thrown when
- * trying to create a trigger and using the combination of SELECT
- * and FOR EACH ROW, which we do not support.
+ * trying to create a trigger with invalid combination of flags.
*/
- public static final int TRIGGER_SELECT_AND_ROW_BASED_NOT_SUPPORTED = 90005;
+ public static final int INVALID_TRIGGER_FLAGS_1 = 90005;
/**
* The error with code 90006 is thrown when
@@ -496,7 +641,7 @@ public class ErrorCode {
* trying to create a sequence with an invalid combination
* of attributes (min value, max value, start value, etc).
*/
- public static final int SEQUENCE_ATTRIBUTES_INVALID = 90009;
+ public static final int SEQUENCE_ATTRIBUTES_INVALID_7 = 90009;
/**
* The error with code 90010 is thrown when
@@ -535,13 +680,11 @@ public class ErrorCode {
public static final int PARAMETER_NOT_SET_1 = 90012;
/**
- * The error with code 90013 is thrown when
- * trying to open a database that does not exist using the flag
- * IFEXISTS=TRUE, or when trying to access a database object with a catalog
- * name that does not match the database name. Example:
+ * The error with code 90013 is thrown when trying to access
+ * a database object with a catalog name that does not match the database
+ * name.
*
- * CREATE TABLE TEST(ID INT);
- * SELECT XYZ.PUBLIC.TEST.ID FROM TEST;
+ * SELECT * FROM database_that_does_not_exist.table_name
*
*/
public static final int DATABASE_NOT_FOUND_1 = 90013;
@@ -661,13 +804,22 @@ public class ErrorCode {
public static final int FUNCTION_NOT_FOUND_1 = 90022;
/**
- * The error with code 90023 is thrown when
- * trying to set a primary key on a nullable column.
- * Example:
+ * The error with code 90023 is thrown when trying to set a
+ * primary key on a nullable column or when trying to drop NOT NULL
+ * constraint on primary key or identity column.
+ * Examples:
*
* CREATE TABLE TEST(ID INT, NAME VARCHAR);
* ALTER TABLE TEST ADD CONSTRAINT PK PRIMARY KEY(ID);
*
+ *
+ * CREATE TABLE TEST(ID INT PRIMARY KEY, NAME VARCHAR);
+ * ALTER TABLE TEST ALTER COLUMN ID DROP NOT NULL;
+ *
+ *
+ * CREATE TABLE TEST(ID INT GENERATED ALWAYS AS IDENTITY, NAME VARCHAR);
+ * ALTER TABLE TEST ALTER COLUMN ID DROP NOT NULL;
+ *
*/
public static final int COLUMN_MUST_NOT_BE_NULLABLE_1 = 90023;
@@ -926,32 +1078,15 @@ public class ErrorCode {
*/
public static final int WRONG_PASSWORD_FORMAT = 90050;
- /**
- * The error with code 57014 is thrown when
- * a statement was canceled using Statement.cancel() or
- * when the query timeout has been reached.
- * Examples:
- *
- * stat.setQueryTimeout(1);
- * stat.cancel();
- *
- */
- public static final int STATEMENT_WAS_CANCELED = 57014;
+ // 90051 was removed
/**
- * The error with code 90052 is thrown when
- * a subquery that is used as a value contains more than one column.
- * Example of wrong usage:
- *
- * CREATE TABLE TEST(ID INT);
- * INSERT INTO TEST VALUES(1), (2);
- * SELECT * FROM TEST WHERE ID IN (SELECT 1, 2 FROM DUAL);
- *
- * Correct:
+ * The error with code 90052 is thrown when a single-column
+ * subquery is expected but a subquery with a different number of columns was
+ * specified.
+ * Example:
*
- * CREATE TABLE TEST(ID INT);
- * INSERT INTO TEST VALUES(1), (2);
- * SELECT * FROM TEST WHERE ID IN (1, 2);
+ * VALUES ARRAY(SELECT A, B FROM TEST)
*
*/
public static final int SUBQUERY_IS_NOT_SINGLE_COLUMN = 90052;
@@ -991,6 +1126,12 @@ public class ErrorCode {
*/
public static final int UNSUPPORTED_CIPHER = 90055;
+ /**
+ * The error with code 90056 is thrown when trying to format a
+ * timestamp using TO_DATE and TO_TIMESTAMP with an invalid format.
+ */
+ public static final int INVALID_TO_DATE_FORMAT = 90056;
+
/**
* The error with code 90057 is thrown when
* trying to drop a constraint that does not exist.
@@ -1316,11 +1457,14 @@ public class ErrorCode {
/**
* The error with code 90085 is thrown when
* trying to manually drop an index that was generated by the system
- * because of a unique or referential constraint. To find out what
- * constraint causes the problem, run:
+ * because of a unique or referential constraint. To find
+ * the owner of the index without attempting to drop it, run
*
- * SELECT * FROM INFORMATION_SCHEMA.CONSTRAINTS
- * WHERE UNIQUE_INDEX_NAME = '<index name>';
+ * SELECT CONSTRAINT_SCHEMA, CONSTRAINT_NAME
+ * FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
+ * WHERE INDEX_SCHEMA = '<index schema>'
+ * AND INDEX_NAME = '<index name>'
+ * FETCH FIRST ROW ONLY
*
* Example of wrong usage:
*
@@ -1348,7 +1492,7 @@ public class ErrorCode {
/**
* The error with code 90087 is thrown when
- * the specified method was not found in the class.
+ * a method with matching number of arguments was not found in the class.
* Example:
*
* CREATE ALIAS TO_BINARY FOR "java.lang.Long.toBinaryString(long)";
@@ -1568,6 +1712,17 @@ public class ErrorCode {
*/
public static final int VIEW_IS_INVALID_2 = 90109;
+ /**
+ * The error with code 90110 is thrown when
+ * trying to compare values of incomparable data types.
+ * Example:
+ *
+ * CREATE TABLE test (id INT NOT NULL, name VARCHAR);
+ * select * from test where id = (1, 2);
+ *
+ */
+ public static final int TYPES_ARE_NOT_COMPARABLE_2 = 90110;
+
/**
* The error with code 90111 is thrown when
* an exception occurred while accessing a linked table.
@@ -1653,7 +1808,7 @@ public class ErrorCode {
* DROP TABLE INFORMATION_SCHEMA.SETTINGS;
*
*/
- public static final int CANNOT_DROP_TABLE_1 = 90118;
+ public static final int CANNOT_DROP_TABLE_1 = 90118;
/**
* The error with code 90119 is thrown when
@@ -1662,11 +1817,17 @@ public class ErrorCode {
* Example:
*
* CREATE DOMAIN INTEGER AS VARCHAR;
- * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
- * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
+ * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
+ * CREATE DOMAIN EMAIL AS VARCHAR CHECK LOCATE('@', VALUE) > 0;
*
*/
- public static final int USER_DATA_TYPE_ALREADY_EXISTS_1 = 90119;
+ public static final int DOMAIN_ALREADY_EXISTS_1 = 90119;
+
+ /**
+ * Deprecated since 1.4.198. Use {@link #DOMAIN_ALREADY_EXISTS_1} instead.
+ */
+ @Deprecated
+ public static final int USER_DATA_TYPE_ALREADY_EXISTS_1 = DOMAIN_ALREADY_EXISTS_1;
/**
* The error with code 90120 is thrown when
@@ -1676,7 +1837,13 @@ public class ErrorCode {
* DROP DOMAIN UNKNOWN;
*
*/
- public static final int USER_DATA_TYPE_NOT_FOUND_1 = 90120;
+ public static final int DOMAIN_NOT_FOUND_1 = 90120;
+
+ /**
+ * Deprecated since 1.4.198. Use {@link #DOMAIN_NOT_FOUND_1} instead.
+ */
+ @Deprecated
+ public static final int USER_DATA_TYPE_NOT_FOUND_1 = DOMAIN_NOT_FOUND_1;
/**
* The error with code 90121 is thrown when
@@ -1685,6 +1852,12 @@ public class ErrorCode {
*/
public static final int DATABASE_CALLED_AT_SHUTDOWN = 90121;
+ /**
+ * The error with code 90122 is thrown when
+ * WITH TIES clause is used without ORDER BY clause.
+ */
+ public static final int WITH_TIES_WITHOUT_ORDER_BY = 90122;
+
/**
* The error with code 90123 is thrown when
* trying mix regular parameters and indexed parameters in the same
@@ -1790,7 +1963,7 @@ public class ErrorCode {
* connections at the same time, or trying to insert two rows with the same
* key from two connections. Example:
*
- * jdbc:h2:~/test;MVCC=TRUE
+ * jdbc:h2:~/test
* Session 1:
* CREATE TABLE TEST(ID INT);
* INSERT INTO TEST VALUES(1);
@@ -1816,8 +1989,7 @@ public class ErrorCode {
/**
* The error with code 90133 is thrown when
* trying to change a specific database property while the database is
- * already open. The MVCC property needs to be set in the first connection
- * (in the connection opening the database) and can not be changed later on.
+ * already open.
*/
public static final int CANNOT_CHANGE_SETTING_WHEN_OPEN_1 = 90133;
@@ -1841,19 +2013,19 @@ public class ErrorCode {
/**
* The error with code 90136 is thrown when
- * executing a query that used an unsupported outer join condition.
+ * trying to reference a window that does not exist.
* Example:
*
- * SELECT * FROM DUAL A LEFT JOIN DUAL B ON B.X=(SELECT MAX(X) FROM DUAL);
+ * SELECT LEAD(X) OVER W FROM TEST;
*
*/
- public static final int UNSUPPORTED_OUTER_JOIN_CONDITION_1 = 90136;
+ public static final int WINDOW_NOT_FOUND_1 = 90136;
/**
* The error with code 90137 is thrown when
* trying to assign a value to something that is not a variable.
*
- * SELECT AMOUNT, SET(@V, IFNULL(@V, 0)+AMOUNT) FROM TEST;
+ * SELECT AMOUNT, SET(@V, COALESCE(@V, 0)+AMOUNT) FROM TEST;
*
*/
public static final int CAN_ONLY_ASSIGN_TO_VARIABLE_1 = 90137;
@@ -1898,7 +2070,6 @@ public class ErrorCode {
*/
public static final int RESULT_SET_READONLY = 90140;
-
/**
* The error with code 90141 is thrown when
* trying to change the java object serializer while there was already data
@@ -1913,8 +2084,163 @@ public class ErrorCode {
*/
public static final int STEP_SIZE_MUST_NOT_BE_ZERO = 90142;
+ /**
+ * The error with code 90143 is thrown when
+ * trying to fetch a row from the primary index and the row is not there.
+ */
+ public static final int ROW_NOT_FOUND_IN_PRIMARY_INDEX = 90143;
+
+ /**
+ * The error with code 90144 is thrown when
+ * a user tries to log in to a database with AUTHREALM set and
+ * the target database doesn't have an authenticator defined.
+ * The experimental authenticator feature can be enabled by:
+ *
+ *
+ * SET AUTHENTICATOR TRUE
+ *
+ */
+ public static final int AUTHENTICATOR_NOT_AVAILABLE = 90144;
- // next are 90051, 90056, 90110, 90122, 90143
+ /**
+ * The error with code 90145 is thrown when trying to execute a
+ * SELECT statement with non-window aggregates, DISTINCT, GROUP BY, or
+ * HAVING clauses together with FOR UPDATE clause.
+ *
+ *
+ * SELECT DISTINCT NAME FOR UPDATE;
+ * SELECT MAX(VALUE) FOR UPDATE;
+ *
+ */
+ public static final int FOR_UPDATE_IS_NOT_ALLOWED_IN_DISTINCT_OR_GROUPED_SELECT = 90145;
+
+ /**
+ * The error with code 90146 is thrown when trying to open a
+ * database that does not exist using the flag IFEXISTS=TRUE
+ *
+ * jdbc:h2:./database_that_does_not_exist
+ *
+ */
+ public static final int DATABASE_NOT_FOUND_WITH_IF_EXISTS_1 = 90146;
+
+ /**
+ * The error with code 90147 is thrown when trying to execute a
+ * statement which closes the transaction (such as commit and rollback) and
+ * autocommit mode is on.
+ *
+ * @see org.h2.engine.SysProperties#FORCE_AUTOCOMMIT_OFF_ON_COMMIT
+ */
+ public static final int METHOD_DISABLED_ON_AUTOCOMMIT_TRUE = 90147;
+
+ /**
+ * The error with code 90148 is thrown when trying to access
+ * the current value of a sequence before execution of NEXT VALUE FOR
+ * sequenceName in the current session. Example:
+ *
+ *
+ * SELECT CURRENT VALUE FOR SEQUENCE XYZ;
+ *
+ */
+ public static final int CURRENT_SEQUENCE_VALUE_IS_NOT_DEFINED_IN_SESSION_1 = 90148;
+
+ /**
+ * The error with code 90149 is thrown when trying to open a
+ * database that does not exist remotely without enabling remote database
+ * creation first.
+ *
+ * jdbc:h2:./database_that_does_not_exist
+ *
+ */
+ public static final int REMOTE_DATABASE_NOT_FOUND_1 = 90149;
+
+ /**
+ * The error with code 90150 is thrown when
+ * trying to use an invalid precision.
+ * Example:
+ *
+ * CREATE TABLE TABLE1 ( FAIL INTERVAL YEAR(20) );
+ *
+ */
+ public static final int INVALID_VALUE_PRECISION = 90150;
+
+ /**
+ * The error with code 90151 is thrown when
+ * trying to use an invalid scale or fractional seconds precision.
+ * Example:
+ *
+ * CREATE TABLE TABLE1 ( FAIL TIME(10) );
+ *
+ */
+ public static final int INVALID_VALUE_SCALE = 90151;
+
+ /**
+ * The error with code 90152 is thrown when trying to manually
+ * drop a unique or primary key constraint that is referenced by a foreign
+ * key constraint without a CASCADE clause.
+ *
+ *
+ * CREATE TABLE PARENT(ID INT CONSTRAINT P1 PRIMARY KEY);
+ * CREATE TABLE CHILD(ID INT CONSTRAINT P2 PRIMARY KEY, CHILD INT CONSTRAINT C REFERENCES PARENT);
+ * ALTER TABLE PARENT DROP CONSTRAINT P1 RESTRICT;
+ *
+ */
+ public static final int CONSTRAINT_IS_USED_BY_CONSTRAINT_2 = 90152;
+
+ /**
+ * The error with code 90153 is thrown when trying to reference
+ * a column of another data type when data types aren't comparable or don't
+ * have a session-independent compare order between each other.
+ *
+ *
+ * CREATE TABLE PARENT(T TIMESTAMP UNIQUE);
+ * CREATE TABLE CHILD(T TIMESTAMP WITH TIME ZONE REFERENCES PARENT(T));
+ *
+ */
+ public static final int UNCOMPARABLE_REFERENCED_COLUMN_2 = 90153;
+
+ /**
+ * The error with code 90154 is thrown when trying to assign a
+ * value to a generated column.
+ *
+ *
+ * CREATE TABLE TEST(A INT, B INT GENERATED ALWAYS AS (A + 1));
+ * INSERT INTO TEST(A, B) VALUES (1, 1);
+ *
+ */
+ public static final int GENERATED_COLUMN_CANNOT_BE_ASSIGNED_1 = 90154;
+
+ /**
+ * The error with code 90155
is thrown when trying to create a
+ * referential constraint that can update a referenced generated column.
+ *
+ *
+ * CREATE TABLE PARENT(ID INT PRIMARY KEY, K INT GENERATED ALWAYS AS (ID) UNIQUE);
+ * CREATE TABLE CHILD(ID INT PRIMARY KEY, P INT);
+ * ALTER TABLE CHILD ADD FOREIGN KEY(P) REFERENCES PARENT(K) ON DELETE SET NULL;
+ *
+ */
+ public static final int GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2 = 90155;
+
+ /**
+ * The error with code 90156
is thrown when trying to create a
+ * view or a table from a select and some expression doesn't have a column
+ * name or alias when it is required by a compatibility mode.
+ *
+ *
+ * SET MODE DB2;
+ * CREATE TABLE T1(A INT, B INT);
+ * CREATE TABLE T2 AS (SELECT A + B FROM T1) WITH DATA;
+ *
+ */
+ public static final int COLUMN_ALIAS_IS_NOT_SPECIFIED_1 = 90156;
+
+ /**
+ * The error with code 90157 is thrown when the integer
+ * index that is used in the GROUP BY is not in the SELECT list.
+ */
+ public static final int GROUP_BY_NOT_IN_THE_RESULT = 90157;
+
+ // next is 90158
private ErrorCode() {
// utility class
@@ -1922,6 +2248,8 @@ private ErrorCode() {
/**
* INTERNAL
+ * @param errorCode to check
+ * @return true if provided code is common, false otherwise
*/
public static boolean isCommon(int errorCode) {
// this list is sorted alphabetically
@@ -1940,6 +2268,8 @@ public static boolean isCommon(int errorCode) {
case SYNTAX_ERROR_2:
case TABLE_OR_VIEW_ALREADY_EXISTS_1:
case TABLE_OR_VIEW_NOT_FOUND_1:
+ case TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2:
+ case TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1:
case VALUE_TOO_LONG_2:
return true;
}
@@ -1948,6 +2278,8 @@ public static boolean isCommon(int errorCode) {
/**
* INTERNAL
+ * @param errorCode to get state for
+ * @return error state
*/
public static String getState(int errorCode) {
// To convert SQLState to error code, replace
@@ -1967,13 +2299,19 @@ public static String getState(int errorCode) {
// 21: cardinality violation
case COLUMN_COUNT_DOES_NOT_MATCH: return "21S02";
+ // 22: data exception
+ case ARRAY_ELEMENT_ERROR_2: return "2202E";
+
// 42: syntax error or access rule violation
case TABLE_OR_VIEW_ALREADY_EXISTS_1: return "42S01";
case TABLE_OR_VIEW_NOT_FOUND_1: return "42S02";
+ case TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2: return "42S03";
+ case TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1: return "42S04";
case INDEX_ALREADY_EXISTS_1: return "42S11";
case INDEX_NOT_FOUND_1: return "42S12";
case DUPLICATE_COLUMN_NAME_1: return "42S21";
case COLUMN_NOT_FOUND_1: return "42S22";
+ case IDENTICAL_EXPRESSIONS_SHOULD_BE_USED: return "42S31";
// 0A: feature not supported
@@ -1986,7 +2324,7 @@ public static String getState(int errorCode) {
case FEATURE_NOT_SUPPORTED_1: return "HYC00";
case LOCK_TIMEOUT_1: return "HYT00";
default:
- return "" + errorCode;
+ return Integer.toString(errorCode);
}
}
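
Applications normally meet these constants through SQLException.getErrorCode(), with getSQLState() returning the string produced by getState above. A short sketch that also exercises the two new table-not-found variants:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;
    import org.h2.api.ErrorCode;

    public class ErrorCodeExample {
        public static void main(String[] args) throws SQLException {
            try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
                    Statement stat = conn.createStatement()) {
                stat.execute("SELECT * FROM MISSING_TABLE");
            } catch (SQLException e) {
                switch (e.getErrorCode()) {
                // 42102, 42103 and 42104 are all "table not found" variants
                case ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1:
                case ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2:
                case ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1:
                    System.out.println("No such table, SQLSTATE " + e.getSQLState());
                    break;
                default:
                    throw e;
                }
            }
        }
    }
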
diff --git a/h2/src/main/org/h2/api/H2Type.java b/h2/src/main/org/h2/api/H2Type.java
new file mode 100644
index 0000000000..ecc61311e8
--- /dev/null
+++ b/h2/src/main/org/h2/api/H2Type.java
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.api;
+
+import java.sql.SQLType;
+
+import org.h2.value.ExtTypeInfoRow;
+import org.h2.value.TypeInfo;
+import org.h2.value.Value;
+
+/**
+ * Data types of H2.
+ */
+public final class H2Type implements SQLType {
+
+ // Character strings
+
+ /**
+ * The CHARACTER data type.
+ */
+ public static final H2Type CHAR = new H2Type(TypeInfo.getTypeInfo(Value.CHAR), "CHARACTER");
+
+ /**
+ * The CHARACTER VARYING data type.
+ */
+ public static final H2Type VARCHAR = new H2Type(TypeInfo.TYPE_VARCHAR, "CHARACTER VARYING");
+
+ /**
+ * The CHARACTER LARGE OBJECT data type.
+ */
+ public static final H2Type CLOB = new H2Type(TypeInfo.TYPE_CLOB, "CHARACTER LARGE OBJECT");
+
+ /**
+ * The VARCHAR_IGNORECASE data type.
+ */
+ public static final H2Type VARCHAR_IGNORECASE = new H2Type(TypeInfo.TYPE_VARCHAR_IGNORECASE, "VARCHAR_IGNORECASE");
+
+ // Binary strings
+
+ /**
+ * The BINARY data type.
+ */
+ public static final H2Type BINARY = new H2Type(TypeInfo.getTypeInfo(Value.BINARY), "BINARY");
+
+ /**
+ * The BINARY VARYING data type.
+ */
+ public static final H2Type VARBINARY = new H2Type(TypeInfo.TYPE_VARBINARY, "BINARY VARYING");
+
+ /**
+ * The BINARY LARGE OBJECT data type.
+ */
+ public static final H2Type BLOB = new H2Type(TypeInfo.TYPE_BLOB, "BINARY LARGE OBJECT");
+
+ // Boolean
+
+ /**
+ * The BOOLEAN data type
+ */
+ public static final H2Type BOOLEAN = new H2Type(TypeInfo.TYPE_BOOLEAN, "BOOLEAN");
+
+ // Exact numeric data types
+
+ /**
+ * The TINYINT data type.
+ */
+ public static final H2Type TINYINT = new H2Type(TypeInfo.TYPE_TINYINT, "TINYINT");
+
+ /**
+ * The SMALLINT data type.
+ */
+ public static final H2Type SMALLINT = new H2Type(TypeInfo.TYPE_SMALLINT, "SMALLINT");
+
+ /**
+ * The INTEGER data type.
+ */
+ public static final H2Type INTEGER = new H2Type(TypeInfo.TYPE_INTEGER, "INTEGER");
+
+ /**
+ * The BIGINT data type.
+ */
+ public static final H2Type BIGINT = new H2Type(TypeInfo.TYPE_BIGINT, "BIGINT");
+
+ /**
+ * The NUMERIC data type.
+ */
+ public static final H2Type NUMERIC = new H2Type(TypeInfo.TYPE_NUMERIC_FLOATING_POINT, "NUMERIC");
+
+ // Approximate numeric data types
+
+ /**
+ * The REAL data type.
+ */
+ public static final H2Type REAL = new H2Type(TypeInfo.TYPE_REAL, "REAL");
+
+ /**
+ * The DOUBLE PRECISION data type.
+ */
+ public static final H2Type DOUBLE_PRECISION = new H2Type(TypeInfo.TYPE_DOUBLE, "DOUBLE PRECISION");
+
+ // Decimal floating-point type
+
+ /**
+ * The DECFLOAT data type.
+ */
+ public static final H2Type DECFLOAT = new H2Type(TypeInfo.TYPE_DECFLOAT, "DECFLOAT");
+
+ // Date-time data types
+
+ /**
+ * The DATE data type.
+ */
+ public static final H2Type DATE = new H2Type(TypeInfo.TYPE_DATE, "DATE");
+
+ /**
+ * The TIME data type.
+ */
+ public static final H2Type TIME = new H2Type(TypeInfo.TYPE_TIME, "TIME");
+
+ /**
+ * The TIME WITH TIME ZONE data type.
+ */
+ public static final H2Type TIME_WITH_TIME_ZONE = new H2Type(TypeInfo.TYPE_TIME_TZ, "TIME WITH TIME ZONE");
+
+ /**
+ * The TIMESTAMP data type.
+ */
+ public static final H2Type TIMESTAMP = new H2Type(TypeInfo.TYPE_TIMESTAMP, "TIMESTAMP");
+
+ /**
+ * The TIMESTAMP WITH TIME ZONE data type.
+ */
+ public static final H2Type TIMESTAMP_WITH_TIME_ZONE = new H2Type(TypeInfo.TYPE_TIMESTAMP_TZ,
+ "TIMESTAMP WITH TIME ZONE");
+
+ // Intervals
+
+ /**
+ * The INTERVAL YEAR data type.
+ */
+ public static final H2Type INTERVAL_YEAR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_YEAR), "INTERVAL_YEAR");
+
+ /**
+ * The INTERVAL MONTH data type.
+ */
+ public static final H2Type INTERVAL_MONTH = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_MONTH),
+ "INTERVAL_MONTH");
+
+ /**
+ * The INTERVAL DAY data type.
+ */
+ public static final H2Type INTERVAL_DAY = new H2Type(TypeInfo.TYPE_INTERVAL_DAY, "INTERVAL_DAY");
+
+ /**
+ * The INTERVAL HOUR data type.
+ */
+ public static final H2Type INTERVAL_HOUR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_HOUR), "INTERVAL_HOUR");
+
+ /**
+ * The INTERVAL MINUTE data type.
+ */
+ public static final H2Type INTERVAL_MINUTE = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE),
+ "INTERVAL_MINUTE");
+
+ /**
+ * The INTERVAL SECOND data type.
+ */
+ public static final H2Type INTERVAL_SECOND = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_SECOND),
+ "INTERVAL_SECOND");
+
+ /**
+ * The INTERVAL YEAR TO MONTH data type.
+ */
+ public static final H2Type INTERVAL_YEAR_TO_MONTH = new H2Type(TypeInfo.TYPE_INTERVAL_YEAR_TO_MONTH,
+ "INTERVAL_YEAR_TO_MONTH");
+
+ /**
+ * The INTERVAL DAY TO HOUR data type.
+ */
+ public static final H2Type INTERVAL_DAY_TO_HOUR = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_HOUR),
+ "INTERVAL_DAY_TO_HOUR");
+
+ /**
+ * The INTERVAL DAY TO MINUTE data type.
+ */
+ public static final H2Type INTERVAL_DAY_TO_MINUTE = new H2Type(TypeInfo.getTypeInfo(Value.INTERVAL_DAY_TO_MINUTE),
+ "INTERVAL_DAY_TO_MINUTE");
+
+ /**
+ * The INTERVAL DAY TO SECOND data type.
+ */
+ public static final H2Type INTERVAL_DAY_TO_SECOND = new H2Type(TypeInfo.TYPE_INTERVAL_DAY_TO_SECOND,
+ "INTERVAL_DAY_TO_SECOND");
+
+ /**
+ * The INTERVAL HOUR TO MINUTE data type.
+ */
+ public static final H2Type INTERVAL_HOUR_TO_MINUTE = new H2Type( //
+ TypeInfo.getTypeInfo(Value.INTERVAL_HOUR_TO_MINUTE), "INTERVAL_HOUR_TO_MINUTE");
+
+ /**
+ * The INTERVAL HOUR TO SECOND data type.
+ */
+ public static final H2Type INTERVAL_HOUR_TO_SECOND = new H2Type(TypeInfo.TYPE_INTERVAL_HOUR_TO_SECOND,
+ "INTERVAL_HOUR_TO_SECOND");
+
+ /**
+ * The INTERVAL MINUTE TO SECOND data type.
+ */
+ public static final H2Type INTERVAL_MINUTE_TO_SECOND = new H2Type(
+ TypeInfo.getTypeInfo(Value.INTERVAL_MINUTE_TO_SECOND), "INTERVAL_MINUTE_TO_SECOND");
+
+ // Other JDBC
+
+ /**
+ * The JAVA_OBJECT data type.
+ */
+ public static final H2Type JAVA_OBJECT = new H2Type(TypeInfo.TYPE_JAVA_OBJECT, "JAVA_OBJECT");
+
+ // Other non-standard
+
+ /**
+ * The ENUM data type.
+ */
+ public static final H2Type ENUM = new H2Type(TypeInfo.TYPE_ENUM_UNDEFINED, "ENUM");
+
+ /**
+ * The GEOMETRY data type.
+ */
+ public static final H2Type GEOMETRY = new H2Type(TypeInfo.TYPE_GEOMETRY, "GEOMETRY");
+
+ /**
+ * The JSON data type.
+ */
+ public static final H2Type JSON = new H2Type(TypeInfo.TYPE_JSON, "JSON");
+
+ /**
+ * The UUID data type.
+ */
+ public static final H2Type UUID = new H2Type(TypeInfo.TYPE_UUID, "UUID");
+
+ // Collections
+
+ // Use arrayOf() for ARRAY
+
+ // Use row() for ROW
+
+ /**
+ * Returns ARRAY data type with the specified component type.
+ *
+ * @param componentType
+ * the type of elements
+ * @return ARRAY data type
+ */
+ public static H2Type array(H2Type componentType) {
+ return new H2Type(TypeInfo.getTypeInfo(Value.ARRAY, -1L, -1, componentType.typeInfo),
+ "array(" + componentType.field + ')');
+ }
+
+ /**
+ * Returns ROW data type with specified types of fields and default names.
+ *
+ * @param fieldTypes
+ * the type of fields
+ * @return ROW data type
+ */
+ public static H2Type row(H2Type... fieldTypes) {
+ int degree = fieldTypes.length;
+ TypeInfo[] row = new TypeInfo[degree];
+ StringBuilder builder = new StringBuilder("row(");
+ for (int i = 0; i < degree; i++) {
+ H2Type t = fieldTypes[i];
+ row[i] = t.typeInfo;
+ if (i > 0) {
+ builder.append(", ");
+ }
+ builder.append(t.field);
+ }
+ return new H2Type(TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(row)),
+ builder.append(')').toString());
+ }
+
+ private TypeInfo typeInfo;
+
+ private String field;
+
+ private H2Type(TypeInfo typeInfo, String field) {
+ this.typeInfo = typeInfo;
+ this.field = "H2Type." + field;
+ }
+
+ @Override
+ public String getName() {
+ return typeInfo.toString();
+ }
+
+ @Override
+ public String getVendor() {
+ return "com.h2database";
+ }
+
+ /**
+ * Returns the vendor specific type number for the data type. The returned
+ * value is only valid for the current version of H2.
+ *
+ * @return the vendor specific data type
+ */
+ @Override
+ public Integer getVendorTypeNumber() {
+ return typeInfo.getValueType();
+ }
+
+ @Override
+ public String toString() {
+ return field;
+ }
+
+}
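
H2Type values are ordinary java.sql.SQLType instances, and the array and row factories compose; the toString output below follows directly from the field concatenation in the constructor. A short sketch:

    import org.h2.api.H2Type;

    public class H2TypeExample {
        public static void main(String[] args) {
            H2Type intArray = H2Type.array(H2Type.INTEGER);
            H2Type pair = H2Type.row(H2Type.VARCHAR, intArray);
            // Prints "H2Type.row(H2Type.VARCHAR, H2Type.array(H2Type.INTEGER))"
            System.out.println(pair);
            // Vendor "com.h2database" plus the vendor-specific type number
            System.out.println(pair.getVendor() + " #" + pair.getVendorTypeNumber());
        }
    }
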
diff --git a/h2/src/main/org/h2/api/Interval.java b/h2/src/main/org/h2/api/Interval.java
new file mode 100644
index 0000000000..42024b9466
--- /dev/null
+++ b/h2/src/main/org/h2/api/Interval.java
@@ -0,0 +1,635 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.api;
+
+import static org.h2.util.DateTimeUtils.NANOS_PER_MINUTE;
+import static org.h2.util.DateTimeUtils.NANOS_PER_SECOND;
+
+import org.h2.message.DbException;
+import org.h2.util.IntervalUtils;
+
+/**
+ * INTERVAL representation for result sets.
+ */
+public final class Interval {
+
+ private final IntervalQualifier qualifier;
+
+ /**
+ * {@code false} for zero or positive intervals, {@code true} for negative
+ * intervals.
+ */
+ private final boolean negative;
+
+ /**
+ * Non-negative long with value of leading field. For INTERVAL SECOND
+ * contains only integer part of seconds.
+ */
+ private final long leading;
+
+ /**
+ * Non-negative long with combined value of all remaining fields, or 0 for
+ * single-field intervals, with the exception of INTERVAL SECOND, which uses
+ * this field to store the fractional part of seconds measured in nanoseconds.
+ */
+ private final long remaining;
+
+ /**
+ * Creates a new INTERVAL YEAR.
+ *
+ * @param years
+ * years, |years|<10^18
+ * @return INTERVAL YEAR
+ */
+ public static Interval ofYears(long years) {
+ return new Interval(IntervalQualifier.YEAR, years < 0, Math.abs(years), 0);
+ }
+
+ /**
+ * Creates a new INTERVAL MONTH.
+ *
+ * @param months
+ * months, |months|<10^18
+ * @return INTERVAL MONTH
+ */
+ public static Interval ofMonths(long months) {
+ return new Interval(IntervalQualifier.MONTH, months < 0, Math.abs(months), 0);
+ }
+
+ /**
+ * Creates a new INTERVAL DAY.
+ *
+ * @param days
+ * days, |days|<10^18
+ * @return INTERVAL DAY
+ */
+ public static Interval ofDays(long days) {
+ return new Interval(IntervalQualifier.DAY, days < 0, Math.abs(days), 0);
+ }
+
+ /**
+ * Creates a new INTERVAL HOUR.
+ *
+ * @param hours
+ * hours, |hours|<10^18
+ * @return INTERVAL HOUR
+ */
+ public static Interval ofHours(long hours) {
+ return new Interval(IntervalQualifier.HOUR, hours < 0, Math.abs(hours), 0);
+ }
+
+ /**
+ * Creates a new INTERVAL MINUTE.
+ *
+ * @param minutes
+ * minutes, |minutes|<10^18
+ * @return INTERVAL MINUTE
+ */
+ public static Interval ofMinutes(long minutes) {
+ return new Interval(IntervalQualifier.MINUTE, minutes < 0, Math.abs(minutes), 0);
+ }
+
+ /**
+ * Creates a new INTERVAL SECOND.
+ *
+ * @param seconds
+ * seconds, |seconds|<10^18
+ * @return INTERVAL SECOND
+ */
+ public static Interval ofSeconds(long seconds) {
+ return new Interval(IntervalQualifier.SECOND, seconds < 0, Math.abs(seconds), 0);
+ }
+
+ /**
+ * Creates a new INTERVAL SECOND.
+ *
+ *
+ * If both arguments are not equal to zero they should have the same sign.
+ *
+ *
+ * @param seconds
+ * seconds, |seconds|<10^18
+ * @param nanos
+ * nanoseconds, |nanos|<1,000,000,000
+ * @return INTERVAL SECOND
+ */
+ public static Interval ofSeconds(long seconds, int nanos) {
+ // Interval is negative if any field is negative
+ boolean negative = (seconds | nanos) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (seconds > 0 || nanos > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ seconds = -seconds;
+ nanos = -nanos;
+ // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by
+ // constructor
+ }
+ return new Interval(IntervalQualifier.SECOND, negative, seconds, nanos);
+ }
+
+ /**
+ * Creates a new INTERVAL SECOND.
+ *
+ * @param nanos
+ * nanoseconds (including seconds)
+ * @return INTERVAL SECOND
+ */
+ public static Interval ofNanos(long nanos) {
+ boolean negative = nanos < 0;
+ if (negative) {
+ nanos = -nanos;
+ if (nanos < 0) {
+ // Long.MIN_VALUE = -9_223_372_036_854_775_808L
+ return new Interval(IntervalQualifier.SECOND, true, 9_223_372_036L, 854_775_808);
+ }
+ }
+ return new Interval(IntervalQualifier.SECOND, negative, nanos / NANOS_PER_SECOND, nanos % NANOS_PER_SECOND);
+ }
+
+ /**
+ * Creates a new INTERVAL YEAR TO MONTH.
+ *
+ *
+ * If both arguments are not equal to zero they should have the same sign.
+ *
+ *
+ * @param years
+ * years, |years|<10^18
+ * @param months
+ * months, |months|<12
+ * @return INTERVAL YEAR TO MONTH
+ */
+ public static Interval ofYearsMonths(long years, int months) {
+ // Interval is negative if any field is negative
+ boolean negative = (years | months) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (years > 0 || months > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ years = -years;
+ months = -months;
+ // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by
+ // constructor
+ }
+ return new Interval(IntervalQualifier.YEAR_TO_MONTH, negative, years, months);
+ }
+
+ /**
+ * Creates a new INTERVAL DAY TO HOUR.
+ *
+ *
+ * If both arguments are not equal to zero they should have the same sign.
+ *
+ *
+ * @param days
+ * days, |days|<10^18
+ * @param hours
+ * hours, |hours|<24
+ * @return INTERVAL DAY TO HOUR
+ */
+ public static Interval ofDaysHours(long days, int hours) {
+ // Interval is negative if any field is negative
+ boolean negative = (days | hours) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (days > 0 || hours > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ days = -days;
+ hours = -hours;
+ // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by
+ // constructor
+ }
+ return new Interval(IntervalQualifier.DAY_TO_HOUR, negative, days, hours);
+ }
+
+ /**
+ * Creates a new INTERVAL DAY TO MINUTE.
+ *
+ *
+ * Non-zero arguments should have the same sign.
+ *
+ *
+ * @param days
+ * days, |days|<10^18
+ * @param hours
+ * hours, |hours|<24
+ * @param minutes
+ * minutes, |minutes|<60
+ * @return INTERVAL DAY TO MINUTE
+ */
+ public static Interval ofDaysHoursMinutes(long days, int hours, int minutes) {
+ // Interval is negative if any field is negative
+ boolean negative = (days | hours | minutes) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (days > 0 || hours > 0 || minutes > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ days = -days;
+ hours = -hours;
+ minutes = -minutes;
+ if ((hours | minutes) < 0) {
+ // Integer.MIN_VALUE
+ throw new IllegalArgumentException();
+ }
+ // days = Long.MIN_VALUE will be rejected by constructor
+ }
+ // Check only minutes.
+ // Overflow in days or hours will be detected by constructor
+ if (minutes >= 60) {
+ throw new IllegalArgumentException();
+ }
+ return new Interval(IntervalQualifier.DAY_TO_MINUTE, negative, days, hours * 60L + minutes);
+ }
+
+ /**
+ * Creates a new INTERVAL DAY TO SECOND.
+ *
+ *
+ * Non-zero arguments should have the same sign.
+ *
+ *
+ * @param days
+ * days, |days|<10^18
+ * @param hours
+ * hours, |hours|<24
+ * @param minutes
+ * minutes, |minutes|<60
+ * @param seconds
+ * seconds, |seconds|<60
+ * @return INTERVAL DAY TO SECOND
+ */
+ public static Interval ofDaysHoursMinutesSeconds(long days, int hours, int minutes, int seconds) {
+ return ofDaysHoursMinutesNanos(days, hours, minutes, seconds * NANOS_PER_SECOND);
+ }
+
+ /**
+ * Creates a new INTERVAL DAY TO SECOND.
+ *
+ *
+ * Non-zero arguments should have the same sign.
+ *
+ *
+ * @param days
+ * days, |days|<10^18
+ * @param hours
+ * hours, |hours|<24
+ * @param minutes
+ * minutes, |minutes|<60
+ * @param nanos
+ * nanoseconds, |nanos|<60,000,000,000
+ * @return INTERVAL DAY TO SECOND
+ */
+ public static Interval ofDaysHoursMinutesNanos(long days, int hours, int minutes, long nanos) {
+ // Interval is negative if any field is negative
+ boolean negative = (days | hours | minutes | nanos) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (days > 0 || hours > 0 || minutes > 0 || nanos > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ days = -days;
+ hours = -hours;
+ minutes = -minutes;
+ nanos = -nanos;
+ if ((hours | minutes | nanos) < 0) {
+ // Integer.MIN_VALUE, Long.MIN_VALUE
+ throw new IllegalArgumentException();
+ }
+ // days = Long.MIN_VALUE will be rejected by constructor
+ }
+ // Check only minutes and nanoseconds.
+ // Overflow in days or hours will be detected by constructor
+ if (minutes >= 60 || nanos >= NANOS_PER_MINUTE) {
+ throw new IllegalArgumentException();
+ }
+ return new Interval(IntervalQualifier.DAY_TO_SECOND, negative, days,
+ (hours * 60L + minutes) * NANOS_PER_MINUTE + nanos);
+ }
+
+ /**
+ * Creates a new INTERVAL HOUR TO MINUTE.
+ *
+ *
+ * If both arguments are not equal to zero they should have the same sign.
+ *
+ *
+ * @param hours
+ * hours, |hours|<10^18
+ * @param minutes
+ * minutes, |minutes|<60
+ * @return INTERVAL HOUR TO MINUTE
+ */
+ public static Interval ofHoursMinutes(long hours, int minutes) {
+ // Interval is negative if any field is negative
+ boolean negative = (hours | minutes) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (hours > 0 || minutes > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ hours = -hours;
+ minutes = -minutes;
+ // Long.MIN_VALUE and Integer.MIN_VALUE will be rejected by
+ // constructor
+ }
+ return new Interval(IntervalQualifier.HOUR_TO_MINUTE, negative, hours, minutes);
+ }
+
+ /**
+ * Creates a new INTERVAL HOUR TO SECOND.
+ *
+ *
+ * Non-zero arguments should have the same sign.
+ *
+ *
+ * @param hours
+ * hours, |hours|<10^18
+ * @param minutes
+ * minutes, |minutes|<60
+ * @param seconds
+ * seconds, |seconds|<60
+ * @return INTERVAL HOUR TO SECOND
+ */
+ public static Interval ofHoursMinutesSeconds(long hours, int minutes, int seconds) {
+ return ofHoursMinutesNanos(hours, minutes, seconds * NANOS_PER_SECOND);
+ }
+
+ /**
+ * Creates a new INTERVAL HOUR TO SECOND.
+ *
+ *
+ * Non-zero arguments should have the same sign.
+ *
+ *
+ * @param hours
+ * hours, |hours|<10^18
+ * @param minutes
+ * minutes, |minutes|<60
+ * @param nanos
+ * nanoseconds, |nanos|<60,000,000,000
+ * @return INTERVAL HOUR TO SECOND
+ */
+ public static Interval ofHoursMinutesNanos(long hours, int minutes, long nanos) {
+ // Interval is negative if any field is negative
+ boolean negative = (hours | minutes | nanos) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (hours > 0 || minutes > 0 || nanos > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ hours = -hours;
+ minutes = -minutes;
+ nanos = -nanos;
+ if ((minutes | nanos) < 0) {
+ // Integer.MIN_VALUE, Long.MIN_VALUE
+ throw new IllegalArgumentException();
+ }
+ // hours = Long.MIN_VALUE will be rejected by constructor
+ }
+ // Check only nanoseconds.
+ // Overflow in hours or minutes will be detected by constructor
+ if (nanos >= NANOS_PER_MINUTE) {
+ throw new IllegalArgumentException();
+ }
+ return new Interval(IntervalQualifier.HOUR_TO_SECOND, negative, hours, minutes * NANOS_PER_MINUTE + nanos);
+ }
+
+ /**
+ * Creates a new INTERVAL MINUTE TO SECOND.
+ *
+ *
+ * If both arguments are not equal to zero they should have the same sign.
+ *
+ *
+ * @param minutes
+ * minutes, |minutes|<10^18
+ * @param seconds
+ * seconds, |seconds|<60
+ * @return INTERVAL MINUTE TO SECOND
+ */
+ public static Interval ofMinutesSeconds(long minutes, int seconds) {
+ return ofMinutesNanos(minutes, seconds * NANOS_PER_SECOND);
+ }
+
+ /**
+ * Creates a new INTERVAL MINUTE TO SECOND.
+ *
+ *
+ * If both arguments are not equal to zero they should have the same sign.
+ *
+ *
+ * @param minutes
+ * minutes, |minutes|<10^18
+ * @param nanos
+ * nanoseconds, |nanos|<60,000,000,000
+ * @return INTERVAL MINUTE TO SECOND
+ */
+ public static Interval ofMinutesNanos(long minutes, long nanos) {
+ // Interval is negative if any field is negative
+ boolean negative = (minutes | nanos) < 0;
+ if (negative) {
+ // Ensure that all fields are negative or zero
+ if (minutes > 0 || nanos > 0) {
+ throw new IllegalArgumentException();
+ }
+ // Make them positive
+ minutes = -minutes;
+ nanos = -nanos;
+ // Long.MIN_VALUE will be rejected by constructor
+ }
+ return new Interval(IntervalQualifier.MINUTE_TO_SECOND, negative, minutes, nanos);
+ }
+
+ /**
+ * Creates a new interval. Do not use this constructor, use static methods
+ * instead.
+ *
+ * @param qualifier
+ * qualifier
+ * @param negative
+ * whether interval is negative
+ * @param leading
+ * value of leading field
+ * @param remaining
+ * combined value of all remaining fields
+ */
+ public Interval(IntervalQualifier qualifier, boolean negative, long leading, long remaining) {
+ this.qualifier = qualifier;
+ try {
+ this.negative = IntervalUtils.validateInterval(qualifier, negative, leading, remaining);
+ } catch (DbException e) {
+ throw new IllegalArgumentException();
+ }
+ this.leading = leading;
+ this.remaining = remaining;
+ }
+
+ /**
+ * Returns qualifier of this interval.
+ *
+ * @return qualifier
+ */
+ public IntervalQualifier getQualifier() {
+ return qualifier;
+ }
+
+ /**
+ * Returns whether the interval is negative.
+ *
+ * @return whether the interval is negative
+ */
+ public boolean isNegative() {
+ return negative;
+ }
+
+ /**
+ * Returns value of leading field of this interval. For {@code SECOND}
+ * intervals returns integer part of seconds.
+ *
+ * @return value of leading field
+ */
+ public long getLeading() {
+ return leading;
+ }
+
+ /**
+ * Returns combined value of remaining fields of this interval. For
+ * {@code SECOND} intervals returns nanoseconds.
+ *
+ * @return combined value of remaining fields
+ */
+ public long getRemaining() {
+ return remaining;
+ }
+
+ /**
+ * Returns years value, if any.
+ *
+ * @return years, or 0
+ */
+ public long getYears() {
+ return IntervalUtils.yearsFromInterval(qualifier, negative, leading, remaining);
+ }
+
+ /**
+ * Returns months value, if any.
+ *
+ * @return months, or 0
+ */
+ public long getMonths() {
+ return IntervalUtils.monthsFromInterval(qualifier, negative, leading, remaining);
+ }
+
+ /**
+ * Returns days value, if any.
+ *
+ * @return days, or 0
+ */
+ public long getDays() {
+ return IntervalUtils.daysFromInterval(qualifier, negative, leading, remaining);
+ }
+
+ /**
+ * Returns hours value, if any.
+ *
+ * @return hours, or 0
+ */
+ public long getHours() {
+ return IntervalUtils.hoursFromInterval(qualifier, negative, leading, remaining);
+ }
+
+ /**
+ * Returns minutes value, if any.
+ *
+ * @return minutes, or 0
+ */
+ public long getMinutes() {
+ return IntervalUtils.minutesFromInterval(qualifier, negative, leading, remaining);
+ }
+
+ /**
+ * Returns value of integer part of seconds, if any.
+ *
+ * @return seconds, or 0
+ */
+ public long getSeconds() {
+ if (qualifier == IntervalQualifier.SECOND) {
+ return negative ? -leading : leading;
+ }
+ return getSecondsAndNanos() / NANOS_PER_SECOND;
+ }
+
+ /**
+ * Returns value of fractional part of seconds (in nanoseconds), if any.
+ *
+ * @return nanoseconds, or 0
+ */
+ public long getNanosOfSecond() {
+ if (qualifier == IntervalQualifier.SECOND) {
+ return negative ? -remaining : remaining;
+ }
+ return getSecondsAndNanos() % NANOS_PER_SECOND;
+ }
+
+ /**
+ * Returns seconds value measured in nanoseconds, if any.
+ *
+ *
+ * This method returns a long value that cannot fit all possible values of
+ * INTERVAL SECOND. For very large intervals of this type use
+ * {@link #getSeconds()} and {@link #getNanosOfSecond()} instead. This
+ * method can be safely used for intervals of other day-time types.
+ *
+ *
+ * @return nanoseconds (including seconds), or 0
+ */
+ public long getSecondsAndNanos() {
+ return IntervalUtils.nanosFromInterval(qualifier, negative, leading, remaining);
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + qualifier.hashCode();
+ result = prime * result + (negative ? 1231 : 1237);
+ result = prime * result + (int) (leading ^ leading >>> 32);
+ result = prime * result + (int) (remaining ^ remaining >>> 32);
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (!(obj instanceof Interval)) {
+ return false;
+ }
+ Interval other = (Interval) obj;
+ return qualifier == other.qualifier && negative == other.negative && leading == other.leading
+ && remaining == other.remaining;
+ }
+
+ @Override
+ public String toString() {
+ return IntervalUtils.appendInterval(new StringBuilder(), getQualifier(), negative, leading, remaining)
+ .toString();
+ }
+
+}
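// Illustrative usage sketch (not part of the patch): constructing an interval
// via the public constructor above and reading it back through the documented
// getters. Expected values in the comments follow from the Javadoc.
import org.h2.api.Interval;
import org.h2.api.IntervalQualifier;

class IntervalUsageSketch {
    public static void main(String[] args) {
        // INTERVAL '-5:30' MINUTE TO SECOND:
        // leading = minutes, remaining = combined seconds as nanoseconds
        Interval interval = new Interval(IntervalQualifier.MINUTE_TO_SECOND, true, 5, 30_000_000_000L);
        System.out.println(interval.getQualifier()); // MINUTE TO SECOND
        System.out.println(interval.isNegative());   // true
        System.out.println(interval.getLeading());   // 5
        System.out.println(interval.getRemaining()); // 30000000000
    }
}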
diff --git a/h2/src/main/org/h2/api/IntervalQualifier.java b/h2/src/main/org/h2/api/IntervalQualifier.java
new file mode 100644
index 0000000000..1772d1790e
--- /dev/null
+++ b/h2/src/main/org/h2/api/IntervalQualifier.java
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.api;
+
+/**
+ * Interval qualifier.
+ */
+public enum IntervalQualifier {
+
+ /**
+ * {@code YEAR}
+ */
+ YEAR,
+
+ /**
+ * {@code MONTH}
+ */
+ MONTH,
+
+ /**
+ * {@code DAY}
+ */
+ DAY,
+
+ /**
+ * {@code HOUR}
+ */
+ HOUR,
+
+ /**
+ * {@code MINUTE}
+ */
+ MINUTE,
+
+ /**
+ * {@code SECOND}
+ */
+ SECOND,
+
+ /**
+ * {@code YEAR TO MONTH}
+ */
+ YEAR_TO_MONTH,
+
+ /**
+ * {@code DAY TO HOUR}
+ */
+ DAY_TO_HOUR,
+
+ /**
+ * {@code DAY TO MINUTE}
+ */
+ DAY_TO_MINUTE,
+
+ /**
+ * {@code DAY TO SECOND}
+ */
+ DAY_TO_SECOND,
+
+ /**
+ * {@code HOUR TO MINUTE}
+ */
+ HOUR_TO_MINUTE,
+
+ /**
+ * {@code HOUR TO SECOND}
+ */
+ HOUR_TO_SECOND,
+
+ /**
+ * {@code MINUTE TO SECOND}
+ */
+ MINUTE_TO_SECOND;
+
+ private final String string;
+
+ /**
+ * Returns the interval qualifier with the specified ordinal value.
+ *
+ * @param ordinal
+ * Java ordinal value (0-based)
+ * @return interval qualifier with the specified ordinal value
+ */
+ public static IntervalQualifier valueOf(int ordinal) {
+ switch (ordinal) {
+ case 0:
+ return YEAR;
+ case 1:
+ return MONTH;
+ case 2:
+ return DAY;
+ case 3:
+ return HOUR;
+ case 4:
+ return MINUTE;
+ case 5:
+ return SECOND;
+ case 6:
+ return YEAR_TO_MONTH;
+ case 7:
+ return DAY_TO_HOUR;
+ case 8:
+ return DAY_TO_MINUTE;
+ case 9:
+ return DAY_TO_SECOND;
+ case 10:
+ return HOUR_TO_MINUTE;
+ case 11:
+ return HOUR_TO_SECOND;
+ case 12:
+ return MINUTE_TO_SECOND;
+ default:
+ throw new IllegalArgumentException();
+ }
+ }
+
+ private IntervalQualifier() {
+ string = name().replace('_', ' ').intern();
+ }
+
+ /**
+ * Returns whether interval with this qualifier is a year-month interval.
+ *
+ * @return whether interval with this qualifier is a year-month interval
+ */
+ public boolean isYearMonth() {
+ return this == YEAR || this == MONTH || this == YEAR_TO_MONTH;
+ }
+
+ /**
+ * Returns whether interval with this qualifier is a day-time interval.
+ *
+ * @return whether interval with this qualifier is a day-time interval
+ */
+ public boolean isDayTime() {
+ return !isYearMonth();
+ }
+
+ /**
+ * Returns whether interval with this qualifier has years.
+ *
+ * @return whether interval with this qualifier has years
+ */
+ public boolean hasYears() {
+ return this == YEAR || this == YEAR_TO_MONTH;
+ }
+
+ /**
+ * Returns whether interval with this qualifier has months.
+ *
+ * @return whether interval with this qualifier has months
+ */
+ public boolean hasMonths() {
+ return this == MONTH || this == YEAR_TO_MONTH;
+ }
+
+ /**
+ * Returns whether interval with this qualifier has days.
+ *
+ * @return whether interval with this qualifier has days
+ */
+ public boolean hasDays() {
+ switch (this) {
+ case DAY:
+ case DAY_TO_HOUR:
+ case DAY_TO_MINUTE:
+ case DAY_TO_SECOND:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * Returns whether interval with this qualifier has hours.
+ *
+ * @return whether interval with this qualifier has hours
+ */
+ public boolean hasHours() {
+ switch (this) {
+ case HOUR:
+ case DAY_TO_HOUR:
+ case DAY_TO_MINUTE:
+ case DAY_TO_SECOND:
+ case HOUR_TO_MINUTE:
+ case HOUR_TO_SECOND:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * Returns whether interval with this qualifier has minutes.
+ *
+ * @return whether interval with this qualifier has minutes
+ */
+ public boolean hasMinutes() {
+ switch (this) {
+ case MINUTE:
+ case DAY_TO_MINUTE:
+ case DAY_TO_SECOND:
+ case HOUR_TO_MINUTE:
+ case HOUR_TO_SECOND:
+ case MINUTE_TO_SECOND:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * Returns whether interval with this qualifier has seconds.
+ *
+ * @return whether interval with this qualifier has seconds
+ */
+ public boolean hasSeconds() {
+ switch (this) {
+ case SECOND:
+ case DAY_TO_SECOND:
+ case HOUR_TO_SECOND:
+ case MINUTE_TO_SECOND:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /**
+ * Returns whether interval with this qualifier has multiple fields.
+ *
+ * @return whether interval with this qualifier has multiple fields
+ */
+ public boolean hasMultipleFields() {
+ return ordinal() > 5;
+ }
+
+ @Override
+ public String toString() {
+ return string;
+ }
+
+ /**
+ * Returns full type name.
+ *
+ * @param precision precision, or {@code -1}
+ * @param scale fractional seconds precision, or {@code -1}
+ * @return full type name
+ */
+ public String getTypeName(int precision, int scale) {
+ return getTypeName(new StringBuilder(), precision, scale, false).toString();
+ }
+
+ /**
+ * Appends full type name to the specified string builder.
+ *
+ * @param builder string builder
+ * @param precision precision, or {@code -1}
+ * @param scale fractional seconds precision, or {@code -1}
+ * @param qualifierOnly if {@code true}, don't add the INTERVAL prefix
+ * @return the specified string builder
+ */
+ public StringBuilder getTypeName(StringBuilder builder, int precision, int scale, boolean qualifierOnly) {
+ if (!qualifierOnly) {
+ builder.append("INTERVAL ");
+ }
+ switch (this) {
+ case YEAR:
+ case MONTH:
+ case DAY:
+ case HOUR:
+ case MINUTE:
+ builder.append(string);
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ break;
+ case SECOND:
+ builder.append(string);
+ if (precision > 0 || scale >= 0) {
+ builder.append('(').append(precision > 0 ? precision : 2);
+ if (scale >= 0) {
+ builder.append(", ").append(scale);
+ }
+ builder.append(')');
+ }
+ break;
+ case YEAR_TO_MONTH:
+ builder.append("YEAR");
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ builder.append(" TO MONTH");
+ break;
+ case DAY_TO_HOUR:
+ builder.append("DAY");
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ builder.append(" TO HOUR");
+ break;
+ case DAY_TO_MINUTE:
+ builder.append("DAY");
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ builder.append(" TO MINUTE");
+ break;
+ case DAY_TO_SECOND:
+ builder.append("DAY");
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ builder.append(" TO SECOND");
+ if (scale >= 0) {
+ builder.append('(').append(scale).append(')');
+ }
+ break;
+ case HOUR_TO_MINUTE:
+ builder.append("HOUR");
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ builder.append(" TO MINUTE");
+ break;
+ case HOUR_TO_SECOND:
+ builder.append("HOUR");
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ builder.append(" TO SECOND");
+ if (scale >= 0) {
+ builder.append('(').append(scale).append(')');
+ }
+ break;
+ case MINUTE_TO_SECOND:
+ builder.append("MINUTE");
+ if (precision > 0) {
+ builder.append('(').append(precision).append(')');
+ }
+ builder.append(" TO SECOND");
+ if (scale >= 0) {
+ builder.append('(').append(scale).append(')');
+ }
+ }
+ return builder;
+ }
+
+}
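// Illustrative sketch (not part of the patch): deriving SQL type names and
// field availability from a qualifier, per the methods above.
import org.h2.api.IntervalQualifier;

class IntervalQualifierSketch {
    public static void main(String[] args) {
        // precision applies to the leading field, scale to fractional seconds
        System.out.println(IntervalQualifier.DAY_TO_SECOND.getTypeName(2, 6));
        // -> INTERVAL DAY(2) TO SECOND(6)
        System.out.println(IntervalQualifier.MINUTE.getTypeName(-1, -1));
        // -> INTERVAL MINUTE (negative values suppress precision and scale)
        System.out.println(IntervalQualifier.DAY_TO_SECOND.hasSeconds()); // true
        System.out.println(IntervalQualifier.valueOf(6));                 // YEAR TO MONTH
    }
}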
diff --git a/h2/src/main/org/h2/api/JavaObjectSerializer.java b/h2/src/main/org/h2/api/JavaObjectSerializer.java
index 748a174de7..9daa53065d 100644
--- a/h2/src/main/org/h2/api/JavaObjectSerializer.java
+++ b/h2/src/main/org/h2/api/JavaObjectSerializer.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.api;
@@ -18,6 +18,7 @@ public interface JavaObjectSerializer {
*
* @param obj the object to serialize
* @return the byte array of the serialized object
+ * @throws Exception on failure
*/
byte[] serialize(Object obj) throws Exception;
@@ -26,6 +27,7 @@ public interface JavaObjectSerializer {
*
* @param bytes the byte array of the serialized object
* @return the object
+ * @throws Exception on failure
*/
Object deserialize(byte[] bytes) throws Exception;
diff --git a/h2/src/main/org/h2/api/TableEngine.java b/h2/src/main/org/h2/api/TableEngine.java
index b123a35e94..497b291949 100644
--- a/h2/src/main/org/h2/api/TableEngine.java
+++ b/h2/src/main/org/h2/api/TableEngine.java
@@ -1,12 +1,12 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.api;
-import org.h2.table.Table;
import org.h2.command.ddl.CreateTableData;
+import org.h2.table.Table;
/**
* A class that implements this interface can create custom table
diff --git a/h2/src/main/org/h2/api/Trigger.java b/h2/src/main/org/h2/api/Trigger.java
index 3ce8f09249..37a1cb74c2 100644
--- a/h2/src/main/org/h2/api/Trigger.java
+++ b/h2/src/main/org/h2/api/Trigger.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.api;
@@ -49,9 +49,12 @@ public interface Trigger {
* operation is performed
* @param type the operation type: INSERT, UPDATE, DELETE, SELECT, or a
* combination (this parameter is a bit field)
+ * @throws SQLException on SQL exception
*/
- void init(Connection conn, String schemaName, String triggerName,
- String tableName, boolean before, int type) throws SQLException;
+ default void init(Connection conn, String schemaName, String triggerName,
+ String tableName, boolean before, int type) throws SQLException {
+ // Does nothing by default
+ }
/**
* This method is called for each triggered action. The method is called
@@ -82,12 +85,20 @@ void fire(Connection conn, Object[] oldRow, Object[] newRow)
* This method is called when the database is closed.
* If the method throws an exception, it will be logged, but
* closing the database will continue.
+ *
+ * @throws SQLException on SQL exception
*/
- void close() throws SQLException;
+ default void close() throws SQLException {
+ // Does nothing by default
+ }
/**
* This method is called when the trigger is dropped.
+ *
+ * @throws SQLException on SQL exception
*/
- void remove() throws SQLException;
+ default void remove() throws SQLException {
+ // Does nothing by default
+ }
}
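// Illustrative sketch (not part of the patch): with init(), close() and
// remove() now having empty default implementations, a trigger only needs to
// implement fire(). The class name is hypothetical.
import java.sql.Connection;
import java.sql.SQLException;
import org.h2.api.Trigger;

class RowLoggingTrigger implements Trigger {
    @Override
    public void fire(Connection conn, Object[] oldRow, Object[] newRow) throws SQLException {
        // newRow is null for DELETE, oldRow is null for INSERT
        System.out.println("old=" + java.util.Arrays.toString(oldRow)
                + " new=" + java.util.Arrays.toString(newRow));
    }
}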
diff --git a/h2/src/main/org/h2/api/UserToRolesMapper.java b/h2/src/main/org/h2/api/UserToRolesMapper.java
new file mode 100644
index 0000000000..55d59468e2
--- /dev/null
+++ b/h2/src/main/org/h2/api/UserToRolesMapper.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: Alessandro Ventura
+ */
+package org.h2.api;
+
+import java.util.Collection;
+
+import org.h2.security.auth.AuthenticationException;
+import org.h2.security.auth.AuthenticationInfo;
+import org.h2.security.auth.Configurable;
+
+/**
+ * A class that implements this interface can be used during authentication to
+ * map external users to database roles.
+ *
+ * This feature is experimental and subject to change.
+ *
+ */
+public interface UserToRolesMapper extends Configurable {
+
+ /**
+ * Map user identified by authentication info to a set of granted roles.
+ *
+ * @param authenticationInfo
+ * authentication information
+ * @return list of roles to be temporarily assigned to the user
+ * @throws AuthenticationException
+ * on authentication exception
+ */
+ Collection<String> mapUserToRoles(AuthenticationInfo authenticationInfo) throws AuthenticationException;
+}
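// Illustrative sketch (not part of the patch): a mapper granting the same role
// to every authenticated external user. Assumes the Configurable interface
// (not shown in this patch) declares configure(ConfigProperties).
import java.util.Collection;
import java.util.Collections;
import org.h2.api.UserToRolesMapper;
import org.h2.security.auth.AuthenticationInfo;
import org.h2.security.auth.ConfigProperties;

class StaticRoleMapper implements UserToRolesMapper {
    @Override
    public void configure(ConfigProperties configProperties) {
        // no configuration needed for this sketch
    }

    @Override
    public Collection<String> mapUserToRoles(AuthenticationInfo authenticationInfo) {
        return Collections.singletonList("REMOTE_USER");
    }
}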
diff --git a/h2/src/main/org/h2/api/package.html b/h2/src/main/org/h2/api/package.html
index ed9c0e7f20..3dd9f31c6c 100644
--- a/h2/src/main/org/h2/api/package.html
+++ b/h2/src/main/org/h2/api/package.html
@@ -1,7 +1,7 @@
diff --git a/h2/src/main/org/h2/bnf/Bnf.java b/h2/src/main/org/h2/bnf/Bnf.java
index 9afa92fcb4..3faccea4e4 100644
--- a/h2/src/main/org/h2/bnf/Bnf.java
+++ b/h2/src/main/org/h2/bnf/Bnf.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
@@ -14,10 +14,9 @@
import java.util.ArrayList;
import java.util.HashMap;
import java.util.StringTokenizer;
-
import org.h2.bnf.context.DbContextRule;
+import org.h2.command.dml.Help;
import org.h2.tools.Csv;
-import org.h2.util.New;
import org.h2.util.StringUtils;
import org.h2.util.Utils;
@@ -31,7 +30,7 @@ public class Bnf {
* The rule map. The key is lowercase, and all spaces
* are replaced with underscores.
*/
- private final HashMap<String, RuleHead> ruleMap = New.hashMap();
+ private final HashMap<String, RuleHead> ruleMap = new HashMap<>();
private String syntax;
private String currentToken;
private String[] tokens;
@@ -46,6 +45,8 @@ public class Bnf {
*
* @param csv if not specified, the help.csv is used
* @return a new instance
+ * @throws SQLException on failure
+ * @throws IOException on failure
*/
public static Bnf getInstance(Reader csv) throws SQLException, IOException {
Bnf bnf = new Bnf();
@@ -57,6 +58,17 @@ public static Bnf getInstance(Reader csv) throws SQLException, IOException {
return bnf;
}
+ /**
+ * Add an alias for a rule.
+ *
+ * @param name for example "procedure"
+ * @param replacement for example "@func@"
+ */
+ public void addAlias(String name, String replacement) {
+ RuleHead head = ruleMap.get(replacement);
+ ruleMap.put(name, head);
+ }
+
private void addFixedRule(String name, int fixedType) {
Rule rule = new RuleFixed(fixedType);
addRule(name, "Fixed", rule);
@@ -65,16 +77,15 @@ private void addFixedRule(String name, int fixedType) {
private RuleHead addRule(String topic, String section, Rule rule) {
RuleHead head = new RuleHead(section, topic, rule);
String key = StringUtils.toLowerEnglish(topic.trim().replace(' ', '_'));
- if (ruleMap.get(key) != null) {
+ if (ruleMap.putIfAbsent(key, head) != null) {
throw new AssertionError("already exists: " + topic);
}
- ruleMap.put(key, head);
return head;
}
private void parse(Reader reader) throws SQLException, IOException {
Rule functions = null;
- statements = New.arrayList();
+ statements = new ArrayList<>();
Csv csv = new Csv();
csv.setLineCommentCharacter('#');
ResultSet rs = csv.read(reader, null);
@@ -84,7 +95,7 @@ private void parse(Reader reader) throws SQLException, IOException {
continue;
}
String topic = rs.getString("TOPIC");
- syntax = rs.getString("SYNTAX").trim();
+ syntax = Help.stripAnnotationsFromSyntax(rs.getString("SYNTAX"));
currentTopic = section;
tokens = tokenize();
index = 0;
@@ -108,9 +119,10 @@ private void parse(Reader reader) throws SQLException, IOException {
addFixedRule("@hms@", RuleFixed.HMS);
addFixedRule("@nanos@", RuleFixed.NANOS);
addFixedRule("anything_except_single_quote", RuleFixed.ANY_EXCEPT_SINGLE_QUOTE);
+ addFixedRule("single_character", RuleFixed.ANY_EXCEPT_SINGLE_QUOTE);
addFixedRule("anything_except_double_quote", RuleFixed.ANY_EXCEPT_DOUBLE_QUOTE);
addFixedRule("anything_until_end_of_line", RuleFixed.ANY_UNTIL_EOL);
- addFixedRule("anything_until_end_comment", RuleFixed.ANY_UNTIL_END);
+ addFixedRule("anything_until_comment_start_or_end", RuleFixed.ANY_UNTIL_END);
addFixedRule("anything_except_two_dollar_signs", RuleFixed.ANY_EXCEPT_2_DOLLAR);
addFixedRule("anything", RuleFixed.ANY_WORD);
addFixedRule("@hex_start@", RuleFixed.HEX_START);
@@ -120,6 +132,7 @@ private void parse(Reader reader) throws SQLException, IOException {
addFixedRule("@digit@", RuleFixed.DIGIT);
addFixedRule("@open_bracket@", RuleFixed.OPEN_BRACKET);
addFixedRule("@close_bracket@", RuleFixed.CLOSE_BRACKET);
+ addFixedRule("json_text", RuleFixed.JSON_TEXT);
}
/**
@@ -200,6 +213,28 @@ private Rule parseList() {
return r;
}
+ private RuleExtension parseExtension(boolean compatibility) {
+ read();
+ Rule r;
+ if (firstChar == '[') {
+ read();
+ r = parseOr();
+ r = new RuleOptional(r);
+ if (firstChar != ']') {
+ throw new AssertionError("expected ], got " + currentToken + " syntax:" + syntax);
+ }
+ } else if (firstChar == '{') {
+ read();
+ r = parseOr();
+ if (firstChar != '}') {
+ throw new AssertionError("expected }, got " + currentToken + " syntax:" + syntax);
+ }
+ } else {
+ r = parseOr();
+ }
+ return new RuleExtension(r, compatibility);
+ }
+
private Rule parseToken() {
Rule r;
if ((firstChar >= 'A' && firstChar <= 'Z')
@@ -208,24 +243,30 @@ private Rule parseToken() {
r = new RuleElement(currentToken, currentTopic);
} else if (firstChar == '[') {
read();
- Rule r2 = parseOr();
- r = new RuleOptional(r2);
+ r = parseOr();
+ r = new RuleOptional(r);
if (firstChar != ']') {
- throw new AssertionError("expected ], got " + currentToken
- + " syntax:" + syntax);
+ throw new AssertionError("expected ], got " + currentToken + " syntax:" + syntax);
}
} else if (firstChar == '{') {
read();
r = parseOr();
if (firstChar != '}') {
- throw new AssertionError("expected }, got " + currentToken
- + " syntax:" + syntax);
+ throw new AssertionError("expected }, got " + currentToken + " syntax:" + syntax);
+ }
+ } else if (firstChar == '@') {
+ if ("@commaDots@".equals(currentToken)) {
+ r = new RuleList(new RuleElement(",", currentTopic), lastRepeat, false);
+ r = new RuleRepeat(r, true);
+ } else if ("@dots@".equals(currentToken)) {
+ r = new RuleRepeat(lastRepeat, false);
+ } else if ("@c@".equals(currentToken)) {
+ r = parseExtension(true);
+ } else if ("@h2@".equals(currentToken)) {
+ r = parseExtension(false);
+ } else {
+ r = new RuleElement(currentToken, currentTopic);
}
- } else if ("@commaDots@".equals(currentToken)) {
- r = new RuleList(new RuleElement(",", currentTopic), lastRepeat, false);
- r = new RuleRepeat(r, true);
- } else if ("@dots@".equals(currentToken)) {
- r = new RuleRepeat(lastRepeat, false);
} else {
r = new RuleElement(currentToken, currentTopic);
}
@@ -244,10 +285,25 @@ private void read() {
}
}
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ for (int i = 0; i < index; i++) {
+ builder.append(tokens[i]).append(' ');
+ }
+ builder.append("[*]");
+ for (int i = index; i < tokens.length; i++) {
+ builder.append(' ').append(tokens[i]);
+ }
+ return builder.toString();
+ }
+
private String[] tokenize() {
- ArrayList<String> list = New.arrayList();
+ ArrayList<String> list = new ArrayList<>();
syntax = StringUtils.replaceAll(syntax, "yyyy-MM-dd", "@ymd@");
syntax = StringUtils.replaceAll(syntax, "hh:mm:ss", "@hms@");
+ syntax = StringUtils.replaceAll(syntax, "hh:mm", "@hms@");
+ syntax = StringUtils.replaceAll(syntax, "mm:ss", "@hms@");
syntax = StringUtils.replaceAll(syntax, "nnnnnnnnn", "@nanos@");
syntax = StringUtils.replaceAll(syntax, "function", "@func@");
syntax = StringUtils.replaceAll(syntax, "0x", "@hexStart@");
@@ -272,7 +328,7 @@ private String[] tokenize() {
}
list.add(s);
}
- return list.toArray(new String[list.size()]);
+ return list.toArray(new String[0]);
}
/**
@@ -351,7 +407,7 @@ public ArrayList<RuleHead> getStatements() {
* @return the tokenizer
*/
public static StringTokenizer getTokenizer(String s) {
- return new StringTokenizer(s, " [](){}|.,\r\n<>:-+*/=<\">!'$", true);
+ return new StringTokenizer(s, " [](){}|.,\r\n<>:-+*/=\"!'$", true);
}
}
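// Illustrative sketch (not part of the patch): loading the bundled grammar and
// registering the alias used as the example in the addAlias() Javadoc above.
import org.h2.bnf.Bnf;

class BnfSketch {
    public static void main(String[] args) throws Exception {
        Bnf bnf = Bnf.getInstance(null); // null reader: use the built-in help.csv
        bnf.addAlias("procedure", "@func@");
    }
}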
diff --git a/h2/src/main/org/h2/bnf/BnfVisitor.java b/h2/src/main/org/h2/bnf/BnfVisitor.java
index c379f30ed9..1a8ec01d6f 100644
--- a/h2/src/main/org/h2/bnf/BnfVisitor.java
+++ b/h2/src/main/org/h2/bnf/BnfVisitor.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
@@ -51,4 +51,19 @@ public interface BnfVisitor {
*/
void visitRuleOptional(Rule rule);
+ /**
+ * Visit an OR list of optional rules.
+ *
+ * @param list the optional rules
+ */
+ void visitRuleOptional(ArrayList<Rule> list);
+
+ /**
+ * Visit a rule with non-standard extension.
+ *
+ * @param rule the rule
+ * @param compatibility whether this rule exists for compatibility only
+ */
+ void visitRuleExtension(Rule rule, boolean compatibility);
+
}
diff --git a/h2/src/main/org/h2/bnf/Rule.java b/h2/src/main/org/h2/bnf/Rule.java
index bb8858f3c7..0070e4e28b 100644
--- a/h2/src/main/org/h2/bnf/Rule.java
+++ b/h2/src/main/org/h2/bnf/Rule.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
diff --git a/h2/src/main/org/h2/bnf/RuleElement.java b/h2/src/main/org/h2/bnf/RuleElement.java
index fec0fa0ef6..aca908583b 100644
--- a/h2/src/main/org/h2/bnf/RuleElement.java
+++ b/h2/src/main/org/h2/bnf/RuleElement.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
@@ -77,4 +77,9 @@ public boolean autoComplete(Sentence sentence) {
return link.autoComplete(sentence);
}
+ @Override
+ public String toString() {
+ return name;
+ }
+
}
diff --git a/h2/src/main/org/h2/bnf/RuleExtension.java b/h2/src/main/org/h2/bnf/RuleExtension.java
new file mode 100644
index 0000000000..217a946da7
--- /dev/null
+++ b/h2/src/main/org/h2/bnf/RuleExtension.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.bnf;
+
+import java.util.HashMap;
+
+/**
+ * Represents a non-standard syntax.
+ */
+public class RuleExtension implements Rule {
+
+ private final Rule rule;
+ private final boolean compatibility;
+
+ private boolean mapSet;
+
+ public RuleExtension(Rule rule, boolean compatibility) {
+ this.rule = rule;
+ this.compatibility = compatibility;
+ }
+
+ @Override
+ public void accept(BnfVisitor visitor) {
+ visitor.visitRuleExtension(rule, compatibility);
+ }
+
+ @Override
+ public void setLinks(HashMap<String, RuleHead> ruleMap) {
+ if (!mapSet) {
+ rule.setLinks(ruleMap);
+ mapSet = true;
+ }
+ }
+ @Override
+ public boolean autoComplete(Sentence sentence) {
+ sentence.stopIfRequired();
+ rule.autoComplete(sentence);
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return (compatibility ? "@c@ " : "@h2@ ") + rule.toString();
+ }
+
+}
diff --git a/h2/src/main/org/h2/bnf/RuleFixed.java b/h2/src/main/org/h2/bnf/RuleFixed.java
index a8057dad71..8557e0ae52 100644
--- a/h2/src/main/org/h2/bnf/RuleFixed.java
+++ b/h2/src/main/org/h2/bnf/RuleFixed.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
@@ -22,6 +22,7 @@ public class RuleFixed implements Rule {
public static final int HEX_START = 10, CONCAT = 11;
public static final int AZ_UNDERSCORE = 12, AF = 13, DIGIT = 14;
public static final int OPEN_BRACKET = 15, CLOSE_BRACKET = 16;
+ public static final int JSON_TEXT = 17;
private final int type;
@@ -44,7 +45,8 @@ public boolean autoComplete(Sentence sentence) {
sentence.stopIfRequired();
String query = sentence.getQuery();
String s = query;
- switch(type) {
+ boolean removeTrailingSpaces = false;
+ switch (type) {
case YMD:
while (s.length() > 0 && "0123456789-".indexOf(s.charAt(0)) >= 0) {
s = s.substring(1);
@@ -52,6 +54,8 @@ public boolean autoComplete(Sentence sentence) {
if (s.length() == 0) {
sentence.add("2006-01-01", "1", Sentence.KEYWORD);
}
+ // needed for timestamps
+ removeTrailingSpaces = true;
break;
case HMS:
while (s.length() > 0 && "0123456789:".indexOf(s.charAt(0)) >= 0) {
@@ -68,6 +72,7 @@ public boolean autoComplete(Sentence sentence) {
if (s.length() == 0) {
sentence.add("nanoseconds", "0", Sentence.KEYWORD);
}
+ removeTrailingSpaces = true;
break;
case ANY_EXCEPT_SINGLE_QUOTE:
while (true) {
@@ -111,6 +116,7 @@ public boolean autoComplete(Sentence sentence) {
}
break;
case ANY_WORD:
+ case JSON_TEXT:
while (s.length() > 0 && !Bnf.startWithSpace(s)) {
s = s.substring(1);
}
@@ -135,6 +141,7 @@ public boolean autoComplete(Sentence sentence) {
} else if (s.length() == 0) {
sentence.add("||", "||", Sentence.KEYWORD);
}
+ removeTrailingSpaces = true;
break;
case AZ_UNDERSCORE:
if (s.length() > 0 &&
@@ -170,6 +177,7 @@ public boolean autoComplete(Sentence sentence) {
} else if (s.charAt(0) == '[') {
s = s.substring(1);
}
+ removeTrailingSpaces = true;
break;
case CLOSE_BRACKET:
if (s.length() == 0) {
@@ -177,6 +185,7 @@ public boolean autoComplete(Sentence sentence) {
} else if (s.charAt(0) == ']') {
s = s.substring(1);
}
+ removeTrailingSpaces = true;
break;
// no autocomplete support for comments
// (comments are not reachable in the bnf tree)
@@ -186,8 +195,14 @@ public boolean autoComplete(Sentence sentence) {
throw new AssertionError("type="+type);
}
if (!s.equals(query)) {
- while (Bnf.startWithSpace(s)) {
- s = s.substring(1);
+ // cannot always remove spaces here, because a repeat
+ // rule for a-z would remove multiple words,
+ // but we have to remove spaces after '||'
+ // and after ']'
+ if (removeTrailingSpaces) {
+ while (Bnf.startWithSpace(s)) {
+ s = s.substring(1);
+ }
}
sentence.setQuery(s);
return true;
@@ -195,4 +210,9 @@ public boolean autoComplete(Sentence sentence) {
return false;
}
+ @Override
+ public String toString() {
+ return "#" + type;
+ }
+
}
diff --git a/h2/src/main/org/h2/bnf/RuleHead.java b/h2/src/main/org/h2/bnf/RuleHead.java
index f280c3dcca..95891bd1a0 100644
--- a/h2/src/main/org/h2/bnf/RuleHead.java
+++ b/h2/src/main/org/h2/bnf/RuleHead.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
diff --git a/h2/src/main/org/h2/bnf/RuleList.java b/h2/src/main/org/h2/bnf/RuleList.java
index 7518161365..30e8f67893 100644
--- a/h2/src/main/org/h2/bnf/RuleList.java
+++ b/h2/src/main/org/h2/bnf/RuleList.java
@@ -1,25 +1,26 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
import java.util.ArrayList;
import java.util.HashMap;
-import org.h2.util.New;
+
+import org.h2.util.Utils;
/**
* Represents a sequence of BNF rules, or a list of alternative rules.
*/
public class RuleList implements Rule {
- private final boolean or;
- private final ArrayList<Rule> list;
+ final boolean or;
+ final ArrayList<Rule> list;
private boolean mapSet;
public RuleList(Rule first, Rule next, boolean or) {
- list = New.arrayList();
+ list = Utils.newSmallArrayList();
if (first instanceof RuleList && ((RuleList) first).or == or) {
list.addAll(((RuleList) first).list);
} else {
@@ -70,4 +71,20 @@ public boolean autoComplete(Sentence sentence) {
return true;
}
+ @Override
+ public String toString() {
+ StringBuilder builder = new StringBuilder();
+ for (int i = 0, l = list.size(); i < l; i++) {
+ if (i > 0) {
+ if (or) {
+ builder.append(" | ");
+ } else {
+ builder.append(' ');
+ }
+ }
+ builder.append(list.get(i).toString());
+ }
+ return builder.toString();
+ }
+
}
diff --git a/h2/src/main/org/h2/bnf/RuleOptional.java b/h2/src/main/org/h2/bnf/RuleOptional.java
index a5f2f7aa74..52cfee7f42 100644
--- a/h2/src/main/org/h2/bnf/RuleOptional.java
+++ b/h2/src/main/org/h2/bnf/RuleOptional.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
@@ -20,6 +20,13 @@ public RuleOptional(Rule rule) {
@Override
public void accept(BnfVisitor visitor) {
+ if (rule instanceof RuleList) {
+ RuleList ruleList = (RuleList) rule;
+ if (ruleList.or) {
+ visitor.visitRuleOptional(ruleList.list);
+ return;
+ }
+ }
visitor.visitRuleOptional(rule);
}
@@ -37,4 +44,9 @@ public boolean autoComplete(Sentence sentence) {
return true;
}
+ @Override
+ public String toString() {
+ return '[' + rule.toString() + ']';
+ }
+
}
diff --git a/h2/src/main/org/h2/bnf/RuleRepeat.java b/h2/src/main/org/h2/bnf/RuleRepeat.java
index 5c27438c40..347d03a8e7 100644
--- a/h2/src/main/org/h2/bnf/RuleRepeat.java
+++ b/h2/src/main/org/h2/bnf/RuleRepeat.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
@@ -36,7 +36,17 @@ public boolean autoComplete(Sentence sentence) {
while (rule.autoComplete(sentence)) {
// nothing to do
}
+ String s = sentence.getQuery();
+ while (Bnf.startWithSpace(s)) {
+ s = s.substring(1);
+ }
+ sentence.setQuery(s);
return true;
}
+ @Override
+ public String toString() {
+ return comma ? ", ..." : " ...";
+ }
+
}
diff --git a/h2/src/main/org/h2/bnf/Sentence.java b/h2/src/main/org/h2/bnf/Sentence.java
index 768d6fa2a5..a0993b0892 100644
--- a/h2/src/main/org/h2/bnf/Sentence.java
+++ b/h2/src/main/org/h2/bnf/Sentence.java
@@ -1,16 +1,16 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.Objects;
import org.h2.bnf.context.DbSchema;
import org.h2.bnf.context.DbTableOrView;
-import org.h2.util.New;
import org.h2.util.StringUtils;
/**
@@ -36,12 +36,12 @@ public class Sentence {
*/
public static final int FUNCTION = 2;
- private static final long MAX_PROCESSING_TIME = 100;
+ private static final int MAX_PROCESSING_TIME = 100;
/**
* The map of next tokens in the form type#tokenName token.
*/
- private final HashMap<String, String> next = New.hashMap();
+ private final HashMap<String, String> next = new HashMap<>();
/**
* The complete query string.
@@ -53,7 +53,7 @@ public class Sentence {
*/
private String queryUpper;
- private long stopAt;
+ private long stopAtNs;
private DbSchema lastMatchedSchema;
private DbTableOrView lastMatchedTable;
private DbTableOrView lastTable;
@@ -64,7 +64,7 @@ public class Sentence {
* Start the timer to make sure processing doesn't take too long.
*/
public void start() {
- stopAt = System.currentTimeMillis() + MAX_PROCESSING_TIME;
+ stopAtNs = System.nanoTime() + MAX_PROCESSING_TIME * 1_000_000L;
}
/**
@@ -73,7 +73,7 @@ public void start() {
* If processing is stopped, this method throws an IllegalStateException.
*/
public void stopIfRequired() {
- if (System.currentTimeMillis() > stopAt) {
+ if (System.nanoTime() - stopAtNs > 0L) {
throw new IllegalStateException();
}
}
@@ -97,7 +97,7 @@ public void add(String n, String string, int type) {
*/
public void addAlias(String alias, DbTableOrView table) {
if (aliases == null) {
- aliases = New.hashMap();
+ aliases = new HashMap<>();
}
aliases.put(alias, table);
}
@@ -110,7 +110,7 @@ public void addAlias(String alias, DbTableOrView table) {
public void addTable(DbTableOrView table) {
lastTable = table;
if (tables == null) {
- tables = New.hashSet();
+ tables = new HashSet<>();
}
tables.add(table);
}
@@ -185,7 +185,7 @@ public DbTableOrView getLastMatchedTable() {
* @param query the query string
*/
public void setQuery(String query) {
- if (!StringUtils.equals(this.query, query)) {
+ if (!Objects.equals(this.query, query)) {
this.query = query;
this.queryUpper = StringUtils.toUpperEnglish(query);
}
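// Illustrative sketch (not part of the patch): the monotonic-clock timeout
// pattern the patch switches Sentence to. System.nanoTime() values may wrap,
// so a deadline must be checked by signed subtraction, never with
// nanoTime() > deadline.
class DeadlineSketch {
    public static void main(String[] args) throws InterruptedException {
        long stopAtNs = System.nanoTime() + 100 * 1_000_000L; // 100 ms from now
        Thread.sleep(150);
        if (System.nanoTime() - stopAtNs > 0L) { // overflow-safe comparison
            System.out.println("processing took too long");
        }
    }
}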
diff --git a/h2/src/main/org/h2/bnf/context/DbColumn.java b/h2/src/main/org/h2/bnf/context/DbColumn.java
index 10e3bfbe16..db187c3e0a 100644
--- a/h2/src/main/org/h2/bnf/context/DbColumn.java
+++ b/h2/src/main/org/h2/bnf/context/DbColumn.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf.context;
@@ -21,37 +21,36 @@ public class DbColumn {
private final String dataType;
- private int position;
+ private final int position;
private DbColumn(DbContents contents, ResultSet rs, boolean procedureColumn)
throws SQLException {
name = rs.getString("COLUMN_NAME");
quotedName = contents.quoteIdentifier(name);
+ position = rs.getInt("ORDINAL_POSITION");
+ if (contents.isH2() && !procedureColumn) {
+ dataType = rs.getString("COLUMN_TYPE");
+ return;
+ }
String type = rs.getString("TYPE_NAME");
// a procedure's column size is identified by PRECISION; for tables this
// is COLUMN_SIZE
- String precisionColumnName;
+ String precisionColumnName, scaleColumnName;
if (procedureColumn) {
precisionColumnName = "PRECISION";
+ scaleColumnName = "SCALE";
} else {
precisionColumnName = "COLUMN_SIZE";
+ scaleColumnName = "DECIMAL_DIGITS";
}
int precision = rs.getInt(precisionColumnName);
- position = rs.getInt("ORDINAL_POSITION");
- boolean isSQLite = contents.isSQLite();
- if (precision > 0 && !isSQLite) {
- type += "(" + precision;
- String scaleColumnName;
- if (procedureColumn) {
- scaleColumnName = "SCALE";
+ if (precision > 0 && !contents.isSQLite()) {
+ int scale = rs.getInt(scaleColumnName);
+ if (scale > 0) {
+ type = type + '(' + precision + ", " + scale + ')';
} else {
- scaleColumnName = "DECIMAL_DIGITS";
- }
- int prec = rs.getInt(scaleColumnName);
- if (prec > 0) {
- type += ", " + prec;
+ type = type + '(' + precision + ')';
}
- type += ")";
}
if (rs.getInt("NULLABLE") == DatabaseMetaData.columnNoNulls) {
type += " NOT NULL";
@@ -65,6 +64,7 @@ private DbColumn(DbContents contents, ResultSet rs, boolean procedureColumn)
* @param contents the database contents
* @param rs the result set
* @return the column
+ * @throws SQLException on failure
*/
public static DbColumn getProcedureColumn(DbContents contents, ResultSet rs)
throws SQLException {
@@ -77,6 +77,7 @@ public static DbColumn getProcedureColumn(DbContents contents, ResultSet rs)
* @param contents the database contents
* @param rs the result set
* @return the column
+ * @throws SQLException on failure
*/
public static DbColumn getColumn(DbContents contents, ResultSet rs)
throws SQLException {
diff --git a/h2/src/main/org/h2/bnf/context/DbContents.java b/h2/src/main/org/h2/bnf/context/DbContents.java
index 1d061fbf28..1cedefb0da 100644
--- a/h2/src/main/org/h2/bnf/context/DbContents.java
+++ b/h2/src/main/org/h2/bnf/context/DbContents.java
@@ -1,20 +1,21 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf.context;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
-import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
-import org.h2.command.Parser;
-import org.h2.util.New;
+import org.h2.engine.Session;
+import org.h2.jdbc.JdbcConnection;
+import org.h2.util.ParserUtil;
import org.h2.util.StringUtils;
+import org.h2.util.Utils;
/**
* Keeps meta data information about a database.
@@ -29,110 +30,121 @@ public class DbContents {
private boolean isPostgreSQL;
private boolean isDerby;
private boolean isSQLite;
- private boolean isH2ModeMySQL;
private boolean isMySQL;
private boolean isFirebird;
private boolean isMSSQLServer;
+ private boolean isDB2;
+
+ private boolean databaseToUpper, databaseToLower;
+
+ private boolean mayHaveStandardViews = true;
/**
- * @return The default schema.
+ * @return the default schema.
*/
public DbSchema getDefaultSchema() {
return defaultSchema;
}
/**
- * @return True if this is an Apache Derby database.
+ * @return true if this is an Apache Derby database.
*/
public boolean isDerby() {
return isDerby;
}
/**
- * @return True if this is a Firebird database.
+ * @return true if this is a Firebird database.
*/
public boolean isFirebird() {
return isFirebird;
}
/**
- * @return True if this is a H2 database.
+ * @return true if this is an H2 database.
*/
public boolean isH2() {
return isH2;
}
/**
- * @return True if this is a H2 database in MySQL mode.
- */
- public boolean isH2ModeMySQL() {
- return isH2ModeMySQL;
- }
-
- /**
- * @return True if this is a MS SQL Server database.
+ * @return true if this is a MS SQL Server database.
*/
public boolean isMSSQLServer() {
return isMSSQLServer;
}
/**
- * @return True if this is a MySQL database.
+ * @return true if this is a MySQL database.
*/
public boolean isMySQL() {
return isMySQL;
}
/**
- * @return True if this is an Oracle database.
+ * @return true if this is an Oracle database.
*/
public boolean isOracle() {
return isOracle;
}
/**
- * @return True if this is a PostgreSQL database.
+ * @return true if this is a PostgreSQL database.
*/
public boolean isPostgreSQL() {
return isPostgreSQL;
}
/**
- * @return True if this is an SQLite database.
+ * @return true if this is an SQLite database.
*/
public boolean isSQLite() {
return isSQLite;
}
/**
- * @return The list of schemas.
+ * @return true if this is an IBM DB2 database.
+ */
+ public boolean isDB2() {
+ return isDB2;
+ }
+
+ /**
+ * @return the list of schemas.
*/
public DbSchema[] getSchemas() {
return schemas;
}
+ /**
+ * Returns whether standard INFORMATION_SCHEMA.VIEWS may be supported.
+ *
+ * @return whether standard INFORMATION_SCHEMA.VIEWS may be supported
+ */
+ public boolean mayHaveStandardViews() {
+ return mayHaveStandardViews;
+ }
+
+ /**
+ * @param mayHaveStandardViews
+ * whether standard INFORMATION_SCHEMA.VIEWS is detected as
+ * supported
+ */
+ public void setMayHaveStandardViews(boolean mayHaveStandardViews) {
+ this.mayHaveStandardViews = mayHaveStandardViews;
+ }
+
/**
* Read the contents of this database from the database meta data.
*
* @param url the database URL
* @param conn the connection
+ * @throws SQLException on failure
*/
public synchronized void readContents(String url, Connection conn)
throws SQLException {
isH2 = url.startsWith("jdbc:h2:");
- if (isH2) {
- PreparedStatement prep = conn.prepareStatement(
- "SELECT UPPER(VALUE) FROM INFORMATION_SCHEMA.SETTINGS " +
- "WHERE NAME=?");
- prep.setString(1, "MODE");
- ResultSet rs = prep.executeQuery();
- rs.next();
- if ("MYSQL".equals(rs.getString(1))) {
- isH2ModeMySQL = true;
- }
- rs.close();
- prep.close();
- }
+ isDB2 = url.startsWith("jdbc:db2:");
isSQLite = url.startsWith("jdbc:sqlite:");
isOracle = url.startsWith("jdbc:oracle:");
// the Vertica engine is based on PostgreSQL
@@ -142,6 +154,17 @@ public synchronized void readContents(String url, Connection conn)
isDerby = url.startsWith("jdbc:derby:");
isFirebird = url.startsWith("jdbc:firebirdsql:");
isMSSQLServer = url.startsWith("jdbc:sqlserver:");
+ if (isH2) {
+ Session.StaticSettings settings = ((JdbcConnection) conn).getStaticSettings();
+ databaseToUpper = settings.databaseToUpper;
+ databaseToLower = settings.databaseToLower;
+ } else if (isMySQL || isPostgreSQL) {
+ databaseToUpper = false;
+ databaseToLower = true;
+ } else {
+ databaseToUpper = true;
+ databaseToLower = false;
+ }
DatabaseMetaData meta = conn.getMetaData();
String defaultSchemaName = getDefaultSchemaName(meta);
String[] schemaNames = getSchemaNames(meta);
@@ -158,7 +181,7 @@ public synchronized void readContents(String url, Connection conn)
String[] tableTypes = { "TABLE", "SYSTEM TABLE", "VIEW",
"SYSTEM VIEW", "TABLE LINK", "SYNONYM", "EXTERNAL" };
schema.readTables(meta, tableTypes);
- if (!isPostgreSQL) {
+ if (!isPostgreSQL && !isDB2) {
schema.readProcedures(meta);
}
}
@@ -187,7 +210,7 @@ private String[] getSchemaNames(DatabaseMetaData meta) throws SQLException {
return new String[] { null };
}
ResultSet rs = meta.getSchemas();
- ArrayList<String> schemaList = New.arrayList();
+ ArrayList<String> schemaList = Utils.newSmallArrayList();
while (rs.next()) {
String schema = rs.getString("TABLE_SCHEM");
String[] ignoreNames = null;
@@ -202,6 +225,14 @@ private String[] getSchemaNames(DatabaseMetaData meta) throws SQLException {
"db_backupoperator", "db_datareader", "db_datawriter",
"db_ddladmin", "db_denydatareader",
"db_denydatawriter", "db_owner", "db_securityadmin" };
+ } else if (isDB2) {
+ ignoreNames = new String[] { "NULLID", "SYSFUN",
+ "SYSIBMINTERNAL", "SYSIBMTS", "SYSPROC", "SYSPUBLIC",
+ // not empty, but not sure what they contain
+ "SYSCAT", "SYSIBM", "SYSIBMADM",
+ "SYSSTAT", "SYSTOOLS",
+ };
+
}
if (ignoreNames != null) {
for (String ignore : ignoreNames) {
@@ -217,15 +248,15 @@ private String[] getSchemaNames(DatabaseMetaData meta) throws SQLException {
schemaList.add(schema);
}
rs.close();
- String[] list = new String[schemaList.size()];
- schemaList.toArray(list);
- return list;
+ return schemaList.toArray(new String[0]);
}
private String getDefaultSchemaName(DatabaseMetaData meta) {
String defaultSchemaName = "";
try {
- if (isOracle) {
+ if (isH2) {
+ return meta.storesLowerCaseIdentifiers() ? "public" : "PUBLIC";
+ } else if (isOracle) {
return meta.getUserName();
} else if (isPostgreSQL) {
return "public";
@@ -236,22 +267,14 @@ private String getDefaultSchemaName(DatabaseMetaData meta) {
} else if (isFirebird) {
return null;
}
- ResultSet rs = meta.getSchemas();
- int index = rs.findColumn("IS_DEFAULT");
- while (rs.next()) {
- if (rs.getBoolean(index)) {
- defaultSchemaName = rs.getString("TABLE_SCHEM");
- }
- }
} catch (SQLException e) {
- // IS_DEFAULT not found
+ // Ignore
}
return defaultSchemaName;
}
/**
* Add double quotes around an identifier if required.
- * For the H2 database, all identifiers are quoted.
*
* @param identifier the identifier
* @return the quoted identifier
@@ -260,10 +283,10 @@ public String quoteIdentifier(String identifier) {
if (identifier == null) {
return null;
}
- if (isH2 && !isH2ModeMySQL) {
- return Parser.quoteIdentifier(identifier);
+ if (ParserUtil.isSimpleIdentifier(identifier, databaseToUpper, databaseToLower)) {
+ return identifier;
}
- return StringUtils.toUpperEnglish(identifier);
+ return StringUtils.quoteIdentifier(identifier);
}
}
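// Illustrative sketch (not part of the patch): the quoting decision used by
// quoteIdentifier() above, shown standalone with the settings of a default H2
// database (databaseToUpper = true, databaseToLower = false).
import org.h2.util.ParserUtil;
import org.h2.util.StringUtils;

class QuoteSketch {
    public static void main(String[] args) {
        for (String id : new String[] { "FOO", "foo bar" }) {
            String quoted = ParserUtil.isSimpleIdentifier(id, true, false)
                    ? id                               // simple identifier, kept as-is
                    : StringUtils.quoteIdentifier(id); // wrapped in double quotes
            System.out.println(quoted); // FOO, then "foo bar"
        }
    }
}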
diff --git a/h2/src/main/org/h2/bnf/context/DbContextRule.java b/h2/src/main/org/h2/bnf/context/DbContextRule.java
index a48c0495c0..1d295cdb42 100644
--- a/h2/src/main/org/h2/bnf/context/DbContextRule.java
+++ b/h2/src/main/org/h2/bnf/context/DbContextRule.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf.context;
@@ -15,8 +15,8 @@
import org.h2.bnf.RuleHead;
import org.h2.bnf.RuleList;
import org.h2.bnf.Sentence;
-import org.h2.command.Parser;
import org.h2.message.DbException;
+import org.h2.util.ParserUtil;
import org.h2.util.StringUtils;
/**
@@ -154,7 +154,7 @@ public boolean autoComplete(Sentence sentence) {
break;
}
String alias = up.substring(0, i);
- if (Parser.isKeyword(alias, true)) {
+ if (ParserUtil.isKeyword(alias, false)) {
break;
}
s = s.substring(alias.length());
@@ -172,9 +172,7 @@ public boolean autoComplete(Sentence sentence) {
name = column.getQuotedName();
compare = query;
}
- if (compare.startsWith(name) &&
- (columnType == null ||
- column.getDataType().contains(columnType))) {
+ if (compare.startsWith(name) && testColumnType(column)) {
String b = s.substring(name.length());
if (best == null || b.length() < best.length()) {
best = b;
@@ -199,8 +197,7 @@ public boolean autoComplete(Sentence sentence) {
for (DbColumn column : table.getColumns()) {
String name = StringUtils.toUpperEnglish(column
.getName());
- if (columnType == null
- || column.getDataType().contains(columnType)) {
+ if (testColumnType(column)) {
if (up.startsWith(name)) {
String b = s.substring(name.length());
if (best == null || b.length() < best.length()) {
@@ -226,7 +223,7 @@ public boolean autoComplete(Sentence sentence) {
autoCompleteProcedure(sentence);
break;
default:
- throw DbException.throwInternalError("type=" + type);
+ throw DbException.getInternalError("type=" + type);
}
if (!s.equals(query)) {
while (Bnf.startWithSpace(s)) {
@@ -237,6 +234,21 @@ public boolean autoComplete(Sentence sentence) {
}
return false;
}
+
+ private boolean testColumnType(DbColumn column) {
+ if (columnType == null) {
+ return true;
+ }
+ String type = column.getDataType();
+ if (columnType.contains("CHAR") || columnType.contains("CLOB")) {
+ return type.contains("CHAR") || type.contains("CLOB");
+ }
+ if (columnType.contains("BINARY") || columnType.contains("BLOB")) {
+ return type.contains("BINARY") || type.contains("BLOB");
+ }
+ return type.contains(columnType);
+ }
+
private void autoCompleteProcedure(Sentence sentence) {
DbSchema schema = sentence.getLastMatchedSchema();
if (schema == null) {
@@ -244,9 +256,9 @@ private void autoCompleteProcedure(Sentence sentence) {
}
String incompleteSentence = sentence.getQueryUpper();
String incompleteFunctionName = incompleteSentence;
- if (incompleteSentence.contains("(")) {
- incompleteFunctionName = incompleteSentence.substring(0,
- incompleteSentence.indexOf('(')).trim();
+ int bracketIndex = incompleteSentence.indexOf('(');
+ if (bracketIndex != -1) {
+ incompleteFunctionName = StringUtils.trimSubstring(incompleteSentence, 0, bracketIndex);
}
// Common elements
@@ -301,7 +313,7 @@ private static String autoCompleteTableAlias(Sentence sentence,
return s;
}
String alias = up.substring(0, i);
- if ("SET".equals(alias) || Parser.isKeyword(alias, true)) {
+ if ("SET".equals(alias) || ParserUtil.isKeyword(alias, false)) {
return s;
}
if (newAlias) {
diff --git a/h2/src/main/org/h2/bnf/context/DbProcedure.java b/h2/src/main/org/h2/bnf/context/DbProcedure.java
index 4a6e30c6db..0e9a71c2b7 100644
--- a/h2/src/main/org/h2/bnf/context/DbProcedure.java
+++ b/h2/src/main/org/h2/bnf/context/DbProcedure.java
@@ -1,17 +1,17 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf.context;
-import org.h2.util.New;
-
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
+import org.h2.util.Utils;
+
/**
* Contains meta data information about a procedure.
* This class is used by the H2 Console.
@@ -21,7 +21,7 @@ public class DbProcedure {
private final DbSchema schema;
private final String name;
private final String quotedName;
- private boolean returnsResult;
+ private final boolean returnsResult;
private DbColumn[] parameters;
public DbProcedure(DbSchema schema, ResultSet rs) throws SQLException {
@@ -71,10 +71,11 @@ public boolean isReturnsResult() {
* Read the parameters for this procedure from the database meta data.
*
* @param meta the database meta data
+ * @throws SQLException on failure
*/
void readParameters(DatabaseMetaData meta) throws SQLException {
ResultSet rs = meta.getProcedureColumns(null, schema.name, name, null);
- ArrayList<DbColumn> list = New.arrayList();
+ ArrayList<DbColumn> list = Utils.newSmallArrayList();
while (rs.next()) {
DbColumn column = DbColumn.getProcedureColumn(schema.getContents(), rs);
if (column.getPosition() > 0) {
diff --git a/h2/src/main/org/h2/bnf/context/DbSchema.java b/h2/src/main/org/h2/bnf/context/DbSchema.java
index d3655da5bd..f37e06fbe1 100644
--- a/h2/src/main/org/h2/bnf/context/DbSchema.java
+++ b/h2/src/main/org/h2/bnf/context/DbSchema.java
@@ -1,18 +1,21 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf.context;
+import java.sql.Connection;
import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
+import java.sql.SQLSyntaxErrorException;
import java.util.ArrayList;
import org.h2.engine.SysProperties;
-import org.h2.util.New;
import org.h2.util.StringUtils;
+import org.h2.util.Utils;
/**
* Contains meta data information about a database schema.
@@ -20,6 +23,13 @@
*/
public class DbSchema {
+ private static final String COLUMNS_QUERY_H2_197 = "SELECT COLUMN_NAME, ORDINAL_POSITION, COLUMN_TYPE "
+ + "FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ?1 AND TABLE_NAME = ?2";
+
+ private static final String COLUMNS_QUERY_H2_202 = "SELECT COLUMN_NAME, ORDINAL_POSITION, "
+ + "DATA_TYPE_SQL(?1, ?2, 'TABLE', ORDINAL_POSITION) COLUMN_TYPE "
+ + "FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = ?1 AND TABLE_NAME = ?2";
+
/**
* The schema name.
*/
@@ -58,12 +68,12 @@ public class DbSchema {
DbSchema(DbContents contents, String name, boolean isDefault) {
this.contents = contents;
this.name = name;
- this.quotedName = contents.quoteIdentifier(name);
+ this.quotedName = contents.quoteIdentifier(name);
this.isDefault = isDefault;
if (name == null) {
// firebird
isSystem = true;
- } else if ("INFORMATION_SCHEMA".equals(name)) {
+ } else if ("INFORMATION_SCHEMA".equalsIgnoreCase(name)) {
isSystem = true;
} else if (!contents.isH2() &&
StringUtils.toUpperEnglish(name).startsWith("INFO")) {
@@ -104,11 +114,12 @@ public DbProcedure[] getProcedures() {
*
* @param meta the database meta data
* @param tableTypes the table types to read
+ * @throws SQLException on failure
*/
public void readTables(DatabaseMetaData meta, String[] tableTypes)
throws SQLException {
ResultSet rs = meta.getTables(null, name, null, tableTypes);
- ArrayList<DbTableOrView> list = New.arrayList();
+ ArrayList<DbTableOrView> list = new ArrayList<>();
while (rs.next()) {
DbTableOrView table = new DbTableOrView(this, rs);
if (contents.isOracle() && table.getName().indexOf('$') > 0) {
@@ -117,37 +128,46 @@ public void readTables(DatabaseMetaData meta, String[] tableTypes)
list.add(table);
}
rs.close();
- tables = new DbTableOrView[list.size()];
- list.toArray(tables);
+ tables = list.toArray(new DbTableOrView[0]);
if (tables.length < SysProperties.CONSOLE_MAX_TABLES_LIST_COLUMNS) {
- for (DbTableOrView tab : tables) {
- try {
- tab.readColumns(meta);
- } catch (SQLException e) {
- // MySQL:
- // View '...' references invalid table(s) or column(s)
- // or function(s) or definer/invoker of view
- // lack rights to use them HY000/1356
- // ignore
+ try (PreparedStatement ps = contents.isH2() ? prepareColumnsQueryH2(meta.getConnection()) : null) {
+ for (DbTableOrView tab : tables) {
+ try {
+ tab.readColumns(meta, ps);
+ } catch (SQLException e) {
+ // MySQL:
+ // View '...' references invalid table(s) or column(s)
+ // or function(s) or definer/invoker of view
+ // lack rights to use them HY000/1356
+ // ignore
+ }
}
}
}
}
+ private static PreparedStatement prepareColumnsQueryH2(Connection connection) throws SQLException {
+ try {
+ return connection.prepareStatement(COLUMNS_QUERY_H2_202);
+ } catch (SQLSyntaxErrorException ex) {
+ return connection.prepareStatement(COLUMNS_QUERY_H2_197);
+ }
+ }
+
/**
- * Read all procedures in the dataBase.
+ * Read all procedures in the database.
+ *
* @param meta the database meta data
* @throws SQLException Error while fetching procedures
*/
public void readProcedures(DatabaseMetaData meta) throws SQLException {
ResultSet rs = meta.getProcedures(null, name, null);
- ArrayList<DbProcedure> list = New.arrayList();
+ ArrayList<DbProcedure> list = Utils.newSmallArrayList();
while (rs.next()) {
list.add(new DbProcedure(this, rs));
}
rs.close();
- procedures = new DbProcedure[list.size()];
- list.toArray(procedures);
+ procedures = list.toArray(new DbProcedure[0]);
if (procedures.length < SysProperties.CONSOLE_MAX_PROCEDURES_LIST_COLUMNS) {
for (DbProcedure procedure : procedures) {
procedure.readParameters(meta);
diff --git a/h2/src/main/org/h2/bnf/context/DbTableOrView.java b/h2/src/main/org/h2/bnf/context/DbTableOrView.java
index cb09454968..e97ffe4385 100644
--- a/h2/src/main/org/h2/bnf/context/DbTableOrView.java
+++ b/h2/src/main/org/h2/bnf/context/DbTableOrView.java
@@ -1,15 +1,15 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.bnf.context;
import java.sql.DatabaseMetaData;
+import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
-import org.h2.util.New;
/**
* Contains meta data information about a table or a view.
@@ -89,17 +89,26 @@ public String getQuotedName() {
* Read the columns for this table from the database meta data.
*
* @param meta the database meta data
+ * @param ps prepared statement with custom query for H2 database, null for
+ * others
+ * @throws SQLException on failure
*/
- public void readColumns(DatabaseMetaData meta) throws SQLException {
- ResultSet rs = meta.getColumns(null, schema.name, name, null);
- ArrayList<DbColumn> list = New.arrayList();
+ public void readColumns(DatabaseMetaData meta, PreparedStatement ps) throws SQLException {
+ ResultSet rs;
+ if (schema.getContents().isH2()) {
+ ps.setString(1, schema.name);
+ ps.setString(2, name);
+ rs = ps.executeQuery();
+ } else {
+ rs = meta.getColumns(null, schema.name, name, null);
+ }
+ ArrayList<DbColumn> list = new ArrayList<>();
while (rs.next()) {
DbColumn column = DbColumn.getColumn(schema.getContents(), rs);
list.add(column);
}
rs.close();
- columns = new DbColumn[list.size()];
- list.toArray(columns);
+ columns = list.toArray(new DbColumn[0]);
}
}
diff --git a/h2/src/main/org/h2/bnf/context/package.html b/h2/src/main/org/h2/bnf/context/package.html
index aeec0f4c4d..0a6386fb30 100644
--- a/h2/src/main/org/h2/bnf/context/package.html
+++ b/h2/src/main/org/h2/bnf/context/package.html
@@ -1,7 +1,7 @@
diff --git a/h2/src/main/org/h2/bnf/package.html b/h2/src/main/org/h2/bnf/package.html
index 575434d3ed..36296736e3 100644
--- a/h2/src/main/org/h2/bnf/package.html
+++ b/h2/src/main/org/h2/bnf/package.html
@@ -1,7 +1,7 @@
diff --git a/h2/src/main/org/h2/command/Command.java b/h2/src/main/org/h2/command/Command.java
index 089a7110fd..f26fb686b8 100644
--- a/h2/src/main/org/h2/command/Command.java
+++ b/h2/src/main/org/h2/command/Command.java
@@ -1,22 +1,27 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command;
import java.sql.SQLException;
import java.util.ArrayList;
-
+import java.util.Set;
import org.h2.api.ErrorCode;
import org.h2.engine.Constants;
import org.h2.engine.Database;
+import org.h2.engine.DbObject;
+import org.h2.engine.Mode.CharPadding;
import org.h2.engine.Session;
+import org.h2.engine.SessionLocal;
import org.h2.expression.ParameterInterface;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.result.ResultInterface;
-import org.h2.util.MathUtils;
+import org.h2.result.ResultWithGeneratedKeys;
+import org.h2.result.ResultWithPaddedStrings;
+import org.h2.util.Utils;
/**
* Represents a SQL statement. This object is only used on the server side.
@@ -26,12 +31,12 @@ public abstract class Command implements CommandInterface {
/**
* The session.
*/
- protected final Session session;
+ protected final SessionLocal session;
/**
* The last start time.
*/
- protected long startTime;
+ protected long startTimeNanos;
/**
* The trace module.
@@ -47,8 +52,8 @@ public abstract class Command implements CommandInterface {
private boolean canReuse;
- Command(Parser parser, String sql) {
- this.session = parser.getSession();
+ Command(SessionLocal session, String sql) {
+ this.session = session;
this.sql = sql;
trace = session.getDatabase().getTrace(Trace.COMMAND);
}
@@ -95,12 +100,16 @@ public abstract class Command implements CommandInterface {
* Execute an updating statement (for example insert, delete, or update), if
* this is possible.
*
- * @return the update count
+ * @param generatedKeysRequest
+ * {@code false} if generated keys are not needed, {@code true} if
+ * generated keys should be configured automatically, {@code int[]}
+ * to specify column indices to return generated keys from, or
+ * {@code String[]} to specify column names to return generated keys
+ * from
+ * @return the update count and generated keys, if any
* @throws DbException if the command is not an updating statement
*/
- public int update() {
- throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY);
- }
+ public abstract ResultWithGeneratedKeys update(Object generatedKeysRequest);
/**
* Execute a query statement, if this is possible.
@@ -109,9 +118,7 @@ public int update() {
* @return the local result set
* @throws DbException if the command is not a query
*/
- public ResultInterface query(int maxrows) {
- throw DbException.get(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY);
- }
+ public abstract ResultInterface query(long maxrows);
@Override
public final ResultInterface getMetaData() {
@@ -122,8 +129,8 @@ public final ResultInterface getMetaData() {
* Start the stopwatch.
*/
void start() {
- if (trace.isInfoEnabled()) {
- startTime = System.currentTimeMillis();
+ if (trace.isInfoEnabled() || session.getDatabase().getQueryStatistics()) {
+ startTimeNanos = Utils.currentNanoTime();
}
}
@@ -143,59 +150,54 @@ protected void checkCanceled() {
}
}
- private void stop() {
- session.endStatement();
- session.setCurrentCommand(null);
- if (!isTransactional()) {
- session.commit(true);
- } else if (session.getAutoCommit()) {
+ @Override
+ public void stop() {
+ commitIfNonTransactional();
+ if (isTransactional() && session.getAutoCommit()) {
session.commit(false);
- } else if (session.getDatabase().isMultiThreaded()) {
- Database db = session.getDatabase();
- if (db != null) {
- if (db.getLockMode() == Constants.LOCK_MODE_READ_COMMITTED) {
- session.unlockReadLocks();
- }
- }
}
- if (trace.isInfoEnabled() && startTime > 0) {
- long time = System.currentTimeMillis() - startTime;
- if (time > Constants.SLOW_QUERY_LIMIT_MS) {
- trace.info("slow query: {0} ms", time);
+ if (trace.isInfoEnabled() && startTimeNanos != 0L) {
+ long timeMillis = (System.nanoTime() - startTimeNanos) / 1_000_000L;
+ if (timeMillis > Constants.SLOW_QUERY_LIMIT_MS) {
+ trace.info("slow query: {0} ms", timeMillis);
}
}
}
/**
* Execute a query and return the result.
- * This method prepares everything and calls {@link #query(int)} finally.
+ * This method prepares everything and calls {@link #query(long)} finally.
*
* @param maxrows the maximum number of rows to return
* @param scrollable if the result set must be scrollable (ignored)
* @return the result set
*/
@Override
- public ResultInterface executeQuery(int maxrows, boolean scrollable) {
- startTime = 0;
- long start = 0;
+ public ResultInterface executeQuery(long maxrows, boolean scrollable) {
+ startTimeNanos = 0L;
+ long start = 0L;
Database database = session.getDatabase();
- Object sync = database.isMultiThreaded() ? (Object) session : (Object) database;
session.waitIfExclusiveModeEnabled();
boolean callStop = true;
- boolean writing = !isReadOnly();
- if (writing) {
- while (!database.beforeWriting()) {
- // wait
- }
- }
- synchronized (sync) {
- session.setCurrentCommand(this);
+ //noinspection SynchronizationOnLocalVariableOrMethodParameter
+ synchronized (session) {
+ session.startStatementWithinTransaction(this);
+ Session oldSession = session.setThreadLocalSession();
try {
while (true) {
database.checkPowerOff();
try {
- return query(maxrows);
+ ResultInterface result = query(maxrows);
+ callStop = !result.isLazy();
+ if (database.getMode().charPadding == CharPadding.IN_RESULT_SETS) {
+ return ResultWithPaddedStrings.get(result);
+ }
+ return result;
} catch (DbException e) {
+ // cannot retry DDL
+ if (isCurrentCommandADefineCommand()) {
+ throw e;
+ }
start = filterConcurrentUpdate(e, start);
} catch (OutOfMemoryError e) {
callStop = false;
@@ -221,38 +223,38 @@ public ResultInterface executeQuery(int maxrows, boolean scrollable) {
database.checkPowerOff();
throw e;
} finally {
+ session.resetThreadLocalSession(oldSession);
+ session.endStatement();
if (callStop) {
stop();
}
- if (writing) {
- database.afterWriting();
- }
}
}
}
@Override
- public int executeUpdate() {
+ public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) {
long start = 0;
Database database = session.getDatabase();
- Object sync = database.isMultiThreaded() ? (Object) session : (Object) database;
session.waitIfExclusiveModeEnabled();
boolean callStop = true;
- boolean writing = !isReadOnly();
- if (writing) {
- while (!database.beforeWriting()) {
- // wait
- }
- }
- synchronized (sync) {
- Session.Savepoint rollback = session.setSavepoint();
- session.setCurrentCommand(this);
+ //noinspection SynchronizationOnLocalVariableOrMethodParameter
+ synchronized (session) {
+ commitIfNonTransactional();
+ SessionLocal.Savepoint rollback = session.setSavepoint();
+ session.startStatementWithinTransaction(this);
+ DbException ex = null;
+ Session oldSession = session.setThreadLocalSession();
try {
while (true) {
database.checkPowerOff();
try {
- return update();
+ return update(generatedKeysRequest);
} catch (DbException e) {
+ // cannot retry DDL
+ if (isCurrentCommandADefineCommand()) {
+ throw e;
+ }
start = filterConcurrentUpdate(e, start);
} catch (OutOfMemoryError e) {
callStop = false;
@@ -271,53 +273,57 @@ public int executeUpdate() {
database.shutdownImmediately();
throw e;
}
- database.checkPowerOff();
- if (s.getErrorCode() == ErrorCode.DEADLOCK_1) {
- session.rollback();
- } else {
- session.rollbackTo(rollback, false);
+ try {
+ database.checkPowerOff();
+ if (s.getErrorCode() == ErrorCode.DEADLOCK_1) {
+ session.rollback();
+ } else {
+ session.rollbackTo(rollback);
+ }
+ } catch (Throwable nested) {
+ e.addSuppressed(nested);
}
+ ex = e;
throw e;
} finally {
+ session.resetThreadLocalSession(oldSession);
try {
+ session.endStatement();
if (callStop) {
stop();
}
- } finally {
- if (writing) {
- database.afterWriting();
+ } catch (Throwable nested) {
+ if (ex == null) {
+ throw nested;
+ } else {
+ ex.addSuppressed(nested);
}
}
}
}
}
+ private void commitIfNonTransactional() {
+ if (!isTransactional()) {
+ boolean autoCommit = session.getAutoCommit();
+ session.commit(true);
+ if (!autoCommit && session.getAutoCommit()) {
+ session.begin();
+ }
+ }
+ }
+
private long filterConcurrentUpdate(DbException e, long start) {
- if (e.getErrorCode() != ErrorCode.CONCURRENT_UPDATE_1) {
+ int errorCode = e.getErrorCode();
+ if (errorCode != ErrorCode.CONCURRENT_UPDATE_1 && errorCode != ErrorCode.ROW_NOT_FOUND_IN_PRIMARY_INDEX
+ && errorCode != ErrorCode.ROW_NOT_FOUND_WHEN_DELETING_1) {
throw e;
}
- long now = System.nanoTime() / 1000000;
- if (start != 0 && now - start > session.getLockTimeout()) {
- throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, e.getCause(), "");
- }
- Database database = session.getDatabase();
- int sleep = 1 + MathUtils.randomInt(10);
- while (true) {
- try {
- if (database.isMultiThreaded()) {
- Thread.sleep(sleep);
- } else {
- database.wait(sleep);
- }
- } catch (InterruptedException e1) {
- // ignore
- }
- long slept = System.nanoTime() / 1000000 - now;
- if (slept >= sleep) {
- break;
- }
+ long now = Utils.currentNanoTime();
+ if (start != 0L && now - start > session.getLockTimeout() * 1_000_000L) {
+ throw DbException.get(ErrorCode.LOCK_TIMEOUT_1, e);
}
- return start == 0 ? now : start;
+ return start == 0L ? now : start;
}
@Override
@@ -327,7 +333,7 @@ public void close() {
@Override
public void cancel() {
- this.cancel = true;
+ cancel = true;
}
@Override
@@ -355,10 +361,21 @@ public boolean canReuse() {
public void reuse() {
canReuse = false;
ArrayList<? extends ParameterInterface> parameters = getParameters();
- for (int i = 0, size = parameters.size(); i < size; i++) {
- ParameterInterface param = parameters.get(i);
+ for (ParameterInterface param : parameters) {
param.setValue(null, true);
}
}
+ public void setCanReuse(boolean canReuse) {
+ this.canReuse = canReuse;
+ }
+
+ public abstract Set<DbObject> getDependencies();
+
+ /**
+ * Whether the command we just tried to execute was a DefineCommand (i.e. DDL).
+ *
+ * @return true if it was a DefineCommand
+ */
+ protected abstract boolean isCurrentCommandADefineCommand();
}
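
The new update(Object generatedKeysRequest) contract folds all three JDBC ways of asking for generated keys into a single argument, as the javadoc above describes. A hedged sketch of how a caller-side shim might normalize the JDBC overloads into that object; the helper and its names are illustrative, not the actual driver code:

    import java.sql.Statement;

    final class GeneratedKeysRequests {

        // Statement.executeUpdate(sql, int autoGeneratedKeys)
        static Object fromFlag(int autoGeneratedKeys) {
            return autoGeneratedKeys == Statement.RETURN_GENERATED_KEYS ? Boolean.TRUE : Boolean.FALSE;
        }

        // Statement.executeUpdate(sql, int[] columnIndexes), 1-based indexes
        static Object fromIndexes(int[] columnIndexes) {
            return columnIndexes != null && columnIndexes.length > 0 ? columnIndexes : Boolean.FALSE;
        }

        // Statement.executeUpdate(sql, String[] columnNames)
        static Object fromNames(String[] columnNames) {
            return columnNames != null && columnNames.length > 0 ? columnNames : Boolean.FALSE;
        }
    }
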
diff --git a/h2/src/main/org/h2/command/CommandContainer.java b/h2/src/main/org/h2/command/CommandContainer.java
index 0c121aae67..30fcf5bc53 100644
--- a/h2/src/main/org/h2/command/CommandContainer.java
+++ b/h2/src/main/org/h2/command/CommandContainer.java
@@ -1,30 +1,117 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command;
import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
import org.h2.api.DatabaseEventListener;
+import org.h2.api.ErrorCode;
+import org.h2.command.ddl.DefineCommand;
+import org.h2.command.dml.DataChangeStatement;
+import org.h2.engine.Database;
+import org.h2.engine.DbObject;
+import org.h2.engine.DbSettings;
+import org.h2.engine.SessionLocal;
+import org.h2.expression.Expression;
+import org.h2.expression.ExpressionColumn;
import org.h2.expression.Parameter;
import org.h2.expression.ParameterInterface;
+import org.h2.index.Index;
+import org.h2.message.DbException;
+import org.h2.result.LocalResult;
import org.h2.result.ResultInterface;
+import org.h2.result.ResultTarget;
+import org.h2.result.ResultWithGeneratedKeys;
+import org.h2.table.Column;
+import org.h2.table.DataChangeDeltaTable.ResultOption;
+import org.h2.table.Table;
+import org.h2.table.TableView;
+import org.h2.util.StringUtils;
+import org.h2.util.Utils;
import org.h2.value.Value;
-import org.h2.value.ValueNull;
/**
* Represents a single SQL statement.
* It wraps a prepared statement.
*/
-class CommandContainer extends Command {
+public class CommandContainer extends Command {
+
+ /**
+ * Collector of generated keys.
+ */
+ private static final class GeneratedKeysCollector implements ResultTarget {
+
+ private final int[] indexes;
+ private final LocalResult result;
+
+ GeneratedKeysCollector(int[] indexes, LocalResult result) {
+ this.indexes = indexes;
+ this.result = result;
+ }
+
+ @Override
+ public void limitsWereApplied() {
+ // Nothing to do
+ }
+
+ @Override
+ public long getRowCount() {
+ // Not required
+ return 0L;
+ }
+
+ @Override
+ public void addRow(Value... values) {
+ int length = indexes.length;
+ Value[] row = new Value[length];
+ for (int i = 0; i < length; i++) {
+ row[i] = values[indexes[i]];
+ }
+ result.addRow(row);
+ }
+
+ }
private Prepared prepared;
private boolean readOnlyKnown;
private boolean readOnly;
- CommandContainer(Parser parser, String sql, Prepared prepared) {
- super(parser, sql);
+ /**
+ * Clears CTE views for a specified statement.
+ *
+ * @param session the session
+ * @param prepared prepared statement
+ */
+ static void clearCTE(SessionLocal session, Prepared prepared) {
+ List<TableView> cteCleanups = prepared.getCteCleanups();
+ if (cteCleanups != null) {
+ clearCTE(session, cteCleanups);
+ }
+ }
+
+ /**
+ * Clears CTE views.
+ *
+ * @param session the session
+ * @param views list of views
+ */
+ static void clearCTE(SessionLocal session, List<TableView> views) {
+ for (TableView view : views) {
+ // check if the view was previously deleted, as its name is set
+ // to null
+ if (view.getName() != null) {
+ session.removeLocalTempTable(view);
+ }
+ }
+ }
+
+ public CommandContainer(SessionLocal session, String sql, Prepared prepared) {
+ super(session, sql);
prepared.setCommand(this);
this.prepared = prepared;
}
@@ -49,13 +136,14 @@ private void recompileIfRequired() {
// TODO test with 'always recompile'
prepared.setModificationMetaId(0);
String sql = prepared.getSQL();
+ ArrayList<Token> tokens = prepared.getSQLTokens();
ArrayList<Parameter> oldParams = prepared.getParameters();
Parser parser = new Parser(session);
- prepared = parser.parse(sql);
+ prepared = parser.parse(sql, tokens);
long mod = prepared.getModificationMetaId();
prepared.setModificationMetaId(0);
ArrayList<Parameter> newParams = prepared.getParameters();
- for (int i = 0, size = newParams.size(); i < size; i++) {
+ for (int i = 0, size = Math.min(newParams.size(), oldParams.size()); i < size; i++) {
Parameter old = oldParams.get(i);
if (old.isValueSet()) {
Value v = old.getValue(session);
@@ -69,30 +157,121 @@ private void recompileIfRequired() {
}
@Override
- public int update() {
+ public ResultWithGeneratedKeys update(Object generatedKeysRequest) {
recompileIfRequired();
setProgress(DatabaseEventListener.STATE_STATEMENT_START);
start();
- session.setLastScopeIdentity(ValueNull.INSTANCE);
prepared.checkParameters();
- int updateCount = prepared.update();
- prepared.trace(startTime, updateCount);
+ ResultWithGeneratedKeys result;
+ if (generatedKeysRequest != null && !Boolean.FALSE.equals(generatedKeysRequest)) {
+ if (prepared instanceof DataChangeStatement && prepared.getType() != CommandInterface.DELETE) {
+ result = executeUpdateWithGeneratedKeys((DataChangeStatement) prepared,
+ generatedKeysRequest);
+ } else {
+ result = new ResultWithGeneratedKeys.WithKeys(prepared.update(), new LocalResult());
+ }
+ } else {
+ result = ResultWithGeneratedKeys.of(prepared.update());
+ }
+ prepared.trace(startTimeNanos, result.getUpdateCount());
setProgress(DatabaseEventListener.STATE_STATEMENT_END);
- return updateCount;
+ return result;
+ }
+
+ private ResultWithGeneratedKeys executeUpdateWithGeneratedKeys(DataChangeStatement statement,
+ Object generatedKeysRequest) {
+ Database db = session.getDatabase();
+ Table table = statement.getTable();
+ ArrayList<ExpressionColumn> expressionColumns;
+ if (Boolean.TRUE.equals(generatedKeysRequest)) {
+ expressionColumns = Utils.newSmallArrayList();
+ Column[] columns = table.getColumns();
+ Index primaryKey = table.findPrimaryKey();
+ for (Column column : columns) {
+ Expression e;
+ if (column.isIdentity()
+ || ((e = column.getEffectiveDefaultExpression()) != null && !e.isConstant())
+ || (primaryKey != null && primaryKey.getColumnIndex(column) >= 0)) {
+ expressionColumns.add(new ExpressionColumn(db, column));
+ }
+ }
+ } else if (generatedKeysRequest instanceof int[]) {
+ int[] indexes = (int[]) generatedKeysRequest;
+ Column[] columns = table.getColumns();
+ int cnt = columns.length;
+ expressionColumns = new ArrayList<>(indexes.length);
+ for (int idx : indexes) {
+ if (idx < 1 || idx > cnt) {
+ throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "Index: " + idx);
+ }
+ expressionColumns.add(new ExpressionColumn(db, columns[idx - 1]));
+ }
+ } else if (generatedKeysRequest instanceof String[]) {
+ String[] names = (String[]) generatedKeysRequest;
+ expressionColumns = new ArrayList<>(names.length);
+ for (String name : names) {
+ Column column = table.findColumn(name);
+ if (column == null) {
+ DbSettings settings = db.getSettings();
+ if (settings.databaseToUpper) {
+ column = table.findColumn(StringUtils.toUpperEnglish(name));
+ } else if (settings.databaseToLower) {
+ column = table.findColumn(StringUtils.toLowerEnglish(name));
+ }
+ search: if (column == null) {
+ for (Column c : table.getColumns()) {
+ if (c.getName().equalsIgnoreCase(name)) {
+ column = c;
+ break search;
+ }
+ }
+ throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, name);
+ }
+ }
+ expressionColumns.add(new ExpressionColumn(db, column));
+ }
+ } else {
+ throw DbException.getInternalError();
+ }
+ int columnCount = expressionColumns.size();
+ if (columnCount == 0) {
+ return new ResultWithGeneratedKeys.WithKeys(statement.update(), new LocalResult());
+ }
+ int[] indexes = new int[columnCount];
+ ExpressionColumn[] expressions = expressionColumns.toArray(new ExpressionColumn[0]);
+ for (int i = 0; i < columnCount; i++) {
+ indexes[i] = expressions[i].getColumn().getColumnId();
+ }
+ LocalResult result = new LocalResult(session, expressions, columnCount, columnCount);
+ return new ResultWithGeneratedKeys.WithKeys(
+ statement.update(new GeneratedKeysCollector(indexes, result), ResultOption.FINAL), result);
}
@Override
- public ResultInterface query(int maxrows) {
+ public ResultInterface query(long maxrows) {
recompileIfRequired();
setProgress(DatabaseEventListener.STATE_STATEMENT_START);
start();
prepared.checkParameters();
ResultInterface result = prepared.query(maxrows);
- prepared.trace(startTime, result.getRowCount());
+ prepared.trace(startTimeNanos, result.isLazy() ? 0 : result.getRowCount());
setProgress(DatabaseEventListener.STATE_STATEMENT_END);
return result;
}
+ @Override
+ public void stop() {
+ super.stop();
+ // Clean up after the command was run in the session.
+ // Must restart query (and dependency construction) to reuse.
+ clearCTE(session, prepared);
+ }
+
+ @Override
+ public boolean canReuse() {
+ return super.canReuse() && prepared.getCteCleanups() == null;
+ }
+
@Override
public boolean isReadOnly() {
if (!readOnlyKnown) {
@@ -117,4 +296,22 @@ public int getCommandType() {
return prepared.getType();
}
+ /**
+ * Clean up any associated CTE.
+ */
+ void clearCTE() {
+ clearCTE(session, prepared);
+ }
+
+ @Override
+ public Set<DbObject> getDependencies() {
+ HashSet<DbObject> dependencies = new HashSet<>();
+ prepared.collectDependencies(dependencies);
+ return dependencies;
+ }
+
+ @Override
+ protected boolean isCurrentCommandADefineCommand() {
+ return prepared instanceof DefineCommand;
+ }
}
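
The name resolution in executeUpdateWithGeneratedKeys proceeds in three steps: an exact match, a case-folded match according to the databaseToUpper/databaseToLower settings, and finally a linear case-insensitive scan. A simplified sketch of that order over a plain String array (a hypothetical helper, not the H2 Table API):

    import java.util.Locale;

    final class ColumnLookup {

        /** Returns the column index, or a negative value if not found. */
        static int resolve(String[] columns, String name, boolean toUpper, boolean toLower) {
            int i = indexOf(columns, name); // 1. exact match
            if (i < 0 && toUpper) {
                i = indexOf(columns, name.toUpperCase(Locale.ENGLISH)); // 2. folded match
            } else if (i < 0 && toLower) {
                i = indexOf(columns, name.toLowerCase(Locale.ENGLISH));
            }
            if (i < 0) { // 3. last resort: case-insensitive scan
                for (int j = 0; j < columns.length; j++) {
                    if (columns[j].equalsIgnoreCase(name)) {
                        return j;
                    }
                }
            }
            return i;
        }

        private static int indexOf(String[] columns, String name) {
            for (int j = 0; j < columns.length; j++) {
                if (columns[j].equals(name)) {
                    return j;
                }
            }
            return -1;
        }
    }
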
diff --git a/h2/src/main/org/h2/command/CommandInterface.java b/h2/src/main/org/h2/command/CommandInterface.java
index de5720c231..fbe1223ad7 100644
--- a/h2/src/main/org/h2/command/CommandInterface.java
+++ b/h2/src/main/org/h2/command/CommandInterface.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command;
@@ -8,11 +8,12 @@
import java.util.ArrayList;
import org.h2.expression.ParameterInterface;
import org.h2.result.ResultInterface;
+import org.h2.result.ResultWithGeneratedKeys;
/**
* Represents a SQL statement.
*/
-public interface CommandInterface {
+public interface CommandInterface extends AutoCloseable {
/**
* The type for unknown statement.
@@ -27,47 +28,48 @@ public interface CommandInterface {
int ALTER_INDEX_RENAME = 1;
/**
- * The type of a ALTER SCHEMA RENAME statement.
+ * The type of an ALTER SCHEMA RENAME statement.
*/
int ALTER_SCHEMA_RENAME = 2;
/**
- * The type of a ALTER TABLE ADD CHECK statement.
+ * The type of an ALTER TABLE ADD CHECK statement.
*/
int ALTER_TABLE_ADD_CONSTRAINT_CHECK = 3;
/**
- * The type of a ALTER TABLE ADD UNIQUE statement.
+ * The type of an ALTER TABLE ADD UNIQUE statement.
*/
int ALTER_TABLE_ADD_CONSTRAINT_UNIQUE = 4;
/**
- * The type of a ALTER TABLE ADD FOREIGN KEY statement.
+ * The type of an ALTER TABLE ADD FOREIGN KEY statement.
*/
int ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL = 5;
/**
- * The type of a ALTER TABLE ADD PRIMARY KEY statement.
+ * The type of an ALTER TABLE ADD PRIMARY KEY statement.
*/
int ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY = 6;
/**
- * The type of a ALTER TABLE ADD statement.
+ * The type of an ALTER TABLE ADD statement.
*/
int ALTER_TABLE_ADD_COLUMN = 7;
/**
- * The type of a ALTER TABLE ALTER COLUMN SET NOT NULL statement.
+ * The type of an ALTER TABLE ALTER COLUMN SET NOT NULL statement.
*/
int ALTER_TABLE_ALTER_COLUMN_NOT_NULL = 8;
/**
- * The type of a ALTER TABLE ALTER COLUMN SET NULL statement.
+ * The type of an ALTER TABLE ALTER COLUMN DROP NOT NULL statement.
*/
- int ALTER_TABLE_ALTER_COLUMN_NULL = 9;
+ int ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL = 9;
/**
- * The type of a ALTER TABLE ALTER COLUMN SET DEFAULT statement.
+ * The type of ALTER TABLE ALTER COLUMN SET DEFAULT and ALTER TABLE ALTER
+ * COLUMN DROP DEFAULT statements.
*/
int ALTER_TABLE_ALTER_COLUMN_DEFAULT = 10;
@@ -78,52 +80,52 @@ public interface CommandInterface {
int ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE = 11;
/**
- * The type of a ALTER TABLE DROP COLUMN statement.
+ * The type of an ALTER TABLE DROP COLUMN statement.
*/
int ALTER_TABLE_DROP_COLUMN = 12;
/**
- * The type of a ALTER TABLE ALTER COLUMN SELECTIVITY statement.
+ * The type of an ALTER TABLE ALTER COLUMN SELECTIVITY statement.
*/
int ALTER_TABLE_ALTER_COLUMN_SELECTIVITY = 13;
/**
- * The type of a ALTER TABLE DROP CONSTRAINT statement.
+ * The type of an ALTER TABLE DROP CONSTRAINT statement.
*/
int ALTER_TABLE_DROP_CONSTRAINT = 14;
/**
- * The type of a ALTER TABLE RENAME statement.
+ * The type of an ALTER TABLE RENAME statement.
*/
int ALTER_TABLE_RENAME = 15;
/**
- * The type of a ALTER TABLE ALTER COLUMN RENAME statement.
+ * The type of an ALTER TABLE ALTER COLUMN RENAME statement.
*/
int ALTER_TABLE_ALTER_COLUMN_RENAME = 16;
/**
- * The type of a ALTER USER ADMIN statement.
+ * The type of an ALTER USER ADMIN statement.
*/
int ALTER_USER_ADMIN = 17;
/**
- * The type of a ALTER USER RENAME statement.
+ * The type of an ALTER USER RENAME statement.
*/
int ALTER_USER_RENAME = 18;
/**
- * The type of a ALTER USER SET PASSWORD statement.
+ * The type of an ALTER USER SET PASSWORD statement.
*/
int ALTER_USER_SET_PASSWORD = 19;
/**
- * The type of a ALTER VIEW statement.
+ * The type of an ALTER VIEW statement.
*/
int ALTER_VIEW = 20;
/**
- * The type of a ANALYZE statement.
+ * The type of an ANALYZE statement.
*/
int ANALYZE = 21;
@@ -290,12 +292,12 @@ public interface CommandInterface {
// dml operations
/**
- * The type of a ALTER SEQUENCE statement.
+ * The type of an ALTER SEQUENCE statement.
*/
int ALTER_SEQUENCE = 54;
/**
- * The type of a ALTER TABLE SET REFERENTIAL_INTEGRITY statement.
+ * The type of an ALTER TABLE SET REFERENTIAL_INTEGRITY statement.
*/
int ALTER_TABLE_SET_REFERENTIAL_INTEGRITY = 55;
@@ -315,17 +317,17 @@ public interface CommandInterface {
int DELETE = 58;
/**
- * The type of a EXECUTE statement.
+ * The type of an EXECUTE statement.
*/
int EXECUTE = 59;
/**
- * The type of a EXPLAIN statement.
+ * The type of an EXPLAIN statement.
*/
int EXPLAIN = 60;
/**
- * The type of a INSERT statement.
+ * The type of an INSERT statement.
*/
int INSERT = 61;
@@ -365,7 +367,7 @@ public interface CommandInterface {
int SET = 67;
/**
- * The type of a UPDATE statement.
+ * The type of an UPDATE statement.
*/
int UPDATE = 68;
@@ -451,6 +453,94 @@ public interface CommandInterface {
*/
int SHUTDOWN_DEFRAG = 84;
+ /**
+ * The type of an ALTER TABLE RENAME CONSTRAINT statement.
+ */
+ int ALTER_TABLE_RENAME_CONSTRAINT = 85;
+
+ /**
+ * The type of an EXPLAIN ANALYZE statement.
+ */
+ int EXPLAIN_ANALYZE = 86;
+
+ /**
+ * The type of an ALTER TABLE ALTER COLUMN SET INVISIBLE statement.
+ */
+ int ALTER_TABLE_ALTER_COLUMN_VISIBILITY = 87;
+
+ /**
+ * The type of a CREATE SYNONYM statement.
+ */
+ int CREATE_SYNONYM = 88;
+
+ /**
+ * The type of a DROP SYNONYM statement.
+ */
+ int DROP_SYNONYM = 89;
+
+ /**
+ * The type of an ALTER TABLE ALTER COLUMN SET ON UPDATE statement.
+ */
+ int ALTER_TABLE_ALTER_COLUMN_ON_UPDATE = 90;
+
+ /**
+ * The type of an EXECUTE IMMEDIATELY statement.
+ */
+ int EXECUTE_IMMEDIATELY = 91;
+
+ /**
+ * The type of an ALTER DOMAIN ADD CONSTRAINT statement.
+ */
+ int ALTER_DOMAIN_ADD_CONSTRAINT = 92;
+
+ /**
+ * The type of an ALTER DOMAIN DROP CONSTRAINT statement.
+ */
+ int ALTER_DOMAIN_DROP_CONSTRAINT = 93;
+
+ /**
+ * The type of ALTER DOMAIN SET DEFAULT and ALTER DOMAIN DROP DEFAULT
+ * statements.
+ */
+ int ALTER_DOMAIN_DEFAULT = 94;
+
+ /**
+ * The type of ALTER DOMAIN SET ON UPDATE and ALTER DOMAIN DROP ON UPDATE
+ * statements.
+ */
+ int ALTER_DOMAIN_ON_UPDATE = 95;
+
+ /**
+ * The type of an ALTER DOMAIN RENAME statement.
+ */
+ int ALTER_DOMAIN_RENAME = 96;
+
+ /**
+ * The type of a HELP statement.
+ */
+ int HELP = 97;
+
+ /**
+ * The type of an ALTER TABLE ALTER COLUMN DROP EXPRESSION statement.
+ */
+ int ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION = 98;
+
+ /**
+ * The type of an ALTER TABLE ALTER COLUMN DROP IDENTITY statement.
+ */
+ int ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY = 99;
+
+ /**
+ * The type of ALTER TABLE ALTER COLUMN SET DEFAULT ON NULL and ALTER TABLE
+ * ALTER COLUMN DROP DEFAULT ON NULL statements.
+ */
+ int ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL = 100;
+
+ /**
+ * The type of an ALTER DOMAIN RENAME CONSTRAINT statement.
+ */
+ int ALTER_DOMAIN_RENAME_CONSTRAINT = 101;
+
/**
* Get command type.
*
@@ -479,18 +569,31 @@ public interface CommandInterface {
* @param scrollable if the result set must be scrollable
* @return the result
*/
- ResultInterface executeQuery(int maxRows, boolean scrollable);
+ ResultInterface executeQuery(long maxRows, boolean scrollable);
/**
* Execute the statement
*
- * @return the update count
+ * @param generatedKeysRequest
+ * {@code null} or {@code false} if generated keys are not
+ * needed, {@code true} if generated keys should be configured
+ * automatically, {@code int[]} to specify column indices to
+ * return generated keys from, or {@code String[]} to specify
+ * column names to return generated keys from
+ *
+ * @return the update count and generated keys, if any
*/
- int executeUpdate();
+ ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest);
+
+ /**
+ * Stop the command execution and release all locks and resources.
+ */
+ void stop();
/**
* Close the statement.
*/
+ @Override
void close();
/**
@@ -504,4 +607,5 @@ public interface CommandInterface {
* @return the empty result
*/
ResultInterface getMetaData();
+
}
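
Because CommandInterface now extends AutoCloseable, a command can be scoped with try-with-resources. A minimal usage sketch, assuming Session.prepareCommand(sql, fetchSize) as the acquisition point (the exact acquisition path varies by session type):

    import org.h2.command.CommandInterface;
    import org.h2.engine.Session;
    import org.h2.result.ResultInterface;

    final class CommandScope {

        /** Counts the rows of a query; the command is closed even on failure. */
        static long countRows(Session session, String sql) {
            try (CommandInterface command = session.prepareCommand(sql, Integer.MAX_VALUE)) {
                ResultInterface result = command.executeQuery(0L, false);
                long rows = 0;
                while (result.next()) {
                    rows++;
                }
                result.close();
                return rows;
            }
        }
    }
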
diff --git a/h2/src/main/org/h2/command/CommandList.java b/h2/src/main/org/h2/command/CommandList.java
index 3255c0f734..f3d17e1162 100644
--- a/h2/src/main/org/h2/command/CommandList.java
+++ b/h2/src/main/org/h2/command/CommandList.java
@@ -1,56 +1,90 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command;
import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Set;
+import org.h2.engine.DbObject;
+import org.h2.engine.SessionLocal;
+import org.h2.expression.Parameter;
import org.h2.expression.ParameterInterface;
import org.h2.result.ResultInterface;
+import org.h2.result.ResultWithGeneratedKeys;
/**
* Represents a list of SQL statements.
*/
class CommandList extends Command {
- private final Command command;
- private final String remaining;
+ private CommandContainer command;
+ private final ArrayList<Prepared> commands;
+ private final ArrayList<Parameter> parameters;
+ private String remaining;
+ private Command remainingCommand;
- CommandList(Parser parser, String sql, Command c, String remaining) {
- super(parser, sql);
- this.command = c;
+ CommandList(SessionLocal session, String sql, CommandContainer command, ArrayList<Prepared> commands,
+ ArrayList<Parameter> parameters, String remaining) {
+ super(session, sql);
+ this.command = command;
+ this.commands = commands;
+ this.parameters = parameters;
this.remaining = remaining;
}
@Override
public ArrayList<? extends ParameterInterface> getParameters() {
- return command.getParameters();
+ return parameters;
}
private void executeRemaining() {
- Command remainingCommand = session.prepareLocal(remaining);
- if (remainingCommand.isQuery()) {
- remainingCommand.query(0);
- } else {
- remainingCommand.update();
+ for (Prepared prepared : commands) {
+ prepared.prepare();
+ if (prepared.isQuery()) {
+ prepared.query(0);
+ } else {
+ prepared.update();
+ }
+ }
+ if (remaining != null) {
+ remainingCommand = session.prepareLocal(remaining);
+ remaining = null;
+ if (remainingCommand.isQuery()) {
+ remainingCommand.query(0);
+ } else {
+ remainingCommand.update(null);
+ }
}
}
@Override
- public int update() {
- int updateCount = command.executeUpdate();
+ public ResultWithGeneratedKeys update(Object generatedKeysRequest) {
+ ResultWithGeneratedKeys result = command.executeUpdate(null);
executeRemaining();
- return updateCount;
+ return result;
}
@Override
- public ResultInterface query(int maxrows) {
+ public ResultInterface query(long maxrows) {
ResultInterface result = command.query(maxrows);
executeRemaining();
return result;
}
+ @Override
+ public void stop() {
+ command.stop();
+ for (Prepared prepared : commands) {
+ CommandContainer.clearCTE(session, prepared);
+ }
+ if (remainingCommand != null) {
+ remainingCommand.stop();
+ }
+ }
+
@Override
public boolean isQuery() {
return command.isQuery();
@@ -76,4 +110,17 @@ public int getCommandType() {
return command.getCommandType();
}
+ @Override
+ public Set<DbObject> getDependencies() {
+ HashSet<DbObject> dependencies = new HashSet<>();
+ for (Prepared prepared : commands) {
+ prepared.collectDependencies(dependencies);
+ }
+ return dependencies;
+ }
+
+ @Override
+ protected boolean isCurrentCommandADefineCommand() {
+ return command.isCurrentCommandADefineCommand();
+ }
}
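
The reworked CommandList keeps the long-standing contract for multi-statement SQL: only the head statement's result reaches the caller, and the trailing statements run purely for their side effects. A compact, purely illustrative sketch of that shape:

    import java.util.List;
    import java.util.function.Supplier;

    final class HeadThenTail {

        /** Runs the head for its result, then the tail for side effects only. */
        static <T> T execute(Supplier<T> head, List<Runnable> tail) {
            T result = head.get();
            for (Runnable statement : tail) {
                statement.run(); // trailing results are discarded
            }
            return result;
        }
    }
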
diff --git a/h2/src/main/org/h2/command/CommandRemote.java b/h2/src/main/org/h2/command/CommandRemote.java
index 04ba6feceb..7807ef4b7a 100644
--- a/h2/src/main/org/h2/command/CommandRemote.java
+++ b/h2/src/main/org/h2/command/CommandRemote.java
@@ -1,12 +1,13 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command;
import java.io.IOException;
import java.util.ArrayList;
+import org.h2.engine.GeneratedKeysMode;
import org.h2.engine.SessionRemote;
import org.h2.engine.SysProperties;
import org.h2.expression.ParameterInterface;
@@ -15,9 +16,12 @@
import org.h2.message.Trace;
import org.h2.result.ResultInterface;
import org.h2.result.ResultRemote;
-import org.h2.util.New;
+import org.h2.result.ResultWithGeneratedKeys;
+import org.h2.util.Utils;
import org.h2.value.Transfer;
import org.h2.value.Value;
+import org.h2.value.ValueLob;
+import org.h2.value.ValueNull;
/**
* Represents the client-side part of a SQL statement.
@@ -33,6 +37,7 @@ public class CommandRemote implements CommandInterface {
private SessionRemote session;
private int id;
private boolean isQuery;
+ private int cmdType = UNKNOWN;
private boolean readonly;
private final int created;
@@ -41,7 +46,7 @@ public CommandRemote(SessionRemote session,
this.transferList = transferList;
trace = session.getTrace();
this.sql = sql;
- parameters = New.arrayList();
+ parameters = Utils.newSmallArrayList();
prepare(session, true);
// set session late because prepare might fail - in this case we don't
// need to close the object
@@ -50,16 +55,21 @@ public CommandRemote(SessionRemote session,
created = session.getLastReconnect();
}
+ @Override
+ public void stop() {
+ // Ignore
+ }
+
private void prepare(SessionRemote s, boolean createParams) {
id = s.getNextId();
for (int i = 0, count = 0; i < transferList.size(); i++) {
try {
Transfer transfer = transferList.get(i);
+
if (createParams) {
- s.traceOperation("SESSION_PREPARE_READ_PARAMS", id);
- transfer.
- writeInt(SessionRemote.SESSION_PREPARE_READ_PARAMS).
- writeInt(id).writeString(sql);
+ s.traceOperation("SESSION_PREPARE_READ_PARAMS2", id);
+ transfer.writeInt(SessionRemote.SESSION_PREPARE_READ_PARAMS2)
+ .writeInt(id).writeString(sql);
} else {
s.traceOperation("SESSION_PREPARE", id);
transfer.writeInt(SessionRemote.SESSION_PREPARE).
@@ -68,6 +78,9 @@ private void prepare(SessionRemote s, boolean createParams) {
s.done(transfer);
isQuery = transfer.readBoolean();
readonly = transfer.readBoolean();
+
+ cmdType = createParams ? transfer.readInt() : UNKNOWN;
+
int paramCount = transfer.readInt();
if (createParams) {
parameters.clear();
@@ -135,7 +148,7 @@ public ResultInterface getMetaData() {
}
@Override
- public ResultInterface executeQuery(int maxRows, boolean scrollable) {
+ public ResultInterface executeQuery(long maxRows, boolean scrollable) {
checkParameters();
synchronized (session) {
int objectId = session.getNextId();
@@ -145,8 +158,8 @@ public ResultInterface executeQuery(int maxRows, boolean scrollable) {
Transfer transfer = transferList.get(i);
try {
session.traceOperation("COMMAND_EXECUTE_QUERY", id);
- transfer.writeInt(SessionRemote.COMMAND_EXECUTE_QUERY).
- writeInt(id).writeInt(objectId).writeInt(maxRows);
+ transfer.writeInt(SessionRemote.COMMAND_EXECUTE_QUERY).writeInt(id).writeInt(objectId);
+ transfer.writeRowCount(maxRows);
int fetch;
if (session.isClustered() || scrollable) {
fetch = Integer.MAX_VALUE;
@@ -176,10 +189,14 @@ public ResultInterface executeQuery(int maxRows, boolean scrollable) {
}
@Override
- public int executeUpdate() {
+ public ResultWithGeneratedKeys executeUpdate(Object generatedKeysRequest) {
checkParameters();
+ int generatedKeysMode = GeneratedKeysMode.valueOf(generatedKeysRequest);
+ boolean readGeneratedKeys = generatedKeysMode != GeneratedKeysMode.NONE;
+ int objectId = readGeneratedKeys ? session.getNextId() : 0;
synchronized (session) {
- int updateCount = 0;
+ long updateCount = 0L;
+ ResultRemote generatedKeys = null;
boolean autoCommit = false;
for (int i = 0, count = 0; i < transferList.size(); i++) {
prepareIfRequired();
@@ -188,9 +205,36 @@ public int executeUpdate() {
session.traceOperation("COMMAND_EXECUTE_UPDATE", id);
transfer.writeInt(SessionRemote.COMMAND_EXECUTE_UPDATE).writeInt(id);
sendParameters(transfer);
+ transfer.writeInt(generatedKeysMode);
+ switch (generatedKeysMode) {
+ case GeneratedKeysMode.COLUMN_NUMBERS: {
+ int[] keys = (int[]) generatedKeysRequest;
+ transfer.writeInt(keys.length);
+ for (int key : keys) {
+ transfer.writeInt(key);
+ }
+ break;
+ }
+ case GeneratedKeysMode.COLUMN_NAMES: {
+ String[] keys = (String[]) generatedKeysRequest;
+ transfer.writeInt(keys.length);
+ for (String key : keys) {
+ transfer.writeString(key);
+ }
+ break;
+ }
+ }
session.done(transfer);
- updateCount = transfer.readInt();
+ updateCount = transfer.readRowCount();
autoCommit = transfer.readBoolean();
+ if (readGeneratedKeys) {
+ int columnCount = transfer.readInt();
+ if (generatedKeys != null) {
+ generatedKeys.close();
+ generatedKeys = null;
+ }
+ generatedKeys = new ResultRemote(session, transfer, objectId, columnCount, Integer.MAX_VALUE);
+ }
} catch (IOException e) {
session.removeServer(e, i--, ++count);
}
@@ -198,13 +242,18 @@ public int executeUpdate() {
session.setAutoCommitFromServer(autoCommit);
session.autoCommitIfCluster();
session.readSessionState();
- return updateCount;
+ if (generatedKeys != null) {
+ return new ResultWithGeneratedKeys.WithKeys(updateCount, generatedKeys);
+ }
+ return ResultWithGeneratedKeys.of(updateCount);
}
}
private void checkParameters() {
- for (ParameterInterface p : parameters) {
- p.checkSet();
+ if (cmdType != EXPLAIN) {
+ for (ParameterInterface p : parameters) {
+ p.checkSet();
+ }
}
}
@@ -212,7 +261,13 @@ private void sendParameters(Transfer transfer) throws IOException {
int len = parameters.size();
transfer.writeInt(len);
for (ParameterInterface p : parameters) {
- transfer.writeValue(p.getParamValue());
+ Value pVal = p.getParamValue();
+
+ if (pVal == null && cmdType == EXPLAIN) {
+ pVal = ValueNull.INSTANCE;
+ }
+
+ transfer.writeValue(pVal);
}
}
@@ -235,8 +290,8 @@ public void close() {
try {
for (ParameterInterface p : parameters) {
Value v = p.getParamValue();
- if (v != null) {
- v.close();
+ if (v instanceof ValueLob) {
+ ((ValueLob) v).remove();
}
}
} catch (DbException e) {
@@ -260,7 +315,7 @@ public String toString() {
@Override
public int getCommandType() {
- return UNKNOWN;
+ return cmdType;
}
}
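
On the wire, executeUpdate now writes the generated-keys mode followed by a mode-specific payload, and reads back an extra result set when keys were requested. A schematic sketch of the client-side framing over a plain DataOutputStream; the real code uses H2's Transfer class, and the mode values mirror org.h2.engine.GeneratedKeysMode but are illustrative here:

    import java.io.DataOutputStream;
    import java.io.IOException;

    final class GeneratedKeysFraming {

        // Illustrative mode constants.
        static final int NONE = 0, AUTO = 1, COLUMN_NUMBERS = 2, COLUMN_NAMES = 3;

        static void write(DataOutputStream out, Object request) throws IOException {
            if (request == null || Boolean.FALSE.equals(request)) {
                out.writeInt(NONE);
            } else if (Boolean.TRUE.equals(request)) {
                out.writeInt(AUTO);
            } else if (request instanceof int[]) {
                int[] indexes = (int[]) request;
                out.writeInt(COLUMN_NUMBERS);
                out.writeInt(indexes.length);
                for (int index : indexes) {
                    out.writeInt(index);
                }
            } else if (request instanceof String[]) {
                String[] names = (String[]) request;
                out.writeInt(COLUMN_NAMES);
                out.writeInt(names.length);
                for (String name : names) {
                    out.writeUTF(name);
                }
            } else {
                throw new IllegalArgumentException("Unsupported request: " + request);
            }
        }
    }
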
diff --git a/h2/src/main/org/h2/command/Parser.java b/h2/src/main/org/h2/command/Parser.java
index 0f0fa19df6..6aa8a51d37 100644
--- a/h2/src/main/org/h2/command/Parser.java
+++ b/h2/src/main/org/h2/command/Parser.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*
* Nicolas Fortin, Atelier SIG, IRSTV FR CNRS 24888
@@ -8,146 +8,401 @@
*/
package org.h2.command;
-import java.math.BigDecimal;
-import java.math.BigInteger;
+import static org.h2.command.Token.ASTERISK;
+import static org.h2.command.Token.AT;
+import static org.h2.command.Token.BIGGER;
+import static org.h2.command.Token.BIGGER_EQUAL;
+import static org.h2.command.Token.CLOSE_BRACE;
+import static org.h2.command.Token.CLOSE_BRACKET;
+import static org.h2.command.Token.CLOSE_PAREN;
+import static org.h2.command.Token.COLON;
+import static org.h2.command.Token.COLON_COLON;
+import static org.h2.command.Token.COLON_EQ;
+import static org.h2.command.Token.COMMA;
+import static org.h2.command.Token.CONCATENATION;
+import static org.h2.command.Token.DOT;
+import static org.h2.command.Token.END_OF_INPUT;
+import static org.h2.command.Token.EQUAL;
+import static org.h2.command.Token.LITERAL;
+import static org.h2.command.Token.MINUS_SIGN;
+import static org.h2.command.Token.NOT_EQUAL;
+import static org.h2.command.Token.NOT_TILDE;
+import static org.h2.command.Token.OPEN_BRACE;
+import static org.h2.command.Token.OPEN_BRACKET;
+import static org.h2.command.Token.OPEN_PAREN;
+import static org.h2.command.Token.PARAMETER;
+import static org.h2.command.Token.PERCENT;
+import static org.h2.command.Token.PLUS_SIGN;
+import static org.h2.command.Token.SEMICOLON;
+import static org.h2.command.Token.SLASH;
+import static org.h2.command.Token.SMALLER;
+import static org.h2.command.Token.SMALLER_EQUAL;
+import static org.h2.command.Token.SPATIAL_INTERSECTS;
+import static org.h2.command.Token.TILDE;
+import static org.h2.command.Token.TOKENS;
+import static org.h2.util.ParserUtil.ALL;
+import static org.h2.util.ParserUtil.AND;
+import static org.h2.util.ParserUtil.ANY;
+import static org.h2.util.ParserUtil.ARRAY;
+import static org.h2.util.ParserUtil.AS;
+import static org.h2.util.ParserUtil.ASYMMETRIC;
+import static org.h2.util.ParserUtil.AUTHORIZATION;
+import static org.h2.util.ParserUtil.BETWEEN;
+import static org.h2.util.ParserUtil.CASE;
+import static org.h2.util.ParserUtil.CAST;
+import static org.h2.util.ParserUtil.CHECK;
+import static org.h2.util.ParserUtil.CONSTRAINT;
+import static org.h2.util.ParserUtil.CROSS;
+import static org.h2.util.ParserUtil.CURRENT_CATALOG;
+import static org.h2.util.ParserUtil.CURRENT_DATE;
+import static org.h2.util.ParserUtil.CURRENT_PATH;
+import static org.h2.util.ParserUtil.CURRENT_ROLE;
+import static org.h2.util.ParserUtil.CURRENT_SCHEMA;
+import static org.h2.util.ParserUtil.CURRENT_TIME;
+import static org.h2.util.ParserUtil.CURRENT_TIMESTAMP;
+import static org.h2.util.ParserUtil.CURRENT_USER;
+import static org.h2.util.ParserUtil.DAY;
+import static org.h2.util.ParserUtil.DEFAULT;
+import static org.h2.util.ParserUtil.DISTINCT;
+import static org.h2.util.ParserUtil.ELSE;
+import static org.h2.util.ParserUtil.END;
+import static org.h2.util.ParserUtil.EXCEPT;
+import static org.h2.util.ParserUtil.EXISTS;
+import static org.h2.util.ParserUtil.FALSE;
+import static org.h2.util.ParserUtil.FETCH;
+import static org.h2.util.ParserUtil.FIRST_KEYWORD;
+import static org.h2.util.ParserUtil.FOR;
+import static org.h2.util.ParserUtil.FOREIGN;
+import static org.h2.util.ParserUtil.FROM;
+import static org.h2.util.ParserUtil.FULL;
+import static org.h2.util.ParserUtil.GROUP;
+import static org.h2.util.ParserUtil.HAVING;
+import static org.h2.util.ParserUtil.HOUR;
+import static org.h2.util.ParserUtil.IDENTIFIER;
+import static org.h2.util.ParserUtil.IF;
+import static org.h2.util.ParserUtil.IN;
+import static org.h2.util.ParserUtil.INNER;
+import static org.h2.util.ParserUtil.INTERSECT;
+import static org.h2.util.ParserUtil.INTERVAL;
+import static org.h2.util.ParserUtil.IS;
+import static org.h2.util.ParserUtil.JOIN;
+import static org.h2.util.ParserUtil.KEY;
+import static org.h2.util.ParserUtil.LAST_KEYWORD;
+import static org.h2.util.ParserUtil.LEFT;
+import static org.h2.util.ParserUtil.LIKE;
+import static org.h2.util.ParserUtil.LIMIT;
+import static org.h2.util.ParserUtil.LOCALTIME;
+import static org.h2.util.ParserUtil.LOCALTIMESTAMP;
+import static org.h2.util.ParserUtil.MINUS;
+import static org.h2.util.ParserUtil.MINUTE;
+import static org.h2.util.ParserUtil.MONTH;
+import static org.h2.util.ParserUtil.NATURAL;
+import static org.h2.util.ParserUtil.NOT;
+import static org.h2.util.ParserUtil.NULL;
+import static org.h2.util.ParserUtil.OFFSET;
+import static org.h2.util.ParserUtil.ON;
+import static org.h2.util.ParserUtil.OR;
+import static org.h2.util.ParserUtil.ORDER;
+import static org.h2.util.ParserUtil.PRIMARY;
+import static org.h2.util.ParserUtil.QUALIFY;
+import static org.h2.util.ParserUtil.RIGHT;
+import static org.h2.util.ParserUtil.ROW;
+import static org.h2.util.ParserUtil.ROWNUM;
+import static org.h2.util.ParserUtil.SECOND;
+import static org.h2.util.ParserUtil.SELECT;
+import static org.h2.util.ParserUtil.SESSION_USER;
+import static org.h2.util.ParserUtil.SET;
+import static org.h2.util.ParserUtil.SOME;
+import static org.h2.util.ParserUtil.SYMMETRIC;
+import static org.h2.util.ParserUtil.SYSTEM_USER;
+import static org.h2.util.ParserUtil.TABLE;
+import static org.h2.util.ParserUtil.TO;
+import static org.h2.util.ParserUtil.TRUE;
+import static org.h2.util.ParserUtil.UNION;
+import static org.h2.util.ParserUtil.UNIQUE;
+import static org.h2.util.ParserUtil.UNKNOWN;
+import static org.h2.util.ParserUtil.USER;
+import static org.h2.util.ParserUtil.USING;
+import static org.h2.util.ParserUtil.VALUE;
+import static org.h2.util.ParserUtil.VALUES;
+import static org.h2.util.ParserUtil.WHEN;
+import static org.h2.util.ParserUtil.WHERE;
+import static org.h2.util.ParserUtil.WINDOW;
+import static org.h2.util.ParserUtil.WITH;
+import static org.h2.util.ParserUtil.YEAR;
+import static org.h2.util.ParserUtil._ROWID_;
+
import java.nio.charset.Charset;
import java.text.Collator;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.BitSet;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashSet;
-
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.TreeSet;
import org.h2.api.ErrorCode;
+import org.h2.api.IntervalQualifier;
import org.h2.api.Trigger;
+import org.h2.command.ddl.AlterDomainAddConstraint;
+import org.h2.command.ddl.AlterDomainDropConstraint;
+import org.h2.command.ddl.AlterDomainExpressions;
+import org.h2.command.ddl.AlterDomainRename;
+import org.h2.command.ddl.AlterDomainRenameConstraint;
import org.h2.command.ddl.AlterIndexRename;
import org.h2.command.ddl.AlterSchemaRename;
+import org.h2.command.ddl.AlterSequence;
import org.h2.command.ddl.AlterTableAddConstraint;
import org.h2.command.ddl.AlterTableAlterColumn;
import org.h2.command.ddl.AlterTableDropConstraint;
import org.h2.command.ddl.AlterTableRename;
import org.h2.command.ddl.AlterTableRenameColumn;
+import org.h2.command.ddl.AlterTableRenameConstraint;
import org.h2.command.ddl.AlterUser;
import org.h2.command.ddl.AlterView;
import org.h2.command.ddl.Analyze;
+import org.h2.command.ddl.CommandWithColumns;
import org.h2.command.ddl.CreateAggregate;
import org.h2.command.ddl.CreateConstant;
+import org.h2.command.ddl.CreateDomain;
import org.h2.command.ddl.CreateFunctionAlias;
import org.h2.command.ddl.CreateIndex;
import org.h2.command.ddl.CreateLinkedTable;
import org.h2.command.ddl.CreateRole;
import org.h2.command.ddl.CreateSchema;
import org.h2.command.ddl.CreateSequence;
+import org.h2.command.ddl.CreateSynonym;
import org.h2.command.ddl.CreateTable;
-import org.h2.command.ddl.CreateTableData;
import org.h2.command.ddl.CreateTrigger;
import org.h2.command.ddl.CreateUser;
-import org.h2.command.ddl.CreateUserDataType;
import org.h2.command.ddl.CreateView;
import org.h2.command.ddl.DeallocateProcedure;
import org.h2.command.ddl.DefineCommand;
import org.h2.command.ddl.DropAggregate;
import org.h2.command.ddl.DropConstant;
import org.h2.command.ddl.DropDatabase;
+import org.h2.command.ddl.DropDomain;
import org.h2.command.ddl.DropFunctionAlias;
import org.h2.command.ddl.DropIndex;
import org.h2.command.ddl.DropRole;
import org.h2.command.ddl.DropSchema;
import org.h2.command.ddl.DropSequence;
+import org.h2.command.ddl.DropSynonym;
import org.h2.command.ddl.DropTable;
import org.h2.command.ddl.DropTrigger;
import org.h2.command.ddl.DropUser;
-import org.h2.command.ddl.DropUserDataType;
import org.h2.command.ddl.DropView;
import org.h2.command.ddl.GrantRevoke;
import org.h2.command.ddl.PrepareProcedure;
+import org.h2.command.ddl.SequenceOptions;
import org.h2.command.ddl.SetComment;
import org.h2.command.ddl.TruncateTable;
-import org.h2.command.dml.AlterSequence;
import org.h2.command.dml.AlterTableSet;
import org.h2.command.dml.BackupCommand;
import org.h2.command.dml.Call;
+import org.h2.command.dml.CommandWithValues;
+import org.h2.command.dml.DataChangeStatement;
import org.h2.command.dml.Delete;
+import org.h2.command.dml.ExecuteImmediate;
import org.h2.command.dml.ExecuteProcedure;
import org.h2.command.dml.Explain;
+import org.h2.command.dml.Help;
import org.h2.command.dml.Insert;
import org.h2.command.dml.Merge;
+import org.h2.command.dml.MergeUsing;
import org.h2.command.dml.NoOperation;
-import org.h2.command.dml.Query;
-import org.h2.command.dml.Replace;
import org.h2.command.dml.RunScriptCommand;
import org.h2.command.dml.ScriptCommand;
-import org.h2.command.dml.Select;
-import org.h2.command.dml.SelectOrderBy;
-import org.h2.command.dml.SelectUnion;
import org.h2.command.dml.Set;
+import org.h2.command.dml.SetClauseList;
+import org.h2.command.dml.SetSessionCharacteristics;
import org.h2.command.dml.SetTypes;
import org.h2.command.dml.TransactionCommand;
import org.h2.command.dml.Update;
-import org.h2.constraint.ConstraintReferential;
+import org.h2.command.query.Query;
+import org.h2.command.query.QueryOrderBy;
+import org.h2.command.query.Select;
+import org.h2.command.query.SelectUnion;
+import org.h2.command.query.TableValueConstructor;
+import org.h2.constraint.ConstraintActionType;
+import org.h2.engine.ConnectionInfo;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.DbObject;
-import org.h2.engine.FunctionAlias;
+import org.h2.engine.DbSettings;
+import org.h2.engine.IsolationLevel;
+import org.h2.engine.Mode;
+import org.h2.engine.Mode.ModeEnum;
import org.h2.engine.Procedure;
import org.h2.engine.Right;
-import org.h2.engine.Session;
-import org.h2.engine.SysProperties;
+import org.h2.engine.SessionLocal;
import org.h2.engine.User;
-import org.h2.engine.UserAggregate;
-import org.h2.engine.UserDataType;
-import org.h2.expression.Aggregate;
import org.h2.expression.Alias;
-import org.h2.expression.CompareLike;
-import org.h2.expression.Comparison;
-import org.h2.expression.ConditionAndOr;
-import org.h2.expression.ConditionExists;
-import org.h2.expression.ConditionIn;
-import org.h2.expression.ConditionInSelect;
-import org.h2.expression.ConditionNot;
+import org.h2.expression.ArrayConstructorByQuery;
+import org.h2.expression.ArrayElementReference;
+import org.h2.expression.BinaryOperation;
+import org.h2.expression.BinaryOperation.OpType;
+import org.h2.expression.ConcatenationOperation;
+import org.h2.expression.DomainValueExpression;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionColumn;
import org.h2.expression.ExpressionList;
-import org.h2.expression.Function;
-import org.h2.expression.FunctionCall;
-import org.h2.expression.JavaAggregate;
-import org.h2.expression.JavaFunction;
-import org.h2.expression.Operation;
+import org.h2.expression.ExpressionWithFlags;
+import org.h2.expression.ExpressionWithVariableParameters;
+import org.h2.expression.FieldReference;
+import org.h2.expression.Format;
+import org.h2.expression.Format.FormatEnum;
import org.h2.expression.Parameter;
import org.h2.expression.Rownum;
+import org.h2.expression.SearchedCase;
import org.h2.expression.SequenceValue;
+import org.h2.expression.SimpleCase;
import org.h2.expression.Subquery;
-import org.h2.expression.TableFunction;
+import org.h2.expression.TimeZoneOperation;
+import org.h2.expression.TypedValueExpression;
+import org.h2.expression.UnaryOperation;
import org.h2.expression.ValueExpression;
import org.h2.expression.Variable;
import org.h2.expression.Wildcard;
+import org.h2.expression.aggregate.AbstractAggregate;
+import org.h2.expression.aggregate.Aggregate;
+import org.h2.expression.aggregate.AggregateType;
+import org.h2.expression.aggregate.JavaAggregate;
+import org.h2.expression.aggregate.ListaggArguments;
+import org.h2.expression.analysis.DataAnalysisOperation;
+import org.h2.expression.analysis.Window;
+import org.h2.expression.analysis.WindowFrame;
+import org.h2.expression.analysis.WindowFrameBound;
+import org.h2.expression.analysis.WindowFrameBoundType;
+import org.h2.expression.analysis.WindowFrameExclusion;
+import org.h2.expression.analysis.WindowFrameUnits;
+import org.h2.expression.analysis.WindowFunction;
+import org.h2.expression.analysis.WindowFunctionType;
+import org.h2.expression.condition.BetweenPredicate;
+import org.h2.expression.condition.BooleanTest;
+import org.h2.expression.condition.CompareLike;
+import org.h2.expression.condition.CompareLike.LikeType;
+import org.h2.expression.condition.Comparison;
+import org.h2.expression.condition.ConditionAndOr;
+import org.h2.expression.condition.ConditionAndOrN;
+import org.h2.expression.condition.ConditionIn;
+import org.h2.expression.condition.ConditionInParameter;
+import org.h2.expression.condition.ConditionInQuery;
+import org.h2.expression.condition.ConditionLocalAndGlobal;
+import org.h2.expression.condition.ConditionNot;
+import org.h2.expression.condition.ExistsPredicate;
+import org.h2.expression.condition.IsJsonPredicate;
+import org.h2.expression.condition.NullPredicate;
+import org.h2.expression.condition.TypePredicate;
+import org.h2.expression.condition.UniquePredicate;
+import org.h2.expression.function.ArrayFunction;
+import org.h2.expression.function.BitFunction;
+import org.h2.expression.function.BuiltinFunctions;
+import org.h2.expression.function.CSVWriteFunction;
+import org.h2.expression.function.CardinalityExpression;
+import org.h2.expression.function.CastSpecification;
+import org.h2.expression.function.CoalesceFunction;
+import org.h2.expression.function.CompatibilitySequenceValueFunction;
+import org.h2.expression.function.CompressFunction;
+import org.h2.expression.function.ConcatFunction;
+import org.h2.expression.function.CryptFunction;
+import org.h2.expression.function.CurrentDateTimeValueFunction;
+import org.h2.expression.function.CurrentGeneralValueSpecification;
+import org.h2.expression.function.DBObjectFunction;
+import org.h2.expression.function.DataTypeSQLFunction;
+import org.h2.expression.function.DateTimeFormatFunction;
+import org.h2.expression.function.DateTimeFunction;
+import org.h2.expression.function.DayMonthNameFunction;
+import org.h2.expression.function.FileFunction;
+import org.h2.expression.function.HashFunction;
+import org.h2.expression.function.JavaFunction;
+import org.h2.expression.function.JsonConstructorFunction;
+import org.h2.expression.function.LengthFunction;
+import org.h2.expression.function.MathFunction;
+import org.h2.expression.function.MathFunction1;
+import org.h2.expression.function.MathFunction2;
+import org.h2.expression.function.NullIfFunction;
+import org.h2.expression.function.RandFunction;
+import org.h2.expression.function.RegexpFunction;
+import org.h2.expression.function.SessionControlFunction;
+import org.h2.expression.function.SetFunction;
+import org.h2.expression.function.SignalFunction;
+import org.h2.expression.function.SoundexFunction;
+import org.h2.expression.function.StringFunction;
+import org.h2.expression.function.StringFunction1;
+import org.h2.expression.function.StringFunction2;
+import org.h2.expression.function.SubstringFunction;
+import org.h2.expression.function.SysInfoFunction;
+import org.h2.expression.function.TableInfoFunction;
+import org.h2.expression.function.ToCharFunction;
+import org.h2.expression.function.TrimFunction;
+import org.h2.expression.function.TruncateValueFunction;
+import org.h2.expression.function.XMLFunction;
+import org.h2.expression.function.table.ArrayTableFunction;
+import org.h2.expression.function.table.CSVReadFunction;
+import org.h2.expression.function.table.JavaTableFunction;
+import org.h2.expression.function.table.LinkSchemaFunction;
+import org.h2.expression.function.table.TableFunction;
import org.h2.index.Index;
import org.h2.message.DbException;
+import org.h2.mode.FunctionsPostgreSQL;
+import org.h2.mode.ModeFunction;
+import org.h2.mode.OnDuplicateKeyValues;
+import org.h2.mode.Regclass;
import org.h2.result.SortOrder;
+import org.h2.schema.Domain;
+import org.h2.schema.FunctionAlias;
import org.h2.schema.Schema;
import org.h2.schema.Sequence;
+import org.h2.schema.UserAggregate;
+import org.h2.schema.UserDefinedFunction;
import org.h2.table.Column;
+import org.h2.table.DataChangeDeltaTable;
+import org.h2.table.DataChangeDeltaTable.ResultOption;
+import org.h2.table.DualTable;
import org.h2.table.FunctionTable;
import org.h2.table.IndexColumn;
+import org.h2.table.IndexHints;
import org.h2.table.RangeTable;
import org.h2.table.Table;
import org.h2.table.TableFilter;
import org.h2.table.TableView;
-import org.h2.table.TableFilter.TableFilterVisitor;
-import org.h2.util.MathUtils;
-import org.h2.util.New;
-import org.h2.util.StatementBuilder;
+import org.h2.util.HasSQL;
+import org.h2.util.IntervalUtils;
+import org.h2.util.ParserUtil;
import org.h2.util.StringUtils;
+import org.h2.util.Utils;
+import org.h2.util.geometry.EWKTUtils;
+import org.h2.util.json.JSONItemType;
+import org.h2.util.json.JsonConstructorUtils;
import org.h2.value.CompareMode;
import org.h2.value.DataType;
+import org.h2.value.ExtTypeInfoEnum;
+import org.h2.value.ExtTypeInfoGeometry;
+import org.h2.value.ExtTypeInfoNumeric;
+import org.h2.value.ExtTypeInfoRow;
+import org.h2.value.TypeInfo;
import org.h2.value.Value;
-import org.h2.value.ValueBoolean;
-import org.h2.value.ValueBytes;
+import org.h2.value.ValueArray;
+import org.h2.value.ValueBigint;
import org.h2.value.ValueDate;
-import org.h2.value.ValueDecimal;
-import org.h2.value.ValueInt;
-import org.h2.value.ValueLong;
+import org.h2.value.ValueDouble;
+import org.h2.value.ValueGeometry;
+import org.h2.value.ValueInteger;
+import org.h2.value.ValueInterval;
+import org.h2.value.ValueJson;
import org.h2.value.ValueNull;
-import org.h2.value.ValueString;
+import org.h2.value.ValueNumeric;
+import org.h2.value.ValueRow;
import org.h2.value.ValueTime;
+import org.h2.value.ValueTimeTimeZone;
import org.h2.value.ValueTimestamp;
+import org.h2.value.ValueTimestampTimeZone;
+import org.h2.value.ValueUuid;
+import org.h2.value.ValueVarchar;
/**
* The parser is used to convert a SQL statement string to a command object.
@@ -158,64 +413,118 @@
*/
public class Parser {
- // used during the tokenizer phase
- private static final int CHAR_END = 1, CHAR_VALUE = 2, CHAR_QUOTED = 3;
- private static final int CHAR_NAME = 4, CHAR_SPECIAL_1 = 5,
- CHAR_SPECIAL_2 = 6;
- private static final int CHAR_STRING = 7, CHAR_DOT = 8,
- CHAR_DOLLAR_QUOTED_STRING = 9;
-
- // these are the token types
- private static final int KEYWORD = 1, IDENTIFIER = 2, PARAMETER = 3,
- END = 4, VALUE = 5;
- private static final int EQUAL = 6, BIGGER_EQUAL = 7, BIGGER = 8;
- private static final int SMALLER = 9, SMALLER_EQUAL = 10, NOT_EQUAL = 11,
- AT = 12;
- private static final int MINUS = 13, PLUS = 14, STRING_CONCAT = 15;
- private static final int OPEN = 16, CLOSE = 17, NULL = 18, TRUE = 19,
- FALSE = 20;
- private static final int CURRENT_TIMESTAMP = 21, CURRENT_DATE = 22,
- CURRENT_TIME = 23, ROWNUM = 24;
- private static final int SPATIAL_INTERSECTS = 25;
+ private static final String WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS =
+ "WITH statement supports only SELECT, TABLE, VALUES, " +
+ "CREATE TABLE, INSERT, UPDATE, MERGE or DELETE statements";
private final Database database;
- private final Session session;
+ private final SessionLocal session;
+
+ /**
+ * @see org.h2.engine.DbSettings#databaseToLower
+ */
+ private final boolean identifiersToLower;
/**
* @see org.h2.engine.DbSettings#databaseToUpper
*/
private final boolean identifiersToUpper;
- /** indicates character-type for each char in sqlCommand */
- private int[] characterTypes;
+ /**
+ * @see org.h2.engine.SessionLocal#isVariableBinary()
+ */
+ private final boolean variableBinary;
+
+ private final BitSet nonKeywords;
+
+ ArrayList<Token> tokens;
+ int tokenIndex;
+ Token token;
private int currentTokenType;
private String currentToken;
- private boolean currentTokenQuoted;
- private Value currentValue;
- private String originalSQL;
- /** copy of originalSQL, with comments blanked out */
private String sqlCommand;
- /** cached array if chars from sqlCommand */
- private char[] sqlCommandChars;
- /** index into sqlCommand of previous token */
- private int lastParseIndex;
- /** index into sqlCommand of current token */
- private int parseIndex;
private CreateView createView;
private Prepared currentPrepared;
private Select currentSelect;
+ private List<TableView> cteCleanups;
private ArrayList<Parameter> parameters;
+ private ArrayList<Parameter> suppliedParameters;
private String schemaName;
private ArrayList<String> expectedList;
private boolean rightsChecked;
private boolean recompileAlways;
- private ArrayList<Parameter> indexedParameterList;
+ private boolean literalsChecked;
+ private int orderInFrom;
+ private boolean parseDomainConstraint;
+
+ /**
+ * Parses the specified collection of non-keywords.
+ *
+ * @param nonKeywords array of non-keywords in upper case
+ * @return bit set of non-keywords, or {@code null}
+ */
+ public static BitSet parseNonKeywords(String[] nonKeywords) {
+ if (nonKeywords.length == 0) {
+ return null;
+ }
+ BitSet set = new BitSet();
+ for (String nonKeyword : nonKeywords) {
+ int index = Arrays.binarySearch(TOKENS, FIRST_KEYWORD, LAST_KEYWORD + 1, nonKeyword);
+ if (index >= 0) {
+ set.set(index);
+ }
+ }
+ return set.isEmpty() ? null : set;
+ }
+
+ /**
+ * Formats a comma-separated list of non-keywords.
+ *
+ * @param nonKeywords bit set of non-keywords, or {@code null}
+ * @return comma-separated list of non-keywords
+ */
+ public static String formatNonKeywords(BitSet nonKeywords) {
+ if (nonKeywords == null || nonKeywords.isEmpty()) {
+ return "";
+ }
+ StringBuilder builder = new StringBuilder();
+ for (int i = -1; (i = nonKeywords.nextSetBit(i + 1)) >= 0;) {
+ if (i >= FIRST_KEYWORD && i <= LAST_KEYWORD) {
+ if (builder.length() > 0) {
+ builder.append(',');
+ }
+ builder.append(TOKENS[i]);
+ }
+ }
+ return builder.toString();
+ }
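+
+ /*
+ * Illustrative sketch, not part of this change (keyword names are
+ * assumptions): if "LIMIT" and "VALUE" fall in the keyword range of
+ * TOKENS, a round trip looks like
+ *
+ * BitSet set = Parser.parseNonKeywords(new String[] { "LIMIT", "VALUE" });
+ * String s = Parser.formatNonKeywords(set); // "LIMIT,VALUE"
+ *
+ * Names outside the keyword range are ignored, and an empty result is
+ * returned as null.
+ */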
- public Parser(Session session) {
+ /**
+ * Creates a new instance of parser.
+ *
+ * @param session the session
+ */
+ public Parser(SessionLocal session) {
this.database = session.getDatabase();
- this.identifiersToUpper = database.getSettings().databaseToUpper;
+ DbSettings settings = database.getSettings();
+ this.identifiersToLower = settings.databaseToLower;
+ this.identifiersToUpper = settings.databaseToUpper;
+ this.variableBinary = session.isVariableBinary();
+ this.nonKeywords = session.getNonKeywords();
this.session = session;
}
+ /**
+ * Creates a new instance of parser for special use cases.
+ */
+ public Parser() {
+ database = null;
+ identifiersToLower = false;
+ identifiersToUpper = false;
+ variableBinary = false;
+ nonKeywords = null;
+ session = null;
+ }
+
/**
* Parse the statement and prepare it for execution.
*
@@ -223,9 +532,9 @@ public Parser(Session session) {
* @return the prepared object
*/
public Prepared prepare(String sql) {
- Prepared p = parse(sql);
+ Prepared p = parse(sql, null);
p.prepare();
- if (currentTokenType != END) {
+ if (currentTokenType != END_OF_INPUT) {
throw getSyntaxError();
}
return p;
@@ -239,38 +548,96 @@ public Prepared prepare(String sql) {
*/
public Command prepareCommand(String sql) {
try {
- Prepared p = parse(sql);
- boolean hasMore = isToken(";");
- if (!hasMore && currentTokenType != END) {
+ Prepared p = parse(sql, null);
+ if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) {
+ addExpected(SEMICOLON);
throw getSyntaxError();
}
- p.prepare();
- Command c = new CommandContainer(this, sql, p);
- if (hasMore) {
- String remaining = originalSQL.substring(parseIndex);
- if (remaining.trim().length() != 0) {
- CommandList list = new CommandList(this, sql, c, remaining);
- // list.addCommand(c);
- // do {
- // c = parseCommand();
- // list.addCommand(c);
- // } while (currentToken.equals(";"));
- c = list;
- }
+ try {
+ p.prepare();
+ } catch (Throwable t) {
+ CommandContainer.clearCTE(session, p);
+ throw t;
+ }
+ int sqlIndex = token.start();
+ if (sqlIndex < sql.length()) {
+ sql = sql.substring(0, sqlIndex);
+ }
+ CommandContainer c = new CommandContainer(session, sql, p);
+ while (currentTokenType == SEMICOLON) {
+ read();
+ }
+ if (currentTokenType != END_OF_INPUT) {
+ int offset = token.start();
+ return prepareCommandList(c, p, sql, sqlCommand.substring(offset), getRemainingTokens(offset));
}
return c;
} catch (DbException e) {
- throw e.addSQL(originalSQL);
+ throw e.addSQL(sqlCommand);
+ }
+ }
+
+ private CommandList prepareCommandList(CommandContainer command, Prepared p, String sql, String remainingSql,
+ ArrayList<Token> remainingTokens) {
+ try {
+ ArrayList<Prepared> list = Utils.newSmallArrayList();
+ for (;;) {
+ if (p instanceof DefineCommand) {
+ // Subsequent commands may depend on the results of this command.
+ return new CommandList(session, sql, command, list, parameters, remainingSql);
+ }
+ suppliedParameters = parameters;
+ try {
+ p = parse(remainingSql, remainingTokens);
+ } catch (DbException ex) {
+ // This command may depend on results of previous commands.
+ if (ex.getErrorCode() == ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS) {
+ throw ex;
+ }
+ return new CommandList(session, sql, command, list, parameters, remainingSql);
+ }
+ list.add(p);
+ if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) {
+ addExpected(SEMICOLON);
+ throw getSyntaxError();
+ }
+ while (currentTokenType == SEMICOLON) {
+ read();
+ }
+ if (currentTokenType == END_OF_INPUT) {
+ break;
+ }
+ int offset = token.start();
+ remainingSql = sqlCommand.substring(offset);
+ remainingTokens = getRemainingTokens(offset);
+ }
+ return new CommandList(session, sql, command, list, parameters, null);
+ } catch (Throwable t) {
+ command.clearCTE();
+ throw t;
}
}
+ private ArrayList<Token> getRemainingTokens(int offset) {
+ List<Token> subList = tokens.subList(tokenIndex, tokens.size());
+ ArrayList<Token> remainingTokens = new ArrayList<>(subList);
+ subList.clear();
+ tokens.add(new Token.EndOfInputToken(offset));
+ for (Token token : remainingTokens) {
+ token.subtractFromStart(offset);
+ }
+ return remainingTokens;
+ }
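+
+ /*
+ * Illustrative sketch (offsets assumed): for "SELECT 1; SELECT 2" the
+ * second statement starts at offset 10, so its tokens are detached from
+ * the shared list and rebased with subtractFromStart(10), making their
+ * start positions relative to the remaining SQL text.
+ */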
+
/**
* Parse the statement, but don't prepare it for execution.
*
* @param sql the SQL statement to parse
+ * @param tokens tokens, or null
* @return the prepared object
*/
- Prepared parse(String sql) {
+ Prepared parse(String sql, ArrayList<Token> tokens) {
+ initialize(sql, tokens, false);
Prepared p;
try {
// first, try the fast variant
@@ -278,6 +645,7 @@ Prepared parse(String sql) {
} catch (DbException e) {
if (e.getErrorCode() == ErrorCode.SYNTAX_ERROR_1) {
// now, get the detailed exception
+ resetTokenIndex();
p = parse(sql, true);
} else {
throw e.addSQL(sql);
@@ -289,44 +657,74 @@ Prepared parse(String sql) {
}
private Prepared parse(String sql, boolean withExpectedList) {
- initialize(sql);
if (withExpectedList) {
- expectedList = New.arrayList();
+ expectedList = new ArrayList<>();
} else {
expectedList = null;
}
- parameters = New.arrayList();
+ parameters = suppliedParameters != null ? suppliedParameters : Utils.newSmallArrayList();
currentSelect = null;
currentPrepared = null;
createView = null;
+ cteCleanups = null;
recompileAlways = false;
- indexedParameterList = null;
read();
- return parsePrepared();
+ Prepared p;
+ try {
+ p = parsePrepared();
+ p.setCteCleanups(cteCleanups);
+ } catch (Throwable t) {
+ if (cteCleanups != null) {
+ CommandContainer.clearCTE(session, cteCleanups);
+ }
+ throw t;
+ }
+ return p;
}
private Prepared parsePrepared() {
- int start = lastParseIndex;
+ int start = tokenIndex;
Prepared c = null;
- String token = currentToken;
- if (token.length() == 0) {
+ switch (currentTokenType) {
+ case END_OF_INPUT:
+ case SEMICOLON:
c = new NoOperation(session);
- } else {
- char first = token.charAt(0);
- switch (first) {
- case '?':
- // read the ? as a parameter
- readTerm();
- // this is an 'out' parameter - set a dummy value
- parameters.get(0).setValue(ValueNull.INSTANCE);
- read("=");
- read("CALL");
- c = parseCall();
- break;
- case '(':
- c = parseSelect();
+ setSQL(c, start);
+ return c;
+ case PARAMETER:
+ // read the ? as a parameter
+ // this is an 'out' parameter - set a dummy value
+ readParameter().setValue(ValueNull.INSTANCE);
+ read(EQUAL);
+ start = tokenIndex;
+ read("CALL");
+ c = parseCall();
+ break;
+ case OPEN_PAREN:
+ case SELECT:
+ case TABLE:
+ case VALUES:
+ c = parseQuery();
+ break;
+ case WITH:
+ read();
+ c = parseWithStatementOrQuery(start);
+ break;
+ case SET:
+ read();
+ c = parseSet();
+ break;
+ case IDENTIFIER:
+ if (token.isQuoted()) {
break;
- case 'a':
+ }
+ /*
+ * Convert a-z to A-Z. This is safe because only A-Z
+ * characters are considered below.
+ *
+ * An unquoted identifier is never empty.
+ */
+ switch (currentToken.charAt(0) & 0xffdf) {
case 'A':
if (readIf("ALTER")) {
c = parseAlter();
@@ -334,7 +732,6 @@ private Prepared parsePrepared() {
c = parseAnalyze();
}
break;
- case 'b':
case 'B':
if (readIf("BACKUP")) {
c = parseBackup();
@@ -342,7 +739,6 @@ private Prepared parsePrepared() {
c = parseBegin();
}
break;
- case 'c':
case 'C':
if (readIf("COMMIT")) {
c = parseCommit();
@@ -356,64 +752,61 @@ private Prepared parsePrepared() {
c = parseComment();
}
break;
- case 'd':
case 'D':
if (readIf("DELETE")) {
- c = parseDelete();
+ c = parseDelete(start);
} else if (readIf("DROP")) {
c = parseDrop();
} else if (readIf("DECLARE")) {
// support for DECLARE GLOBAL TEMPORARY TABLE...
c = parseCreate();
- } else if (readIf("DEALLOCATE")) {
+ } else if (database.getMode().getEnum() != ModeEnum.MSSQLServer && readIf("DEALLOCATE")) {
+ /*
+ * PostgreSQL-style DEALLOCATE is disabled in MSSQLServer
+ * mode because PostgreSQL-style EXECUTE is redefined in
+ * this mode.
+ */
c = parseDeallocate();
}
break;
- case 'e':
case 'E':
if (readIf("EXPLAIN")) {
c = parseExplain();
- } else if (readIf("EXECUTE")) {
- c = parseExecute();
- }
- break;
- case 'f':
- case 'F':
- if (isToken("FROM")) {
- c = parseSelect();
+ } else if (database.getMode().getEnum() != ModeEnum.MSSQLServer) {
+ if (readIf("EXECUTE")) {
+ c = parseExecutePostgre();
+ }
+ } else {
+ if (readIf("EXEC") || readIf("EXECUTE")) {
+ c = parseExecuteSQLServer();
+ }
}
break;
- case 'g':
case 'G':
if (readIf("GRANT")) {
c = parseGrantRevoke(CommandInterface.GRANT);
}
break;
- case 'h':
case 'H':
if (readIf("HELP")) {
c = parseHelp();
}
break;
- case 'i':
case 'I':
if (readIf("INSERT")) {
- c = parseInsert();
+ c = parseInsert(start);
}
break;
- case 'm':
case 'M':
if (readIf("MERGE")) {
- c = parseMerge();
+ c = parseMerge(start);
}
break;
- case 'p':
case 'P':
if (readIf("PREPARE")) {
c = parsePrepare();
}
break;
- case 'r':
case 'R':
if (readIf("ROLLBACK")) {
c = parseRollback();
@@ -423,17 +816,12 @@ private Prepared parsePrepared() {
c = parseRunScript();
} else if (readIf("RELEASE")) {
c = parseReleaseSavepoint();
- } else if (readIf("REPLACE")) {
- c = parseReplace();
+ } else if (database.getMode().replaceInto && readIf("REPLACE")) {
+ c = parseReplace(start);
}
break;
- case 's':
case 'S':
- if (isToken("SELECT")) {
- c = parseSelect();
- } else if (readIf("SET")) {
- c = parseSet();
- } else if (readIf("SAVEPOINT")) {
+ if (readIf("SAVEPOINT")) {
c = parseSavepoint();
} else if (readIf("SCRIPT")) {
c = parseScript();
@@ -443,100 +831,80 @@ private Prepared parsePrepared() {
c = parseShow();
}
break;
- case 't':
case 'T':
if (readIf("TRUNCATE")) {
c = parseTruncate();
}
break;
- case 'u':
case 'U':
if (readIf("UPDATE")) {
- c = parseUpdate();
+ c = parseUpdate(start);
} else if (readIf("USE")) {
c = parseUse();
}
break;
- case 'v':
- case 'V':
- if (readIf("VALUES")) {
- c = parseValues();
- }
- break;
- case 'w':
- case 'W':
- if (readIf("WITH")) {
- c = parseWith();
- }
- break;
- case ';':
- c = new NoOperation(session);
- break;
- default:
- throw getSyntaxError();
}
- if (indexedParameterList != null) {
- for (int i = 0, size = indexedParameterList.size();
- i < size; i++) {
- if (indexedParameterList.get(i) == null) {
- indexedParameterList.set(i, new Parameter(i));
- }
+ }
+ if (c == null) {
+ throw getSyntaxError();
+ }
+ if (parameters != null) {
+ for (int i = 0, size = parameters.size(); i < size; i++) {
+ if (parameters.get(i) == null) {
+ parameters.set(i, new Parameter(i));
}
- parameters = indexedParameterList;
}
- if (readIf("{")) {
- do {
- int index = (int) readLong() - 1;
- if (index < 0 || index >= parameters.size()) {
- throw getSyntaxError();
- }
- Parameter p = parameters.get(index);
- if (p == null) {
- throw getSyntaxError();
- }
- read(":");
- Expression expr = readExpression();
- expr = expr.optimize(session);
- p.setValue(expr.getValue(session));
- } while (readIf(","));
- read("}");
- for (Parameter p : parameters) {
- p.checkSet();
+ }
+ boolean withParamValues = readIf(OPEN_BRACE);
+ if (withParamValues) {
+ do {
+ int index = (int) readLong() - 1;
+ if (index < 0 || index >= parameters.size()) {
+ throw getSyntaxError();
+ }
+ Parameter p = parameters.get(index);
+ if (p == null) {
+ throw getSyntaxError();
}
- parameters.clear();
+ read(COLON);
+ Expression expr = readExpression();
+ expr = expr.optimize(session);
+ p.setValue(expr.getValue(session));
+ } while (readIf(COMMA));
+ read(CLOSE_BRACE);
+ for (Parameter p : parameters) {
+ p.checkSet();
}
+ parameters.clear();
}
- if (c == null) {
- throw getSyntaxError();
+ if (withParamValues || c.getSQL() == null) {
+ setSQL(c, start);
}
- setSQL(c, null, start);
return c;
}
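+
+ /*
+ * Illustrative sketch of the OPEN_BRACE branch above (table name and
+ * value are assumed): a statement such as
+ *
+ * SELECT * FROM TEST WHERE ID = ? {1: 42}
+ *
+ * assigns 42 to the first parameter at parse time, a form used
+ * internally rather than in user-facing SQL.
+ */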
private DbException getSyntaxError() {
- if (expectedList == null || expectedList.size() == 0) {
- return DbException.getSyntaxError(sqlCommand, parseIndex);
+ if (expectedList == null || expectedList.isEmpty()) {
+ return DbException.getSyntaxError(sqlCommand, token.start());
}
- StatementBuilder buff = new StatementBuilder();
- for (String e : expectedList) {
- buff.appendExceptFirst(", ");
- buff.append(e);
- }
- return DbException.getSyntaxError(sqlCommand, parseIndex,
- buff.toString());
+ return DbException.getSyntaxError(sqlCommand, token.start(), String.join(", ", expectedList));
}
private Prepared parseBackup() {
BackupCommand command = new BackupCommand(session);
- read("TO");
+ read(TO);
command.setFileName(readExpression());
return command;
}
private Prepared parseAnalyze() {
Analyze command = new Analyze(session);
+ if (readIf(TABLE)) {
+ Table table = readTableOrView();
+ command.setTable(table);
+ }
if (readIf("SAMPLE_SIZE")) {
- command.setTop(readPositiveInt());
+ command.setTop(readNonNegativeInt());
}
return command;
}
@@ -553,13 +921,11 @@ private TransactionCommand parseBegin() {
private TransactionCommand parseCommit() {
TransactionCommand command;
if (readIf("TRANSACTION")) {
- command = new TransactionCommand(session,
- CommandInterface.COMMIT_TRANSACTION);
- command.setTransactionName(readUniqueIdentifier());
+ command = new TransactionCommand(session, CommandInterface.COMMIT_TRANSACTION);
+ command.setTransactionName(readIdentifier());
return command;
}
- command = new TransactionCommand(session,
- CommandInterface.COMMIT);
+ command = new TransactionCommand(session, CommandInterface.COMMIT);
readIf("WORK");
return command;
}
@@ -581,44 +947,51 @@ private TransactionCommand parseShutdown() {
private TransactionCommand parseRollback() {
TransactionCommand command;
if (readIf("TRANSACTION")) {
- command = new TransactionCommand(session,
- CommandInterface.ROLLBACK_TRANSACTION);
- command.setTransactionName(readUniqueIdentifier());
+ command = new TransactionCommand(session, CommandInterface.ROLLBACK_TRANSACTION);
+ command.setTransactionName(readIdentifier());
return command;
}
- if (readIf("TO")) {
+ readIf("WORK");
+ if (readIf(TO)) {
read("SAVEPOINT");
- command = new TransactionCommand(session,
- CommandInterface.ROLLBACK_TO_SAVEPOINT);
- command.setSavepointName(readUniqueIdentifier());
+ command = new TransactionCommand(session, CommandInterface.ROLLBACK_TO_SAVEPOINT);
+ command.setSavepointName(readIdentifier());
} else {
- readIf("WORK");
- command = new TransactionCommand(session,
- CommandInterface.ROLLBACK);
+ command = new TransactionCommand(session, CommandInterface.ROLLBACK);
}
return command;
}
private Prepared parsePrepare() {
if (readIf("COMMIT")) {
- TransactionCommand command = new TransactionCommand(session,
- CommandInterface.PREPARE_COMMIT);
- command.setTransactionName(readUniqueIdentifier());
+ TransactionCommand command = new TransactionCommand(session, CommandInterface.PREPARE_COMMIT);
+ command.setTransactionName(readIdentifier());
return command;
}
- String procedureName = readAliasIdentifier();
- if (readIf("(")) {
- ArrayList<Column> list = New.arrayList();
+ return parsePrepareProcedure();
+ }
+
+ private Prepared parsePrepareProcedure() {
+ if (database.getMode().getEnum() == ModeEnum.MSSQLServer) {
+ /*
+ * PostgreSQL-style PREPARE is disabled in MSSQLServer mode
+ * because PostgreSQL-style EXECUTE is redefined in this
+ * mode.
+ */
+ throw getSyntaxError();
+ }
+ String procedureName = readIdentifier();
+ if (readIf(OPEN_PAREN)) {
+ ArrayList<Column> list = Utils.newSmallArrayList();
for (int i = 0;; i++) {
Column column = parseColumnForTable("C" + i, true);
list.add(column);
- if (readIf(")")) {
+ if (!readIfMore()) {
break;
}
- read(",");
}
}
- read("AS");
+ read(AS);
Prepared prep = parsePrepared();
PrepareProcedure command = new PrepareProcedure(session);
command.setProcedureName(procedureName);
@@ -627,20 +1000,19 @@ private Prepared parsePrepare() {
}
private TransactionCommand parseSavepoint() {
- TransactionCommand command = new TransactionCommand(session,
- CommandInterface.SAVEPOINT);
- command.setSavepointName(readUniqueIdentifier());
+ TransactionCommand command = new TransactionCommand(session, CommandInterface.SAVEPOINT);
+ command.setSavepointName(readIdentifier());
return command;
}
private Prepared parseReleaseSavepoint() {
Prepared command = new NoOperation(session);
readIf("SAVEPOINT");
- readUniqueIdentifier();
+ readIdentifier();
return command;
}
- private Schema getSchema(String schemaName) {
+ private Schema findSchema(String schemaName) {
if (schemaName == null) {
return null;
}
@@ -649,303 +1021,351 @@ private Schema getSchema(String schemaName) {
if (equalsToken("SESSION", schemaName)) {
// for local temporary tables
schema = database.getSchema(session.getCurrentSchemaName());
- } else if (database.getMode().sysDummy1 &&
- "SYSIBM".equals(schemaName)) {
- // IBM DB2 and Apache Derby compatibility: SYSIBM.SYSDUMMY1
- } else {
- throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName);
}
}
return schema;
}
+ private Schema getSchema(String schemaName) {
+ if (schemaName == null) {
+ return null;
+ }
+ Schema schema = findSchema(schemaName);
+ if (schema == null) {
+ throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName);
+ }
+ return schema;
+ }
+
private Schema getSchema() {
return getSchema(schemaName);
}
+ /*
+ * Gets the current schema for scenarios that need a guaranteed, non-null
+ * schema object.
+ *
+ * This routine exists because readIdentifierWithSchema(String
+ * defaultSchemaName) is often called with a null argument (there are about
+ * seven such calls in this file) and then, a few lines in, nullifies the
+ * state field schemaName, which looks like a bug: there appears to be no
+ * valid case where no active schema is defined, and "SET SCHEMA=NULL" is
+ * not a valid command. Every attempted fix inside
+ * readIdentifierWithSchema cascaded into many unit test failures, so a
+ * proper fix needs a bigger, dedicated effort.
+ */
+ private Schema getSchemaWithDefault() {
+ if (schemaName == null) {
+ schemaName = session.getCurrentSchemaName();
+ }
+ return getSchema(schemaName);
+ }
private Column readTableColumn(TableFilter filter) {
- String tableAlias = null;
- String columnName = readColumnIdentifier();
- if (readIf(".")) {
+ String columnName = readIdentifier();
+ if (readIf(DOT)) {
+ columnName = readTableColumn(filter, columnName);
+ }
+ return filter.getTable().getColumn(columnName);
+ }
+
+ private String readTableColumn(TableFilter filter, String tableAlias) {
+ String columnName = readIdentifier();
+ if (readIf(DOT)) {
+ String schema = tableAlias;
tableAlias = columnName;
- columnName = readColumnIdentifier();
- if (readIf(".")) {
- String schema = tableAlias;
+ columnName = readIdentifier();
+ if (readIf(DOT)) {
+ checkDatabaseName(schema);
+ schema = tableAlias;
tableAlias = columnName;
- columnName = readColumnIdentifier();
- if (readIf(".")) {
- String catalogName = schema;
- schema = tableAlias;
- tableAlias = columnName;
- columnName = readColumnIdentifier();
- if (!equalsToken(catalogName, database.getShortName())) {
- throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1,
- catalogName);
- }
- }
- if (!equalsToken(schema, filter.getTable().getSchema()
- .getName())) {
- throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schema);
- }
+ columnName = readIdentifier();
}
- if (!equalsToken(tableAlias, filter.getTableAlias())) {
- throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1,
- tableAlias);
+ if (!equalsToken(schema, filter.getTable().getSchema().getName())) {
+ throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schema);
}
}
- if (database.getSettings().rowId) {
- if (Column.ROWID.equals(columnName)) {
- return filter.getRowIdColumn();
- }
+ if (!equalsToken(tableAlias, filter.getTableAlias())) {
+ throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableAlias);
}
- return filter.getTable().getColumn(columnName);
+ return columnName;
}
- private Update parseUpdate() {
+ private Update parseUpdate(int start) {
Update command = new Update(session);
currentPrepared = command;
- int start = lastParseIndex;
+ Expression fetch = null;
+ if (database.getMode().topInDML && readIf("TOP")) {
+ read(OPEN_PAREN);
+ fetch = readTerm().optimize(session);
+ read(CLOSE_PAREN);
+ }
TableFilter filter = readSimpleTableFilter();
command.setTableFilter(filter);
- read("SET");
- if (readIf("(")) {
- ArrayList<Column> columns = New.arrayList();
- do {
- Column column = readTableColumn(filter);
- columns.add(column);
- } while (readIf(","));
- read(")");
- read("=");
- Expression expression = readExpression();
- if (columns.size() == 1) {
- // the expression is parsed as a simple value
- command.setAssignment(columns.get(0), expression);
- } else {
- for (int i = 0, size = columns.size(); i < size; i++) {
- Column column = columns.get(i);
- Function f = Function.getFunction(database, "ARRAY_GET");
- f.setParameter(0, expression);
- f.setParameter(1, ValueExpression.get(ValueInt.get(i + 1)));
- f.doneWithParameters();
- command.setAssignment(column, f);
- }
- }
- } else {
- do {
- Column column = readTableColumn(filter);
- read("=");
- Expression expression;
- if (readIf("DEFAULT")) {
- expression = ValueExpression.getDefault();
- } else {
- expression = readExpression();
- }
- command.setAssignment(column, expression);
- } while (readIf(","));
+ command.setSetClauseList(readUpdateSetClause(filter));
+ if (database.getMode().allowUsingFromClauseInUpdateStatement && readIf(FROM)) {
+ TableFilter fromTable = readTablePrimary();
+ command.setFromTableFilter(fromTable);
}
- if (readIf("WHERE")) {
- Expression condition = readExpression();
- command.setCondition(condition);
+ if (readIf(WHERE)) {
+ command.setCondition(readExpression());
}
- if (readIf("ORDER")) {
+ if (fetch == null) {
// for MySQL compatibility
// (this syntax is supported, but ignored)
- read("BY");
- parseSimpleOrderList();
- }
- if (readIf("LIMIT")) {
- Expression limit = readTerm().optimize(session);
- command.setLimit(limit);
+ readIfOrderBy();
+ fetch = readFetchOrLimit();
}
- setSQL(command, "UPDATE", start);
+ command.setFetch(fetch);
+ setSQL(command, start);
return command;
}
- private TableFilter readSimpleTableFilter() {
- Table table = readTableOrView();
- String alias = null;
- if (readIf("AS")) {
- alias = readAliasIdentifier();
- } else if (currentTokenType == IDENTIFIER) {
- if (!equalsToken("SET", currentToken)) {
- // SET is not a keyword (PostgreSQL supports it as a table name)
- alias = readAliasIdentifier();
+ private SetClauseList readUpdateSetClause(TableFilter filter) {
+ read(SET);
+ SetClauseList list = new SetClauseList(filter.getTable());
+ do {
+ if (readIf(OPEN_PAREN)) {
+ ArrayList<Column> columns = Utils.newSmallArrayList();
+ do {
+ columns.add(readTableColumn(filter));
+ } while (readIfMore());
+ read(EQUAL);
+ list.addMultiple(columns, readExpression());
+ } else {
+ Column column = readTableColumn(filter);
+ read(EQUAL);
+ list.addSingle(column, readExpressionOrDefault());
}
- }
- return new TableFilter(session, table, alias, rightsChecked,
- currentSelect);
+ } while (readIf(COMMA));
+ return list;
+ }
+
+ private TableFilter readSimpleTableFilter() {
+ return new TableFilter(session, readTableOrView(), readFromAlias(null), rightsChecked, currentSelect, 0, null);
}
- private Delete parseDelete() {
+ private Delete parseDelete(int start) {
Delete command = new Delete(session);
- Expression limit = null;
- if (readIf("TOP")) {
- limit = readTerm().optimize(session);
+ Expression fetch = null;
+ if (database.getMode().topInDML && readIf("TOP")) {
+ fetch = readTerm().optimize(session);
}
currentPrepared = command;
- int start = lastParseIndex;
- readIf("FROM");
- TableFilter filter = readSimpleTableFilter();
- command.setTableFilter(filter);
- if (readIf("WHERE")) {
- Expression condition = readExpression();
- command.setCondition(condition);
+ if (!readIf(FROM) && database.getMode().getEnum() == ModeEnum.MySQL) {
+ readIdentifierWithSchema();
+ read(FROM);
}
- if (readIf("LIMIT") && limit == null) {
- limit = readTerm().optimize(session);
+ command.setTableFilter(readSimpleTableFilter());
+ if (readIf(WHERE)) {
+ command.setCondition(readExpression());
}
- command.setLimit(limit);
- setSQL(command, "DELETE", start);
+ if (fetch == null) {
+ fetch = readFetchOrLimit();
+ }
+ command.setFetch(fetch);
+ setSQL(command, start);
return command;
}
- private IndexColumn[] parseIndexColumnList() {
- ArrayList<IndexColumn> columns = New.arrayList();
- do {
- IndexColumn column = new IndexColumn();
- column.columnName = readColumnIdentifier();
- columns.add(column);
- if (readIf("ASC")) {
- // ignore
- } else if (readIf("DESC")) {
- column.sortType = SortOrder.DESCENDING;
+ private Expression readFetchOrLimit() {
+ Expression fetch = null;
+ if (readIf(FETCH)) {
+ if (!readIf("FIRST")) {
+ read("NEXT");
}
- if (readIf("NULLS")) {
- if (readIf("FIRST")) {
- column.sortType |= SortOrder.NULLS_FIRST;
- } else {
- read("LAST");
- column.sortType |= SortOrder.NULLS_LAST;
+ if (readIf(ROW) || readIf("ROWS")) {
+ fetch = ValueExpression.get(ValueInteger.get(1));
+ } else {
+ fetch = readExpression().optimize(session);
+ if (!readIf(ROW)) {
+ read("ROWS");
}
}
- } while (readIf(","));
- read(")");
- return columns.toArray(new IndexColumn[columns.size()]);
+ read("ONLY");
+ } else if (database.getMode().limit && readIf(LIMIT)) {
+ fetch = readTerm().optimize(session);
+ }
+ return fetch;
+ }
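+
+ /*
+ * Illustrative sketch (table and column names assumed): both of these
+ * limit a DELETE to ten rows, the second only in modes with the limit
+ * flag:
+ *
+ * DELETE FROM TEST WHERE FLAG FETCH FIRST 10 ROWS ONLY
+ * DELETE FROM TEST WHERE FLAG LIMIT 10
+ *
+ * FETCH FIRST ROW ONLY without a count defaults to one row.
+ */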
+
+ private IndexColumn[] parseIndexColumnList() {
+ ArrayList<IndexColumn> columns = Utils.newSmallArrayList();
+ do {
+ columns.add(new IndexColumn(readIdentifier(), parseSortType()));
+ } while (readIfMore());
+ return columns.toArray(new IndexColumn[0]);
+ }
+
+ private int parseSortType() {
+ int sortType = !readIf("ASC") && readIf("DESC") ? SortOrder.DESCENDING : SortOrder.ASCENDING;
+ if (readIf("NULLS")) {
+ if (readIf("FIRST")) {
+ sortType |= SortOrder.NULLS_FIRST;
+ } else {
+ read("LAST");
+ sortType |= SortOrder.NULLS_LAST;
+ }
+ }
+ return sortType;
}
private String[] parseColumnList() {
- ArrayList<String> columns = New.arrayList();
+ ArrayList<String> columns = Utils.newSmallArrayList();
do {
- String columnName = readColumnIdentifier();
- columns.add(columnName);
+ columns.add(readIdentifier());
} while (readIfMore());
- return columns.toArray(new String[columns.size()]);
+ return columns.toArray(new String[0]);
}
private Column[] parseColumnList(Table table) {
- ArrayList<Column> columns = New.arrayList();
- HashSet<Column> set = New.hashSet();
- if (!readIf(")")) {
+ ArrayList<Column> columns = Utils.newSmallArrayList();
+ HashSet<Column> set = new HashSet<>();
+ if (!readIf(CLOSE_PAREN)) {
do {
Column column = parseColumn(table);
if (!set.add(column)) {
- throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1,
- column.getSQL());
+ throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, column.getTraceSQL());
}
columns.add(column);
} while (readIfMore());
}
- return columns.toArray(new Column[columns.size()]);
+ return columns.toArray(new Column[0]);
}
private Column parseColumn(Table table) {
- String id = readColumnIdentifier();
- if (database.getSettings().rowId && Column.ROWID.equals(id)) {
+ if (currentTokenType == _ROWID_) {
+ read();
return table.getRowIdColumn();
}
- return table.getColumn(id);
+ return table.getColumn(readIdentifier());
}
+ /**
+ * Reads a comma or a closing parenthesis.
+ *
+ * @return {@code true} if a comma was read, {@code false} if a closing
+ * parenthesis was read
+ */
private boolean readIfMore() {
- if (readIf(",")) {
- return !readIf(")");
+ if (readIf(COMMA)) {
+ return true;
}
- read(")");
+ read(CLOSE_PAREN);
return false;
}
private Prepared parseHelp() {
- StringBuilder buff = new StringBuilder(
- "SELECT * FROM INFORMATION_SCHEMA.HELP");
- int i = 0;
- ArrayList<Value> paramValues = New.arrayList();
- while (currentTokenType != END) {
- String s = currentToken;
+ HashSet<String> conditions = new HashSet<>();
+ while (currentTokenType != END_OF_INPUT) {
+ conditions.add(StringUtils.toUpperEnglish(currentToken));
read();
- if (i == 0) {
- buff.append(" WHERE ");
- } else {
- buff.append(" AND ");
- }
- i++;
- buff.append("UPPER(TOPIC) LIKE ?");
- paramValues.add(ValueString.get("%" + s + "%"));
}
- return prepare(session, buff.toString(), paramValues);
+ return new Help(session, conditions.toArray(new String[0]));
}
private Prepared parseShow() {
- ArrayList<Value> paramValues = New.arrayList();
+ ArrayList<Value> paramValues = Utils.newSmallArrayList();
StringBuilder buff = new StringBuilder("SELECT ");
if (readIf("CLIENT_ENCODING")) {
// for PostgreSQL compatibility
- buff.append("'UNICODE' AS CLIENT_ENCODING FROM DUAL");
+ buff.append("'UNICODE' CLIENT_ENCODING");
} else if (readIf("DEFAULT_TRANSACTION_ISOLATION")) {
// for PostgreSQL compatibility
- buff.append("'read committed' AS DEFAULT_TRANSACTION_ISOLATION " +
- "FROM DUAL");
+ buff.append("'read committed' DEFAULT_TRANSACTION_ISOLATION");
} else if (readIf("TRANSACTION")) {
// for PostgreSQL compatibility
read("ISOLATION");
read("LEVEL");
- buff.append("'read committed' AS TRANSACTION_ISOLATION " +
- "FROM DUAL");
+ buff.append("LOWER(ISOLATION_LEVEL) TRANSACTION_ISOLATION FROM INFORMATION_SCHEMA.SESSIONS "
+ + "WHERE SESSION_ID = SESSION_ID()");
} else if (readIf("DATESTYLE")) {
// for PostgreSQL compatibility
- buff.append("'ISO' AS DATESTYLE FROM DUAL");
+ buff.append("'ISO' DATESTYLE");
+ } else if (readIf("SEARCH_PATH")) {
+ // for PostgreSQL compatibility
+ String[] searchPath = session.getSchemaSearchPath();
+ StringBuilder searchPathBuff = new StringBuilder();
+ if (searchPath != null) {
+ for (int i = 0; i < searchPath.length; i++) {
+ if (i > 0) {
+ searchPathBuff.append(", ");
+ }
+ ParserUtil.quoteIdentifier(searchPathBuff, searchPath[i], HasSQL.QUOTE_ONLY_WHEN_REQUIRED);
+ }
+ }
+ StringUtils.quoteStringSQL(buff, searchPathBuff.toString());
+ buff.append(" SEARCH_PATH");
} else if (readIf("SERVER_VERSION")) {
// for PostgreSQL compatibility
- buff.append("'8.1.4' AS SERVER_VERSION FROM DUAL");
+ buff.append("'" + Constants.PG_VERSION + "' SERVER_VERSION");
} else if (readIf("SERVER_ENCODING")) {
// for PostgreSQL compatibility
- buff.append("'UTF8' AS SERVER_ENCODING FROM DUAL");
+ buff.append("'UTF8' SERVER_ENCODING");
+ } else if (readIf("SSL")) {
+ // for PostgreSQL compatibility
+ buff.append("'off' SSL");
} else if (readIf("TABLES")) {
// for MySQL compatibility
- String schema = Constants.SCHEMA_MAIN;
- if (readIf("FROM")) {
- schema = readUniqueIdentifier();
+ String schema = database.getMainSchema().getName();
+ if (readIf(FROM)) {
+ schema = readIdentifier();
}
buff.append("TABLE_NAME, TABLE_SCHEMA FROM "
+ "INFORMATION_SCHEMA.TABLES "
+ "WHERE TABLE_SCHEMA=? ORDER BY TABLE_NAME");
- paramValues.add(ValueString.get(schema));
+ paramValues.add(ValueVarchar.get(schema));
} else if (readIf("COLUMNS")) {
// for MySQL compatibility
- read("FROM");
+ read(FROM);
String tableName = readIdentifierWithSchema();
String schemaName = getSchema().getName();
- paramValues.add(ValueString.get(tableName));
- if (readIf("FROM")) {
- schemaName = readUniqueIdentifier();
- }
- buff.append("C.COLUMN_NAME FIELD, "
- + "C.TYPE_NAME || '(' || C.NUMERIC_PRECISION || ')' TYPE, "
+ paramValues.add(ValueVarchar.get(tableName));
+ if (readIf(FROM)) {
+ schemaName = readIdentifier();
+ }
+ buff.append("C.COLUMN_NAME FIELD, ");
+ boolean oldInformationSchema = session.isOldInformationSchema();
+ buff.append(oldInformationSchema
+ ? "C.COLUMN_TYPE"
+ : "DATA_TYPE_SQL(?2, ?1, 'TABLE', C.DTD_IDENTIFIER)");
+ buff.append(" TYPE, "
+ "C.IS_NULLABLE \"NULL\", "
+ "CASE (SELECT MAX(I.INDEX_TYPE_NAME) FROM "
- + "INFORMATION_SCHEMA.INDEXES I "
- + "WHERE I.TABLE_SCHEMA=C.TABLE_SCHEMA "
- + "AND I.TABLE_NAME=C.TABLE_NAME "
- + "AND I.COLUMN_NAME=C.COLUMN_NAME)"
+ + "INFORMATION_SCHEMA.INDEXES I ");
+ if (!oldInformationSchema) {
+ buff.append("JOIN INFORMATION_SCHEMA.INDEX_COLUMNS IC ");
+ }
+ buff.append("WHERE I.TABLE_SCHEMA=C.TABLE_SCHEMA "
+ + "AND I.TABLE_NAME=C.TABLE_NAME ");
+ if (oldInformationSchema) {
+ buff.append("AND I.COLUMN_NAME=C.COLUMN_NAME");
+ } else {
+ buff.append("AND IC.TABLE_SCHEMA=C.TABLE_SCHEMA "
+ + "AND IC.TABLE_NAME=C.TABLE_NAME "
+ + "AND IC.INDEX_SCHEMA=I.INDEX_SCHEMA "
+ + "AND IC.INDEX_NAME=I.INDEX_NAME "
+ + "AND IC.COLUMN_NAME=C.COLUMN_NAME");
+ }
+ buff.append(')'
+ "WHEN 'PRIMARY KEY' THEN 'PRI' "
- + "WHEN 'UNIQUE INDEX' THEN 'UNI' ELSE '' END KEY, "
- + "IFNULL(COLUMN_DEFAULT, 'NULL') DEFAULT "
+ + "WHEN 'UNIQUE INDEX' THEN 'UNI' ELSE '' END `KEY`, "
+ + "COALESCE(COLUMN_DEFAULT, 'NULL') `DEFAULT` "
+ "FROM INFORMATION_SCHEMA.COLUMNS C "
- + "WHERE C.TABLE_NAME=? AND C.TABLE_SCHEMA=? "
+ + "WHERE C.TABLE_NAME=?1 AND C.TABLE_SCHEMA=?2 "
+ "ORDER BY C.ORDINAL_POSITION");
- paramValues.add(ValueString.get(schemaName));
+ paramValues.add(ValueVarchar.get(schemaName));
} else if (readIf("DATABASES") || readIf("SCHEMAS")) {
// for MySQL compatibility
buff.append("SCHEMA_NAME FROM INFORMATION_SCHEMA.SCHEMATA");
+ } else if (database.getMode().getEnum() == ModeEnum.PostgreSQL && readIf("ALL")) {
+ // for PostgreSQL compatibility
+ buff.append("NAME, SETTING FROM PG_CATALOG.PG_SETTINGS");
}
boolean b = session.getAllowLiterals();
try {
@@ -958,7 +1378,7 @@ private Prepared parseShow() {
}
}
- private static Prepared prepare(Session s, String sql,
+ private static Prepared prepare(SessionLocal s, String sql,
ArrayList<Value> paramValues) {
Prepared prep = s.prepare(sql);
ArrayList<Parameter> params = prep.getParameters();
@@ -971,299 +1391,726 @@ private static Prepared prepare(Session s, String sql,
return prep;
}
- private boolean isSelect() {
- int start = lastParseIndex;
- while (readIf("(")) {
- // need to read ahead, it could be a nested union:
- // ((select 1) union (select 1))
+ private boolean isDerivedTable() {
+ int offset = tokenIndex;
+ int level = 0;
+ while (tokens.get(offset).tokenType() == OPEN_PAREN) {
+ level++;
+ offset++;
+ }
+ boolean query = isDirectQuery(offset);
+ s: if (query && level > 0) {
+ offset = scanToCloseParen(offset + 1);
+ if (offset < 0) {
+ query = false;
+ break s;
+ }
+ for (;;) {
+ switch (tokens.get(offset).tokenType()) {
+ case SEMICOLON:
+ case END_OF_INPUT:
+ query = false;
+ break s;
+ case OPEN_PAREN:
+ offset = scanToCloseParen(offset + 1);
+ if (offset < 0) {
+ query = false;
+ break s;
+ }
+ break;
+ case CLOSE_PAREN:
+ if (--level == 0) {
+ break s;
+ }
+ offset++;
+ break;
+ case JOIN:
+ query = false;
+ break s;
+ default:
+ offset++;
+ }
+ }
}
- boolean select = isToken("SELECT") || isToken("FROM");
- parseIndex = start;
- read();
- return select;
+ return query;
}
- private Merge parseMerge() {
- Merge command = new Merge(session);
- currentPrepared = command;
+ private boolean isQuery() {
+ int offset = tokenIndex;
+ int level = 0;
+ while (tokens.get(offset).tokenType() == OPEN_PAREN) {
+ level++;
+ offset++;
+ }
+ boolean query = isDirectQuery(offset);
+ s: if (query && level > 0) {
+ offset++;
+ do {
+ offset = scanToCloseParen(offset);
+ if (offset < 0) {
+ query = false;
+ break s;
+ }
+ switch (tokens.get(offset).tokenType()) {
+ default:
+ query = false;
+ break s;
+ case END_OF_INPUT:
+ case SEMICOLON:
+ case CLOSE_PAREN:
+ case ORDER:
+ case OFFSET:
+ case FETCH:
+ case LIMIT:
+ case UNION:
+ case EXCEPT:
+ case MINUS:
+ case INTERSECT:
+ }
+ } while (--level > 0);
+ }
+ return query;
+ }
+
+ private int scanToCloseParen(int offset) {
+ for (int level = 0;;) {
+ switch (tokens.get(offset).tokenType()) {
+ case SEMICOLON:
+ case END_OF_INPUT:
+ return -1;
+ case OPEN_PAREN:
+ level++;
+ break;
+ case CLOSE_PAREN:
+ if (--level < 0) {
+ return offset + 1;
+ }
+ }
+ offset++;
+ }
+ }
+
+ private boolean isQueryQuick() {
+ int offset = tokenIndex;
+ while (tokens.get(offset).tokenType() == OPEN_PAREN) {
+ offset++;
+ }
+ return isDirectQuery(offset);
+ }
+
+ private boolean isDirectQuery(int offset) {
+ boolean query;
+ switch (tokens.get(offset).tokenType()) {
+ case SELECT:
+ case VALUES:
+ case WITH:
+ query = true;
+ break;
+ case TABLE:
+ query = tokens.get(offset + 1).tokenType() != OPEN_PAREN;
+ break;
+ default:
+ query = false;
+ }
+ return query;
+ }
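+
+ /*
+ * Illustrative sketch: at a TABLE token, "TABLE T" is a query (explicit
+ * table), while "TABLE(ID INT = (1, 2))" is a table-function call, so
+ * the one-token lookahead on OPEN_PAREN distinguishes the two. Names
+ * here are assumptions.
+ */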
+
+ private Prepared parseMerge(int start) {
read("INTO");
- Table table = readTableOrView();
- command.setTable(table);
- if (readIf("(")) {
- if (isSelect()) {
- command.setQuery(parseSelect());
- read(")");
+ TableFilter targetTableFilter = readSimpleTableFilter();
+ if (readIf(USING)) {
+ return parseMergeUsing(targetTableFilter, start);
+ }
+ return parseMergeInto(targetTableFilter, start);
+ }
+
+ private Prepared parseMergeInto(TableFilter targetTableFilter, int start) {
+ Merge command = new Merge(session, false);
+ currentPrepared = command;
+ command.setTable(targetTableFilter.getTable());
+ Table table = command.getTable();
+ if (readIf(OPEN_PAREN)) {
+ if (isQueryQuick()) {
+ command.setQuery(parseQuery());
+ read(CLOSE_PAREN);
return command;
}
- Column[] columns = parseColumnList(table);
- command.setColumns(columns);
+ command.setColumns(parseColumnList(table));
}
- if (readIf("KEY")) {
- read("(");
- Column[] keys = parseColumnList(table);
- command.setKeys(keys);
+ if (readIf(KEY)) {
+ read(OPEN_PAREN);
+ command.setKeys(parseColumnList(table));
}
- if (readIf("VALUES")) {
- do {
- ArrayList<Expression> values = New.arrayList();
- read("(");
- if (!readIf(")")) {
- do {
- if (readIf("DEFAULT")) {
- values.add(null);
- } else {
- values.add(readExpression());
- }
- } while (readIfMore());
- }
- command.addRow(values.toArray(new Expression[values.size()]));
- } while (readIf(","));
+ if (readIf(VALUES)) {
+ parseValuesForCommand(command);
} else {
- command.setQuery(parseSelect());
+ command.setQuery(parseQuery());
}
+ setSQL(command, start);
return command;
}
- private Insert parseInsert() {
- Insert command = new Insert(session);
+ private MergeUsing parseMergeUsing(TableFilter targetTableFilter, int start) {
+ MergeUsing command = new MergeUsing(session, targetTableFilter);
currentPrepared = command;
- read("INTO");
- Table table = readTableOrView();
- command.setTable(table);
- Column[] columns = null;
- if (readIf("(")) {
- if (isSelect()) {
- command.setQuery(parseSelect());
- read(")");
- return command;
+ command.setSourceTableFilter(readTableReference());
+ read(ON);
+ Expression condition = readExpression();
+ command.setOnCondition(condition);
+
+ read(WHEN);
+ do {
+ boolean matched = readIf("MATCHED");
+ if (matched) {
+ parseWhenMatched(command);
+ } else {
+ parseWhenNotMatched(command);
+ }
+ } while (readIf(WHEN));
+
+ setSQL(command, start);
+ return command;
+ }
+
+ private void parseWhenMatched(MergeUsing command) {
+ Expression and = readIf(AND) ? readExpression() : null;
+ read("THEN");
+ MergeUsing.When when;
+ if (readIf("UPDATE")) {
+ MergeUsing.WhenMatchedThenUpdate update = command.new WhenMatchedThenUpdate();
+ update.setSetClauseList(readUpdateSetClause(command.getTargetTableFilter()));
+ when = update;
+ } else {
+ read("DELETE");
+ when = command.new WhenMatchedThenDelete();
+ }
+ if (and == null && database.getMode().mergeWhere && readIf(WHERE)) {
+ and = readExpression();
+ }
+ when.setAndCondition(and);
+ command.addWhen(when);
+ }
+
+ private void parseWhenNotMatched(MergeUsing command) {
+ read(NOT);
+ read("MATCHED");
+ Expression and = readIf(AND) ? readExpression() : null;
+ read("THEN");
+ read("INSERT");
+ Column[] columns = readIf(OPEN_PAREN) ? parseColumnList(command.getTargetTableFilter().getTable()) : null;
+ Boolean overridingSystem = readIfOverriding();
+ read(VALUES);
+ read(OPEN_PAREN);
+ ArrayList<Expression> values = Utils.newSmallArrayList();
+ if (!readIf(CLOSE_PAREN)) {
+ do {
+ values.add(readExpressionOrDefault());
+ } while (readIfMore());
+ }
+ MergeUsing.WhenNotMatched when = command.new WhenNotMatched(columns, overridingSystem,
+ values.toArray(new Expression[0]));
+ when.setAndCondition(and);
+ command.addWhen(when);
+ }
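+
+ /*
+ * Illustrative sketch of the grammar handled above (table and column
+ * names assumed):
+ *
+ * MERGE INTO T USING S ON T.ID = S.ID
+ * WHEN MATCHED AND S.FLAG THEN UPDATE SET V = S.V
+ * WHEN MATCHED THEN DELETE
+ * WHEN NOT MATCHED THEN INSERT (ID, V) VALUES (S.ID, S.V)
+ */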
+
+ private Insert parseInsert(int start) {
+ Insert command = new Insert(session);
+ currentPrepared = command;
+ Mode mode = database.getMode();
+ if (mode.onDuplicateKeyUpdate && readIf("IGNORE")) {
+ command.setIgnore(true);
+ }
+ read("INTO");
+ Table table = readTableOrView();
+ command.setTable(table);
+ Column[] columns = null;
+ if (readIf(OPEN_PAREN)) {
+ if (isQueryQuick()) {
+ command.setQuery(parseQuery());
+ read(CLOSE_PAREN);
+ return command;
}
columns = parseColumnList(table);
command.setColumns(columns);
}
+ Boolean overridingSystem = readIfOverriding();
+ command.setOverridingSystem(overridingSystem);
+ boolean requireQuery = false;
if (readIf("DIRECT")) {
+ requireQuery = true;
command.setInsertFromSelect(true);
}
if (readIf("SORTED")) {
- command.setSortedInsertMode(true);
- }
- if (readIf("DEFAULT")) {
- read("VALUES");
- Expression[] expr = {};
- command.addRow(expr);
- } else if (readIf("VALUES")) {
- read("(");
- do {
- ArrayList<Expression> values = New.arrayList();
- if (!readIf(")")) {
- do {
- if (readIf("DEFAULT")) {
- values.add(null);
- } else {
- values.add(readExpression());
- }
- } while (readIfMore());
+ requireQuery = true;
+ }
+ readValues: {
+ if (!requireQuery) {
+ if (overridingSystem == null && readIf(DEFAULT)) {
+ read(VALUES);
+ command.addRow(new Expression[0]);
+ break readValues;
}
- command.addRow(values.toArray(new Expression[values.size()]));
- // the following condition will allow (..),; and (..);
- } while (readIf(",") && readIf("("));
- } else if (readIf("SET")) {
- if (columns != null) {
- throw getSyntaxError();
- }
- ArrayList<Column> columnList = New.arrayList();
- ArrayList<Expression> values = New.arrayList();
- do {
- columnList.add(parseColumn(table));
- read("=");
- Expression expression;
- if (readIf("DEFAULT")) {
- expression = ValueExpression.getDefault();
- } else {
- expression = readExpression();
+ if (readIf(VALUES)) {
+ parseValuesForCommand(command);
+ break readValues;
}
- values.add(expression);
- } while (readIf(","));
- command.setColumns(columnList.toArray(new Column[columnList.size()]));
- command.addRow(values.toArray(new Expression[values.size()]));
- } else {
- command.setQuery(parseSelect());
+ if (readIf(SET)) {
+ parseInsertSet(command, table, columns);
+ break readValues;
+ }
+ }
+ command.setQuery(parseQuery());
+ }
+ if (mode.onDuplicateKeyUpdate || mode.insertOnConflict || mode.isolationLevelInSelectOrInsertStatement) {
+ parseInsertCompatibility(command, table, mode);
+ }
+ setSQL(command, start);
+ return command;
+ }
+
+ private Boolean readIfOverriding() {
+ Boolean overridingSystem = null;
+ if (readIf("OVERRIDING")) {
+ if (readIf(USER)) {
+ overridingSystem = Boolean.FALSE;
+ } else {
+ read("SYSTEM");
+ overridingSystem = Boolean.TRUE;
+ }
+ read(VALUE);
+ }
+ return overridingSystem;
+ }
+
+ private void parseInsertSet(Insert command, Table table, Column[] columns) {
+ if (columns != null) {
+ throw getSyntaxError();
}
- if (database.getMode().onDuplicateKeyUpdate) {
- if (readIf("ON")) {
+ ArrayList<Column> columnList = Utils.newSmallArrayList();
+ ArrayList<Expression> values = Utils.newSmallArrayList();
+ do {
+ columnList.add(parseColumn(table));
+ read(EQUAL);
+ values.add(readExpressionOrDefault());
+ } while (readIf(COMMA));
+ command.setColumns(columnList.toArray(new Column[0]));
+ command.addRow(values.toArray(new Expression[0]));
+ }
+
+ private void parseInsertCompatibility(Insert command, Table table, Mode mode) {
+ if (mode.onDuplicateKeyUpdate) {
+ if (readIf(ON)) {
read("DUPLICATE");
- read("KEY");
+ read(KEY);
read("UPDATE");
do {
- Column column = parseColumn(table);
- read("=");
- Expression expression;
- if (readIf("DEFAULT")) {
- expression = ValueExpression.getDefault();
- } else {
- expression = readExpression();
+ String columnName = readIdentifier();
+ if (readIf(DOT)) {
+ String schemaOrTableName = columnName;
+ String tableOrColumnName = readIdentifier();
+ if (readIf(DOT)) {
+ if (!table.getSchema().getName().equals(schemaOrTableName)) {
+ throw DbException.get(ErrorCode.SCHEMA_NAME_MUST_MATCH);
+ }
+ columnName = readIdentifier();
+ } else {
+ columnName = tableOrColumnName;
+ tableOrColumnName = schemaOrTableName;
+ }
+ if (!table.getName().equals(tableOrColumnName)) {
+ throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableOrColumnName);
+ }
}
- command.addAssignmentForDuplicate(column, expression);
- } while (readIf(","));
+ Column column = table.getColumn(columnName);
+ read(EQUAL);
+ command.addAssignmentForDuplicate(column, readExpressionOrDefault());
+ } while (readIf(COMMA));
}
}
- if (database.getMode().isolationLevelInSelectOrInsertStatement) {
+ if (mode.insertOnConflict) {
+ if (readIf(ON)) {
+ read("CONFLICT");
+ read("DO");
+ read("NOTHING");
+ command.setIgnore(true);
+ }
+ }
+ if (mode.isolationLevelInSelectOrInsertStatement) {
parseIsolationClause();
}
- return command;
}
/**
* MySQL compatibility. REPLACE is similar to MERGE.
*/
- private Replace parseReplace() {
- Replace command = new Replace(session);
+ private Merge parseReplace(int start) {
+ Merge command = new Merge(session, true);
currentPrepared = command;
read("INTO");
Table table = readTableOrView();
command.setTable(table);
- if (readIf("(")) {
- if (isSelect()) {
- command.setQuery(parseSelect());
- read(")");
+ if (readIf(OPEN_PAREN)) {
+ if (isQueryQuick()) {
+ command.setQuery(parseQuery());
+ read(CLOSE_PAREN);
return command;
}
- Column[] columns = parseColumnList(table);
- command.setColumns(columns);
+ command.setColumns(parseColumnList(table));
}
- if (readIf("VALUES")) {
- do {
- ArrayList<Expression> values = New.arrayList();
- read("(");
- if (!readIf(")")) {
- do {
- if (readIf("DEFAULT")) {
- values.add(null);
- } else {
- values.add(readExpression());
- }
- } while (readIfMore());
- }
- command.addRow(values.toArray(new Expression[values.size()]));
- } while (readIf(","));
+ if (readIf(VALUES)) {
+ parseValuesForCommand(command);
} else {
- command.setQuery(parseSelect());
+ command.setQuery(parseQuery());
}
+ setSQL(command, start);
return command;
}
- private TableFilter readTableFilter(boolean fromOuter) {
+ private void parseValuesForCommand(CommandWithValues command) {
+ ArrayList<Expression> values = Utils.newSmallArrayList();
+ do {
+ values.clear();
+ boolean multiColumn;
+ if (readIf(ROW)) {
+ read(OPEN_PAREN);
+ multiColumn = true;
+ } else {
+ multiColumn = readIf(OPEN_PAREN);
+ }
+ if (multiColumn) {
+ if (!readIf(CLOSE_PAREN)) {
+ do {
+ values.add(readExpressionOrDefault());
+ } while (readIfMore());
+ }
+ } else {
+ values.add(readExpressionOrDefault());
+ }
+ command.addRow(values.toArray(new Expression[0]));
+ } while (readIf(COMMA));
+ }
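+
+ /*
+ * Illustrative sketch (values assumed): each of these row forms is
+ * accepted and normalized to an Expression[] row:
+ *
+ * VALUES (1, 'a'), ROW (2, 'b') -- explicit rows
+ * VALUES 1, 2, 3 -- one-column shorthand
+ * VALUES (DEFAULT, 'c') -- DEFAULT placeholder
+ */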
+
+ private TableFilter readTablePrimary() {
Table table;
String alias = null;
- if (readIf("(")) {
- if (isSelect()) {
- Query query = parseSelectUnion();
- read(")");
- query.setParameterList(New.arrayList(parameters));
- query.init();
- Session s;
- if (createView != null) {
- s = database.getSystemSession();
- } else {
- s = session;
- }
- alias = session.getNextSystemIdentifier(sqlCommand);
- table = TableView.createTempView(s, session.getUser(), alias,
- query, currentSelect);
+ label: if (readIf(OPEN_PAREN)) {
+ if (isDerivedTable()) {
+ // Derived table
+ return readDerivedTableWithCorrelation();
} else {
- TableFilter top;
- if (database.getSettings().nestedJoins) {
- top = readTableFilter(false);
- top = readJoin(top, currentSelect, false, false);
- top = getNested(top);
- } else {
- top = readTableFilter(fromOuter);
- top = readJoin(top, currentSelect, false, fromOuter);
- }
- read(")");
- alias = readFromAlias(null);
- if (alias != null) {
- top.setAlias(alias);
+ // Parenthesized joined table
+ TableFilter tableFilter = readTableReference();
+ read(CLOSE_PAREN);
+ return readCorrelation(tableFilter);
+ }
+ } else if (readIf(VALUES)) {
+ TableValueConstructor query = parseValues();
+ alias = session.getNextSystemIdentifier(sqlCommand);
+ table = query.toTable(alias, null, parameters, createView != null, currentSelect);
+ } else if (readIf(TABLE)) {
+ // Table function derived table
+ read(OPEN_PAREN);
+ ArrayTableFunction function = readTableFunction(ArrayTableFunction.TABLE);
+ table = new FunctionTable(database.getMainSchema(), session, function);
+ } else {
+ boolean quoted = token.isQuoted();
+ String tableName = readIdentifier();
+ int backupIndex = tokenIndex;
+ schemaName = null;
+ if (readIf(DOT)) {
+ tableName = readIdentifierWithSchema2(tableName);
+ } else if (!quoted && readIf(TABLE)) {
+ table = readDataChangeDeltaTable(upperName(tableName), backupIndex);
+ break label;
+ }
+ Schema schema;
+ if (schemaName == null) {
+ schema = null;
+ } else {
+ schema = findSchema(schemaName);
+ if (schema == null) {
+ if (isDualTable(tableName)) {
+ table = new DualTable(database);
+ break label;
+ }
+ throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName);
}
- return top;
}
- } else if (readIf("VALUES")) {
- table = parseValuesTable().getTable();
- } else {
- String tableName = readIdentifierWithSchema(null);
- Schema schema = getSchema();
- boolean foundLeftBracket = readIf("(");
- if (foundLeftBracket && readIf("INDEX")) {
+ boolean foundLeftParen = readIf(OPEN_PAREN);
+ if (foundLeftParen && readIf("INDEX")) {
// Sybase compatibility with
// "select * from test (index table1_index)"
readIdentifierWithSchema(null);
- read(")");
- foundLeftBracket = false;
+ read(CLOSE_PAREN);
+ foundLeftParen = false;
}
- if (foundLeftBracket) {
- Schema mainSchema = database.getSchema(Constants.SCHEMA_MAIN);
+ if (foundLeftParen) {
+ Schema mainSchema = database.getMainSchema();
if (equalsToken(tableName, RangeTable.NAME)
|| equalsToken(tableName, RangeTable.ALIAS)) {
Expression min = readExpression();
- read(",");
+ read(COMMA);
Expression max = readExpression();
- if (readIf(",")) {
+ if (readIf(COMMA)) {
Expression step = readExpression();
- read(")");
- table = new RangeTable(mainSchema, min, max, step,
- false);
+ read(CLOSE_PAREN);
+ table = new RangeTable(mainSchema, min, max, step);
} else {
- read(")");
- table = new RangeTable(mainSchema, min, max, false);
+ read(CLOSE_PAREN);
+ table = new RangeTable(mainSchema, min, max);
}
} else {
- Expression expr = readFunction(schema, tableName);
- if (!(expr instanceof FunctionCall)) {
- throw getSyntaxError();
- }
- FunctionCall call = (FunctionCall) expr;
- if (!call.isDeterministic()) {
- recompileAlways = true;
- }
- table = new FunctionTable(mainSchema, session, expr, call);
+ table = new FunctionTable(mainSchema, session, readTableFunction(tableName, schema));
}
- } else if (equalsToken("DUAL", tableName)) {
- table = getDualTable(false);
- } else if (database.getMode().sysDummy1 &&
- equalsToken("SYSDUMMY1", tableName)) {
- table = getDualTable(false);
} else {
table = readTableOrView(tableName);
}
}
- alias = readFromAlias(alias);
- return new TableFilter(session, table, alias, rightsChecked,
- currentSelect);
+ ArrayList<String> derivedColumnNames = null;
+ IndexHints indexHints = null;
+ if (readIfUseIndex()) {
+ indexHints = parseIndexHints(table);
+ } else {
+ alias = readFromAlias(alias);
+ if (alias != null) {
+ derivedColumnNames = readDerivedColumnNames();
+ if (readIfUseIndex()) {
+ indexHints = parseIndexHints(table);
+ }
+ }
+ }
+ return buildTableFilter(table, alias, derivedColumnNames, indexHints);
}
- private String readFromAlias(String alias) {
- if (readIf("AS")) {
- alias = readAliasIdentifier();
- } else if (currentTokenType == IDENTIFIER) {
- // left and right are not keywords (because they are functions as
- // well)
- if (!isToken("LEFT") && !isToken("RIGHT") && !isToken("FULL")) {
- alias = readAliasIdentifier();
+ private TableFilter readCorrelation(TableFilter tableFilter) {
+ String alias = readFromAlias(null);
+ if (alias != null) {
+ tableFilter.setAlias(alias);
+ ArrayList<String> derivedColumnNames = readDerivedColumnNames();
+ if (derivedColumnNames != null) {
+ tableFilter.setDerivedColumns(derivedColumnNames);
+ }
+ }
+ return tableFilter;
+ }
+
+ private TableFilter readDerivedTableWithCorrelation() {
+ Query query = parseQueryExpression();
+ read(CLOSE_PAREN);
+ Table table;
+ String alias;
+ ArrayList<String> derivedColumnNames = null;
+ IndexHints indexHints = null;
+ if (readIfUseIndex()) {
+ alias = session.getNextSystemIdentifier(sqlCommand);
+ table = query.toTable(alias, null, parameters, createView != null, currentSelect);
+ indexHints = parseIndexHints(table);
+ } else {
+ alias = readFromAlias(null);
+ if (alias != null) {
+ derivedColumnNames = readDerivedColumnNames();
+ Column[] columnTemplates = null;
+ if (derivedColumnNames != null) {
+ query.init();
+ columnTemplates = TableView.createQueryColumnTemplateList(
+ derivedColumnNames.toArray(new String[0]), query, new String[1])
+ .toArray(new Column[0]);
+ }
+ table = query.toTable(alias, columnTemplates, parameters, createView != null, currentSelect);
+ if (readIfUseIndex()) {
+ indexHints = parseIndexHints(table);
+ }
+ } else {
+ alias = session.getNextSystemIdentifier(sqlCommand);
+ table = query.toTable(alias, null, parameters, createView != null, currentSelect);
+ }
+ }
+ return buildTableFilter(table, alias, derivedColumnNames, indexHints);
+ }
+
+ private TableFilter buildTableFilter(Table table, String alias, ArrayList<String> derivedColumnNames,
+ IndexHints indexHints) {
+ if (database.getMode().discardWithTableHints) {
+ discardWithTableHints();
+ }
+ // CTEs are stored as views; inherit the alias from the table name
+ if (alias == null && table.isView() && table.isTableExpression()) {
+ alias = table.getName();
+ }
+ TableFilter filter = new TableFilter(session, table, alias, rightsChecked,
+ currentSelect, orderInFrom++, indexHints);
+ if (derivedColumnNames != null) {
+ filter.setDerivedColumns(derivedColumnNames);
+ }
+ return filter;
+ }
+
+ private Table readDataChangeDeltaTable(String resultOptionName, int backupIndex) {
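+ // Data change delta table, e.g. (illustrative):
+ // SELECT * FROM FINAL TABLE (INSERT INTO TEST VALUES (1));
+ // OLD TABLE wraps UPDATE, DELETE, or MERGE; NEW and FINAL TABLE wrap
+ // INSERT, UPDATE, or MERGE (plus REPLACE in compatibility modes).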
+ read(OPEN_PAREN);
+ int start = tokenIndex;
+ DataChangeStatement statement;
+ ResultOption resultOption = ResultOption.FINAL;
+ switch (resultOptionName) {
+ case "OLD":
+ resultOption = ResultOption.OLD;
+ if (readIf("UPDATE")) {
+ statement = parseUpdate(start);
+ } else if (readIf("DELETE")) {
+ statement = parseDelete(start);
+ } else if (readIf("MERGE")) {
+ statement = (DataChangeStatement) parseMerge(start);
+ } else if (database.getMode().replaceInto && readIf("REPLACE")) {
+ statement = parseReplace(start);
+ } else {
+ throw getSyntaxError();
+ }
+ break;
+ case "NEW":
+ resultOption = ResultOption.NEW;
+ //$FALL-THROUGH$
+ case "FINAL":
+ if (readIf("INSERT")) {
+ statement = parseInsert(start);
+ } else if (readIf("UPDATE")) {
+ statement = parseUpdate(start);
+ } else if (readIf("MERGE")) {
+ statement = (DataChangeStatement) parseMerge(start);
+ } else if (database.getMode().replaceInto && readIf("REPLACE")) {
+ statement = parseReplace(start);
+ } else {
+ throw getSyntaxError();
+ }
+ break;
+ default:
+ setTokenIndex(backupIndex);
+ addExpected("OLD TABLE");
+ addExpected("NEW TABLE");
+ addExpected("FINAL TABLE");
+ throw getSyntaxError();
+ }
+ read(CLOSE_PAREN);
+ if (currentSelect != null) {
+ // LOBs aren't copied, so disable lazy evaluation for safety
+ currentSelect.setNeverLazy(true);
+ }
+ return new DataChangeDeltaTable(getSchemaWithDefault(), session, statement, resultOption);
+ }
+
+ private TableFunction readTableFunction(String name, Schema schema) {
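+ // Built-in table functions (UNNEST, TABLE_DISTINCT, CSVREAD, LINK_SCHEMA)
+ // are resolved by name when no schema is given; anything else must be a
+ // user-defined Java table function, e.g. SELECT * FROM CSVREAD('data.csv')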
+ if (schema == null) {
+ switch (upperName(name)) {
+ case "UNNEST":
+ return readUnnestFunction();
+ case "TABLE_DISTINCT":
+ return readTableFunction(ArrayTableFunction.TABLE_DISTINCT);
+ case "CSVREAD":
+ recompileAlways = true;
+ return readParameters(new CSVReadFunction());
+ case "LINK_SCHEMA":
+ recompileAlways = true;
+ return readParameters(new LinkSchemaFunction());
}
}
+ FunctionAlias functionAlias = getFunctionAliasWithinPath(name, schema);
+ if (!functionAlias.isDeterministic()) {
+ recompileAlways = true;
+ }
+ ArrayList<Expression> argList = Utils.newSmallArrayList();
+ if (!readIf(CLOSE_PAREN)) {
+ do {
+ argList.add(readExpression());
+ } while (readIfMore());
+ }
+ return new JavaTableFunction(functionAlias, argList.toArray(new Expression[0]));
+ }
+
+ private boolean readIfUseIndex() {
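+ // Looks ahead for a MySQL-style index hint, e.g. (illustrative):
+ // SELECT * FROM TEST USE INDEX (IDX_A, IDX_B)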
+ int start = tokenIndex;
+ if (!readIf("USE")) {
+ return false;
+ }
+ if (!readIf("INDEX")) {
+ setTokenIndex(start);
+ return false;
+ }
+ return true;
+ }
+
+ private IndexHints parseIndexHints(Table table) {
+ read(OPEN_PAREN);
+ LinkedHashSet<String> indexNames = new LinkedHashSet<>();
+ if (!readIf(CLOSE_PAREN)) {
+ do {
+ String indexName = readIdentifierWithSchema();
+ Index index = table.getIndex(indexName);
+ indexNames.add(index.getName());
+ } while (readIfMore());
+ }
+ return IndexHints.createUseIndexHints(indexNames);
+ }
+
+ private String readFromAlias(String alias) {
+ if (readIf(AS) || isIdentifier()) {
+ alias = readIdentifier();
+ }
return alias;
}
+ private ArrayList<String> readDerivedColumnNames() {
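+ // Derived column list after an alias, e.g. the (C1, C2) in:
+ // SELECT * FROM TEST AS T(C1, C2)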
+ if (readIf(OPEN_PAREN)) {
+ ArrayList<String> derivedColumnNames = new ArrayList<>();
+ do {
+ derivedColumnNames.add(readIdentifier());
+ } while (readIfMore());
+ return derivedColumnNames;
+ }
+ return null;
+ }
+
+ private void discardWithTableHints() {
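+ // Parses and discards SQL Server-style table hints, e.g. (illustrative):
+ // SELECT * FROM TEST WITH (NOLOCK, INDEX(IDX_A))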
+ if (readIf(WITH)) {
+ read(OPEN_PAREN);
+ do {
+ discardTableHint();
+ } while (readIfMore());
+ }
+ }
+
+ private void discardTableHint() {
+ if (readIf("INDEX")) {
+ if (readIf(OPEN_PAREN)) {
+ do {
+ readExpression();
+ } while (readIfMore());
+ } else {
+ read(EQUAL);
+ readExpression();
+ }
+ } else {
+ readExpression();
+ }
+ }
+
private Prepared parseTruncate() {
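+ // TRUNCATE TABLE TEST [ CONTINUE IDENTITY | RESTART IDENTITY ];
+ // without a clause the behaviour follows Mode.truncateTableRestartIdentity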
- read("TABLE");
+ read(TABLE);
Table table = readTableOrView();
+ boolean restart = database.getMode().truncateTableRestartIdentity;
+ if (readIf("CONTINUE")) {
+ read("IDENTITY");
+ restart = false;
+ } else if (readIf("RESTART")) {
+ read("IDENTITY");
+ restart = true;
+ }
TruncateTable command = new TruncateTable(session);
command.setTable(table);
+ command.setRestart(restart);
return command;
}
private boolean readIfExists(boolean ifExists) {
- if (readIf("IF")) {
- read("EXISTS");
+ if (readIf(IF)) {
+ read(EXISTS);
ifExists = true;
}
return ifExists;
@@ -1271,16 +2118,16 @@ private boolean readIfExists(boolean ifExists) {
private Prepared parseComment() {
int type = 0;
- read("ON");
+ read(ON);
boolean column = false;
- if (readIf("TABLE") || readIf("VIEW")) {
+ if (readIf(TABLE) || readIf("VIEW")) {
type = DbObject.TABLE_OR_VIEW;
} else if (readIf("COLUMN")) {
column = true;
type = DbObject.TABLE_OR_VIEW;
} else if (readIf("CONSTANT")) {
type = DbObject.CONSTANT;
- } else if (readIf("CONSTRAINT")) {
+ } else if (readIf(CONSTRAINT)) {
type = DbObject.CONSTRAINT;
} else if (readIf("ALIAS")) {
type = DbObject.FUNCTION_ALIAS;
@@ -1294,10 +2141,10 @@ private Prepared parseComment() {
type = DbObject.SEQUENCE;
} else if (readIf("TRIGGER")) {
type = DbObject.TRIGGER;
- } else if (readIf("USER")) {
+ } else if (readIf(USER)) {
type = DbObject.USER;
} else if (readIf("DOMAIN")) {
- type = DbObject.USER_DATATYPE;
+ type = DbObject.DOMAIN;
} else {
throw getSyntaxError();
}
@@ -1305,63 +2152,58 @@ private Prepared parseComment() {
String objectName;
if (column) {
// can't use readIdentifierWithSchema() because
- // it would not read schema.table.column correctly
- // if the db name is equal to the schema name
- ArrayList<String> list = New.arrayList();
- do {
- list.add(readUniqueIdentifier());
- } while (readIf("."));
- schemaName = session.getCurrentSchemaName();
- if (list.size() == 4) {
- if (!equalsToken(database.getShortName(), list.get(0))) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex,
- "database name");
- }
- list.remove(0);
- }
- if (list.size() == 3) {
- schemaName = list.get(0);
- list.remove(0);
- }
- if (list.size() != 2) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex,
- "table.column");
- }
- objectName = list.get(0);
+ // it would not read [catalog.]schema.table.column correctly
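+ // (accepted forms: table.column, schema.table.column,
+ // and catalog.schema.table.column)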
+ objectName = readIdentifier();
+ String tmpSchemaName = null;
+ read(DOT);
+ boolean allowEmpty = database.getMode().allowEmptySchemaValuesAsDefaultSchema;
+ String columnName = allowEmpty && currentTokenType == DOT ? null : readIdentifier();
+ if (readIf(DOT)) {
+ tmpSchemaName = objectName;
+ objectName = columnName;
+ columnName = allowEmpty && currentTokenType == DOT ? null : readIdentifier();
+ if (readIf(DOT)) {
+ checkDatabaseName(tmpSchemaName);
+ tmpSchemaName = objectName;
+ objectName = columnName;
+ columnName = readIdentifier();
+ }
+ }
+ if (columnName == null || objectName == null) {
+ throw DbException.getSyntaxError(sqlCommand, token.start(), "table.column");
+ }
+ schemaName = tmpSchemaName != null ? tmpSchemaName : session.getCurrentSchemaName();
command.setColumn(true);
- command.setColumnName(list.get(1));
+ command.setColumnName(columnName);
} else {
objectName = readIdentifierWithSchema();
}
command.setSchemaName(schemaName);
command.setObjectName(objectName);
command.setObjectType(type);
- read("IS");
+ read(IS);
command.setCommentExpression(readExpression());
return command;
}
private Prepared parseDrop() {
- if (readIf("TABLE")) {
+ if (readIf(TABLE)) {
boolean ifExists = readIfExists(false);
- String tableName = readIdentifierWithSchema();
- DropTable command = new DropTable(session, getSchema());
- command.setTableName(tableName);
- while (readIf(",")) {
- tableName = readIdentifierWithSchema();
- DropTable next = new DropTable(session, getSchema());
- next.setTableName(tableName);
- command.addNextDropTable(next);
- }
+ DropTable command = new DropTable(session);
+ do {
+ String tableName = readIdentifierWithSchema();
+ command.addTable(getSchema(), tableName);
+ } while (readIf(COMMA));
ifExists = readIfExists(ifExists);
command.setIfExists(ifExists);
if (readIf("CASCADE")) {
- command.setDropAction(ConstraintReferential.CASCADE);
+ command.setDropAction(ConstraintActionType.CASCADE);
readIf("CONSTRAINTS");
} else if (readIf("RESTRICT")) {
- command.setDropAction(ConstraintReferential.RESTRICT);
+ command.setDropAction(ConstraintActionType.RESTRICT);
} else if (readIf("IGNORE")) {
- command.setDropAction(ConstraintReferential.SET_DEFAULT);
+ // TODO SET_DEFAULT works in the same way as CASCADE
+ command.setDropAction(ConstraintActionType.SET_DEFAULT);
}
return command;
} else if (readIf("INDEX")) {
@@ -1371,11 +2213,15 @@ private Prepared parseDrop() {
command.setIndexName(indexName);
ifExists = readIfExists(ifExists);
command.setIfExists(ifExists);
+ // Support for MySQL: DROP INDEX index_name ON tbl_name
+ if (readIf(ON)) {
+ readIdentifierWithSchema();
+ }
return command;
- } else if (readIf("USER")) {
+ } else if (readIf(USER)) {
boolean ifExists = readIfExists(false);
DropUser command = new DropUser(session);
- command.setUserName(readUniqueIdentifier());
+ command.setUserName(readIdentifier());
ifExists = readIfExists(ifExists);
readIf("CASCADE");
command.setIfExists(ifExists);
@@ -1411,7 +2257,7 @@ private Prepared parseDrop() {
command.setViewName(viewName);
ifExists = readIfExists(ifExists);
command.setIfExists(ifExists);
- Integer dropAction = parseCascadeOrRestrict();
+ ConstraintActionType dropAction = parseCascadeOrRestrict();
if (dropAction != null) {
command.setDropAction(dropAction);
}
@@ -1419,7 +2265,7 @@ private Prepared parseDrop() {
} else if (readIf("ROLE")) {
boolean ifExists = readIfExists(false);
DropRole command = new DropRole(session);
- command.setRoleName(readUniqueIdentifier());
+ command.setRoleName(readIdentifier());
ifExists = readIfExists(ifExists);
command.setIfExists(ifExists);
return command;
@@ -1435,11 +2281,15 @@ private Prepared parseDrop() {
} else if (readIf("SCHEMA")) {
boolean ifExists = readIfExists(false);
DropSchema command = new DropSchema(session);
- command.setSchemaName(readUniqueIdentifier());
+ command.setSchemaName(readIdentifier());
ifExists = readIfExists(ifExists);
command.setIfExists(ifExists);
+ ConstraintActionType dropAction = parseCascadeOrRestrict();
+ if (dropAction != null) {
+ command.setDropAction(dropAction);
+ }
return command;
- } else if (readIf("ALL")) {
+ } else if (readIf(ALL)) {
read("OBJECTS");
DropDatabase command = new DropDatabase(session);
command.setDropAllObjects(true);
@@ -1448,200 +2298,240 @@ private Prepared parseDrop() {
command.setDeleteFiles(true);
}
return command;
- } else if (readIf("DOMAIN")) {
- return parseDropUserDataType();
- } else if (readIf("TYPE")) {
- return parseDropUserDataType();
- } else if (readIf("DATATYPE")) {
- return parseDropUserDataType();
+ } else if (readIf("DOMAIN") || readIf("TYPE") || readIf("DATATYPE")) {
+ return parseDropDomain();
} else if (readIf("AGGREGATE")) {
return parseDropAggregate();
+ } else if (readIf("SYNONYM")) {
+ boolean ifExists = readIfExists(false);
+ String synonymName = readIdentifierWithSchema();
+ DropSynonym command = new DropSynonym(session, getSchema());
+ command.setSynonymName(synonymName);
+ ifExists = readIfExists(ifExists);
+ command.setIfExists(ifExists);
+ return command;
}
throw getSyntaxError();
}
- private DropUserDataType parseDropUserDataType() {
+ private DropDomain parseDropDomain() {
boolean ifExists = readIfExists(false);
- DropUserDataType command = new DropUserDataType(session);
- command.setTypeName(readUniqueIdentifier());
+ String domainName = readIdentifierWithSchema();
+ DropDomain command = new DropDomain(session, getSchema());
+ command.setDomainName(domainName);
ifExists = readIfExists(ifExists);
- command.setIfExists(ifExists);
+ command.setIfDomainExists(ifExists);
+ ConstraintActionType dropAction = parseCascadeOrRestrict();
+ if (dropAction != null) {
+ command.setDropAction(dropAction);
+ }
return command;
}
private DropAggregate parseDropAggregate() {
boolean ifExists = readIfExists(false);
- DropAggregate command = new DropAggregate(session);
- command.setName(readUniqueIdentifier());
+ String name = readIdentifierWithSchema();
+ DropAggregate command = new DropAggregate(session, getSchema());
+ command.setName(name);
ifExists = readIfExists(ifExists);
command.setIfExists(ifExists);
return command;
}
- private TableFilter readJoin(TableFilter top, Select command,
- boolean nested, boolean fromOuter) {
- boolean joined = false;
- TableFilter last = top;
- boolean nestedJoins = database.getSettings().nestedJoins;
- while (true) {
- if (readIf("RIGHT")) {
+ private TableFilter readTableReference() {
+ for (TableFilter top, last = top = readTablePrimary(), join;; last = join) {
+ switch (currentTokenType) {
+ case RIGHT: {
+ read();
readIf("OUTER");
- read("JOIN");
- joined = true;
+ read(JOIN);
// the right-hand side is usually the 'inner' table
- TableFilter newTop = readTableFilter(fromOuter);
- newTop = readJoin(newTop, command, nested, true);
- Expression on = null;
- if (readIf("ON")) {
- on = readExpression();
- }
- if (nestedJoins) {
- top = getNested(top);
- newTop.addJoin(top, true, false, on);
- } else {
- newTop.addJoin(top, true, false, on);
- }
- top = newTop;
- last = newTop;
- } else if (readIf("LEFT")) {
+ join = readTableReference();
+ Expression on = readJoinSpecification(top, join, true);
+ addJoin(join, top, true, on);
+ top = join;
+ break;
+ }
+ case LEFT: {
+ read();
readIf("OUTER");
- read("JOIN");
- joined = true;
- TableFilter join = readTableFilter(true);
- if (nestedJoins) {
- join = readJoin(join, command, true, true);
- } else {
- top = readJoin(top, command, false, true);
- }
- Expression on = null;
- if (readIf("ON")) {
- on = readExpression();
- }
- top.addJoin(join, true, false, on);
- last = join;
- } else if (readIf("FULL")) {
+ read(JOIN);
+ join = readTableReference();
+ Expression on = readJoinSpecification(top, join, false);
+ addJoin(top, join, true, on);
+ break;
+ }
+ case FULL:
+ read();
throw getSyntaxError();
- } else if (readIf("INNER")) {
- read("JOIN");
- joined = true;
- TableFilter join = readTableFilter(fromOuter);
- top = readJoin(top, command, false, false);
- Expression on = null;
- if (readIf("ON")) {
- on = readExpression();
- }
- if (nestedJoins) {
- top.addJoin(join, false, false, on);
- } else {
- top.addJoin(join, fromOuter, false, on);
- }
- last = join;
- } else if (readIf("JOIN")) {
- joined = true;
- TableFilter join = readTableFilter(fromOuter);
- top = readJoin(top, command, false, false);
- Expression on = null;
- if (readIf("ON")) {
- on = readExpression();
- }
- if (nestedJoins) {
- top.addJoin(join, false, false, on);
- } else {
- top.addJoin(join, fromOuter, false, on);
- }
- last = join;
- } else if (readIf("CROSS")) {
- read("JOIN");
- joined = true;
- TableFilter join = readTableFilter(fromOuter);
- if (nestedJoins) {
- top.addJoin(join, false, false, null);
- } else {
- top.addJoin(join, fromOuter, false, null);
- }
- last = join;
- } else if (readIf("NATURAL")) {
- read("JOIN");
- joined = true;
- TableFilter join = readTableFilter(fromOuter);
- Column[] tableCols = last.getTable().getColumns();
- Column[] joinCols = join.getTable().getColumns();
- String tableSchema = last.getTable().getSchema().getName();
- String joinSchema = join.getTable().getSchema().getName();
+ case INNER: {
+ read();
+ read(JOIN);
+ join = readTableReference();
+ Expression on = readJoinSpecification(top, join, false);
+ addJoin(top, join, false, on);
+ break;
+ }
+ case JOIN: {
+ read();
+ join = readTableReference();
+ Expression on = readJoinSpecification(top, join, false);
+ addJoin(top, join, false, on);
+ break;
+ }
+ case CROSS: {
+ read();
+ read(JOIN);
+ join = readTablePrimary();
+ addJoin(top, join, false, null);
+ break;
+ }
+ case NATURAL: {
+ read();
+ read(JOIN);
+ join = readTablePrimary();
Expression on = null;
- for (Column tc : tableCols) {
- String tableColumnName = tc.getName();
- for (Column c : joinCols) {
- String joinColumnName = c.getName();
- if (equalsToken(tableColumnName, joinColumnName)) {
- join.addNaturalJoinColumn(c);
- Expression tableExpr = new ExpressionColumn(
- database, tableSchema,
- last.getTableAlias(), tableColumnName);
- Expression joinExpr = new ExpressionColumn(
- database, joinSchema, join.getTableAlias(),
- joinColumnName);
- Expression equal = new Comparison(session,
- Comparison.EQUAL, tableExpr, joinExpr);
- if (on == null) {
- on = equal;
- } else {
- on = new ConditionAndOr(ConditionAndOr.AND, on,
- equal);
- }
- }
+ for (Column column1 : last.getTable().getColumns()) {
+ Column column2 = join.getColumn(last.getColumnName(column1), true);
+ if (column2 != null) {
+ on = addJoinColumn(on, last, join, column1, column2, false);
}
}
- if (nestedJoins) {
- top.addJoin(join, false, nested, on);
- } else {
- top.addJoin(join, fromOuter, false, on);
- }
- last = join;
- } else {
+ addJoin(top, join, false, on);
break;
}
+ default:
+ if (expectedList != null) {
+ // FULL is intentionally excluded
+ addMultipleExpected(RIGHT, LEFT, INNER, JOIN, CROSS, NATURAL);
+ }
+ return top;
+ }
}
- if (nested && joined) {
- top = getNested(top);
- }
- return top;
}
- private TableFilter getNested(TableFilter n) {
- String joinTable = Constants.PREFIX_JOIN + parseIndex;
- TableFilter top = new TableFilter(session, getDualTable(true),
- joinTable, rightsChecked, currentSelect);
- top.addJoin(n, false, true, null);
- return top;
+ private Expression readJoinSpecification(TableFilter filter1, TableFilter filter2, boolean rightJoin) {
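+ // Join specification: either ON <condition> or USING (col1, col2, ...),
+ // where USING builds an equality condition over the listed common columns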
+ Expression on = null;
+ if (readIf(ON)) {
+ on = readExpression();
+ } else if (readIf(USING)) {
+ read(OPEN_PAREN);
+ do {
+ String columnName = readIdentifier();
+ on = addJoinColumn(on, filter1, filter2, filter1.getColumn(columnName, false),
+ filter2.getColumn(columnName, false), rightJoin);
+ } while (readIfMore());
+ }
+ return on;
}
- private Prepared parseExecute() {
- ExecuteProcedure command = new ExecuteProcedure(session);
- String procedureName = readAliasIdentifier();
- Procedure p = session.getProcedure(procedureName);
- if (p == null) {
- throw DbException.get(ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1,
- procedureName);
+ private Expression addJoinColumn(Expression on, TableFilter filter1, TableFilter filter2, Column column1,
+ Column column2, boolean rightJoin) {
+ if (rightJoin) {
+ filter1.addCommonJoinColumns(column1, column2, filter2);
+ filter2.addCommonJoinColumnToExclude(column2);
+ } else {
+ filter1.addCommonJoinColumns(column1, column1, filter1);
+ filter2.addCommonJoinColumnToExclude(column2);
+ }
+ Expression tableExpr = new ExpressionColumn(database, filter1.getSchemaName(), filter1.getTableAlias(),
+ filter1.getColumnName(column1));
+ Expression joinExpr = new ExpressionColumn(database, filter2.getSchemaName(), filter2.getTableAlias(),
+ filter2.getColumnName(column2));
+ Expression equal = new Comparison(Comparison.EQUAL, tableExpr, joinExpr, false);
+ if (on == null) {
+ on = equal;
+ } else {
+ on = new ConditionAndOr(ConditionAndOr.AND, on, equal);
}
- command.setProcedure(p);
- if (readIf("(")) {
+ return on;
+ }
+
+ /**
+ * Add one join to another. This method creates a nested join between
+ * them if required.
+ *
+ * @param top parent join
+ * @param join child join
+ * @param outer if child join is an outer join
+ * @param on the join condition
+ * @see TableFilter#addJoin(TableFilter, boolean, Expression)
+ */
+ private void addJoin(TableFilter top, TableFilter join, boolean outer, Expression on) {
+ if (join.getJoin() != null) {
+ String joinTable = Constants.PREFIX_JOIN + token.start();
+ TableFilter n = new TableFilter(session, new DualTable(database),
+ joinTable, rightsChecked, currentSelect, join.getOrderInFrom(),
+ null);
+ n.setNestedJoin(join);
+ join = n;
+ }
+ top.addJoin(join, outer, on);
+ }
+
+ private Prepared parseExecutePostgre() {
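+ // EXECUTE IMMEDIATE <dynamic sql expression>, or
+ // EXECUTE <procedure name> [(arg, ...)] for a prepared procedure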
+ if (readIf("IMMEDIATE")) {
+ return new ExecuteImmediate(session, readExpression());
+ }
+ ExecuteProcedure command = new ExecuteProcedure(session);
+ String procedureName = readIdentifier();
+ Procedure p = session.getProcedure(procedureName);
+ if (p == null) {
+ throw DbException.get(ErrorCode.FUNCTION_ALIAS_NOT_FOUND_1,
+ procedureName);
+ }
+ command.setProcedure(p);
+ if (readIf(OPEN_PAREN)) {
for (int i = 0;; i++) {
command.setExpression(i, readExpression());
- if (readIf(")")) {
+ if (!readIfMore()) {
break;
}
- read(",");
}
}
return command;
}
+ private Prepared parseExecuteSQLServer() {
+ Call command = new Call(session);
+ currentPrepared = command;
+ String schemaName = null;
+ String name = readIdentifier();
+ if (readIf(DOT)) {
+ schemaName = name;
+ name = readIdentifier();
+ if (readIf(DOT)) {
+ checkDatabaseName(schemaName);
+ schemaName = name;
+ name = readIdentifier();
+ }
+ }
+ FunctionAlias functionAlias = getFunctionAliasWithinPath(name,
+ schemaName != null ? database.getSchema(schemaName) : null);
+ Expression[] args;
+ ArrayList<Expression> argList = Utils.newSmallArrayList();
+ if (currentTokenType != SEMICOLON && currentTokenType != END_OF_INPUT) {
+ do {
+ argList.add(readExpression());
+ } while (readIf(COMMA));
+ }
+ args = argList.toArray(new Expression[0]);
+ command.setExpression(new JavaFunction(functionAlias, args));
+ return command;
+ }
+
+ private FunctionAlias getFunctionAliasWithinPath(String name, Schema schema) {
+ UserDefinedFunction userDefinedFunction = findUserDefinedFunctionWithinPath(schema, name);
+ if (userDefinedFunction instanceof FunctionAlias) {
+ return (FunctionAlias) userDefinedFunction;
+ }
+ throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name);
+ }
+
private DeallocateProcedure parseDeallocate() {
readIf("PLAN");
- String procedureName = readAliasIdentifier();
DeallocateProcedure command = new DeallocateProcedure(session);
- command.setProcedureName(procedureName);
+ command.setProcedureName(readIdentifier());
return command;
}
@@ -1651,32 +2541,42 @@ private Explain parseExplain() {
command.setExecuteCommand(true);
} else {
if (readIf("PLAN")) {
- readIf("FOR");
+ readIf(FOR);
}
}
- if (isToken("SELECT") || isToken("FROM") || isToken("(")) {
- command.setCommand(parseSelect());
- } else if (readIf("DELETE")) {
- command.setCommand(parseDelete());
- } else if (readIf("UPDATE")) {
- command.setCommand(parseUpdate());
- } else if (readIf("INSERT")) {
- command.setCommand(parseInsert());
- } else if (readIf("MERGE")) {
- command.setCommand(parseMerge());
- } else if (readIf("WITH")) {
- command.setCommand(parseWith());
- } else {
- throw getSyntaxError();
+ switch (currentTokenType) {
+ case SELECT:
+ case TABLE:
+ case VALUES:
+ case WITH:
+ case OPEN_PAREN:
+ Query query = parseQuery();
+ query.setNeverLazy(true);
+ command.setCommand(query);
+ break;
+ default:
+ int start = tokenIndex;
+ if (readIf("DELETE")) {
+ command.setCommand(parseDelete(start));
+ } else if (readIf("UPDATE")) {
+ command.setCommand(parseUpdate(start));
+ } else if (readIf("INSERT")) {
+ command.setCommand(parseInsert(start));
+ } else if (readIf("MERGE")) {
+ command.setCommand(parseMerge(start));
+ } else {
+ throw getSyntaxError();
+ }
}
return command;
}
- private Query parseSelect() {
+ private Query parseQuery() {
int paramIndex = parameters.size();
- Query command = parseSelectUnion();
- ArrayList<Parameter> params = New.arrayList();
- for (int i = paramIndex, size = parameters.size(); i < size; i++) {
+ Query command = parseQueryExpression();
+ int size = parameters.size();
+ ArrayList<Parameter> params = new ArrayList<>(size);
+ for (int i = paramIndex; i < size; i++) {
params.add(parameters.get(i));
}
command.setParameterList(params);
@@ -1684,63 +2584,89 @@ private Query parseSelect() {
return command;
}
- private Query parseSelectUnion() {
- int start = lastParseIndex;
- Query command = parseSelectSub();
- return parseSelectUnionExtension(command, start, false);
+ private Prepared parseWithStatementOrQuery(int start) {
+ int paramIndex = parameters.size();
+ Prepared command = parseWith();
+ int size = parameters.size();
+ ArrayList<Parameter> params = new ArrayList<>(size);
+ for (int i = paramIndex; i < size; i++) {
+ params.add(parameters.get(i));
+ }
+ command.setParameterList(params);
+ if (command instanceof Query) {
+ Query query = (Query) command;
+ query.init();
+ }
+ setSQL(command, start);
+ return command;
+ }
+
+ private Query parseQueryExpression() {
+ Query query;
+ if (readIf(WITH)) {
+ try {
+ query = (Query) parseWith();
+ } catch (ClassCastException e) {
+ throw DbException.get(ErrorCode.SYNTAX_ERROR_1, "WITH statement supports only query in this context");
+ }
+ // recursive queries cannot be lazy
+ query.setNeverLazy(true);
+ } else {
+ query = parseQueryExpressionBodyAndEndOfQuery();
+ }
+ return query;
}
- private Query parseSelectUnionExtension(Query command, int start,
- boolean unionOnly) {
- while (true) {
- if (readIf("UNION")) {
- SelectUnion union = new SelectUnion(session, command);
- if (readIf("ALL")) {
- union.setUnionType(SelectUnion.UNION_ALL);
+ private Query parseQueryExpressionBodyAndEndOfQuery() {
+ int start = tokenIndex;
+ Query command = parseQueryExpressionBody();
+ parseEndOfQuery(command);
+ setSQL(command, start);
+ return command;
+ }
+
+ private Query parseQueryExpressionBody() {
+ Query command = parseQueryTerm();
+ for (;;) {
+ SelectUnion.UnionType type;
+ if (readIf(UNION)) {
+ if (readIf(ALL)) {
+ type = SelectUnion.UnionType.UNION_ALL;
} else {
- readIf("DISTINCT");
- union.setUnionType(SelectUnion.UNION);
- }
- union.setRight(parseSelectSub());
- command = union;
- } else if (readIf("MINUS") || readIf("EXCEPT")) {
- SelectUnion union = new SelectUnion(session, command);
- union.setUnionType(SelectUnion.EXCEPT);
- union.setRight(parseSelectSub());
- command = union;
- } else if (readIf("INTERSECT")) {
- SelectUnion union = new SelectUnion(session, command);
- union.setUnionType(SelectUnion.INTERSECT);
- union.setRight(parseSelectSub());
- command = union;
+ readIf(DISTINCT);
+ type = SelectUnion.UnionType.UNION;
+ }
+ } else if (readIf(EXCEPT) || readIf(MINUS)) {
+ type = SelectUnion.UnionType.EXCEPT;
} else {
break;
}
+ command = new SelectUnion(session, type, command, parseQueryTerm());
}
- if (!unionOnly) {
- parseEndOfQuery(command);
+ return command;
+ }
+
+ private Query parseQueryTerm() {
+ Query command = parseQueryPrimary();
+ while (readIf(INTERSECT)) {
+ command = new SelectUnion(session, SelectUnion.UnionType.INTERSECT, command, parseQueryPrimary());
}
- setSQL(command, null, start);
return command;
}
private void parseEndOfQuery(Query command) {
- if (readIf("ORDER")) {
+ if (readIf(ORDER)) {
read("BY");
Select oldSelect = currentSelect;
if (command instanceof Select) {
currentSelect = (Select) command;
}
- ArrayList<SelectOrderBy> orderList = New.arrayList();
+ ArrayList<QueryOrderBy> orderList = Utils.newSmallArrayList();
do {
- boolean canBeNumber = true;
- if (readIf("=")) {
- canBeNumber = false;
- }
- SelectOrderBy order = new SelectOrderBy();
+ boolean canBeNumber = currentTokenType == LITERAL;
+ QueryOrderBy order = new QueryOrderBy();
Expression expr = readExpression();
- if (canBeNumber && expr instanceof ValueExpression &&
- expr.getType() == Value.INT) {
+ if (canBeNumber && expr instanceof ValueExpression && expr.getType().getValueType() == Value.INTEGER) {
order.columnIndexExpr = expr;
} else if (expr instanceof Parameter) {
recompileAlways = true;
@@ -1748,87 +2674,74 @@ private void parseEndOfQuery(Query command) {
} else {
order.expression = expr;
}
- if (readIf("DESC")) {
- order.descending = true;
- } else {
- readIf("ASC");
- }
- if (readIf("NULLS")) {
- if (readIf("FIRST")) {
- order.nullsFirst = true;
- } else {
- read("LAST");
- order.nullsLast = true;
- }
- }
+ order.sortType = parseSortType();
orderList.add(order);
- } while (readIf(","));
+ } while (readIf(COMMA));
command.setOrder(orderList);
currentSelect = oldSelect;
}
- if (database.getMode().supportOffsetFetch) {
+ if (command.getFetch() == null) {
// make sure aggregate functions will not work here
Select temp = currentSelect;
currentSelect = null;
-
- // http://sqlpro.developpez.com/SQL2008/
- if (readIf("OFFSET")) {
+ boolean hasOffsetOrFetch = false;
+ // Standard SQL OFFSET / FETCH
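+ // e.g.: OFFSET 10 ROWS FETCH FIRST 5 ROWS ONLY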
+ if (readIf(OFFSET)) {
+ hasOffsetOrFetch = true;
command.setOffset(readExpression().optimize(session));
- if (!readIf("ROW")) {
- read("ROWS");
+ if (!readIf(ROW)) {
+ readIf("ROWS");
}
}
- if (readIf("FETCH")) {
+ if (readIf(FETCH)) {
+ hasOffsetOrFetch = true;
if (!readIf("FIRST")) {
read("NEXT");
}
- if (readIf("ROW")) {
- command.setLimit(ValueExpression.get(ValueInt.get(1)));
+ if (readIf(ROW) || readIf("ROWS")) {
+ command.setFetch(ValueExpression.get(ValueInteger.get(1)));
} else {
- Expression limit = readExpression().optimize(session);
- command.setLimit(limit);
- if (!readIf("ROW")) {
+ command.setFetch(readExpression().optimize(session));
+ if (readIf("PERCENT")) {
+ command.setFetchPercent(true);
+ }
+ if (!readIf(ROW)) {
read("ROWS");
}
}
- read("ONLY");
+ if (readIf(WITH)) {
+ read("TIES");
+ command.setWithTies(true);
+ } else {
+ read("ONLY");
+ }
}
-
- currentSelect = temp;
- }
- if (readIf("LIMIT")) {
- Select temp = currentSelect;
- // make sure aggregate functions will not work here
- currentSelect = null;
- Expression limit = readExpression().optimize(session);
- command.setLimit(limit);
- if (readIf("OFFSET")) {
- Expression offset = readExpression().optimize(session);
- command.setOffset(offset);
- } else if (readIf(",")) {
- // MySQL: [offset, ] rowcount
- Expression offset = limit;
- limit = readExpression().optimize(session);
- command.setOffset(offset);
- command.setLimit(limit);
- }
- if (readIf("SAMPLE_SIZE")) {
- Expression sampleSize = readExpression().optimize(session);
- command.setSampleSize(sampleSize);
+ // MySQL-style LIMIT / OFFSET
+ if (!hasOffsetOrFetch && database.getMode().limit && readIf(LIMIT)) {
+ Expression limit = readExpression().optimize(session);
+ if (readIf(OFFSET)) {
+ command.setOffset(readExpression().optimize(session));
+ } else if (readIf(COMMA)) {
+ // MySQL: [offset, ] rowcount
+ Expression offset = limit;
+ limit = readExpression().optimize(session);
+ command.setOffset(offset);
+ }
+ command.setFetch(limit);
}
currentSelect = temp;
}
- if (readIf("FOR")) {
+ if (readIf(FOR)) {
if (readIf("UPDATE")) {
if (readIf("OF")) {
do {
readIdentifierWithSchema();
- } while (readIf(","));
+ } while (readIf(COMMA));
} else if (readIf("NOWAIT")) {
// TODO parser: select for update nowait: should not wait
}
command.setForUpdate(true);
- } else if (readIf("READ") || readIf("FETCH")) {
+ } else if (readIf("READ") || readIf(FETCH)) {
read("ONLY");
}
}
@@ -1841,11 +2754,11 @@ private void parseEndOfQuery(Query command) {
* DB2 isolation clause
*/
private void parseIsolationClause() {
- if (readIf("WITH")) {
+ if (readIf(WITH)) {
if (readIf("RR") || readIf("RS")) {
// concurrent-access-resolution clause
if (readIf("USE")) {
- read("AND");
+ read(AND);
read("KEEP");
if (readIf("SHARE") || readIf("UPDATE") ||
readIf("EXCLUSIVE")) {
@@ -1859,1230 +2772,2973 @@ private void parseIsolationClause() {
}
}
- private Query parseSelectSub() {
- if (readIf("(")) {
- Query command = parseSelectUnion();
- read(")");
+ private Query parseQueryPrimary() {
+ if (readIf(OPEN_PAREN)) {
+ Query command = parseQueryExpressionBodyAndEndOfQuery();
+ read(CLOSE_PAREN);
return command;
}
- Select select = parseSelectSimple();
- return select;
+ int start = tokenIndex;
+ if (readIf(SELECT)) {
+ return parseSelect(start);
+ } else if (readIf(TABLE)) {
+ return parseExplicitTable(start);
+ }
+ read(VALUES);
+ return parseValues();
}
- private void parseSelectSimpleFromPart(Select command) {
+ private void parseSelectFromPart(Select command) {
do {
- TableFilter filter = readTableFilter(false);
- parseJoinTableFilter(filter, command);
- } while (readIf(","));
- }
-
- private void parseJoinTableFilter(TableFilter top, final Select command) {
- top = readJoin(top, command, false, top.isJoinOuter());
- command.addTableFilter(top, true);
- boolean isOuter = false;
- while (true) {
- TableFilter n = top.getNestedJoin();
- if (n != null) {
- n.visit(new TableFilterVisitor() {
- @Override
- public void accept(TableFilter f) {
- command.addTableFilter(f, false);
+ TableFilter top = readTableReference();
+ command.addTableFilter(top, true);
+ boolean isOuter = false;
+ for (;;) {
+ TableFilter n = top.getNestedJoin();
+ if (n != null) {
+ n.visit(f -> command.addTableFilter(f, false));
+ }
+ TableFilter join = top.getJoin();
+ if (join == null) {
+ break;
+ }
+ isOuter = isOuter | join.isJoinOuter();
+ if (isOuter) {
+ command.addTableFilter(join, false);
+ } else {
+ // make flat so the optimizer can work better
+ Expression on = join.getJoinCondition();
+ if (on != null) {
+ command.addCondition(on);
}
- });
- }
- TableFilter join = top.getJoin();
- if (join == null) {
- break;
- }
- isOuter = isOuter | join.isJoinOuter();
- if (isOuter) {
- command.addTableFilter(join, false);
- } else {
- // make flat so the optimizer can work better
- Expression on = join.getJoinCondition();
- if (on != null) {
- command.addCondition(on);
+ join.removeJoinCondition();
+ top.removeJoin();
+ command.addTableFilter(join, true);
}
- join.removeJoinCondition();
- top.removeJoin();
- command.addTableFilter(join, true);
+ top = join;
}
- top = join;
- }
+ } while (readIf(COMMA));
}
- private void parseSelectSimpleSelectPart(Select command) {
- Select temp = currentSelect;
- // make sure aggregate functions will not work in TOP and LIMIT
- currentSelect = null;
- if (readIf("TOP")) {
+ private void parseSelectExpressions(Select command) {
+ if (database.getMode().topInSelect && readIf("TOP")) {
+ Select temp = currentSelect;
+ // make sure aggregate functions will not work in TOP and LIMIT
+ currentSelect = null;
// can't read more complex expressions here because
// SELECT TOP 1 +? A FROM TEST could mean
// SELECT TOP (1+?) A FROM TEST or
// SELECT TOP 1 (+?) AS A FROM TEST
- Expression limit = readTerm().optimize(session);
- command.setLimit(limit);
- } else if (readIf("LIMIT")) {
- Expression offset = readTerm().optimize(session);
- command.setOffset(offset);
- Expression limit = readTerm().optimize(session);
- command.setLimit(limit);
- }
- currentSelect = temp;
- if (readIf("DISTINCT")) {
- command.setDistinct(true);
+ command.setFetch(readTerm().optimize(session));
+ if (readIf("PERCENT")) {
+ command.setFetchPercent(true);
+ }
+ if (readIf(WITH)) {
+ read("TIES");
+ command.setWithTies(true);
+ }
+ currentSelect = temp;
+ }
+ if (readIf(DISTINCT)) {
+ if (readIf(ON)) {
+ read(OPEN_PAREN);
+ ArrayList<Expression> distinctExpressions = Utils.newSmallArrayList();
+ do {
+ distinctExpressions.add(readExpression());
+ } while (readIfMore());
+ command.setDistinct(distinctExpressions.toArray(new Expression[0]));
+ } else {
+ command.setDistinct();
+ }
} else {
- readIf("ALL");
+ readIf(ALL);
}
- ArrayList<Expression> expressions = New.arrayList();
+ ArrayList<Expression> expressions = Utils.newSmallArrayList();
do {
- if (readIf("*")) {
- expressions.add(new Wildcard(null, null));
+ if (readIf(ASTERISK)) {
+ expressions.add(parseWildcard(null, null));
} else {
- Expression expr = readExpression();
- if (readIf("AS") || currentTokenType == IDENTIFIER) {
- String alias = readAliasIdentifier();
- boolean aliasColumnName = database.getSettings().aliasColumnName;
- aliasColumnName |= database.getMode().aliasColumnName;
- expr = new Alias(expr, alias, aliasColumnName);
+ switch (currentTokenType) {
+ case FROM:
+ case WHERE:
+ case GROUP:
+ case HAVING:
+ case WINDOW:
+ case QUALIFY:
+ case ORDER:
+ case OFFSET:
+ case FETCH:
+ case CLOSE_PAREN:
+ case SEMICOLON:
+ case END_OF_INPUT:
+ break;
+ default:
+ Expression expr = readExpression();
+ if (readIf(AS) || isIdentifier()) {
+ expr = new Alias(expr, readIdentifier(), database.getMode().aliasColumnName);
+ }
+ expressions.add(expr);
}
- expressions.add(expr);
}
- } while (readIf(","));
+ } while (readIf(COMMA));
command.setExpressions(expressions);
}
- private Select parseSelectSimple() {
- boolean fromFirst;
- if (readIf("SELECT")) {
- fromFirst = false;
- } else if (readIf("FROM")) {
- fromFirst = true;
- } else {
- throw getSyntaxError();
- }
- Select command = new Select(session);
- int start = lastParseIndex;
+ private Select parseSelect(int start) {
+ Select command = new Select(session, currentSelect);
Select oldSelect = currentSelect;
+ Prepared oldPrepared = currentPrepared;
currentSelect = command;
currentPrepared = command;
- if (fromFirst) {
- parseSelectSimpleFromPart(command);
- read("SELECT");
- parseSelectSimpleSelectPart(command);
+ parseSelectExpressions(command);
+ if (!readIf(FROM)) {
+ // select without FROM
+ TableFilter filter = new TableFilter(session, new DualTable(database), null, rightsChecked,
+ currentSelect, 0, null);
+ command.addTableFilter(filter, true);
} else {
- parseSelectSimpleSelectPart(command);
- if (!readIf("FROM")) {
- // select without FROM: convert to SELECT ... FROM
- // SYSTEM_RANGE(1,1)
- Table dual = getDualTable(false);
- TableFilter filter = new TableFilter(session, dual, null,
- rightsChecked, currentSelect);
- command.addTableFilter(filter, true);
- } else {
- parseSelectSimpleFromPart(command);
- }
+ parseSelectFromPart(command);
}
- if (readIf("WHERE")) {
- Expression condition = readExpression();
- command.addCondition(condition);
+ if (readIf(WHERE)) {
+ command.addCondition(readExpressionWithGlobalConditions());
}
// the GROUP BY is read in the scope of the outer select (or of no
// select at all) so that columns that are not grouped can be used
currentSelect = oldSelect;
- if (readIf("GROUP")) {
+ if (readIf(GROUP)) {
read("BY");
command.setGroupQuery();
- ArrayList<Expression> list = New.arrayList();
+ ArrayList<Expression> list = Utils.newSmallArrayList();
do {
- Expression expr = readExpression();
- list.add(expr);
- } while (readIf(","));
- command.setGroupBy(list);
+ if (isToken(OPEN_PAREN) && isOrdinaryGroupingSet()) {
+ if (!readIf(CLOSE_PAREN)) {
+ do {
+ list.add(readExpression());
+ } while (readIfMore());
+ }
+ } else {
+ Expression expr = readExpression();
+ if (database.getMode().groupByColumnIndex && expr instanceof ValueExpression &&
+ expr.getType().getValueType() == Value.INTEGER) {
+ ArrayList<Expression> expressions = command.getExpressions();
+ for (Expression e : expressions) {
+ if (e instanceof Wildcard) {
+ throw getSyntaxError();
+ }
+ }
+ int idx = expr.getValue(session).getInt();
+ if (idx < 1 || idx > expressions.size()) {
+ throw DbException.get(ErrorCode.GROUP_BY_NOT_IN_THE_RESULT, Integer.toString(idx),
+ Integer.toString(expressions.size()));
+ }
+ list.add(expressions.get(idx-1));
+ } else {
+ list.add(expr);
+ }
+ }
+ } while (readIf(COMMA));
+ if (!list.isEmpty()) {
+ command.setGroupBy(list);
+ }
}
currentSelect = command;
- if (readIf("HAVING")) {
+ if (readIf(HAVING)) {
command.setGroupQuery();
- Expression condition = readExpression();
- command.setHaving(condition);
+ command.setHaving(readExpressionWithGlobalConditions());
+ }
+ if (readIf(WINDOW)) {
+ do {
+ int sqlIndex = token.start();
+ String name = readIdentifier();
+ read(AS);
+ Window w = readWindowSpecification();
+ if (!currentSelect.addWindow(name, w)) {
+ throw DbException.getSyntaxError(sqlCommand, sqlIndex, "unique identifier");
+ }
+ } while (readIf(COMMA));
+ }
+ if (readIf(QUALIFY)) {
+ command.setWindowQuery();
+ command.setQualify(readExpressionWithGlobalConditions());
}
command.setParameterList(parameters);
currentSelect = oldSelect;
- setSQL(command, "SELECT", start);
+ currentPrepared = oldPrepared;
+ setSQL(command, start);
return command;
}
- private Table getDualTable(boolean noColumns) {
- Schema main = database.findSchema(Constants.SCHEMA_MAIN);
- Expression one = ValueExpression.get(ValueLong.get(1));
- return new RangeTable(main, one, one, noColumns);
+ /**
+ * Checks whether the current opening parenthesis can be the start of an
+ * ordinary grouping set. This method reads the parenthesis if it is.
+ *
+ * @return whether the current opening parenthesis can be the start of an
+ * ordinary grouping set
+ */
+ private boolean isOrdinaryGroupingSet() {
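+ // Distinguishes GROUP BY (A, B) (an ordinary grouping set) from a
+ // parenthesized expression such as GROUP BY (A || B) || C by looking
+ // at the token after the closing parenthesis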
+ int offset = scanToCloseParen(tokenIndex + 1);
+ if (offset < 0) {
+ // Try to parse as expression to get better syntax error
+ return false;
+ }
+ switch (tokens.get(offset).tokenType()) {
+ // End of query
+ case CLOSE_PAREN:
+ case SEMICOLON:
+ case END_OF_INPUT:
+ // Next grouping element
+ case COMMA:
+ // Next select clause
+ case HAVING:
+ case WINDOW:
+ case QUALIFY:
+ // Next query expression body clause
+ case UNION:
+ case EXCEPT:
+ case MINUS:
+ case INTERSECT:
+ // Next query expression clause
+ case ORDER:
+ case OFFSET:
+ case FETCH:
+ case LIMIT:
+ case FOR:
+ setTokenIndex(tokenIndex + 1);
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ private Query parseExplicitTable(int start) {
+ Table table = readTableOrView();
+ Select command = new Select(session, currentSelect);
+ TableFilter filter = new TableFilter(session, table, null, rightsChecked,
+ command, orderInFrom++, null);
+ command.addTableFilter(filter, true);
+ command.setExplicitTable();
+ setSQL(command, start);
+ return command;
}
- private void setSQL(Prepared command, String start, int startIndex) {
- String sql = originalSQL.substring(startIndex, lastParseIndex).trim();
- if (start != null) {
- sql = start + " " + sql;
+ private void setSQL(Prepared command, int start) {
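+ // Stores the trimmed SQL text from token 'start' up to the current token
+ // on the command, together with the matching sub-list of tokens (their
+ // offsets are shifted so they are relative to the trimmed text)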
+ String s = sqlCommand;
+ int beginIndex = tokens.get(start).start();
+ int endIndex = token.start();
+ while (beginIndex < endIndex && s.charAt(beginIndex) <= ' ') {
+ beginIndex++;
+ }
+ while (beginIndex < endIndex && s.charAt(endIndex - 1) <= ' ') {
+ endIndex--;
}
- command.setSQL(sql);
+ s = s.substring(beginIndex, endIndex);
+ ArrayList<Token> commandTokens;
+ if (start == 0 && currentTokenType == END_OF_INPUT) {
+ commandTokens = tokens;
+ if (beginIndex != 0) {
+ for (int i = 0, l = commandTokens.size() - 1; i < l; i++) {
+ commandTokens.get(i).subtractFromStart(beginIndex);
+ }
+ }
+ token.setStart(s.length());
+ sqlCommand = s;
+ } else {
+ List<Token> subList = tokens.subList(start, tokenIndex);
+ commandTokens = new ArrayList<>(subList.size() + 1);
+ for (int i = start; i < tokenIndex; i++) {
+ Token t = tokens.get(i).clone();
+ t.subtractFromStart(beginIndex);
+ commandTokens.add(t);
+ }
+ commandTokens.add(new Token.EndOfInputToken(s.length()));
+ }
+ command.setSQL(s, commandTokens);
}
- private Expression readExpression() {
- Expression r = readAnd();
- while (readIf("OR")) {
- r = new ConditionAndOr(ConditionAndOr.OR, r, readAnd());
+ private Expression readExpressionOrDefault() {
+ if (readIf(DEFAULT)) {
+ return ValueExpression.DEFAULT;
}
- return r;
+ return readExpression();
}
- private Expression readAnd() {
+ private Expression readExpressionWithGlobalConditions() {
Expression r = readCondition();
- while (readIf("AND")) {
- r = new ConditionAndOr(ConditionAndOr.AND, r, readCondition());
+ if (readIf(AND)) {
+ r = readAnd(new ConditionAndOr(ConditionAndOr.AND, r, readCondition()));
+ } else if (readIf("_LOCAL_AND_GLOBAL_")) {
+ r = readAnd(new ConditionLocalAndGlobal(r, readCondition()));
}
- return r;
+ return readExpressionPart2(r);
+ }
+
+ private Expression readExpression() {
+ return readExpressionPart2(readAnd(readCondition()));
+ }
+
+ private Expression readExpressionPart2(Expression r1) {
+ if (!readIf(OR)) {
+ return r1;
+ }
+ Expression r2 = readAnd(readCondition());
+ if (!readIf(OR)) {
+ return new ConditionAndOr(ConditionAndOr.OR, r1, r2);
+ }
+ // The logic above avoids allocating an ArrayList for the common
+ // two-operand case. We combine into ConditionAndOrN here rather than
+ // letting the optimisation pass do it, to avoid a StackOverflowError
+ // during deeply recursive operations such as mapColumns.
+ final ArrayList<Expression> expressions = new ArrayList<>();
+ expressions.add(r1);
+ expressions.add(r2);
+ do {
+ expressions.add(readAnd(readCondition()));
+ }
+ while (readIf(OR));
+ return new ConditionAndOrN(ConditionAndOr.OR, expressions);
+ }
+
+ private Expression readAnd(Expression r) {
+ if (!readIf(AND)) {
+ return r;
+ }
+ Expression expr2 = readCondition();
+ if (!readIf(AND)) {
+ return new ConditionAndOr(ConditionAndOr.AND, r, expr2);
+ }
+ // The logic above avoids allocating an ArrayList for the common
+ // two-operand case. We combine into ConditionAndOrN here rather than
+ // letting the optimisation pass do it, to avoid a StackOverflowError
+ // during deeply recursive operations such as mapColumns.
+ final ArrayList<Expression> expressions = new ArrayList<>();
+ expressions.add(r);
+ expressions.add(expr2);
+ do {
+ expressions.add(readCondition());
+ }
+ while (readIf(AND));
+ return new ConditionAndOrN(ConditionAndOr.AND, expressions);
}
private Expression readCondition() {
- if (readIf("NOT")) {
+ switch (currentTokenType) {
+ case NOT:
+ read();
return new ConditionNot(readCondition());
- }
- if (readIf("EXISTS")) {
- read("(");
- Query query = parseSelect();
+ case EXISTS: {
+ read();
+ read(OPEN_PAREN);
+ Query query = parseQuery();
// cannot reduce the expression because it might be a UNION or
// EXCEPT query with DISTINCT
- read(")");
- return new ConditionExists(query);
- }
- if (readIf("INTERSECTS")) {
- read("(");
- Expression r1 = readConcat();
- read(",");
- Expression r2 = readConcat();
- read(")");
- return new Comparison(session, Comparison.SPATIAL_INTERSECTS, r1,
- r2);
- }
- Expression r = readConcat();
- while (true) {
+ read(CLOSE_PAREN);
+ return new ExistsPredicate(query);
+ }
+ case UNIQUE: {
+ read();
+ read(OPEN_PAREN);
+ Query query = parseQuery();
+ read(CLOSE_PAREN);
+ return new UniquePredicate(query);
+ }
+ default:
+ int index = tokenIndex;
+ if (readIf("INTERSECTS")) {
+ if (readIf(OPEN_PAREN)) {
+ Expression r1 = readConcat();
+ read(COMMA);
+ Expression r2 = readConcat();
+ read(CLOSE_PAREN);
+ return new Comparison(Comparison.SPATIAL_INTERSECTS, r1, r2, false);
+ } else {
+ setTokenIndex(index);
+ }
+ }
+ if (expectedList != null) {
+ addMultipleExpected(NOT, EXISTS, UNIQUE);
+ addExpected("INTERSECTS");
+ }
+ }
+ Expression l, c = readConcat();
+ do {
+ l = c;
// special case: NOT NULL is not part of an expression (as in CREATE
// TABLE TEST(ID INT DEFAULT 0 NOT NULL))
- int backup = parseIndex;
- boolean not = false;
- if (readIf("NOT")) {
- not = true;
- if (isToken("NULL")) {
- // this really only works for NOT NULL!
- parseIndex = backup;
- currentToken = "NOT";
- break;
- }
+ int backup = tokenIndex;
+ boolean not = readIf(NOT);
+ if (not && isToken(NULL)) {
+ // this really only works for NOT NULL!
+ setTokenIndex(backup);
+ break;
}
- if (readIf("LIKE")) {
- Expression b = readConcat();
- Expression esc = null;
- if (readIf("ESCAPE")) {
- esc = readConcat();
+ c = readConditionRightHandSide(l, not, false);
+ } while (c != null);
+ return l;
+ }
+
+ private Expression readConditionRightHandSide(Expression r, boolean not, boolean whenOperand) {
+ if (!not && readIf(IS)) {
+ r = readConditionIs(r, whenOperand);
+ } else {
+ switch (currentTokenType) {
+ case BETWEEN: {
+ read();
+ boolean symmetric = readIf(SYMMETRIC);
+ if (!symmetric) {
+ readIf(ASYMMETRIC);
}
- recompileAlways = true;
- r = new CompareLike(database, r, b, esc, false);
- } else if (readIf("REGEXP")) {
- Expression b = readConcat();
- r = new CompareLike(database, r, b, null, true);
- } else if (readIf("IS")) {
- if (readIf("NOT")) {
- if (readIf("NULL")) {
- r = new Comparison(session, Comparison.IS_NOT_NULL, r,
- null);
- } else if (readIf("DISTINCT")) {
- read("FROM");
- r = new Comparison(session, Comparison.EQUAL_NULL_SAFE,
- r, readConcat());
- } else {
- r = new Comparison(session,
- Comparison.NOT_EQUAL_NULL_SAFE, r, readConcat());
+ Expression a = readConcat();
+ read(AND);
+ r = new BetweenPredicate(r, not, whenOperand, symmetric, a, readConcat());
+ break;
+ }
+ case IN:
+ read();
+ r = readInPredicate(r, not, whenOperand);
+ break;
+ case LIKE: {
+ read();
+ r = readLikePredicate(r, LikeType.LIKE, not, whenOperand);
+ break;
+ }
+ default:
+ if (readIf("ILIKE")) {
+ r = readLikePredicate(r, LikeType.ILIKE, not, whenOperand);
+ } else if (readIf("REGEXP")) {
+ Expression b = readConcat();
+ recompileAlways = true;
+ r = new CompareLike(database, r, not, whenOperand, b, null, LikeType.REGEXP);
+ } else if (not) {
+ if (whenOperand) {
+ return null;
}
- } else if (readIf("NULL")) {
- r = new Comparison(session, Comparison.IS_NULL, r, null);
- } else if (readIf("DISTINCT")) {
- read("FROM");
- r = new Comparison(session, Comparison.NOT_EQUAL_NULL_SAFE,
- r, readConcat());
- } else {
- r = new Comparison(session, Comparison.EQUAL_NULL_SAFE, r,
- readConcat());
- }
- } else if (readIf("IN")) {
- read("(");
- if (readIf(")")) {
- r = ValueExpression.get(ValueBoolean.get(false));
- } else {
- if (isSelect()) {
- Query query = parseSelect();
- r = new ConditionInSelect(database, r, query, false,
- Comparison.EQUAL);
- } else {
- ArrayList<Expression> v = New.arrayList();
- Expression last;
- do {
- last = readExpression();
- v.add(last);
- } while (readIf(","));
- if (v.size() == 1 && (last instanceof Subquery)) {
- Subquery s = (Subquery) last;
- Query q = s.getQuery();
- r = new ConditionInSelect(database, r, q, false,
- Comparison.EQUAL);
- } else {
- r = new ConditionIn(database, r, v);
- }
+ if (expectedList != null) {
+ addMultipleExpected(BETWEEN, IN, LIKE);
}
- read(")");
- }
- } else if (readIf("BETWEEN")) {
- Expression low = readConcat();
- read("AND");
- Expression high = readConcat();
- Expression condLow = new Comparison(session,
- Comparison.SMALLER_EQUAL, low, r);
- Expression condHigh = new Comparison(session,
- Comparison.BIGGER_EQUAL, high, r);
- r = new ConditionAndOr(ConditionAndOr.AND, condLow, condHigh);
- } else {
- int compareType = getCompareType(currentTokenType);
- if (compareType < 0) {
- break;
- }
- read();
- if (readIf("ALL")) {
- read("(");
- Query query = parseSelect();
- r = new ConditionInSelect(database, r, query, true,
- compareType);
- read(")");
- } else if (readIf("ANY") || readIf("SOME")) {
- read("(");
- Query query = parseSelect();
- r = new ConditionInSelect(database, r, query, false,
- compareType);
- read(")");
+ throw getSyntaxError();
} else {
- Expression right = readConcat();
- if (SysProperties.OLD_STYLE_OUTER_JOIN &&
- readIf("(") && readIf("+") && readIf(")")) {
- // support for a subset of old-fashioned Oracle outer
- // join with (+)
- if (r instanceof ExpressionColumn &&
- right instanceof ExpressionColumn) {
- ExpressionColumn leftCol = (ExpressionColumn) r;
- ExpressionColumn rightCol = (ExpressionColumn) right;
- ArrayList<TableFilter> filters = currentSelect
- .getTopFilters();
- for (TableFilter f : filters) {
- while (f != null) {
- leftCol.mapColumns(f, 0);
- rightCol.mapColumns(f, 0);
- f = f.getJoin();
- }
- }
- TableFilter leftFilter = leftCol.getTableFilter();
- TableFilter rightFilter = rightCol.getTableFilter();
- r = new Comparison(session, compareType, r, right);
- if (leftFilter != null && rightFilter != null) {
- int idx = filters.indexOf(rightFilter);
- if (idx >= 0) {
- filters.remove(idx);
- leftFilter.addJoin(rightFilter, true,
- false, r);
- } else {
- rightFilter.mapAndAddFilter(r);
- }
- r = ValueExpression.get(ValueBoolean.get(true));
- }
- }
- } else {
- r = new Comparison(session, compareType, r, right);
+ int compareType = getCompareType(currentTokenType);
+ if (compareType < 0) {
+ return null;
}
+ read();
+ r = readComparison(r, compareType, whenOperand);
}
}
- if (not) {
- r = new ConditionNot(r);
- }
}
return r;
}
- private Expression readConcat() {
- Expression r = readSum();
- while (true) {
- if (readIf("||")) {
- r = new Operation(Operation.CONCAT, r, readSum());
- } else if (readIf("~")) {
- if (readIf("*")) {
- Function function = Function.getFunction(database, "CAST");
- function.setDataType(new Column("X",
- Value.STRING_IGNORECASE));
- function.setParameter(0, r);
- r = function;
- }
- r = new CompareLike(database, r, readSum(), null, true);
- } else if (readIf("!~")) {
- if (readIf("*")) {
- Function function = Function.getFunction(database, "CAST");
- function.setDataType(new Column("X",
- Value.STRING_IGNORECASE));
- function.setParameter(0, r);
- r = function;
- }
- r = new ConditionNot(new CompareLike(database, r, readSum(),
- null, true));
+ private Expression readConditionIs(Expression left, boolean whenOperand) {
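+ // IS [NOT] NULL | DISTINCT FROM <expr> | TRUE | FALSE | UNKNOWN
+ // | OF (<type>, ...) | JSON [VALUE|ARRAY|OBJECT|SCALAR]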
+ boolean isNot = readIf(NOT);
+ switch (currentTokenType) {
+ case NULL:
+ read();
+ left = new NullPredicate(left, isNot, whenOperand);
+ break;
+ case DISTINCT:
+ read();
+ read(FROM);
+ left = readComparison(left, isNot ? Comparison.EQUAL_NULL_SAFE : Comparison.NOT_EQUAL_NULL_SAFE,
+ whenOperand);
+ break;
+ case TRUE:
+ read();
+ left = new BooleanTest(left, isNot, whenOperand, true);
+ break;
+ case FALSE:
+ read();
+ left = new BooleanTest(left, isNot, whenOperand, false);
+ break;
+ case UNKNOWN:
+ read();
+ left = new BooleanTest(left, isNot, whenOperand, null);
+ break;
+ default:
+ if (readIf("OF")) {
+ left = readTypePredicate(left, isNot, whenOperand);
+ } else if (readIf("JSON")) {
+ left = readJsonPredicate(left, isNot, whenOperand);
} else {
- return r;
+ if (expectedList != null) {
+ addMultipleExpected(NULL, DISTINCT, TRUE, FALSE, UNKNOWN);
+ }
+ /*
+ * Databases that were created in 1.4.199 and older
+ * versions can contain invalid generated IS [ NOT ]
+ * expressions.
+ */
+ if (whenOperand || !session.isQuirksMode()) {
+ throw getSyntaxError();
+ }
+ left = new Comparison(isNot ? Comparison.NOT_EQUAL_NULL_SAFE : Comparison.EQUAL_NULL_SAFE, left,
+ readConcat(), false);
}
}
+ return left;
}
- private Expression readSum() {
- Expression r = readFactor();
- while (true) {
- if (readIf("+")) {
- r = new Operation(Operation.PLUS, r, readFactor());
- } else if (readIf("-")) {
- r = new Operation(Operation.MINUS, r, readFactor());
- } else {
- return r;
- }
- }
+ private TypePredicate readTypePredicate(Expression left, boolean not, boolean whenOperand) {
+ read(OPEN_PAREN);
+ ArrayList<TypeInfo> typeList = Utils.newSmallArrayList();
+ do {
+ typeList.add(parseDataType());
+ } while (readIfMore());
+ return new TypePredicate(left, not, whenOperand, typeList.toArray(new TypeInfo[0]));
}
- private Expression readFactor() {
- Expression r = readTerm();
- while (true) {
- if (readIf("*")) {
- r = new Operation(Operation.MULTIPLY, r, readTerm());
- } else if (readIf("/")) {
- r = new Operation(Operation.DIVIDE, r, readTerm());
- } else if (readIf("%")) {
- r = new Operation(Operation.MODULUS, r, readTerm());
- } else {
- return r;
+ private Expression readInPredicate(Expression left, boolean not, boolean whenOperand) {
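+ // <expr> [NOT] IN (<query>) or <expr> [NOT] IN (v1, v2, ...);
+ // an empty list is only allowed in compatibility modes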
+ read(OPEN_PAREN);
+ if (!whenOperand && database.getMode().allowEmptyInPredicate && readIf(CLOSE_PAREN)) {
+ return ValueExpression.getBoolean(not);
+ }
+ ArrayList<Expression> v;
+ if (isQuery()) {
+ Query query = parseQuery();
+ if (!readIfMore()) {
+ return new ConditionInQuery(left, not, whenOperand, query, false, Comparison.EQUAL);
}
+ v = Utils.newSmallArrayList();
+ v.add(new Subquery(query));
+ } else {
+ v = Utils.newSmallArrayList();
}
+ do {
+ v.add(readExpression());
+ } while (readIfMore());
+ return new ConditionIn(left, not, whenOperand, v);
+ }
+
+ private IsJsonPredicate readJsonPredicate(Expression left, boolean not, boolean whenOperand) {
+ JSONItemType itemType;
+ if (readIf(VALUE)) {
+ itemType = JSONItemType.VALUE;
+ } else if (readIf(ARRAY)) {
+ itemType = JSONItemType.ARRAY;
+ } else if (readIf("OBJECT")) {
+ itemType = JSONItemType.OBJECT;
+ } else if (readIf("SCALAR")) {
+ itemType = JSONItemType.SCALAR;
+ } else {
+ itemType = JSONItemType.VALUE;
+ }
+ boolean unique = false;
+ if (readIf(WITH)) {
+ read(UNIQUE);
+ readIf("KEYS");
+ unique = true;
+ } else if (readIf("WITHOUT")) {
+ read(UNIQUE);
+ readIf("KEYS");
+ }
+ return new IsJsonPredicate(left, not, whenOperand, unique, itemType);
+ }
+
+ private Expression readLikePredicate(Expression left, LikeType likeType, boolean not, boolean whenOperand) {
+ Expression right = readConcat();
+ Expression esc = readIf("ESCAPE") ? readConcat() : null;
+ recompileAlways = true;
+ return new CompareLike(database, left, not, whenOperand, right, esc, likeType);
+ }
+
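+ // Parses the right-hand side of a comparison, including the quantified
+ // forms <op> ALL (query) and <op> ANY|SOME (query); = ANY (?) becomes
+ // an IN(parameter) condition. When the parenthesis does not open a
+ // query, the tokenizer is rewound to the saved index and a plain
+ // operand is read instead.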
+ private Expression readComparison(Expression left, int compareType, boolean whenOperand) {
+ int start = tokenIndex;
+ if (readIf(ALL)) {
+ read(OPEN_PAREN);
+ if (isQuery()) {
+ Query query = parseQuery();
+ left = new ConditionInQuery(left, false, whenOperand, query, true, compareType);
+ read(CLOSE_PAREN);
+ } else {
+ setTokenIndex(start);
+ left = new Comparison(compareType, left, readConcat(), whenOperand);
+ }
+ } else if (readIf(ANY) || readIf(SOME)) {
+ read(OPEN_PAREN);
+ if (currentTokenType == PARAMETER && compareType == Comparison.EQUAL) {
+ Parameter p = readParameter();
+ left = new ConditionInParameter(left, false, whenOperand, p);
+ read(CLOSE_PAREN);
+ } else if (isQuery()) {
+ Query query = parseQuery();
+ left = new ConditionInQuery(left, false, whenOperand, query, false, compareType);
+ read(CLOSE_PAREN);
+ } else {
+ setTokenIndex(start);
+ left = new Comparison(compareType, left, readConcat(), whenOperand);
+ }
+ } else {
+ left = new Comparison(compareType, left, readConcat(), whenOperand);
+ }
+ return left;
}
- private Expression readAggregate(int aggregateType, String aggregateName) {
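+ // Parses || chains; three or more operands are collected into a single
+ // n-ary ConcatenationOperation instead of a nested pair per operator.
+ // Also accepts the PostgreSQL ~ and !~ regular expression operators.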
+ private Expression readConcat() {
+ Expression op1 = readSum();
+ for (;;) {
+ switch (currentTokenType) {
+ case CONCATENATION: {
+ read();
+ Expression op2 = readSum();
+ if (readIf(CONCATENATION)) {
+ ConcatenationOperation c = new ConcatenationOperation();
+ c.addParameter(op1);
+ c.addParameter(op2);
+ do {
+ c.addParameter(readSum());
+ } while (readIf(CONCATENATION));
+ c.doneWithParameters();
+ op1 = c;
+ } else {
+ op1 = new ConcatenationOperation(op1, op2);
+ }
+ break;
+ }
+ case TILDE: // PostgreSQL compatibility
+ op1 = readTildeCondition(op1, false);
+ break;
+ case NOT_TILDE: // PostgreSQL compatibility
+ op1 = readTildeCondition(op1, true);
+ break;
+ default:
+ // Don't add the compatibility ~ and !~ operators to the expected tokens
+ addExpected(CONCATENATION);
+ return op1;
+ }
+ }
+ }
+
+ private Expression readSum() {
+ Expression r = readFactor();
+ while (true) {
+ if (readIf(PLUS_SIGN)) {
+ r = new BinaryOperation(OpType.PLUS, r, readFactor());
+ } else if (readIf(MINUS_SIGN)) {
+ r = new BinaryOperation(OpType.MINUS, r, readFactor());
+ } else {
+ return r;
+ }
+ }
+ }
+
+ private Expression readFactor() {
+ Expression r = readTerm();
+ while (true) {
+ if (readIf(ASTERISK)) {
+ r = new BinaryOperation(OpType.MULTIPLY, r, readTerm());
+ } else if (readIf(SLASH)) {
+ r = new BinaryOperation(OpType.DIVIDE, r, readTerm());
+ } else if (readIf(PERCENT)) {
+ r = new MathFunction(r, readTerm(), MathFunction.MOD);
+ } else {
+ return r;
+ }
+ }
+ }
+
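+ // PostgreSQL compatibility: ~ is a regular expression match; a
+ // following * makes it case-insensitive by casting the left operand
+ // to VARCHAR_IGNORECASE.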
+ private Expression readTildeCondition(Expression r, boolean not) {
+ read();
+ if (readIf(ASTERISK)) {
+ r = new CastSpecification(r, TypeInfo.TYPE_VARCHAR_IGNORECASE);
+ }
+ return new CompareLike(database, r, not, false, readSum(), null, LikeType.REGEXP);
+ }
+
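+ // Parses the arguments of a built-in aggregate after its opening
+ // parenthesis; aggregates are only valid inside a query.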
+ private Expression readAggregate(AggregateType aggregateType, String aggregateName) {
if (currentSelect == null) {
+ expectedList = null;
throw getSyntaxError();
}
- currentSelect.setGroupQuery();
- Expression r;
- if (aggregateType == Aggregate.COUNT) {
- if (readIf("*")) {
- r = new Aggregate(Aggregate.COUNT_ALL, null, currentSelect,
- false);
+ Aggregate r;
+ switch (aggregateType) {
+ case COUNT:
+ if (readIf(ASTERISK)) {
+ r = new Aggregate(AggregateType.COUNT_ALL, new Expression[0], currentSelect, false);
} else {
- boolean distinct = readIf("DISTINCT");
+ boolean distinct = readDistinctAgg();
Expression on = readExpression();
if (on instanceof Wildcard && !distinct) {
// PostgreSQL compatibility: count(t.*)
- r = new Aggregate(Aggregate.COUNT_ALL, null, currentSelect,
- false);
+ r = new Aggregate(AggregateType.COUNT_ALL, new Expression[0], currentSelect, false);
} else {
- r = new Aggregate(Aggregate.COUNT, on, currentSelect,
- distinct);
+ r = new Aggregate(AggregateType.COUNT, new Expression[] { on }, currentSelect, distinct);
}
}
- } else if (aggregateType == Aggregate.GROUP_CONCAT) {
- Aggregate agg = null;
- if (equalsToken("GROUP_CONCAT", aggregateName)) {
- boolean distinct = readIf("DISTINCT");
- agg = new Aggregate(Aggregate.GROUP_CONCAT,
- readExpression(), currentSelect, distinct);
- if (readIf("ORDER")) {
- read("BY");
- agg.setGroupConcatOrder(parseSimpleOrderList());
- }
-
+ break;
+ case COVAR_POP:
+ case COVAR_SAMP:
+ case CORR:
+ case REGR_SLOPE:
+ case REGR_INTERCEPT:
+ case REGR_COUNT:
+ case REGR_R2:
+ case REGR_AVGX:
+ case REGR_AVGY:
+ case REGR_SXX:
+ case REGR_SYY:
+ case REGR_SXY:
+ r = new Aggregate(aggregateType, new Expression[] { readExpression(), readNextArgument() },
+ currentSelect, false);
+ break;
+ case HISTOGRAM:
+ r = new Aggregate(aggregateType, new Expression[] { readExpression() }, currentSelect, false);
+ break;
+ case LISTAGG: {
+ boolean distinct = readDistinctAgg();
+ Expression arg = readExpression();
+ ListaggArguments extraArguments = new ListaggArguments();
+ ArrayList<QueryOrderBy> orderByList;
+ if ("STRING_AGG".equals(aggregateName)) {
+ // PostgreSQL compatibility: string_agg(expression, delimiter)
+ read(COMMA);
+ extraArguments.setSeparator(readString());
+ orderByList = readIfOrderBy();
+ } else if ("GROUP_CONCAT".equals(aggregateName)) {
+ orderByList = readIfOrderBy();
if (readIf("SEPARATOR")) {
- agg.setGroupConcatSeparator(readExpression());
+ extraArguments.setSeparator(readString());
}
- } else if (equalsToken("STRING_AGG", aggregateName)) {
- // PostgreSQL compatibility: string_agg(expression, delimiter)
- agg = new Aggregate(Aggregate.GROUP_CONCAT,
- readExpression(), currentSelect, false);
- read(",");
- agg.setGroupConcatSeparator(readExpression());
+ } else {
+ if (readIf(COMMA)) {
+ extraArguments.setSeparator(readString());
+ }
+ if (readIf(ON)) {
+ read("OVERFLOW");
+ if (readIf("TRUNCATE")) {
+ extraArguments.setOnOverflowTruncate(true);
+ if (currentTokenType == LITERAL) {
+ extraArguments.setFilter(readString());
+ }
+ if (!readIf(WITH)) {
+ read("WITHOUT");
+ extraArguments.setWithoutCount(true);
+ }
+ read("COUNT");
+ } else {
+ read("ERROR");
+ }
+ }
+ orderByList = null;
}
- r = agg;
- } else {
- boolean distinct = readIf("DISTINCT");
- r = new Aggregate(aggregateType, readExpression(), currentSelect,
+ Expression[] args = new Expression[] { arg };
+ int index = tokenIndex;
+ read(CLOSE_PAREN);
+ if (orderByList == null && isToken("WITHIN")) {
+ r = readWithinGroup(aggregateType, args, distinct, extraArguments, false, false);
+ } else {
+ setTokenIndex(index);
+ r = new Aggregate(AggregateType.LISTAGG, args, currentSelect, distinct);
+ r.setExtraArguments(extraArguments);
+ if (orderByList != null) {
+ r.setOrderByList(orderByList);
+ }
+ }
+ break;
+ }
+ case ARRAY_AGG: {
+ boolean distinct = readDistinctAgg();
+ r = new Aggregate(AggregateType.ARRAY_AGG, new Expression[] { readExpression() }, currentSelect, distinct);
+ r.setOrderByList(readIfOrderBy());
+ break;
+ }
+ case RANK:
+ case DENSE_RANK:
+ case PERCENT_RANK:
+ case CUME_DIST: {
+ if (isToken(CLOSE_PAREN)) {
+ return readWindowFunction(aggregateName);
+ }
+ ArrayList<Expression> expressions = Utils.newSmallArrayList();
+ do {
+ expressions.add(readExpression());
+ } while (readIfMore());
+ r = readWithinGroup(aggregateType, expressions.toArray(new Expression[0]), false, null, true, false);
+ break;
+ }
+ case PERCENTILE_CONT:
+ case PERCENTILE_DISC: {
+ Expression num = readExpression();
+ read(CLOSE_PAREN);
+ r = readWithinGroup(aggregateType, new Expression[] { num }, false, null, false, true);
+ break;
+ }
+ case MODE: {
+ if (readIf(CLOSE_PAREN)) {
+ r = readWithinGroup(AggregateType.MODE, new Expression[0], false, null, false, true);
+ } else {
+ Expression expr = readExpression();
+ r = new Aggregate(AggregateType.MODE, new Expression[0], currentSelect, false);
+ if (readIf(ORDER)) {
+ read("BY");
+ Expression expr2 = readExpression();
+ String sql = expr.getSQL(HasSQL.DEFAULT_SQL_FLAGS), sql2 = expr2.getSQL(HasSQL.DEFAULT_SQL_FLAGS);
+ if (!sql.equals(sql2)) {
+ throw DbException.getSyntaxError(ErrorCode.IDENTICAL_EXPRESSIONS_SHOULD_BE_USED, sqlCommand,
+ token.start(), sql, sql2);
+ }
+ readAggregateOrder(r, expr, true);
+ } else {
+ readAggregateOrder(r, expr, false);
+ }
+ }
+ break;
+ }
+ case JSON_OBJECTAGG: {
+ boolean withKey = readIf(KEY);
+ Expression key = readExpression();
+ if (withKey) {
+ read(VALUE);
+ } else if (!readIf(VALUE)) {
+ read(COLON);
+ }
+ Expression value = readExpression();
+ r = new Aggregate(AggregateType.JSON_OBJECTAGG, new Expression[] { key, value }, currentSelect, false);
+ readJsonObjectFunctionFlags(r, false);
+ break;
+ }
+ case JSON_ARRAYAGG: {
+ boolean distinct = readDistinctAgg();
+ r = new Aggregate(AggregateType.JSON_ARRAYAGG, new Expression[] { readExpression() }, currentSelect,
distinct);
+ r.setOrderByList(readIfOrderBy());
+ r.setFlags(JsonConstructorUtils.JSON_ABSENT_ON_NULL);
+ readJsonObjectFunctionFlags(r, true);
+ break;
+ }
+ default:
+ boolean distinct = readDistinctAgg();
+ r = new Aggregate(aggregateType, new Expression[] { readExpression() }, currentSelect, distinct);
+ break;
+ }
+ read(CLOSE_PAREN);
+ readFilterAndOver(r);
+ return r;
+ }
+
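+ // Parses WITHIN GROUP (ORDER BY ...) for ordered-set and hypothetical
+ // set aggregates; hypothetical set functions require one sort
+ // specification per argument.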
+ private Aggregate readWithinGroup(AggregateType aggregateType, Expression[] args, boolean distinct,
+ Object extraArguments, boolean forHypotheticalSet, boolean simple) {
+ read("WITHIN");
+ read(GROUP);
+ read(OPEN_PAREN);
+ read(ORDER);
+ read("BY");
+ Aggregate r = new Aggregate(aggregateType, args, currentSelect, distinct);
+ r.setExtraArguments(extraArguments);
+ if (forHypotheticalSet) {
+ int count = args.length;
+ ArrayList<QueryOrderBy> orderList = new ArrayList<>(count);
+ for (int i = 0; i < count; i++) {
+ if (i > 0) {
+ read(COMMA);
+ }
+ orderList.add(parseSortSpecification());
+ }
+ r.setOrderByList(orderList);
+ } else if (simple) {
+ readAggregateOrder(r, readExpression(), true);
+ } else {
+ r.setOrderByList(parseSortSpecificationList());
}
- read(")");
return r;
}
- private ArrayList parseSimpleOrderList() {
- ArrayList orderList = New.arrayList();
+ private void readAggregateOrder(Aggregate r, Expression expr, boolean parseSortType) {
+ ArrayList<QueryOrderBy> orderList = new ArrayList<>(1);
+ QueryOrderBy order = new QueryOrderBy();
+ order.expression = expr;
+ if (parseSortType) {
+ order.sortType = parseSortType();
+ }
+ orderList.add(order);
+ r.setOrderByList(orderList);
+ }
+
+ private ArrayList<QueryOrderBy> readIfOrderBy() {
+ if (readIf(ORDER)) {
+ read("BY");
+ return parseSortSpecificationList();
+ }
+ return null;
+ }
+
+ private ArrayList<QueryOrderBy> parseSortSpecificationList() {
+ ArrayList<QueryOrderBy> orderList = Utils.newSmallArrayList();
do {
- SelectOrderBy order = new SelectOrderBy();
- Expression expr = readExpression();
- order.expression = expr;
- if (readIf("DESC")) {
- order.descending = true;
- } else {
- readIf("ASC");
- }
- orderList.add(order);
- } while (readIf(","));
+ orderList.add(parseSortSpecification());
+ } while (readIf(COMMA));
return orderList;
}
- private JavaFunction readJavaFunction(Schema schema, String functionName) {
- FunctionAlias functionAlias = null;
- if (schema != null) {
- functionAlias = schema.findFunction(functionName);
+ private QueryOrderBy parseSortSpecification() {
+ QueryOrderBy order = new QueryOrderBy();
+ order.expression = readExpression();
+ order.sortType = parseSortType();
+ return order;
+ }
+
+ private Expression readUserDefinedFunctionIf(Schema schema, String functionName) {
+ UserDefinedFunction userDefinedFunction = findUserDefinedFunctionWithinPath(schema, functionName);
+ if (userDefinedFunction == null) {
+ return null;
+ } else if (userDefinedFunction instanceof FunctionAlias) {
+ FunctionAlias functionAlias = (FunctionAlias) userDefinedFunction;
+ ArrayList<Expression> argList = Utils.newSmallArrayList();
+ if (!readIf(CLOSE_PAREN)) {
+ do {
+ argList.add(readExpression());
+ } while (readIfMore());
+ }
+ return new JavaFunction(functionAlias, argList.toArray(new Expression[0]));
} else {
- functionAlias = findFunctionAlias(session.getCurrentSchemaName(),
- functionName);
+ UserAggregate aggregate = (UserAggregate) userDefinedFunction;
+ boolean distinct = readDistinctAgg();
+ ArrayList<Expression> params = Utils.newSmallArrayList();
+ do {
+ params.add(readExpression());
+ } while (readIfMore());
+ Expression[] list = params.toArray(new Expression[0]);
+ JavaAggregate agg = new JavaAggregate(aggregate, list, currentSelect, distinct);
+ readFilterAndOver(agg);
+ return agg;
}
- if (functionAlias == null) {
- throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, functionName);
+ }
+
+ private boolean readDistinctAgg() {
+ if (readIf(DISTINCT)) {
+ return true;
}
- Expression[] args;
- ArrayList argList = New.arrayList();
- int numArgs = 0;
- while (!readIf(")")) {
- if (numArgs++ > 0) {
- read(",");
+ readIf(ALL);
+ return false;
+ }
+
+ private void readFilterAndOver(AbstractAggregate aggregate) {
+ if (readIf("FILTER")) {
+ read(OPEN_PAREN);
+ read(WHERE);
+ Expression filterCondition = readExpression();
+ read(CLOSE_PAREN);
+ aggregate.setFilterCondition(filterCondition);
+ }
+ readOver(aggregate);
+ }
+
+ private void readOver(DataAnalysisOperation operation) {
+ if (readIf("OVER")) {
+ operation.setOverCondition(readWindowNameOrSpecification());
+ currentSelect.setWindowQuery();
+ } else if (operation.isAggregate()) {
+ currentSelect.setGroupQuery();
+ } else {
+ throw getSyntaxError();
+ }
+ }
+
+ private Window readWindowNameOrSpecification() {
+ return isToken(OPEN_PAREN) ? readWindowSpecification() : new Window(readIdentifier(), null, null, null);
+ }
+
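+ // Parses an in-line window specification. An unquoted leading
+ // identifier is taken as the name of a parent window unless it is one
+ // of the clause keywords PARTITION, ROWS, RANGE or GROUPS.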
+ private Window readWindowSpecification() {
+ read(OPEN_PAREN);
+ String parent = null;
+ if (currentTokenType == IDENTIFIER) {
+ String current = currentToken;
+ if (token.isQuoted() || ( //
+ !equalsToken(current, "PARTITION") //
+ && !equalsToken(current, "ROWS") //
+ && !equalsToken(current, "RANGE") //
+ && !equalsToken(current, "GROUPS"))) {
+ parent = current;
+ read();
}
- argList.add(readExpression());
}
- args = new Expression[numArgs];
- argList.toArray(args);
- JavaFunction func = new JavaFunction(functionAlias, args);
- return func;
+ ArrayList<Expression> partitionBy = null;
+ if (readIf("PARTITION")) {
+ read("BY");
+ partitionBy = Utils.newSmallArrayList();
+ do {
+ Expression expr = readExpression();
+ partitionBy.add(expr);
+ } while (readIf(COMMA));
+ }
+ ArrayList<QueryOrderBy> orderBy = readIfOrderBy();
+ WindowFrame frame = readWindowFrame();
+ read(CLOSE_PAREN);
+ return new Window(parent, partitionBy, orderBy, frame);
+ }
+
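+ // Parses an optional window frame: ROWS, RANGE or GROUPS units, a
+ // single bound or a BETWEEN ... AND ... pair, and an optional EXCLUDE
+ // clause; invalid combinations are rejected with a syntax error.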
+ private WindowFrame readWindowFrame() {
+ WindowFrameUnits units;
+ if (readIf("ROWS")) {
+ units = WindowFrameUnits.ROWS;
+ } else if (readIf("RANGE")) {
+ units = WindowFrameUnits.RANGE;
+ } else if (readIf("GROUPS")) {
+ units = WindowFrameUnits.GROUPS;
+ } else {
+ return null;
+ }
+ WindowFrameBound starting, following;
+ if (readIf(BETWEEN)) {
+ starting = readWindowFrameRange();
+ read(AND);
+ following = readWindowFrameRange();
+ } else {
+ starting = readWindowFrameStarting();
+ following = null;
+ }
+ int sqlIndex = token.start();
+ WindowFrameExclusion exclusion = WindowFrameExclusion.EXCLUDE_NO_OTHERS;
+ if (readIf("EXCLUDE")) {
+ if (readIf("CURRENT")) {
+ read(ROW);
+ exclusion = WindowFrameExclusion.EXCLUDE_CURRENT_ROW;
+ } else if (readIf(GROUP)) {
+ exclusion = WindowFrameExclusion.EXCLUDE_GROUP;
+ } else if (readIf("TIES")) {
+ exclusion = WindowFrameExclusion.EXCLUDE_TIES;
+ } else {
+ read("NO");
+ read("OTHERS");
+ }
+ }
+ WindowFrame frame = new WindowFrame(units, starting, following, exclusion);
+ if (!frame.isValid()) {
+ throw DbException.getSyntaxError(sqlCommand, sqlIndex);
+ }
+ return frame;
}
- private JavaAggregate readJavaAggregate(UserAggregate aggregate) {
- ArrayList params = New.arrayList();
- do {
- params.add(readExpression());
- } while (readIf(","));
- read(")");
- Expression[] list = new Expression[params.size()];
- params.toArray(list);
- JavaAggregate agg = new JavaAggregate(aggregate, list, currentSelect);
- currentSelect.setGroupQuery();
- return agg;
+ private WindowFrameBound readWindowFrameStarting() {
+ if (readIf("UNBOUNDED")) {
+ read("PRECEDING");
+ return new WindowFrameBound(WindowFrameBoundType.UNBOUNDED_PRECEDING, null);
+ }
+ if (readIf("CURRENT")) {
+ read(ROW);
+ return new WindowFrameBound(WindowFrameBoundType.CURRENT_ROW, null);
+ }
+ Expression value = readExpression();
+ read("PRECEDING");
+ return new WindowFrameBound(WindowFrameBoundType.PRECEDING, value);
}
- private int getAggregateType(String name) {
- if (!identifiersToUpper) {
- // if not yet converted to uppercase, do it now
- name = StringUtils.toUpperEnglish(name);
+ private WindowFrameBound readWindowFrameRange() {
+ if (readIf("UNBOUNDED")) {
+ if (readIf("PRECEDING")) {
+ return new WindowFrameBound(WindowFrameBoundType.UNBOUNDED_PRECEDING, null);
+ }
+ read("FOLLOWING");
+ return new WindowFrameBound(WindowFrameBoundType.UNBOUNDED_FOLLOWING, null);
+ }
+ if (readIf("CURRENT")) {
+ read(ROW);
+ return new WindowFrameBound(WindowFrameBoundType.CURRENT_ROW, null);
}
- return Aggregate.getAggregateType(name);
+ Expression value = readExpression();
+ if (readIf("PRECEDING")) {
+ return new WindowFrameBound(WindowFrameBoundType.PRECEDING, value);
+ }
+ read("FOLLOWING");
+ return new WindowFrameBound(WindowFrameBoundType.FOLLOWING, value);
}
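+ // Resolution order for an unqualified function name: user-defined
+ // overrides of built-ins (when enabled), aggregates, built-in
+ // functions, window functions, compatibility names, and finally
+ // user-defined functions.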
private Expression readFunction(Schema schema, String name) {
+ String upperName = upperName(name);
if (schema != null) {
- return readJavaFunction(schema, name);
- }
- int agg = getAggregateType(name);
- if (agg >= 0) {
- return readAggregate(agg, name);
- }
- Function function = Function.getFunction(database, name);
- if (function == null) {
- UserAggregate aggregate = database.findAggregate(name);
- if (aggregate != null) {
- return readJavaAggregate(aggregate);
- }
- return readJavaFunction(null, name);
- }
- switch (function.getFunctionType()) {
- case Function.CAST: {
- function.setParameter(0, readExpression());
- read("AS");
- Column type = parseColumnWithType(null);
- function.setDataType(type);
- read(")");
- break;
+ return readFunctionWithSchema(schema, name, upperName);
+ }
+ boolean allowOverride = database.isAllowBuiltinAliasOverride();
+ if (allowOverride) {
+ Expression e = readUserDefinedFunctionIf(null, name);
+ if (e != null) {
+ return e;
+ }
+ }
+ AggregateType agg = Aggregate.getAggregateType(upperName);
+ if (agg != null) {
+ return readAggregate(agg, upperName);
+ }
+ Expression e = readBuiltinFunctionIf(upperName);
+ if (e != null) {
+ return e;
+ }
+ e = readWindowFunction(upperName);
+ if (e != null) {
+ return e;
+ }
+ e = readCompatibilityFunction(upperName);
+ if (e != null) {
+ return e;
+ }
+ if (!allowOverride) {
+ e = readUserDefinedFunctionIf(null, name);
+ if (e != null) {
+ return e;
+ }
+ }
+ throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name);
+ }
+
+ private Expression readFunctionWithSchema(Schema schema, String name, String upperName) {
+ if (database.getMode().getEnum() == ModeEnum.PostgreSQL
+ && schema.getName().equals(database.sysIdentifier("PG_CATALOG"))) {
+ FunctionsPostgreSQL function = FunctionsPostgreSQL.getFunction(upperName);
+ if (function != null) {
+ return readParameters(function);
+ }
+ }
+ Expression function = readUserDefinedFunctionIf(schema, name);
+ if (function != null) {
+ return function;
+ }
+ throw DbException.get(ErrorCode.FUNCTION_NOT_FOUND_1, name);
+ }
+
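+ // Translates legacy and compatibility-mode names into equivalent
+ // standard expressions, e.g. IFNULL into COALESCE and DAYOFMONTH into
+ // EXTRACT(DAY FROM ...); returns null for unknown names so the caller
+ // can try other kinds of functions.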
+ private Expression readCompatibilityFunction(String name) {
+ switch (name) {
+ // ||
+ case "ARRAY_APPEND":
+ case "ARRAY_CAT":
+ return new ConcatenationOperation(readExpression(), readLastArgument());
+ // []
+ case "ARRAY_GET":
+ return new ArrayElementReference(readExpression(), readLastArgument());
+ // CARDINALITY
+ case "ARRAY_LENGTH":
+ return new CardinalityExpression(readSingleArgument(), false);
+ // Simple case
+ case "DECODE": {
+ Expression caseOperand = readExpression();
+ boolean canOptimize = caseOperand.isConstant() && !caseOperand.getValue(session).containsNull();
+ Expression a = readNextArgument(), b = readNextArgument();
+ SimpleCase.SimpleWhen when = decodeToWhen(caseOperand, canOptimize, a, b), current = when;
+ Expression elseResult = null;
+ while (readIf(COMMA)) {
+ a = readExpression();
+ if (readIf(COMMA)) {
+ b = readExpression();
+ SimpleCase.SimpleWhen next = decodeToWhen(caseOperand, canOptimize, a, b);
+ current.setWhen(next);
+ current = next;
+ } else {
+ elseResult = a;
+ break;
+ }
+ }
+ read(CLOSE_PAREN);
+ return new SimpleCase(caseOperand, when, elseResult);
}
- case Function.CONVERT: {
+ // Searched case
+ case "CASEWHEN":
+ return readCompatibilityCase(readExpression());
+ case "NVL2":
+ return readCompatibilityCase(new NullPredicate(readExpression(), true, false));
+ // Cast specification
+ case "CONVERT": {
+ Expression arg;
+ Column column;
if (database.getMode().swapConvertFunctionParameters) {
- Column type = parseColumnWithType(null);
- function.setDataType(type);
- read(",");
- function.setParameter(0, readExpression());
- read(")");
+ column = parseColumnWithType(null);
+ arg = readNextArgument();
+ } else {
+ arg = readExpression();
+ read(COMMA);
+ column = parseColumnWithType(null);
+ }
+ read(CLOSE_PAREN);
+ return new CastSpecification(arg, column);
+ }
+ // COALESCE
+ case "IFNULL":
+ return new CoalesceFunction(CoalesceFunction.COALESCE, readExpression(), readLastArgument());
+ case "NVL":
+ return readCoalesceFunction(CoalesceFunction.COALESCE);
+ // CURRENT_CATALOG
+ case "DATABASE":
+ read(CLOSE_PAREN);
+ return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG);
+ // CURRENT_DATE
+ case "CURDATE":
+ case "SYSDATE":
+ case "TODAY":
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, true, name);
+ // CURRENT_SCHEMA
+ case "SCHEMA":
+ read(CLOSE_PAREN);
+ return new CurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_SCHEMA);
+ // CURRENT_TIMESTAMP
+ case "SYSTIMESTAMP":
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, true, name);
+ // EXTRACT
+ case "DAY":
+ case "DAY_OF_MONTH":
+ case "DAYOFMONTH":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY, readSingleArgument(), null);
+ case "DAY_OF_WEEK":
+ case "DAYOFWEEK":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY_OF_WEEK, readSingleArgument(),
+ null);
+ case "DAY_OF_YEAR":
+ case "DAYOFYEAR":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.DAY_OF_YEAR, readSingleArgument(),
+ null);
+ case "HOUR":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.HOUR, readSingleArgument(), null);
+ case "ISO_DAY_OF_WEEK":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_DAY_OF_WEEK,
+ readSingleArgument(), null);
+ case "ISO_WEEK":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_WEEK, readSingleArgument(),
+ null);
+ case "ISO_YEAR":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.ISO_WEEK_YEAR, readSingleArgument(),
+ null);
+ case "MINUTE":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.MINUTE, readSingleArgument(), null);
+ case "MONTH":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.MONTH, readSingleArgument(), null);
+ case "QUARTER":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.QUARTER, readSingleArgument(), //
+ null);
+ case "SECOND":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.SECOND, readSingleArgument(), null);
+ case "WEEK":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.WEEK, readSingleArgument(), null);
+ case "YEAR":
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, DateTimeFunction.YEAR, readSingleArgument(), null);
+ // LOCALTIME
+ case "CURTIME":
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, true, "CURTIME");
+ case "SYSTIME":
+ read(CLOSE_PAREN);
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, false, "SYSTIME");
+ // LOCALTIMESTAMP
+ case "NOW":
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, true, "NOW");
+ // LOCATE
+ case "INSTR": {
+ Expression arg1 = readExpression();
+ return new StringFunction(readNextArgument(), arg1, readIfArgument(), StringFunction.LOCATE);
+ }
+ case "POSITION": {
+ // can't read expression because IN would be read too early
+ Expression arg1 = readConcat();
+ if (!readIf(COMMA)) {
+ read(IN);
+ }
+ return new StringFunction(arg1, readSingleArgument(), null, StringFunction.LOCATE);
+ }
+ // LOWER
+ case "LCASE":
+ return new StringFunction1(readSingleArgument(), StringFunction1.LOWER);
+ // SUBSTRING
+ case "SUBSTR":
+ return readSubstringFunction();
+ // TRIM
+ case "LTRIM":
+ return new TrimFunction(readSingleArgument(), null, TrimFunction.LEADING);
+ case "RTRIM":
+ return new TrimFunction(readSingleArgument(), null, TrimFunction.TRAILING);
+ // UPPER
+ case "UCASE":
+ return new StringFunction1(readSingleArgument(), StringFunction1.UPPER);
+ // Sequence value
+ case "CURRVAL":
+ return readCompatibilitySequenceValueFunction(true);
+ case "NEXTVAL":
+ return readCompatibilitySequenceValueFunction(false);
+ default:
+ return null;
+ }
+ }
+
+ private <T extends ExpressionWithVariableParameters> T readParameters(T expression) {
+ if (!readIf(CLOSE_PAREN)) {
+ do {
+ expression.addParameter(readExpression());
+ } while (readIfMore());
+ }
+ expression.doneWithParameters();
+ return expression;
+ }
+
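+ // Converts one DECODE search/result pair into a CASE WHEN clause.
+ // When operands may be NULL, a null-safe comparison is generated,
+ // because DECODE, unlike a plain CASE, treats NULL as equal to NULL.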
+ private SimpleCase.SimpleWhen decodeToWhen(Expression caseOperand, boolean canOptimize, Expression whenOperand,
+ Expression result) {
+ if (!canOptimize && (!whenOperand.isConstant() || whenOperand.getValue(session).containsNull())) {
+ whenOperand = new Comparison(Comparison.EQUAL_NULL_SAFE, caseOperand, whenOperand, true);
+ }
+ return new SimpleCase.SimpleWhen(whenOperand, result);
+ }
+
+ private Expression readCompatibilityCase(Expression when) {
+ return new SearchedCase(new Expression[] { when, readNextArgument(), readLastArgument() });
+ }
+
+ private Expression readCompatibilitySequenceValueFunction(boolean current) {
+ Expression arg1 = readExpression(), arg2 = readIf(COMMA) ? readExpression() : null;
+ read(CLOSE_PAREN);
+ return new CompatibilitySequenceValueFunction(arg1, arg2, current);
+ }
+
+ private Expression readBuiltinFunctionIf(String upperName) {
+ switch (upperName) {
+ case "ABS":
+ return new MathFunction(readSingleArgument(), null, MathFunction.ABS);
+ case "MOD":
+ return new MathFunction(readExpression(), readLastArgument(), MathFunction.MOD);
+ case "SIN":
+ return new MathFunction1(readSingleArgument(), MathFunction1.SIN);
+ case "COS":
+ return new MathFunction1(readSingleArgument(), MathFunction1.COS);
+ case "TAN":
+ return new MathFunction1(readSingleArgument(), MathFunction1.TAN);
+ case "COT":
+ return new MathFunction1(readSingleArgument(), MathFunction1.COT);
+ case "SINH":
+ return new MathFunction1(readSingleArgument(), MathFunction1.SINH);
+ case "COSH":
+ return new MathFunction1(readSingleArgument(), MathFunction1.COSH);
+ case "TANH":
+ return new MathFunction1(readSingleArgument(), MathFunction1.TANH);
+ case "ASIN":
+ return new MathFunction1(readSingleArgument(), MathFunction1.ASIN);
+ case "ACOS":
+ return new MathFunction1(readSingleArgument(), MathFunction1.ACOS);
+ case "ATAN":
+ return new MathFunction1(readSingleArgument(), MathFunction1.ATAN);
+ case "ATAN2":
+ return new MathFunction2(readExpression(), readLastArgument(), MathFunction2.ATAN2);
+ case "LOG": {
+ Expression arg1 = readExpression();
+ if (readIf(COMMA)) {
+ return new MathFunction2(arg1, readSingleArgument(), MathFunction2.LOG);
} else {
- function.setParameter(0, readExpression());
- read(",");
- Column type = parseColumnWithType(null);
- function.setDataType(type);
- read(")");
+ read(CLOSE_PAREN);
+ return new MathFunction1(arg1,
+ database.getMode().logIsLogBase10 ? MathFunction1.LOG10 : MathFunction1.LN);
+ }
+ }
+ case "LOG10":
+ return new MathFunction1(readSingleArgument(), MathFunction1.LOG10);
+ case "LN":
+ return new MathFunction1(readSingleArgument(), MathFunction1.LN);
+ case "EXP":
+ return new MathFunction1(readSingleArgument(), MathFunction1.EXP);
+ case "POWER":
+ return new MathFunction2(readExpression(), readLastArgument(), MathFunction2.POWER);
+ case "SQRT":
+ return new MathFunction1(readSingleArgument(), MathFunction1.SQRT);
+ case "FLOOR":
+ return new MathFunction(readSingleArgument(), null, MathFunction.FLOOR);
+ case "CEIL":
+ case "CEILING":
+ return new MathFunction(readSingleArgument(), null, MathFunction.CEIL);
+ case "ROUND":
+ return new MathFunction(readExpression(), readIfArgument(), MathFunction.ROUND);
+ case "ROUNDMAGIC":
+ return new MathFunction(readSingleArgument(), null, MathFunction.ROUNDMAGIC);
+ case "SIGN":
+ return new MathFunction(readSingleArgument(), null, MathFunction.SIGN);
+ case "TRUNC":
+ case "TRUNCATE":
+ return new MathFunction(readExpression(), readIfArgument(), MathFunction.TRUNC);
+ case "DEGREES":
+ return new MathFunction1(readSingleArgument(), MathFunction1.DEGREES);
+ case "RADIANS":
+ return new MathFunction1(readSingleArgument(), MathFunction1.RADIANS);
+ case "BITAND":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITAND);
+ case "BITOR":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITOR);
+ case "BITXOR":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITXOR);
+ case "BITNOT":
+ return new BitFunction(readSingleArgument(), null, BitFunction.BITNOT);
+ case "BITNAND":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITNAND);
+ case "BITNOR":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITNOR);
+ case "BITXNOR":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITXNOR);
+ case "BITGET":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.BITGET);
+ case "BITCOUNT":
+ return new BitFunction(readSingleArgument(), null, BitFunction.BITCOUNT);
+ case "LSHIFT":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.LSHIFT);
+ case "RSHIFT":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.RSHIFT);
+ case "ULSHIFT":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.ULSHIFT);
+ case "URSHIFT":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.URSHIFT);
+ case "ROTATELEFT":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.ROTATELEFT);
+ case "ROTATERIGHT":
+ return new BitFunction(readExpression(), readLastArgument(), BitFunction.ROTATERIGHT);
+ case "EXTRACT": {
+ int field = readDateTimeField();
+ read(FROM);
+ return new DateTimeFunction(DateTimeFunction.EXTRACT, field, readSingleArgument(), null);
+ }
+ case "DATE_TRUNC":
+ return new DateTimeFunction(DateTimeFunction.DATE_TRUNC, readDateTimeField(), readLastArgument(), null);
+ case "DATEADD":
+ case "TIMESTAMPADD":
+ return new DateTimeFunction(DateTimeFunction.DATEADD, readDateTimeField(), readNextArgument(),
+ readLastArgument());
+ case "DATEDIFF":
+ case "TIMESTAMPDIFF":
+ return new DateTimeFunction(DateTimeFunction.DATEDIFF, readDateTimeField(), readNextArgument(),
+ readLastArgument());
+ case "FORMATDATETIME":
+ return readDateTimeFormatFunction(DateTimeFormatFunction.FORMATDATETIME);
+ case "PARSEDATETIME":
+ return readDateTimeFormatFunction(DateTimeFormatFunction.PARSEDATETIME);
+ case "DAYNAME":
+ return new DayMonthNameFunction(readSingleArgument(), DayMonthNameFunction.DAYNAME);
+ case "MONTHNAME":
+ return new DayMonthNameFunction(readSingleArgument(), DayMonthNameFunction.MONTHNAME);
+ case "CARDINALITY":
+ return new CardinalityExpression(readSingleArgument(), false);
+ case "ARRAY_MAX_CARDINALITY":
+ return new CardinalityExpression(readSingleArgument(), true);
+ case "LOCATE":
+ return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.LOCATE);
+ case "INSERT":
+ return new StringFunction(readExpression(), readNextArgument(), readNextArgument(), readLastArgument(),
+ StringFunction.INSERT);
+ case "REPLACE":
+ return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.REPLACE);
+ case "LPAD":
+ return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.LPAD);
+ case "RPAD":
+ return new StringFunction(readExpression(), readNextArgument(), readIfArgument(), StringFunction.RPAD);
+ case "TRANSLATE":
+ return new StringFunction(readExpression(), readNextArgument(), readLastArgument(),
+ StringFunction.TRANSLATE);
+ case "UPPER":
+ return new StringFunction1(readSingleArgument(), StringFunction1.UPPER);
+ case "LOWER":
+ return new StringFunction1(readSingleArgument(), StringFunction1.LOWER);
+ case "ASCII":
+ return new StringFunction1(readSingleArgument(), StringFunction1.ASCII);
+ case "CHAR":
+ case "CHR":
+ return new StringFunction1(readSingleArgument(), StringFunction1.CHAR);
+ case "STRINGENCODE":
+ return new StringFunction1(readSingleArgument(), StringFunction1.STRINGENCODE);
+ case "STRINGDECODE":
+ return new StringFunction1(readSingleArgument(), StringFunction1.STRINGDECODE);
+ case "STRINGTOUTF8":
+ return new StringFunction1(readSingleArgument(), StringFunction1.STRINGTOUTF8);
+ case "UTF8TOSTRING":
+ return new StringFunction1(readSingleArgument(), StringFunction1.UTF8TOSTRING);
+ case "HEXTORAW":
+ return new StringFunction1(readSingleArgument(), StringFunction1.HEXTORAW);
+ case "RAWTOHEX":
+ return new StringFunction1(readSingleArgument(), StringFunction1.RAWTOHEX);
+ case "SPACE":
+ return new StringFunction1(readSingleArgument(), StringFunction1.SPACE);
+ case "QUOTE_IDENT":
+ return new StringFunction1(readSingleArgument(), StringFunction1.QUOTE_IDENT);
+ case "SUBSTRING":
+ return readSubstringFunction();
+ case "TO_CHAR": {
+ Expression arg1 = readExpression(), arg2, arg3;
+ if (readIf(COMMA)) {
+ arg2 = readExpression();
+ arg3 = readIf(COMMA) ? readExpression() : null;
+ } else {
+ arg3 = arg2 = null;
+ }
+ read(CLOSE_PAREN);
+ return new ToCharFunction(arg1, arg2, arg3);
+ }
+ case "REPEAT":
+ return new StringFunction2(readExpression(), readLastArgument(), StringFunction2.REPEAT);
+ case "CHAR_LENGTH":
+ case "CHARACTER_LENGTH":
+ case "LENGTH":
+ return new LengthFunction(readIfSingleArgument(), LengthFunction.CHAR_LENGTH);
+ case "OCTET_LENGTH":
+ return new LengthFunction(readIfSingleArgument(), LengthFunction.OCTET_LENGTH);
+ case "BIT_LENGTH":
+ return new LengthFunction(readIfSingleArgument(), LengthFunction.BIT_LENGTH);
+ case "TRIM":
+ return readTrimFunction();
+ case "REGEXP_LIKE":
+ return readParameters(new RegexpFunction(RegexpFunction.REGEXP_LIKE));
+ case "REGEXP_REPLACE":
+ return readParameters(new RegexpFunction(RegexpFunction.REGEXP_REPLACE));
+ case "REGEXP_SUBSTR":
+ return readParameters(new RegexpFunction(RegexpFunction.REGEXP_SUBSTR));
+ case "XMLATTR":
+ return readParameters(new XMLFunction(XMLFunction.XMLATTR));
+ case "XMLCDATA":
+ return readParameters(new XMLFunction(XMLFunction.XMLCDATA));
+ case "XMLCOMMENT":
+ return readParameters(new XMLFunction(XMLFunction.XMLCOMMENT));
+ case "XMLNODE":
+ return readParameters(new XMLFunction(XMLFunction.XMLNODE));
+ case "XMLSTARTDOC":
+ return readParameters(new XMLFunction(XMLFunction.XMLSTARTDOC));
+ case "XMLTEXT":
+ return readParameters(new XMLFunction(XMLFunction.XMLTEXT));
+ case "TRIM_ARRAY":
+ return new ArrayFunction(readExpression(), readLastArgument(), null, ArrayFunction.TRIM_ARRAY);
+ case "ARRAY_CONTAINS":
+ return new ArrayFunction(readExpression(), readLastArgument(), null, ArrayFunction.ARRAY_CONTAINS);
+ case "ARRAY_SLICE":
+ return new ArrayFunction(readExpression(), readNextArgument(), readLastArgument(),
+ ArrayFunction.ARRAY_SLICE);
+ case "COMPRESS":
+ return new CompressFunction(readExpression(), readIfArgument(), CompressFunction.COMPRESS);
+ case "EXPAND":
+ return new CompressFunction(readSingleArgument(), null, CompressFunction.EXPAND);
+ case "SOUNDEX":
+ return new SoundexFunction(readSingleArgument(), null, SoundexFunction.SOUNDEX);
+ case "DIFFERENCE":
+ return new SoundexFunction(readExpression(), readLastArgument(), SoundexFunction.DIFFERENCE);
+ case "JSON_OBJECT": {
+ JsonConstructorFunction function = new JsonConstructorFunction(false);
+ if (currentTokenType != CLOSE_PAREN && !readJsonObjectFunctionFlags(function, false)) {
+ do {
+ boolean withKey = readIf(KEY);
+ function.addParameter(readExpression());
+ if (withKey) {
+ read(VALUE);
+ } else if (!readIf(VALUE)) {
+ read(COLON);
+ }
+ function.addParameter(readExpression());
+ } while (readIf(COMMA));
+ readJsonObjectFunctionFlags(function, false);
}
+ read(CLOSE_PAREN);
+ function.doneWithParameters();
+ return function;
+ }
+ case "JSON_ARRAY": {
+ JsonConstructorFunction function = new JsonConstructorFunction(true);
+ function.setFlags(JsonConstructorUtils.JSON_ABSENT_ON_NULL);
+ if (currentTokenType != CLOSE_PAREN && !readJsonObjectFunctionFlags(function, true)) {
+ do {
+ function.addParameter(readExpression());
+ } while (readIf(COMMA));
+ readJsonObjectFunctionFlags(function, true);
+ }
+ read(CLOSE_PAREN);
+ function.doneWithParameters();
+ return function;
+ }
+ case "ENCRYPT":
+ return new CryptFunction(readExpression(), readNextArgument(), readLastArgument(), CryptFunction.ENCRYPT);
+ case "DECRYPT":
+ return new CryptFunction(readExpression(), readNextArgument(), readLastArgument(), CryptFunction.DECRYPT);
+ case "COALESCE":
+ return readCoalesceFunction(CoalesceFunction.COALESCE);
+ case "GREATEST":
+ return readCoalesceFunction(CoalesceFunction.GREATEST);
+ case "LEAST":
+ return readCoalesceFunction(CoalesceFunction.LEAST);
+ case "NULLIF":
+ return new NullIfFunction(readExpression(), readLastArgument());
+ case "CONCAT":
+ return readConcatFunction(ConcatFunction.CONCAT);
+ case "CONCAT_WS":
+ return readConcatFunction(ConcatFunction.CONCAT_WS);
+ case "HASH":
+ return new HashFunction(readExpression(), readNextArgument(), readIfArgument(), HashFunction.HASH);
+ case "ORA_HASH": {
+ Expression arg1 = readExpression();
+ if (readIfMore()) {
+ return new HashFunction(arg1, readExpression(), readIfArgument(), HashFunction.ORA_HASH);
+ }
+ return new HashFunction(arg1, HashFunction.ORA_HASH);
+ }
+ case "RAND":
+ case "RANDOM":
+ return new RandFunction(readIfSingleArgument(), RandFunction.RAND);
+ case "SECURE_RAND":
+ return new RandFunction(readSingleArgument(), RandFunction.SECURE_RAND);
+ case "RANDOM_UUID":
+ case "UUID":
+ read(CLOSE_PAREN);
+ return new RandFunction(null, RandFunction.RANDOM_UUID);
+ case "ABORT_SESSION":
+ return new SessionControlFunction(readIfSingleArgument(), SessionControlFunction.ABORT_SESSION);
+ case "CANCEL_SESSION":
+ return new SessionControlFunction(readIfSingleArgument(), SessionControlFunction.CANCEL_SESSION);
+ case "AUTOCOMMIT":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.AUTOCOMMIT);
+ case "DATABASE_PATH":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.DATABASE_PATH);
+ case "H2VERSION":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.H2VERSION);
+ case "LOCK_MODE":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.LOCK_MODE);
+ case "LOCK_TIMEOUT":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.LOCK_TIMEOUT);
+ case "MEMORY_FREE":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.MEMORY_FREE);
+ case "MEMORY_USED":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.MEMORY_USED);
+ case "READONLY":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.READONLY);
+ case "SESSION_ID":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.SESSION_ID);
+ case "TRANSACTION_ID":
+ read(CLOSE_PAREN);
+ return new SysInfoFunction(SysInfoFunction.TRANSACTION_ID);
+ case "DISK_SPACE_USED":
+ return new TableInfoFunction(readIfSingleArgument(), null, TableInfoFunction.DISK_SPACE_USED);
+ case "ESTIMATED_ENVELOPE":
+ return new TableInfoFunction(readExpression(), readLastArgument(), TableInfoFunction.ESTIMATED_ENVELOPE);
+ case "FILE_READ":
+ return new FileFunction(readExpression(), readIfArgument(), FileFunction.FILE_READ);
+ case "FILE_WRITE":
+ return new FileFunction(readExpression(), readLastArgument(), FileFunction.FILE_WRITE);
+ case "DATA_TYPE_SQL":
+ return new DataTypeSQLFunction(readExpression(), readNextArgument(), readNextArgument(),
+ readLastArgument());
+ case "DB_OBJECT_ID":
+ return new DBObjectFunction(readExpression(), readNextArgument(), readIfArgument(),
+ DBObjectFunction.DB_OBJECT_ID);
+ case "DB_OBJECT_SQL":
+ return new DBObjectFunction(readExpression(), readNextArgument(), readIfArgument(),
+ DBObjectFunction.DB_OBJECT_SQL);
+ case "CSVWRITE":
+ return readParameters(new CSVWriteFunction());
+ case "SIGNAL":
+ return new SignalFunction(readExpression(), readLastArgument());
+ case "TRUNCATE_VALUE":
+ return new TruncateValueFunction(readExpression(), readNextArgument(), readLastArgument());
+ case "ZERO":
+ read(CLOSE_PAREN);
+ return ValueExpression.get(ValueInteger.get(0));
+ case "PI":
+ read(CLOSE_PAREN);
+ return ValueExpression.get(ValueDouble.get(Math.PI));
+ }
+ ModeFunction function = ModeFunction.getFunction(database, upperName);
+ return function != null ? readParameters(function) : null;
+ }
+
+ private Expression readDateTimeFormatFunction(int function) {
+ DateTimeFormatFunction f = new DateTimeFormatFunction(function);
+ f.addParameter(readExpression());
+ read(COMMA);
+ f.addParameter(readExpression());
+ if (readIf(COMMA)) {
+ f.addParameter(readExpression());
+ if (readIf(COMMA)) {
+ f.addParameter(readExpression());
+ }
+ }
+ read(CLOSE_PAREN);
+ f.doneWithParameters();
+ return f;
+ }
+
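+ // Parses TRIM with either the standard [LEADING|TRAILING|BOTH]
+ // [characters] FROM string syntax or the legacy
+ // TRIM(string [, characters]) argument form.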
+ private Expression readTrimFunction() {
+ int flags;
+ boolean needFrom = false;
+ if (readIf("LEADING")) {
+ flags = TrimFunction.LEADING;
+ needFrom = true;
+ } else if (readIf("TRAILING")) {
+ flags = TrimFunction.TRAILING;
+ needFrom = true;
+ } else {
+ needFrom = readIf("BOTH");
+ flags = TrimFunction.LEADING | TrimFunction.TRAILING;
+ }
+ Expression from, space = null;
+ if (needFrom) {
+ if (!readIf(FROM)) {
+ space = readExpression();
+ read(FROM);
+ }
+ from = readExpression();
+ } else {
+ if (readIf(FROM)) {
+ from = readExpression();
+ } else {
+ from = readExpression();
+ if (readIf(FROM)) {
+ space = from;
+ from = readExpression();
+ } else if (readIf(COMMA)) {
+ space = readExpression();
+ }
+ }
+ }
+ read(CLOSE_PAREN);
+ return new TrimFunction(from, space, flags);
+ }
+
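+ // Parses UNNEST(...): each array argument becomes a column C1, C2, ...
+ // and WITH ORDINALITY appends an extra NORD counter column.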
+ private ArrayTableFunction readUnnestFunction() {
+ ArrayTableFunction f = new ArrayTableFunction(ArrayTableFunction.UNNEST);
+ ArrayList<Column> columns = Utils.newSmallArrayList();
+ if (!readIf(CLOSE_PAREN)) {
+ int i = 0;
+ do {
+ Expression expr = readExpression();
+ TypeInfo columnType = TypeInfo.TYPE_NULL;
+ if (expr.isConstant()) {
+ expr = expr.optimize(session);
+ TypeInfo exprType = expr.getType();
+ if (exprType.getValueType() == Value.ARRAY) {
+ columnType = (TypeInfo) exprType.getExtTypeInfo();
+ }
+ }
+ f.addParameter(expr);
+ columns.add(new Column("C" + ++i, columnType));
+ } while (readIfMore());
+ }
+ if (readIf(WITH)) {
+ read("ORDINALITY");
+ columns.add(new Column("NORD", TypeInfo.TYPE_INTEGER));
+ }
+ f.setColumns(columns);
+ f.doneWithParameters();
+ return f;
+ }
+
+ private ArrayTableFunction readTableFunction(int functionType) {
+ ArrayTableFunction f = new ArrayTableFunction(functionType);
+ ArrayList<Column> columns = Utils.newSmallArrayList();
+ do {
+ columns.add(parseColumnWithType(readIdentifier()));
+ read(EQUAL);
+ f.addParameter(readExpression());
+ } while (readIfMore());
+ f.setColumns(columns);
+ f.doneWithParameters();
+ return f;
+ }
+
+ private Expression readSingleArgument() {
+ Expression arg = readExpression();
+ read(CLOSE_PAREN);
+ return arg;
+ }
+
+ private Expression readNextArgument() {
+ read(COMMA);
+ return readExpression();
+ }
+
+ private Expression readLastArgument() {
+ read(COMMA);
+ Expression arg = readExpression();
+ read(CLOSE_PAREN);
+ return arg;
+ }
+
+ private Expression readIfSingleArgument() {
+ Expression arg;
+ if (readIf(CLOSE_PAREN)) {
+ arg = null;
+ } else {
+ arg = readExpression();
+ read(CLOSE_PAREN);
+ }
+ return arg;
+ }
+
+ private Expression readIfArgument() {
+ Expression arg = readIf(COMMA) ? readExpression() : null;
+ read(CLOSE_PAREN);
+ return arg;
+ }
+
+ private Expression readCoalesceFunction(int function) {
+ CoalesceFunction f = new CoalesceFunction(function);
+ f.addParameter(readExpression());
+ while (readIfMore()) {
+ f.addParameter(readExpression());
+ }
+ f.doneWithParameters();
+ return f;
+ }
+
+ private Expression readConcatFunction(int function) {
+ ConcatFunction f = new ConcatFunction(function);
+ f.addParameter(readExpression());
+ f.addParameter(readNextArgument());
+ if (function == ConcatFunction.CONCAT_WS) {
+ f.addParameter(readNextArgument());
+ }
+ while (readIfMore()) {
+ f.addParameter(readExpression());
+ }
+ f.doneWithParameters();
+ return f;
+ }
+
+ private Expression readSubstringFunction() {
+ // Standard variants are:
+ // SUBSTRING(X FROM 1)
+ // SUBSTRING(X FROM 1 FOR 1)
+ // Different non-standard variants include:
+ // SUBSTRING(X,1)
+ // SUBSTRING(X,1,1)
+ // SUBSTRING(X FOR 1) -- Postgres
+ SubstringFunction function = new SubstringFunction();
+ function.addParameter(readExpression());
+ if (readIf(FROM)) {
+ function.addParameter(readExpression());
+ if (readIf(FOR)) {
+ function.addParameter(readExpression());
+ }
+ } else if (readIf(FOR)) {
+ function.addParameter(ValueExpression.get(ValueInteger.get(1)));
+ function.addParameter(readExpression());
+ } else {
+ read(COMMA);
+ function.addParameter(readExpression());
+ if (readIf(COMMA)) {
+ function.addParameter(readExpression());
+ }
+ }
+ read(CLOSE_PAREN);
+ function.doneWithParameters();
+ return function;
+ }
+
+ private int readDateTimeField() {
+ int field = -1;
+ switch (currentTokenType) {
+ case IDENTIFIER:
+ if (!token.isQuoted()) {
+ field = DateTimeFunction.getField(currentToken);
+ }
+ break;
+ case LITERAL:
+ if (token.value(session).getValueType() == Value.VARCHAR) {
+ field = DateTimeFunction.getField(token.value(session).getString());
+ }
+ break;
+ case YEAR:
+ field = DateTimeFunction.YEAR;
break;
+ case MONTH:
+ field = DateTimeFunction.MONTH;
+ break;
+ case DAY:
+ field = DateTimeFunction.DAY;
+ break;
+ case HOUR:
+ field = DateTimeFunction.HOUR;
+ break;
+ case MINUTE:
+ field = DateTimeFunction.MINUTE;
+ break;
+ case SECOND:
+ field = DateTimeFunction.SECOND;
+ }
+ if (field < 0) {
+ addExpected("date-time field");
+ throw getSyntaxError();
+ }
+ read();
+ return field;
+ }
+
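+ // Parses a window (analytic) function. Functions with a variable
+ // argument count read up to the maximum and verify the minimum;
+ // NTH_VALUE additionally accepts FROM FIRST|LAST, and the navigation
+ // functions accept RESPECT|IGNORE NULLS.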
+ private WindowFunction readWindowFunction(String name) {
+ WindowFunctionType type = WindowFunctionType.get(name);
+ if (type == null) {
+ return null;
+ }
+ if (currentSelect == null) {
+ throw getSyntaxError();
+ }
+ int numArgs = WindowFunction.getMinArgumentCount(type);
+ Expression[] args = null;
+ if (numArgs > 0) {
+ // There are no functions with numArgs == 0 && numArgsMax > 0
+ int numArgsMax = WindowFunction.getMaxArgumentCount(type);
+ args = new Expression[numArgsMax];
+ if (numArgs == numArgsMax) {
+ for (int i = 0; i < numArgs; i++) {
+ if (i > 0) {
+ read(COMMA);
+ }
+ args[i] = readExpression();
+ }
+ } else {
+ int i = 0;
+ while (i < numArgsMax) {
+ if (i > 0 && !readIf(COMMA)) {
+ break;
+ }
+ args[i] = readExpression();
+ i++;
+ }
+ if (i < numArgs) {
+ throw getSyntaxError();
+ }
+ if (i != numArgsMax) {
+ args = Arrays.copyOf(args, i);
+ }
+ }
+ }
+ read(CLOSE_PAREN);
+ WindowFunction function = new WindowFunction(type, currentSelect, args);
+ switch (type) {
+ case NTH_VALUE:
+ readFromFirstOrLast(function);
+ //$FALL-THROUGH$
+ case LEAD:
+ case LAG:
+ case FIRST_VALUE:
+ case LAST_VALUE:
+ readRespectOrIgnoreNulls(function);
+ //$FALL-THROUGH$
+ default:
+ // Avoid warning
+ }
+ readOver(function);
+ return function;
+ }
+
+ private void readFromFirstOrLast(WindowFunction function) {
+ if (readIf(FROM) && !readIf("FIRST")) {
+ read("LAST");
+ function.setFromLast(true);
+ }
+ }
+
+ private void readRespectOrIgnoreNulls(WindowFunction function) {
+ if (readIf("RESPECT")) {
+ read("NULLS");
+ } else if (readIf("IGNORE")) {
+ read("NULLS");
+ function.setIgnoreNulls(true);
+ }
+ }
+
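+ // Parses the optional NULL | ABSENT ON NULL and WITH | WITHOUT UNIQUE
+ // KEYS clauses. Returns false and rewinds the tokenizer when the
+ // tokens do not form a complete clause, so that they can be re-read
+ // as ordinary expressions.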
+ private boolean readJsonObjectFunctionFlags(ExpressionWithFlags function, boolean forArray) {
+ int start = tokenIndex;
+ boolean result = false;
+ int flags = function.getFlags();
+ if (readIf(NULL)) {
+ if (readIf(ON)) {
+ read(NULL);
+ flags &= ~JsonConstructorUtils.JSON_ABSENT_ON_NULL;
+ result = true;
+ } else {
+ setTokenIndex(start);
+ return false;
+ }
+ } else if (readIf("ABSENT")) {
+ if (readIf(ON)) {
+ read(NULL);
+ flags |= JsonConstructorUtils.JSON_ABSENT_ON_NULL;
+ result = true;
+ } else {
+ setTokenIndex(start);
+ return false;
+ }
+ }
+ if (!forArray) {
+ if (readIf(WITH)) {
+ read(UNIQUE);
+ read("KEYS");
+ flags |= JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS;
+ result = true;
+ } else if (readIf("WITHOUT")) {
+ if (readIf(UNIQUE)) {
+ read("KEYS");
+ flags &= ~JsonConstructorUtils.JSON_WITH_UNIQUE_KEYS;
+ result = true;
+ } else if (result) {
+ throw getSyntaxError();
+ } else {
+ setTokenIndex(start);
+ return false;
+ }
+ }
+ }
+ if (result) {
+ function.setFlags(flags);
+ }
+ return result;
+ }
+
+ private Expression readKeywordCompatibilityFunctionOrColumn() {
+ boolean nonKeyword = nonKeywords != null && nonKeywords.get(currentTokenType);
+ String name = currentToken;
+ read();
+ if (readIf(OPEN_PAREN)) {
+ return readCompatibilityFunction(upperName(name));
+ } else if (nonKeyword) {
+ return readIf(DOT) ? readTermObjectDot(name) : new ExpressionColumn(database, null, null, name);
+ }
+ throw getSyntaxError();
+ }
+
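+ // Parses a datetime value function with an optional fractional seconds
+ // precision (not allowed for CURRENT_DATE), checking the scale against
+ // the maximum and honouring user-defined overrides of the built-in
+ // names when overrides are enabled.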
+ private Expression readCurrentDateTimeValueFunction(int function, boolean hasParen, String name) {
+ int scale = -1;
+ if (hasParen) {
+ if (function != CurrentDateTimeValueFunction.CURRENT_DATE && currentTokenType != CLOSE_PAREN) {
+ scale = readInt();
+ if (scale < 0 || scale > ValueTime.MAXIMUM_SCALE) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0",
+ /* compile-time constant */ "" + ValueTime.MAXIMUM_SCALE);
+ }
+ }
+ read(CLOSE_PAREN);
+ }
+ if (database.isAllowBuiltinAliasOverride()) {
+ FunctionAlias functionAlias = database.getSchema(session.getCurrentSchemaName())
+ .findFunction(name != null ? name : CurrentDateTimeValueFunction.getName(function));
+ if (functionAlias != null) {
+ return new JavaFunction(functionAlias,
+ scale >= 0 ? new Expression[] { ValueExpression.get(ValueInteger.get(scale)) }
+ : new Expression[0]);
+ }
+ }
+ return new CurrentDateTimeValueFunction(function, scale);
+ }
+
+ private Expression readIfWildcardRowidOrSequencePseudoColumn(String schema, String objectName) {
+ if (readIf(ASTERISK)) {
+ return parseWildcard(schema, objectName);
+ }
+ if (readIf(_ROWID_)) {
+ return new ExpressionColumn(database, schema, objectName);
+ }
+ if (database.getMode().nextvalAndCurrvalPseudoColumns) {
+ return readIfSequencePseudoColumn(schema, objectName);
+ }
+ return null;
+ }
+
+ private Wildcard parseWildcard(String schema, String objectName) {
+ Wildcard wildcard = new Wildcard(schema, objectName);
+ if (readIf(EXCEPT)) {
+ read(OPEN_PAREN);
+ ArrayList<ExpressionColumn> exceptColumns = Utils.newSmallArrayList();
+ do {
+ String s = null, t = null;
+ String name = readIdentifier();
+ if (readIf(DOT)) {
+ t = name;
+ name = readIdentifier();
+ if (readIf(DOT)) {
+ s = t;
+ t = name;
+ name = readIdentifier();
+ if (readIf(DOT)) {
+ checkDatabaseName(s);
+ s = t;
+ t = name;
+ name = readIdentifier();
+ }
+ }
+ }
+ exceptColumns.add(new ExpressionColumn(database, s, t, name));
+ } while (readIfMore());
+ wildcard.setExceptColumns(exceptColumns);
+ }
+ return wildcard;
+ }
+
+ private SequenceValue readIfSequencePseudoColumn(String schema, String objectName) {
+ if (schema == null) {
+ schema = session.getCurrentSchemaName();
+ }
+ if (isToken("NEXTVAL")) {
+ Sequence sequence = findSequence(schema, objectName);
+ if (sequence != null) {
+ read();
+ return new SequenceValue(sequence, getCurrentPrepared());
+ }
+ } else if (isToken("CURRVAL")) {
+ Sequence sequence = findSequence(schema, objectName);
+ if (sequence != null) {
+ read();
+ return new SequenceValue(sequence);
+ }
+ }
+ return null;
+ }
+
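+ // Resolves an identifier chain after the first dot: a wildcard,
+ // _ROWID_, sequence pseudo-columns, a schema-qualified function call,
+ // or a column reference with up to catalog.schema.table qualification.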
+ private Expression readTermObjectDot(String objectName) {
+ Expression expr = readIfWildcardRowidOrSequencePseudoColumn(null, objectName);
+ if (expr != null) {
+ return expr;
+ }
+ String name = readIdentifier();
+ if (readIf(OPEN_PAREN)) {
+ return readFunction(database.getSchema(objectName), name);
+ } else if (readIf(DOT)) {
+ String schema = objectName;
+ objectName = name;
+ expr = readIfWildcardRowidOrSequencePseudoColumn(schema, objectName);
+ if (expr != null) {
+ return expr;
+ }
+ name = readIdentifier();
+ if (readIf(OPEN_PAREN)) {
+ checkDatabaseName(schema);
+ return readFunction(database.getSchema(objectName), name);
+ } else if (readIf(DOT)) {
+ checkDatabaseName(schema);
+ schema = objectName;
+ objectName = name;
+ expr = readIfWildcardRowidOrSequencePseudoColumn(schema, objectName);
+ if (expr != null) {
+ return expr;
+ }
+ name = readIdentifier();
+ }
+ return new ExpressionColumn(database, schema, objectName, name);
}
- case Function.EXTRACT: {
- function.setParameter(0,
- ValueExpression.get(ValueString.get(currentToken)));
+ return new ExpressionColumn(database, null, objectName, name);
+ }
+
+ private void checkDatabaseName(String databaseName) {
+ if (!database.getIgnoreCatalogs() && !equalsToken(database.getShortName(), databaseName)) {
+ throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1, databaseName);
+ }
+ }
+
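+ // Reads a parameter token, growing the parameter list as needed so
+ // that the same index always maps to the same Parameter instance.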
+ private Parameter readParameter() {
+ int index = ((Token.ParameterToken) token).index();
+ read();
+ Parameter p;
+ if (parameters == null) {
+ parameters = Utils.newSmallArrayList();
+ }
+ if (index > Constants.MAX_PARAMETER_INDEX) {
+ throw DbException.getInvalidValueException("parameter index", index);
+ }
+ index--;
+ if (parameters.size() <= index) {
+ parameters.ensureCapacity(index + 1);
+ while (parameters.size() < index) {
+ parameters.add(null);
+ }
+ p = new Parameter(index);
+ parameters.add(p);
+ } else if ((p = parameters.get(index)) == null) {
+ p = new Parameter(index);
+ parameters.set(index, p);
+ }
+ return p;
+ }
+
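+ // Parses a term of an expression: variables, parameters, subqueries,
+ // signed literals, ROW and ARRAY constructors, INTERVAL literals,
+ // CASE, CAST, and current date/time or general value specifications.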
+ private Expression readTerm() {
+ Expression r;
+ switch (currentTokenType) {
+ case AT:
read();
- read("FROM");
- function.setParameter(1, readExpression());
- read(")");
+ r = new Variable(session, readIdentifier());
+ if (readIf(COLON_EQ)) {
+ r = new SetFunction(r, readExpression());
+ }
break;
- }
- case Function.DATE_ADD:
- case Function.DATE_DIFF: {
- if (Function.isDatePart(currentToken)) {
- function.setParameter(0,
- ValueExpression.get(ValueString.get(currentToken)));
+ case PARAMETER:
+ r = readParameter();
+ break;
+ case TABLE:
+ case SELECT:
+ case WITH:
+ r = new Subquery(parseQuery());
+ break;
+ case MINUS_SIGN:
+ read();
+ if (currentTokenType == LITERAL) {
+ r = ValueExpression.get(token.value(session).negate());
+ int rType = r.getType().getValueType();
+ if (rType == Value.BIGINT &&
+ r.getValue(session).getLong() == Integer.MIN_VALUE) {
+ // convert Integer.MIN_VALUE to type 'int'
+ // (Integer.MAX_VALUE+1 is of type 'long')
+ r = ValueExpression.get(ValueInteger.get(Integer.MIN_VALUE));
+ } else if (rType == Value.NUMERIC &&
+ r.getValue(session).getBigDecimal().compareTo(Value.MIN_LONG_DECIMAL) == 0) {
+ // convert Long.MIN_VALUE to type 'long'
+ // (Long.MAX_VALUE+1 is of type 'decimal')
+ r = ValueExpression.get(ValueBigint.MIN);
+ }
read();
} else {
- function.setParameter(0, readExpression());
+ r = new UnaryOperation(readTerm());
}
- read(",");
- function.setParameter(1, readExpression());
- read(",");
- function.setParameter(2, readExpression());
- read(")");
break;
- }
- case Function.SUBSTRING: {
- // Different variants include:
- // SUBSTRING(X,1)
- // SUBSTRING(X,1,1)
- // SUBSTRING(X FROM 1 FOR 1) -- Postgres
- // SUBSTRING(X FROM 1) -- Postgres
- // SUBSTRING(X FOR 1) -- Postgres
- function.setParameter(0, readExpression());
- if (readIf("FROM")) {
- function.setParameter(1, readExpression());
- if (readIf("FOR")) {
- function.setParameter(2, readExpression());
- }
- } else if (readIf("FOR")) {
- function.setParameter(1, ValueExpression.get(ValueInt.get(0)));
- function.setParameter(2, readExpression());
+ case PLUS_SIGN:
+ read();
+ r = readTerm();
+ break;
+ case OPEN_PAREN:
+ read();
+ if (readIf(CLOSE_PAREN)) {
+ r = ValueExpression.get(ValueRow.EMPTY);
+ } else if (isQuery()) {
+ r = new Subquery(parseQuery());
+ read(CLOSE_PAREN);
} else {
- read(",");
- function.setParameter(1, readExpression());
- if (readIf(",")) {
- function.setParameter(2, readExpression());
+ r = readExpression();
+ if (readIfMore()) {
+ ArrayList<Expression> list = Utils.newSmallArrayList();
+ list.add(r);
+ do {
+ list.add(readExpression());
+ } while (readIfMore());
+ r = new ExpressionList(list.toArray(new Expression[0]), false);
+ } else if (r instanceof BinaryOperation) {
+ BinaryOperation binaryOperation = (BinaryOperation) r;
+ if (binaryOperation.getOperationType() == OpType.MINUS) {
+ TypeInfo ti = readIntervalQualifier();
+ if (ti != null) {
+ binaryOperation.setForcedType(ti);
+ }
+ }
+ }
+ }
+ if (readIf(DOT)) {
+ r = new FieldReference(r, readIdentifier());
+ }
+ break;
+ case ARRAY:
+ read();
+ if (readIf(OPEN_BRACKET)) {
+ if (readIf(CLOSE_BRACKET)) {
+ r = ValueExpression.get(ValueArray.EMPTY);
+ } else {
+ ArrayList<Expression> list = Utils.newSmallArrayList();
+ do {
+ list.add(readExpression());
+ } while (readIf(COMMA));
+ read(CLOSE_BRACKET);
+ r = new ExpressionList(list.toArray(new Expression[0]), true);
}
+ } else {
+ read(OPEN_PAREN);
+ Query q = parseQuery();
+ read(CLOSE_PAREN);
+ r = new ArrayConstructorByQuery(q);
+ }
+ break;
+ case INTERVAL:
+ read();
+ r = readInterval();
+ break;
+ case ROW: {
+ read();
+ read(OPEN_PAREN);
+ if (readIf(CLOSE_PAREN)) {
+ r = ValueExpression.get(ValueRow.EMPTY);
+ } else {
+ ArrayList<Expression> list = Utils.newSmallArrayList();
+ do {
+ list.add(readExpression());
+ } while (readIfMore());
+ r = new ExpressionList(list.toArray(new Expression[0]), false);
+ }
+ break;
+ }
+ case TRUE:
+ read();
+ r = ValueExpression.TRUE;
+ break;
+ case FALSE:
+ read();
+ r = ValueExpression.FALSE;
+ break;
+ case UNKNOWN:
+ read();
+ r = TypedValueExpression.UNKNOWN;
+ break;
+ case ROWNUM:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ read(CLOSE_PAREN);
+ }
+ if (currentSelect == null && currentPrepared == null) {
+ throw getSyntaxError();
+ }
+ r = new Rownum(getCurrentPrepared());
+ break;
+ case NULL:
+ read();
+ r = ValueExpression.NULL;
+ break;
+ case _ROWID_:
+ read();
+ r = new ExpressionColumn(database, null, null);
+ break;
+ case LITERAL:
+ r = ValueExpression.get(token.value(session));
+ read();
+ break;
+ case VALUES:
+ if (database.getMode().onDuplicateKeyUpdate) {
+ if (currentPrepared instanceof Insert) {
+ r = readOnDuplicateKeyValues(((Insert) currentPrepared).getTable(), null);
+ break;
+ } else if (currentPrepared instanceof Update) {
+ Update update = (Update) currentPrepared;
+ r = readOnDuplicateKeyValues(update.getTable(), update);
+ break;
+ }
+ }
+ r = new Subquery(parseQuery());
+ break;
+ case CASE:
+ read();
+ r = readCase();
+ break;
+ case CAST: {
+ read();
+ read(OPEN_PAREN);
+ Expression arg = readExpression();
+ read(AS);
+ Column column = parseColumnWithType(null);
+ read(CLOSE_PAREN);
+ r = new CastSpecification(arg, column);
+ break;
+ }
+ case CURRENT_CATALOG:
+ return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_CATALOG);
+ case CURRENT_DATE:
+ read();
+ r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, readIf(OPEN_PAREN), null);
+ break;
+ case CURRENT_PATH:
+ return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_PATH);
+ case CURRENT_ROLE:
+ return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_ROLE);
+ case CURRENT_SCHEMA:
+ return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_SCHEMA);
+ case CURRENT_TIME:
+ read();
+ r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIME, readIf(OPEN_PAREN), null);
+ break;
+ case CURRENT_TIMESTAMP:
+ read();
+ r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP, readIf(OPEN_PAREN),
+ null);
+ break;
+ case CURRENT_USER:
+ case USER:
+ return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.CURRENT_USER);
+ case SESSION_USER:
+ return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.SESSION_USER);
+ case SYSTEM_USER:
+ return readCurrentGeneralValueSpecification(CurrentGeneralValueSpecification.SYSTEM_USER);
+ case ANY:
+ case SOME:
+ read();
+ read(OPEN_PAREN);
+ return readAggregate(AggregateType.ANY, "ANY");
+ case DAY:
+ case HOUR:
+ case MINUTE:
+ case MONTH:
+ case SECOND:
+ case YEAR:
+ r = readKeywordCompatibilityFunctionOrColumn();
+ break;
+ case LEFT:
+ r = readColumnIfNotFunction();
+ if (r == null) {
+ r = new StringFunction2(readExpression(), readLastArgument(), StringFunction2.LEFT);
+ }
+ break;
+ case LOCALTIME:
+ read();
+ r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, readIf(OPEN_PAREN), null);
+ break;
+ case LOCALTIMESTAMP:
+ read();
+ r = readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, readIf(OPEN_PAREN), //
+ null);
+ break;
+ case RIGHT:
+ r = readColumnIfNotFunction();
+ if (r == null) {
+ r = new StringFunction2(readExpression(), readLastArgument(), StringFunction2.RIGHT);
}
- read(")");
break;
- }
- case Function.POSITION: {
- // can't read expression because IN would be read too early
- function.setParameter(0, readConcat());
- if (!readIf(",")) {
- read("IN");
+ case SET:
+ r = readColumnIfNotFunction();
+ if (r == null) {
+ r = readSetFunction();
}
- function.setParameter(1, readExpression());
- read(")");
break;
- }
- case Function.TRIM: {
- Expression space = null;
- if (readIf("LEADING")) {
- function = Function.getFunction(database, "LTRIM");
- if (!readIf("FROM")) {
- space = readExpression();
- read("FROM");
- }
- } else if (readIf("TRAILING")) {
- function = Function.getFunction(database, "RTRIM");
- if (!readIf("FROM")) {
- space = readExpression();
- read("FROM");
- }
- } else if (readIf("BOTH")) {
- if (!readIf("FROM")) {
- space = readExpression();
- read("FROM");
- }
+ case VALUE:
+ if (parseDomainConstraint) {
+ read();
+ r = new DomainValueExpression();
+ break;
}
- Expression p0 = readExpression();
- if (readIf(",")) {
- space = readExpression();
- } else if (readIf("FROM")) {
- space = p0;
- p0 = readExpression();
+ //$FALL-THROUGH$
+ default:
+ if (!isIdentifier()) {
+ throw getSyntaxError();
}
- function.setParameter(0, p0);
- if (space != null) {
- function.setParameter(1, space);
+ //$FALL-THROUGH$
+ case IDENTIFIER:
+ String name = currentToken;
+ boolean quoted = token.isQuoted();
+ read();
+ if (readIf(OPEN_PAREN)) {
+ r = readFunction(null, name);
+ } else if (readIf(DOT)) {
+ r = readTermObjectDot(name);
+ } else if (quoted) {
+ r = new ExpressionColumn(database, null, null, name);
+ } else {
+ r = readTermWithIdentifier(name, quoted);
}
- read(")");
break;
}
- case Function.TABLE:
- case Function.TABLE_DISTINCT: {
- int i = 0;
- ArrayList<Column> columns = New.arrayList();
- do {
- String columnName = readAliasIdentifier();
- Column column = parseColumnWithType(columnName);
- columns.add(column);
- read("=");
- function.setParameter(i, readExpression());
- i++;
- } while (readIf(","));
- read(")");
- TableFunction tf = (TableFunction) function;
- tf.setColumns(columns);
- break;
+ if (readIf(OPEN_BRACKET)) {
+ r = new ArrayElementReference(r, readExpression());
+ read(CLOSE_BRACKET);
}
- case Function.ROW_NUMBER:
- read(")");
- read("OVER");
- read("(");
- read(")");
- return new Rownum(currentSelect == null ? currentPrepared
- : currentSelect);
- default:
- if (!readIf(")")) {
- int i = 0;
- do {
- function.setParameter(i++, readExpression());
- } while (readIf(","));
- read(")");
+ colonColon: if (readIf(COLON_COLON)) {
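+ // PostgreSQL-style cast, e.g. '42'::INT or 'mytable'::REGCLASS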
+ if (database.getMode().getEnum() == ModeEnum.PostgreSQL) {
+ // PostgreSQL compatibility
+ if (isToken("PG_CATALOG")) {
+ read("PG_CATALOG");
+ read(DOT);
+ }
+ if (readIf("REGCLASS")) {
+ r = new Regclass(r);
+ break colonColon;
+ }
+ }
+ r = new CastSpecification(r, parseColumnWithType(null));
+ }
+ for (;;) {
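+ // Consume trailing postfix syntax: an interval qualifier, AT TIME ZONE /
+ // AT LOCAL, or FORMAT JSON, repeating until nothing more matches.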
+ TypeInfo ti = readIntervalQualifier();
+ if (ti != null) {
+ r = new CastSpecification(r, ti);
+ }
+ int index = tokenIndex;
+ if (readIf("AT")) {
+ if (readIf("TIME")) {
+ read("ZONE");
+ r = new TimeZoneOperation(r, readExpression());
+ continue;
+ } else if (readIf("LOCAL")) {
+ r = new TimeZoneOperation(r, null);
+ continue;
+ } else {
+ setTokenIndex(index);
+ }
+ } else if (readIf("FORMAT")) {
+ if (readIf("JSON")) {
+ r = new Format(r, FormatEnum.JSON);
+ continue;
+ } else {
+ setTokenIndex(index);
+ }
}
+ break;
}
- function.doneWithParameters();
- return function;
+ return r;
}
- private Function readFunctionWithoutParameters(String name) {
- if (readIf("(")) {
- read(")");
+ private Expression readCurrentGeneralValueSpecification(int specification) {
+ read();
+ if (readIf(OPEN_PAREN)) {
+ read(CLOSE_PAREN);
}
- Function function = Function.getFunction(database, name);
- function.doneWithParameters();
- return function;
+ return new CurrentGeneralValueSpecification(specification);
}
- private Expression readWildcardOrSequenceValue(String schema,
- String objectName) {
- if (readIf("*")) {
- return new Wildcard(schema, objectName);
- }
- if (schema == null) {
- schema = session.getCurrentSchemaName();
+ private Expression readColumnIfNotFunction() {
+ boolean nonKeyword = nonKeywords != null && nonKeywords.get(currentTokenType);
+ String name = currentToken;
+ read();
+ if (readIf(OPEN_PAREN)) {
+ return null;
+ } else if (nonKeyword) {
+ return readIf(DOT) ? readTermObjectDot(name) : new ExpressionColumn(database, null, null, name);
}
- if (readIf("NEXTVAL")) {
- Sequence sequence = findSequence(schema, objectName);
- if (sequence != null) {
- return new SequenceValue(sequence);
- }
- } else if (readIf("CURRVAL")) {
- Sequence sequence = findSequence(schema, objectName);
- if (sequence != null) {
- Function function = Function.getFunction(database, "CURRVAL");
- function.setParameter(0, ValueExpression.get(ValueString
- .get(sequence.getSchema().getName())));
- function.setParameter(1, ValueExpression.get(ValueString
- .get(sequence.getName())));
- function.doneWithParameters();
- return function;
+ throw getSyntaxError();
+ }
+
+ private Expression readSetFunction() {
+ SetFunction function = new SetFunction(readExpression(), readLastArgument());
+ if (database.isAllowBuiltinAliasOverride()) {
+ FunctionAlias functionAlias = database.getSchema(session.getCurrentSchemaName()).findFunction(
+ function.getName());
+ if (functionAlias != null) {
+ return new JavaFunction(functionAlias,
+ new Expression[] { function.getSubexpression(0), function.getSubexpression(1) });
}
}
- return null;
+ return function;
}
- private Expression readTermObjectDot(String objectName) {
- Expression expr = readWildcardOrSequenceValue(null, objectName);
- if (expr != null) {
- return expr;
- }
- String name = readColumnIdentifier();
- Schema s = database.findSchema(objectName);
- if ((!SysProperties.OLD_STYLE_OUTER_JOIN || s != null) && readIf("(")) {
- // only if the token before the dot is a valid schema name,
- // otherwise the old style Oracle outer join doesn't work:
- // t.x = t2.x(+)
- // this additional check is not required
- // if the old style outer joins are not supported
- return readFunction(s, name);
- } else if (readIf(".")) {
- String schema = objectName;
- objectName = name;
- expr = readWildcardOrSequenceValue(schema, objectName);
- if (expr != null) {
- return expr;
- }
- name = readColumnIdentifier();
- if (readIf("(")) {
- String databaseName = schema;
- if (!equalsToken(database.getShortName(), databaseName)) {
- throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1,
- databaseName);
- }
- schema = objectName;
- return readFunction(database.getSchema(schema), name);
- } else if (readIf(".")) {
- String databaseName = schema;
- if (!equalsToken(database.getShortName(), databaseName)) {
- throw DbException.get(ErrorCode.DATABASE_NOT_FOUND_1,
- databaseName);
+ private Expression readOnDuplicateKeyValues(Table table, Update update) {
+ read();
+ read(OPEN_PAREN);
+ Column c = readTableColumn(new TableFilter(session, table, null, rightsChecked, null, 0, null));
+ read(CLOSE_PAREN);
+ return new OnDuplicateKeyValues(c, update);
+ }
+
+ private Expression readTermWithIdentifier(String name, boolean quoted) {
+ /*
+ * Convert a-z to A-Z. This is safe because only A-Z
+ * characters are compared below.
+ *
+ * An unquoted identifier is never empty.
+ */
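+ // 0xffdf clears bit 0x20, e.g. 'd' (0x64) & 0xffdf == 'D' (0x44); 'D' itself is unchanged.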
+ switch (name.charAt(0) & 0xffdf) {
+ case 'C':
+ if (equalsToken("CURRENT", name)) {
+ int index = tokenIndex;
+ if (readIf(VALUE) && readIf(FOR)) {
+ return new SequenceValue(readSequence());
}
- schema = objectName;
- objectName = name;
- expr = readWildcardOrSequenceValue(schema, objectName);
- if (expr != null) {
- return expr;
+ setTokenIndex(index);
+ if (database.getMode().getEnum() == ModeEnum.DB2) {
+ return parseDB2SpecialRegisters(name);
}
- name = readColumnIdentifier();
- return new ExpressionColumn(database, schema, objectName, name);
}
- return new ExpressionColumn(database, schema, objectName, name);
- }
- return new ExpressionColumn(database, null, objectName, name);
- }
-
- private Expression readTerm() {
- Expression r;
- switch (currentTokenType) {
- case AT:
- read();
- r = new Variable(session, readAliasIdentifier());
- if (readIf(":=")) {
- Expression value = readExpression();
- Function function = Function.getFunction(database, "SET");
- function.setParameter(0, r);
- function.setParameter(1, value);
- r = function;
+ break;
+ case 'D':
+ if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR &&
+ (equalsToken("DATE", name) || equalsToken("D", name))) {
+ String date = token.value(session).getString();
+ read();
+ return ValueExpression.get(ValueDate.parse(date));
}
break;
- case PARAMETER:
- // there must be no space between ? and the number
- boolean indexed = Character.isDigit(sqlCommandChars[parseIndex]);
- read();
- Parameter p;
- if (indexed && currentTokenType == VALUE &&
- currentValue.getType() == Value.INT) {
- if (indexedParameterList == null) {
- if (parameters == null) {
- // this can occur when parsing expressions only (for
- // example check constraints)
- throw getSyntaxError();
- } else if (parameters.size() > 0) {
- throw DbException
- .get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS);
- }
- indexedParameterList = New.arrayList();
- }
- int index = currentValue.getInt() - 1;
- if (index < 0 || index >= Constants.MAX_PARAMETER_INDEX) {
- throw DbException.getInvalidValueException(
- "parameter index", index);
- }
- if (indexedParameterList.size() <= index) {
- indexedParameterList.ensureCapacity(index + 1);
- while (indexedParameterList.size() <= index) {
- indexedParameterList.add(null);
- }
- }
- p = indexedParameterList.get(index);
- if (p == null) {
- p = new Parameter(index);
- indexedParameterList.set(index, p);
- }
+ case 'E':
+ if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR //
+ && equalsToken("E", name)) {
+ String text = token.value(session).getString();
+ // PostgreSQL escape string: the PostgreSQL ODBC driver uses
+ // LIKE E'PROJECT\\_DATA' instead of LIKE 'PROJECT\_DATA'
+ text = StringUtils.replaceAll(text, "\\\\", "\\");
read();
- } else {
- if (indexedParameterList != null) {
- throw DbException
- .get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS);
+ return ValueExpression.get(ValueVarchar.get(text));
+ }
+ break;
+ case 'G':
+ if (currentTokenType == LITERAL) {
+ int t = token.value(session).getValueType();
+ if (t == Value.VARCHAR && equalsToken("GEOMETRY", name)) {
+ ValueExpression v = ValueExpression.get(ValueGeometry.get(token.value(session).getString()));
+ read();
+ return v;
+ } else if (t == Value.VARBINARY && equalsToken("GEOMETRY", name)) {
+ ValueExpression v = ValueExpression
+ .get(ValueGeometry.getFromEWKB(token.value(session).getBytesNoCopy()));
+ read();
+ return v;
}
- p = new Parameter(parameters.size());
}
- parameters.add(p);
- r = p;
break;
- case KEYWORD:
- if (isToken("SELECT") || isToken("FROM")) {
- Query query = parseSelect();
- r = new Subquery(query);
- } else {
- throw getSyntaxError();
+ case 'J':
+ if (currentTokenType == LITERAL) {
+ int t = token.value(session).getValueType();
+ if (t == Value.VARCHAR && equalsToken("JSON", name)) {
+ ValueExpression v = ValueExpression.get(ValueJson.fromJson(token.value(session).getString()));
+ read();
+ return v;
+ } else if (t == Value.VARBINARY && equalsToken("JSON", name)) {
+ ValueExpression v = ValueExpression.get(ValueJson.fromJson(token.value(session).getBytesNoCopy()));
+ read();
+ return v;
+ }
}
break;
- case IDENTIFIER:
- String name = currentToken;
- if (currentTokenQuoted) {
- read();
- if (readIf("(")) {
- r = readFunction(null, name);
- } else if (readIf(".")) {
- r = readTermObjectDot(name);
- } else {
- r = new ExpressionColumn(database, null, null, name);
+ case 'N':
+ if (equalsToken("NEXT", name)) {
+ int index = tokenIndex;
+ if (readIf(VALUE) && readIf(FOR)) {
+ return new SequenceValue(readSequence(), getCurrentPrepared());
}
- } else {
- read();
- if (readIf(".")) {
- r = readTermObjectDot(name);
- } else if (equalsToken("CASE", name)) {
- // CASE must be processed before (,
- // otherwise CASE(3) would be a function call, which it is
- // not
- r = readCase();
- } else if (readIf("(")) {
- r = readFunction(null, name);
- } else if (equalsToken("CURRENT_USER", name)) {
- r = readFunctionWithoutParameters("USER");
- } else if (equalsToken("CURRENT", name)) {
- if (readIf("TIMESTAMP")) {
- r = readFunctionWithoutParameters("CURRENT_TIMESTAMP");
- } else if (readIf("TIME")) {
- r = readFunctionWithoutParameters("CURRENT_TIME");
- } else if (readIf("DATE")) {
- r = readFunctionWithoutParameters("CURRENT_DATE");
- } else {
- r = new ExpressionColumn(database, null, null, name);
+ setTokenIndex(index);
+ }
+ break;
+ case 'T':
+ if (equalsToken("TIME", name)) {
+ if (readIf(WITH)) {
+ read("TIME");
+ read("ZONE");
+ if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) {
+ throw getSyntaxError();
}
- } else if (equalsToken("NEXT", name) && readIf("VALUE")) {
- read("FOR");
- Sequence sequence = readSequence();
- r = new SequenceValue(sequence);
- } else if (currentTokenType == VALUE &&
- currentValue.getType() == Value.STRING) {
- if (equalsToken("DATE", name) ||
- equalsToken("D", name)) {
- String date = currentValue.getString();
- read();
- r = ValueExpression.get(ValueDate.parse(date));
- } else if (equalsToken("TIME", name) ||
- equalsToken("T", name)) {
- String time = currentValue.getString();
- read();
- r = ValueExpression.get(ValueTime.parse(time));
- } else if (equalsToken("TIMESTAMP", name) ||
- equalsToken("TS", name)) {
- String timestamp = currentValue.getString();
- read();
- r = ValueExpression
- .get(ValueTimestamp.parse(timestamp));
- } else if (equalsToken("X", name)) {
- read();
- byte[] buffer = StringUtils
- .convertHexToBytes(currentValue.getString());
- r = ValueExpression.get(ValueBytes.getNoCopy(buffer));
- } else if (equalsToken("E", name)) {
- String text = currentValue.getString();
- // the PostgreSQL ODBC driver uses
- // LIKE E'PROJECT\\_DATA' instead of LIKE
- // 'PROJECT\_DATA'
- // N: SQL-92 "National Language" strings
- text = StringUtils.replaceAll(text, "\\\\", "\\");
- read();
- r = ValueExpression.get(ValueString.get(text));
- } else if (equalsToken("N", name)) {
- // SQL-92 "National Language" strings
- String text = currentValue.getString();
+ String time = token.value(session).getString();
+ read();
+ return ValueExpression.get(ValueTimeTimeZone.parse(time));
+ } else {
+ boolean without = readIf("WITHOUT");
+ if (without) {
+ read("TIME");
+ read("ZONE");
+ }
+ if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) {
+ String time = token.value(session).getString();
read();
- r = ValueExpression.get(ValueString.get(text));
- } else {
- r = new ExpressionColumn(database, null, null, name);
+ return ValueExpression.get(ValueTime.parse(time));
+ } else if (without) {
+ throw getSyntaxError();
+ }
+ }
+ } else if (equalsToken("TIMESTAMP", name)) {
+ if (readIf(WITH)) {
+ read("TIME");
+ read("ZONE");
+ if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) {
+ throw getSyntaxError();
}
+ String timestamp = token.value(session).getString();
+ read();
+ return ValueExpression.get(ValueTimestampTimeZone.parse(timestamp, session));
} else {
- r = new ExpressionColumn(database, null, null, name);
+ boolean without = readIf("WITHOUT");
+ if (without) {
+ read("TIME");
+ read("ZONE");
+ }
+ if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) {
+ String timestamp = token.value(session).getString();
+ read();
+ return ValueExpression.get(ValueTimestamp.parse(timestamp, session));
+ } else if (without) {
+ throw getSyntaxError();
+ }
+ }
+ } else if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR) {
+ if (equalsToken("T", name)) {
+ String time = token.value(session).getString();
+ read();
+ return ValueExpression.get(ValueTime.parse(time));
+ } else if (equalsToken("TS", name)) {
+ String timestamp = token.value(session).getString();
+ read();
+ return ValueExpression.get(ValueTimestamp.parse(timestamp, session));
}
}
break;
- case MINUS:
- read();
- if (currentTokenType == VALUE) {
- r = ValueExpression.get(currentValue.negate());
- if (r.getType() == Value.LONG &&
- r.getValue(session).getLong() == Integer.MIN_VALUE) {
- // convert Integer.MIN_VALUE to type 'int'
- // (Integer.MAX_VALUE+1 is of type 'long')
- r = ValueExpression.get(ValueInt.get(Integer.MIN_VALUE));
- } else if (r.getType() == Value.DECIMAL &&
- r.getValue(session).getBigDecimal()
- .compareTo(ValueLong.MIN_BD) == 0) {
- // convert Long.MIN_VALUE to type 'long'
- // (Long.MAX_VALUE+1 is of type 'decimal')
- r = ValueExpression.get(ValueLong.get(Long.MIN_VALUE));
- }
+ case 'U':
+ if (currentTokenType == LITERAL && token.value(session).getValueType() == Value.VARCHAR
+ && (equalsToken("UUID", name))) {
+ String uuid = token.value(session).getString();
read();
- } else {
- r = new Operation(Operation.NEGATE, readTerm(), null);
+ return ValueExpression.get(ValueUuid.get(uuid));
}
break;
- case PLUS:
- read();
- r = readTerm();
- break;
- case OPEN:
+ }
+ return new ExpressionColumn(database, null, null, name, quoted);
+ }
+
+ private Prepared getCurrentPrepared() {
+ return currentPrepared;
+ }
+
+ private Expression readInterval() {
+ boolean negative = readIf(MINUS_SIGN);
+ if (!negative) {
+ readIf(PLUS_SIGN);
+ }
+ if (currentTokenType != LITERAL || token.value(session).getValueType() != Value.VARCHAR) {
+ addExpected("string");
+ throw getSyntaxError();
+ }
+ String s = token.value(session).getString();
+ read();
+ IntervalQualifier qualifier;
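+ // e.g. INTERVAL '1-2' YEAR TO MONTH, INTERVAL '10:30' HOUR TO MINUTE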
+ switch (currentTokenType) {
+ case YEAR:
read();
- if (readIf(")")) {
- r = new ExpressionList(new Expression[0]);
+ if (readIf(TO)) {
+ read(MONTH);
+ qualifier = IntervalQualifier.YEAR_TO_MONTH;
} else {
- r = readExpression();
- if (readIf(",")) {
- ArrayList<Expression> list = New.arrayList();
- list.add(r);
- while (!readIf(")")) {
- r = readExpression();
- list.add(r);
- if (!readIf(",")) {
- read(")");
- break;
- }
- }
- Expression[] array = new Expression[list.size()];
- list.toArray(array);
- r = new ExpressionList(array);
- } else {
- read(")");
- }
+ qualifier = IntervalQualifier.YEAR;
}
break;
- case TRUE:
- read();
- r = ValueExpression.get(ValueBoolean.get(true));
- break;
- case FALSE:
- read();
- r = ValueExpression.get(ValueBoolean.get(false));
- break;
- case CURRENT_TIME:
- read();
- r = readFunctionWithoutParameters("CURRENT_TIME");
- break;
- case CURRENT_DATE:
+ case MONTH:
read();
- r = readFunctionWithoutParameters("CURRENT_DATE");
+ qualifier = IntervalQualifier.MONTH;
break;
- case CURRENT_TIMESTAMP: {
- Function function = Function.getFunction(database,
- "CURRENT_TIMESTAMP");
+ case DAY:
read();
- if (readIf("(")) {
- if (!readIf(")")) {
- function.setParameter(0, readExpression());
- read(")");
+ if (readIf(TO)) {
+ switch (currentTokenType) {
+ case HOUR:
+ qualifier = IntervalQualifier.DAY_TO_HOUR;
+ break;
+ case MINUTE:
+ qualifier = IntervalQualifier.DAY_TO_MINUTE;
+ break;
+ case SECOND:
+ qualifier = IntervalQualifier.DAY_TO_SECOND;
+ break;
+ default:
+ throw intervalDayError();
}
+ read();
+ } else {
+ qualifier = IntervalQualifier.DAY;
}
- function.doneWithParameters();
- r = function;
break;
- }
- case ROWNUM:
+ case HOUR:
read();
- if (readIf("(")) {
- read(")");
+ if (readIf(TO)) {
+ switch (currentTokenType) {
+ case MINUTE:
+ qualifier = IntervalQualifier.HOUR_TO_MINUTE;
+ break;
+ case SECOND:
+ qualifier = IntervalQualifier.HOUR_TO_SECOND;
+ break;
+ default:
+ throw intervalHourError();
+ }
+ read();
+ } else {
+ qualifier = IntervalQualifier.HOUR;
}
- r = new Rownum(currentSelect == null ? currentPrepared
- : currentSelect);
break;
- case NULL:
+ case MINUTE:
read();
- r = ValueExpression.getNull();
+ if (readIf(TO)) {
+ read(SECOND);
+ qualifier = IntervalQualifier.MINUTE_TO_SECOND;
+ } else {
+ qualifier = IntervalQualifier.MINUTE;
+ }
break;
- case VALUE:
- r = ValueExpression.get(currentValue);
+ case SECOND:
read();
+ qualifier = IntervalQualifier.SECOND;
break;
default:
- throw getSyntaxError();
+ throw intervalQualifierError();
}
- if (readIf("[")) {
- Function function = Function.getFunction(database, "ARRAY_GET");
- function.setParameter(0, r);
- r = readExpression();
- r = new Operation(Operation.PLUS, r, ValueExpression.get(ValueInt
- .get(1)));
- function.setParameter(1, r);
- r = function;
- read("]");
- }
- if (readIf("::")) {
- // PostgreSQL compatibility
- if (isToken("PG_CATALOG")) {
- read("PG_CATALOG");
- read(".");
- }
- if (readIf("REGCLASS")) {
- FunctionAlias f = findFunctionAlias(Constants.SCHEMA_MAIN,
- "PG_GET_OID");
- if (f == null) {
- throw getSyntaxError();
- }
- Expression[] args = { r };
- JavaFunction func = new JavaFunction(f, args);
- r = func;
- } else {
- Column col = parseColumnWithType(null);
- Function function = Function.getFunction(database, "CAST");
- function.setDataType(col);
- function.setParameter(0, r);
- r = function;
+ try {
+ return ValueExpression.get(IntervalUtils.parseInterval(qualifier, negative, s));
+ } catch (Exception e) {
+ throw DbException.get(ErrorCode.INVALID_DATETIME_CONSTANT_2, e, "INTERVAL", s);
+ }
+ }
+
+ private Expression parseDB2SpecialRegisters(String name) {
+ // Only the name "CURRENT" is supported here
+ if (readIf("TIMESTAMP")) {
+ if (readIf(WITH)) {
+ read("TIME");
+ read("ZONE");
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_TIMESTAMP,
+ readIf(OPEN_PAREN), null);
}
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIMESTAMP, readIf(OPEN_PAREN),
+ null);
+ } else if (readIf("TIME")) {
+ // Time with fractional seconds is not supported by DB2
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.LOCALTIME, false, null);
+ } else if (readIf("DATE")) {
+ return readCurrentDateTimeValueFunction(CurrentDateTimeValueFunction.CURRENT_DATE, false, null);
}
- return r;
+ // No match, parse CURRENT as a column
+ return new ExpressionColumn(database, null, null, name);
}
private Expression readCase() {
- if (readIf("END")) {
- readIf("CASE");
- return ValueExpression.getNull();
- }
- if (readIf("ELSE")) {
- Expression elsePart = readExpression().optimize(session);
- read("END");
- readIf("CASE");
- return elsePart;
- }
- int i;
- Function function;
- if (readIf("WHEN")) {
- function = Function.getFunction(database, "CASE");
- function.setParameter(0, null);
- i = 1;
+ Expression c;
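+ // Searched form: CASE WHEN a > 0 THEN 'pos' ELSE 'other' END
+ // Simple form: CASE x WHEN 1 THEN 'one' WHEN 2, 3 THEN 'few' END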
+ if (readIf(WHEN)) {
+ SearchedCase searched = new SearchedCase();
do {
- function.setParameter(i++, readExpression());
+ Expression condition = readExpression();
read("THEN");
- function.setParameter(i++, readExpression());
- } while (readIf("WHEN"));
+ searched.addParameter(condition);
+ searched.addParameter(readExpression());
+ } while (readIf(WHEN));
+ if (readIf(ELSE)) {
+ searched.addParameter(readExpression());
+ }
+ searched.doneWithParameters();
+ c = searched;
} else {
- Expression expr = readExpression();
- if (readIf("END")) {
- readIf("CASE");
- return ValueExpression.getNull();
- }
- if (readIf("ELSE")) {
- Expression elsePart = readExpression().optimize(session);
- read("END");
- readIf("CASE");
- return elsePart;
- }
- function = Function.getFunction(database, "CASE");
- function.setParameter(0, expr);
- i = 1;
- read("WHEN");
+ Expression caseOperand = readExpression();
+ read(WHEN);
+ SimpleCase.SimpleWhen when = readSimpleWhenClause(caseOperand), current = when;
+ while (readIf(WHEN)) {
+ SimpleCase.SimpleWhen next = readSimpleWhenClause(caseOperand);
+ current.setWhen(next);
+ current = next;
+ }
+ c = new SimpleCase(caseOperand, when, readIf(ELSE) ? readExpression() : null);
+ }
+ read(END);
+ return c;
+ }
+
+ private SimpleCase.SimpleWhen readSimpleWhenClause(Expression caseOperand) {
+ Expression whenOperand = readWhenOperand(caseOperand);
+ if (readIf(COMMA)) {
+ ArrayList<Expression> operands = Utils.newSmallArrayList();
+ operands.add(whenOperand);
do {
- function.setParameter(i++, readExpression());
- read("THEN");
- function.setParameter(i++, readExpression());
- } while (readIf("WHEN"));
+ operands.add(readWhenOperand(caseOperand));
+ } while (readIf(COMMA));
+ read("THEN");
+ return new SimpleCase.SimpleWhen(operands.toArray(new Expression[0]), readExpression());
}
- if (readIf("ELSE")) {
- function.setParameter(i, readExpression());
+ read("THEN");
+ return new SimpleCase.SimpleWhen(whenOperand, readExpression());
+ }
+
+ private Expression readWhenOperand(Expression caseOperand) {
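+ // The operand of a simple WHEN may be a plain expression or, as an
+ // extension, the right-hand side of a condition, e.g. WHEN IS NULL or WHEN > 10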
+ int backup = tokenIndex;
+ boolean not = readIf(NOT);
+ Expression whenOperand = readConditionRightHandSide(caseOperand, not, true);
+ if (whenOperand == null) {
+ if (not) {
+ setTokenIndex(backup);
+ }
+ whenOperand = readExpression();
}
- read("END");
- readIf("CASE");
- function.doneWithParameters();
- return function;
+ return whenOperand;
}
- private int readPositiveInt() {
+ private int readNonNegativeInt() {
int v = readInt();
if (v < 0) {
- throw DbException.getInvalidValueException("positive integer", v);
+ throw DbException.getInvalidValueException("non-negative integer", v);
}
return v;
}
private int readInt() {
boolean minus = false;
- if (currentTokenType == MINUS) {
+ if (currentTokenType == MINUS_SIGN) {
minus = true;
read();
- } else if (currentTokenType == PLUS) {
+ } else if (currentTokenType == PLUS_SIGN) {
read();
}
- if (currentTokenType != VALUE) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex, "integer");
+ if (currentTokenType != LITERAL) {
+ throw DbException.getSyntaxError(sqlCommand, token.start(), "integer");
}
+ Value value = token.value(session);
if (minus) {
// must do that now, otherwise Integer.MIN_VALUE would not work
- currentValue = currentValue.negate();
+ value = value.negate();
}
- int i = currentValue.getInt();
+ int i = value.getInt();
read();
return i;
}
+ private long readPositiveLong() {
+ long v = readLong();
+ if (v <= 0) {
+ throw DbException.getInvalidValueException("positive long", v);
+ }
+ return v;
+ }
+
private long readLong() {
boolean minus = false;
- if (currentTokenType == MINUS) {
+ if (currentTokenType == MINUS_SIGN) {
minus = true;
read();
- } else if (currentTokenType == PLUS) {
+ } else if (currentTokenType == PLUS_SIGN) {
read();
}
- if (currentTokenType != VALUE) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex, "long");
+ if (currentTokenType != LITERAL) {
+ throw DbException.getSyntaxError(sqlCommand, token.start(), "long");
}
+ Value value = token.value(session);
if (minus) {
// must do that now, otherwise Long.MIN_VALUE would not work
- currentValue = currentValue.negate();
+ value = value.negate();
}
- long i = currentValue.getLong();
+ long i = value.getLong();
read();
return i;
}
private boolean readBooleanSetting() {
- if (currentTokenType == VALUE) {
- boolean result = currentValue.getBoolean().booleanValue();
+ switch (currentTokenType) {
+ case ON:
+ case TRUE:
+ read();
+ return true;
+ case FALSE:
+ read();
+ return false;
+ case LITERAL:
+ boolean result = token.value(session).getBoolean();
read();
return result;
}
- if (readIf("TRUE") || readIf("ON")) {
- return true;
- } else if (readIf("FALSE") || readIf("OFF")) {
+ if (readIf("OFF")) {
return false;
} else {
+ if (expectedList != null) {
+ addMultipleExpected(ON, TRUE, FALSE);
+ }
throw getSyntaxError();
}
}
private String readString() {
- Expression expr = readExpression().optimize(session);
- if (!(expr instanceof ValueExpression)) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex, "string");
+ int sqlIndex = token.start();
+ Expression expr = readExpression();
+ try {
+ String s = expr.optimize(session).getValue(session).getString();
+ if (s == null || s.length() <= Constants.MAX_STRING_LENGTH) {
+ return s;
+ }
+ } catch (DbException e) {
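+ // ignore and report the syntax error below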
}
- String s = expr.getValue(session).getString();
- return s;
+ throw DbException.getSyntaxError(sqlCommand, sqlIndex, "character string");
}
+ // TODO: why does this function allow defaultSchemaName = null, which resets
+ // the parser schemaName for everyone?
private String readIdentifierWithSchema(String defaultSchemaName) {
- if (currentTokenType != IDENTIFIER) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex,
- "identifier");
- }
- String s = currentToken;
- read();
+ String s = readIdentifier();
schemaName = defaultSchemaName;
- if (readIf(".")) {
- schemaName = s;
- if (currentTokenType != IDENTIFIER) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex,
- "identifier");
- }
- s = currentToken;
- read();
+ if (readIf(DOT)) {
+ s = readIdentifierWithSchema2(s);
}
- if (equalsToken(".", currentToken)) {
- if (equalsToken(schemaName, database.getShortName())) {
- read(".");
- schemaName = s;
- if (currentTokenType != IDENTIFIER) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex,
- "identifier");
+ return s;
+ }
+
+ private String readIdentifierWithSchema2(String s) {
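+ // The first identifier turned out to be a catalog or schema prefix; in
+ // compatibility modes an empty schema (e.g. MYDB..TEST) means the default schema.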
+ schemaName = s;
+ if (database.getMode().allowEmptySchemaValuesAsDefaultSchema && readIf(DOT)) {
+ if (equalsToken(schemaName, database.getShortName()) || database.getIgnoreCatalogs()) {
+ schemaName = session.getCurrentSchemaName();
+ s = readIdentifier();
+ }
+ } else {
+ s = readIdentifier();
+ if (currentTokenType == DOT) {
+ if (equalsToken(schemaName, database.getShortName()) || database.getIgnoreCatalogs()) {
+ read();
+ schemaName = s;
+ s = readIdentifier();
}
- s = currentToken;
- read();
}
}
return s;
@@ -3092,18 +5748,16 @@ private String readIdentifierWithSchema() {
return readIdentifierWithSchema(session.getCurrentSchemaName());
}
- private String readAliasIdentifier() {
- return readColumnIdentifier();
- }
-
- private String readUniqueIdentifier() {
- return readColumnIdentifier();
- }
-
- private String readColumnIdentifier() {
- if (currentTokenType != IDENTIFIER) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex,
- "identifier");
+ private String readIdentifier() {
+ if (!isIdentifier()) {
+ /*
+ * Sometimes new keywords are introduced. During the metadata
+ * initialization phase keywords are accepted as identifiers to
+ * allow migration from older versions.
+ */
+ if (!session.isQuirksMode() || !isKeyword(currentTokenType)) {
+ throw DbException.getSyntaxError(sqlCommand, token.start(), "identifier");
+ }
}
String s = currentToken;
read();
@@ -3111,989 +5765,959 @@ private String readColumnIdentifier() {
}
private void read(String expected) {
- if (currentTokenQuoted || !equalsToken(expected, currentToken)) {
+ if (token.isQuoted() || !equalsToken(expected, currentToken)) {
addExpected(expected);
throw getSyntaxError();
}
read();
}
- private boolean readIf(String token) {
- if (!currentTokenQuoted && equalsToken(token, currentToken)) {
+ private void read(int tokenType) {
+ if (tokenType != currentTokenType) {
+ addExpected(tokenType);
+ throw getSyntaxError();
+ }
+ read();
+ }
+
+ private boolean readIf(String tokenName) {
+ if (!token.isQuoted() && equalsToken(tokenName, currentToken)) {
read();
return true;
}
- addExpected(token);
+ addExpected(tokenName);
return false;
}
- private boolean isToken(String token) {
- boolean result = equalsToken(token, currentToken) &&
- !currentTokenQuoted;
- if (result) {
+ private boolean readIf(int tokenType) {
+ if (tokenType == currentTokenType) {
+ read();
return true;
}
- addExpected(token);
+ addExpected(tokenType);
return false;
}
- private boolean equalsToken(String a, String b) {
- if (a == null) {
- return b == null;
- } else if (a.equals(b)) {
+ private boolean isToken(String tokenName) {
+ if (!token.isQuoted() && equalsToken(tokenName, currentToken)) {
return true;
- } else if (!identifiersToUpper && a.equalsIgnoreCase(b)) {
+ }
+ addExpected(tokenName);
+ return false;
+ }
+
+ private boolean isToken(int tokenType) {
+ if (tokenType == currentTokenType) {
return true;
}
+ addExpected(tokenType);
return false;
}
+ private boolean equalsToken(String a, String b) {
+ if (a == null) {
+ return b == null;
+ } else {
+ return a.equals(b) || !identifiersToUpper && a.equalsIgnoreCase(b);
+ }
+ }
+
+ private boolean isIdentifier() {
+ return currentTokenType == IDENTIFIER || nonKeywords != null && nonKeywords.get(currentTokenType);
+ }
+
private void addExpected(String token) {
if (expectedList != null) {
expectedList.add(token);
}
}
+ private void addExpected(int tokenType) {
+ if (expectedList != null) {
+ expectedList.add(TOKENS[tokenType]);
+ }
+ }
+
+ private void addMultipleExpected(int... tokenTypes) {
+ for (int tokenType : tokenTypes) {
+ expectedList.add(TOKENS[tokenType]);
+ }
+ }
+
private void read() {
- currentTokenQuoted = false;
if (expectedList != null) {
expectedList.clear();
}
- int[] types = characterTypes;
- lastParseIndex = parseIndex;
- int i = parseIndex;
- int type = types[i];
- while (type == 0) {
- type = types[++i];
- }
- int start = i;
- char[] chars = sqlCommandChars;
- char c = chars[i++];
- currentToken = "";
- switch (type) {
- case CHAR_NAME:
- while (true) {
- type = types[i];
- if (type != CHAR_NAME && type != CHAR_VALUE) {
- break;
- }
- i++;
+ int size = tokens.size();
+ if (tokenIndex + 1 < size) {
+ token = tokens.get(++tokenIndex);
+ currentTokenType = token.tokenType();
+ currentToken = token.asIdentifier();
+ if (currentToken != null && currentToken.length() > Constants.MAX_IDENTIFIER_LENGTH) {
+ throw DbException.get(ErrorCode.NAME_TOO_LONG_2, currentToken.substring(0, 32),
+ "" + Constants.MAX_IDENTIFIER_LENGTH);
+ } else if (currentTokenType == LITERAL) {
+ checkLiterals();
}
- currentToken = StringUtils.fromCacheOrNew(sqlCommand.substring(
- start, i));
- currentTokenType = getTokenType(currentToken);
- parseIndex = i;
- return;
- case CHAR_QUOTED: {
- String result = null;
- while (true) {
- for (int begin = i;; i++) {
- if (chars[i] == '\"') {
- if (result == null) {
- result = sqlCommand.substring(begin, i);
- } else {
- result += sqlCommand.substring(begin - 1, i);
- }
- break;
- }
- }
- if (chars[++i] != '\"') {
- break;
- }
- i++;
+ } else {
+ throw getSyntaxError();
+ }
+ }
+
+ private void checkLiterals() {
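+ // Enforces the ALLOW_LITERALS setting (NONE / NUMBERS / ALL): with NUMBERS,
+ // character and binary string literals are still rejected.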
+ if (!literalsChecked && session != null && !session.getAllowLiterals()) {
+ int allowed = database.getAllowLiterals();
+ if (allowed == Constants.ALLOW_LITERALS_NONE
+ || ((token instanceof Token.CharacterStringToken || token instanceof Token.BinaryStringToken)
+ && allowed != Constants.ALLOW_LITERALS_ALL)) {
+ throw DbException.get(ErrorCode.LITERALS_ARE_NOT_ALLOWED);
}
- currentToken = StringUtils.fromCacheOrNew(result);
- parseIndex = i;
- currentTokenQuoted = true;
- currentTokenType = IDENTIFIER;
- return;
}
- case CHAR_SPECIAL_2:
- if (types[i] == CHAR_SPECIAL_2) {
- i++;
+ }
+
+ private void initialize(String sql, ArrayList<Token> tokens, boolean stopOnCloseParen) {
+ if (sql == null) {
+ sql = "";
+ }
+ sqlCommand = sql;
+ this.tokens = tokens == null ? new Tokenizer(database, identifiersToUpper, identifiersToLower, nonKeywords)
+ .tokenize(sql, stopOnCloseParen) : tokens;
+ resetTokenIndex();
+ }
+
+ private void resetTokenIndex() {
+ tokenIndex = -1;
+ token = null;
+ currentTokenType = -1;
+ currentToken = null;
+ }
+
+ void setTokenIndex(int index) {
+ if (index != tokenIndex) {
+ if (expectedList != null) {
+ expectedList.clear();
}
- currentToken = sqlCommand.substring(start, i);
- currentTokenType = getSpecialType(currentToken);
- parseIndex = i;
- return;
- case CHAR_SPECIAL_1:
- currentToken = sqlCommand.substring(start, i);
- currentTokenType = getSpecialType(currentToken);
- parseIndex = i;
- return;
- case CHAR_VALUE:
- if (c == '0' && chars[i] == 'X') {
- // hex number
- long number = 0;
- start += 2;
- i++;
- while (true) {
- c = chars[i];
- if ((c < '0' || c > '9') && (c < 'A' || c > 'F')) {
- checkLiterals(false);
- currentValue = ValueInt.get((int) number);
- currentTokenType = VALUE;
- currentToken = "0";
- parseIndex = i;
- return;
- }
- number = (number << 4) + c -
- (c >= 'A' ? ('A' - 0xa) : ('0'));
- if (number > Integer.MAX_VALUE) {
- readHexDecimal(start, i);
- return;
+ token = tokens.get(index);
+ tokenIndex = index;
+ currentTokenType = token.tokenType();
+ currentToken = token.asIdentifier();
+ }
+ }
+
+ private static boolean isKeyword(int tokenType) {
+ return tokenType >= FIRST_KEYWORD && tokenType <= LAST_KEYWORD;
+ }
+
+ private boolean isKeyword(String s) {
+ return ParserUtil.isKeyword(s, !identifiersToUpper);
+ }
+
+ private String upperName(String name) {
+ return identifiersToUpper ? name : StringUtils.toUpperEnglish(name);
+ }
+
+ private Column parseColumnForTable(String columnName, boolean defaultNullable) {
+ Column column;
+ Mode mode = database.getMode();
+ if (mode.identityDataType && readIf("IDENTITY")) {
+ column = new Column(columnName, TypeInfo.TYPE_BIGINT);
+ parseCompatibilityIdentityOptions(column);
+ column.setPrimaryKey(true);
+ } else if (mode.serialDataTypes && readIf("BIGSERIAL")) {
+ column = new Column(columnName, TypeInfo.TYPE_BIGINT);
+ column.setIdentityOptions(new SequenceOptions(), false);
+ } else if (mode.serialDataTypes && readIf("SERIAL")) {
+ column = new Column(columnName, TypeInfo.TYPE_INTEGER);
+ column.setIdentityOptions(new SequenceOptions(), false);
+ } else {
+ column = parseColumnWithType(columnName);
+ }
+ if (readIf("INVISIBLE")) {
+ column.setVisible(false);
+ } else if (readIf("VISIBLE")) {
+ column.setVisible(true);
+ }
+ boolean defaultOnNull = false;
+ NullConstraintType nullConstraint = parseNotNullConstraint();
+ defaultIdentityGeneration: if (!column.isIdentity()) {
+ if (readIf(AS)) {
+ column.setGeneratedExpression(readExpression());
+ } else if (readIf(DEFAULT)) {
+ if (readIf(ON)) {
+ read(NULL);
+ defaultOnNull = true;
+ break defaultIdentityGeneration;
+ }
+ column.setDefaultExpression(session, readExpression());
+ } else if (readIf("GENERATED")) {
+ boolean always = readIf("ALWAYS");
+ if (!always) {
+ read("BY");
+ read(DEFAULT);
+ }
+ read(AS);
+ if (readIf("IDENTITY")) {
+ SequenceOptions options = new SequenceOptions();
+ if (readIf(OPEN_PAREN)) {
+ parseSequenceOptions(options, null, false, false);
+ read(CLOSE_PAREN);
}
- i++;
+ column.setIdentityOptions(options, always);
+ break defaultIdentityGeneration;
+ } else if (!always) {
+ throw getSyntaxError();
+ } else {
+ column.setGeneratedExpression(readExpression());
}
}
- long number = c - '0';
- while (true) {
- c = chars[i];
- if (c < '0' || c > '9') {
- if (c == '.' || c == 'E' || c == 'L') {
- readDecimal(start, i);
- break;
- }
- checkLiterals(false);
- currentValue = ValueInt.get((int) number);
- currentTokenType = VALUE;
- currentToken = "0";
- parseIndex = i;
- break;
- }
- number = number * 10 + (c - '0');
- if (number > Integer.MAX_VALUE) {
- readDecimal(start, i);
- break;
- }
- i++;
+ if (!column.isGenerated() && readIf(ON)) {
+ read("UPDATE");
+ column.setOnUpdateExpression(session, readExpression());
}
- return;
- case CHAR_DOT:
- if (types[i] != CHAR_VALUE) {
- currentTokenType = KEYWORD;
- currentToken = ".";
- parseIndex = i;
- return;
- }
- readDecimal(i - 1, i);
- return;
- case CHAR_STRING: {
- String result = null;
- while (true) {
- for (int begin = i;; i++) {
- if (chars[i] == '\'') {
- if (result == null) {
- result = sqlCommand.substring(begin, i);
- } else {
- result += sqlCommand.substring(begin - 1, i);
- }
- break;
- }
- }
- if (chars[++i] != '\'') {
- break;
- }
- i++;
+ nullConstraint = parseNotNullConstraint(nullConstraint);
+ if (parseCompatibilityIdentity(column, mode)) {
+ nullConstraint = parseNotNullConstraint(nullConstraint);
}
- currentToken = "'";
- checkLiterals(true);
- currentValue = ValueString.get(StringUtils.fromCacheOrNew(result),
- database.getMode().treatEmptyStringsAsNull);
- parseIndex = i;
- currentTokenType = VALUE;
- return;
- }
- case CHAR_DOLLAR_QUOTED_STRING: {
- String result = null;
- int begin = i - 1;
- while (types[i] == CHAR_DOLLAR_QUOTED_STRING) {
- i++;
- }
- result = sqlCommand.substring(begin, i);
- currentToken = "'";
- checkLiterals(true);
- currentValue = ValueString.get(StringUtils.fromCacheOrNew(result),
- database.getMode().treatEmptyStringsAsNull);
- parseIndex = i;
- currentTokenType = VALUE;
- return;
}
- case CHAR_END:
- currentToken = "";
- currentTokenType = END;
- parseIndex = i;
- return;
+ switch (nullConstraint) {
+ case NULL_IS_ALLOWED:
+ if (column.isIdentity()) {
+ throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName());
+ }
+ column.setNullable(true);
+ break;
+ case NULL_IS_NOT_ALLOWED:
+ column.setNullable(false);
+ break;
+ case NO_NULL_CONSTRAINT_FOUND:
+ if (!column.isIdentity()) {
+ column.setNullable(defaultNullable);
+ }
+ break;
default:
- throw getSyntaxError();
+ throw DbException.get(ErrorCode.UNKNOWN_MODE_1,
+ "Internal Error - unhandled case: " + nullConstraint.name());
}
- }
-
- private void checkLiterals(boolean text) {
- if (!session.getAllowLiterals()) {
- int allowed = database.getAllowLiterals();
- if (allowed == Constants.ALLOW_LITERALS_NONE ||
- (text && allowed != Constants.ALLOW_LITERALS_ALL)) {
- throw DbException.get(ErrorCode.LITERALS_ARE_NOT_ALLOWED);
+ if (!defaultOnNull) {
+ if (readIf(DEFAULT)) {
+ read(ON);
+ read(NULL);
+ defaultOnNull = true;
+ } else if (readIf("NULL_TO_DEFAULT")) {
+ defaultOnNull = true;
}
}
- }
-
- private void readHexDecimal(int start, int i) {
- char[] chars = sqlCommandChars;
- char c;
- do {
- c = chars[++i];
- } while ((c >= '0' && c <= '9') || (c >= 'A' && c <= 'F'));
- parseIndex = i;
- String sub = sqlCommand.substring(start, i);
- BigDecimal bd = new BigDecimal(new BigInteger(sub, 16));
- checkLiterals(false);
- currentValue = ValueDecimal.get(bd);
- currentTokenType = VALUE;
- }
-
- private void readDecimal(int start, int i) {
- char[] chars = sqlCommandChars;
- int[] types = characterTypes;
- // go until the first non-number
- while (true) {
- int t = types[i];
- if (t != CHAR_DOT && t != CHAR_VALUE) {
- break;
- }
- i++;
+ if (defaultOnNull) {
+ column.setDefaultOnNull(true);
}
- boolean containsE = false;
- if (chars[i] == 'E' || chars[i] == 'e') {
- containsE = true;
- i++;
- if (chars[i] == '+' || chars[i] == '-') {
- i++;
+ if (!column.isGenerated()) {
+ if (readIf("SEQUENCE")) {
+ column.setSequence(readSequence(), column.isGeneratedAlways());
}
- if (types[i] != CHAR_VALUE) {
- throw getSyntaxError();
+ }
+ if (readIf("SELECTIVITY")) {
+ column.setSelectivity(readNonNegativeInt());
+ }
+ if (mode.getEnum() == ModeEnum.MySQL) {
+ if (readIf("CHARACTER")) {
+ readIf(SET);
+ readMySQLCharset();
}
- while (types[++i] == CHAR_VALUE) {
- // go until the first non-number
+ if (readIf("COLLATE")) {
+ readMySQLCharset();
}
}
- parseIndex = i;
- String sub = sqlCommand.substring(start, i);
- checkLiterals(false);
- if (!containsE && sub.indexOf('.') < 0) {
- BigInteger bi = new BigInteger(sub);
- if (bi.compareTo(ValueLong.MAX) <= 0) {
- // parse constants like "10000000L"
- if (chars[i] == 'L') {
- parseIndex++;
- }
- currentValue = ValueLong.get(bi.longValue());
- currentTokenType = VALUE;
- return;
+ String comment = readCommentIf();
+ if (comment != null) {
+ column.setComment(comment);
+ }
+ return column;
+ }
+
+ private void parseCompatibilityIdentityOptions(Column column) {
+ SequenceOptions options = new SequenceOptions();
+ if (readIf(OPEN_PAREN)) {
+ options.setStartValue(ValueExpression.get(ValueBigint.get(readLong())));
+ if (readIf(COMMA)) {
+ options.setIncrement(ValueExpression.get(ValueBigint.get(readLong())));
}
+ read(CLOSE_PAREN);
}
- BigDecimal bd;
- try {
- bd = new BigDecimal(sub);
- } catch (NumberFormatException e) {
- throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, sub);
+ column.setIdentityOptions(options, false);
+ }
+
+ private String readCommentIf() {
+ if (readIf("COMMENT")) {
+ readIf(IS);
+ return readString();
}
- currentValue = ValueDecimal.get(bd);
- currentTokenType = VALUE;
+ return null;
}
- public Session getSession() {
- return session;
+ private Column parseColumnWithType(String columnName) {
+ TypeInfo typeInfo = readIfDataType();
+ if (typeInfo == null) {
+ String domainName = readIdentifierWithSchema();
+ return getColumnWithDomain(columnName, getSchema().getDomain(domainName));
+ }
+ return new Column(columnName, typeInfo);
}
- private void initialize(String sql) {
- if (sql == null) {
- sql = "";
+ private TypeInfo parseDataType() {
+ TypeInfo typeInfo = readIfDataType();
+ if (typeInfo == null) {
+ addExpected("data type");
+ throw getSyntaxError();
}
- originalSQL = sql;
- sqlCommand = sql;
- int len = sql.length() + 1;
- char[] command = new char[len];
- int[] types = new int[len];
- len--;
- sql.getChars(0, len, command, 0);
- boolean changed = false;
- command[len] = ' ';
- int startLoop = 0;
- int lastType = 0;
- for (int i = 0; i < len; i++) {
- char c = command[i];
- int type = 0;
- switch (c) {
- case '/':
- if (command[i + 1] == '*') {
- // block comment
- changed = true;
- command[i] = ' ';
- command[i + 1] = ' ';
- startLoop = i;
- i += 2;
- checkRunOver(i, len, startLoop);
- while (command[i] != '*' || command[i + 1] != '/') {
- command[i++] = ' ';
- checkRunOver(i, len, startLoop);
- }
- command[i] = ' ';
- command[i + 1] = ' ';
- i++;
- } else if (command[i + 1] == '/') {
- // single line comment
- changed = true;
- startLoop = i;
- while (true) {
- c = command[i];
- if (c == '\n' || c == '\r' || i >= len - 1) {
- break;
- }
- command[i++] = ' ';
- checkRunOver(i, len, startLoop);
- }
- } else {
- type = CHAR_SPECIAL_1;
- }
- break;
- case '-':
- if (command[i + 1] == '-') {
- // single line comment
- changed = true;
- startLoop = i;
- while (true) {
- c = command[i];
- if (c == '\n' || c == '\r' || i >= len - 1) {
- break;
- }
- command[i++] = ' ';
- checkRunOver(i, len, startLoop);
- }
- } else {
- type = CHAR_SPECIAL_1;
- }
+ return typeInfo;
+ }
+
+ private TypeInfo readIfDataType() {
+ TypeInfo typeInfo = readIfDataType1();
+ if (typeInfo != null) {
+ while (readIf(ARRAY)) {
+ typeInfo = parseArrayType(typeInfo);
+ }
+ }
+ return typeInfo;
+ }
+
+ private TypeInfo readIfDataType1() {
+ switch (currentTokenType) {
+ case IDENTIFIER:
+ if (token.isQuoted()) {
+ return null;
+ }
+ break;
+ case INTERVAL: {
+ read();
+ TypeInfo typeInfo = readIntervalQualifier();
+ if (typeInfo == null) {
+ throw intervalQualifierError();
+ }
+ return typeInfo;
+ }
+ case NULL:
+ read();
+ return TypeInfo.TYPE_NULL;
+ case ROW:
+ read();
+ return parseRowType();
+ case ARRAY:
+ // Partial compatibility with 1.4.200 and older versions
+ if (session.isQuirksMode()) {
+ read();
+ return parseArrayType(TypeInfo.TYPE_VARCHAR);
+ }
+ addExpected("data type");
+ throw getSyntaxError();
+ default:
+ if (isKeyword(currentToken)) {
break;
- case '$':
- if (command[i + 1] == '$' && (i == 0 || command[i - 1] <= ' ')) {
- // dollar quoted string
- changed = true;
- command[i] = ' ';
- command[i + 1] = ' ';
- startLoop = i;
- i += 2;
- checkRunOver(i, len, startLoop);
- while (command[i] != '$' || command[i + 1] != '$') {
- types[i++] = CHAR_DOLLAR_QUOTED_STRING;
- checkRunOver(i, len, startLoop);
- }
- command[i] = ' ';
- command[i + 1] = ' ';
- i++;
+ }
+ addExpected("data type");
+ throw getSyntaxError();
+ }
+ int index = tokenIndex;
+ String originalCase = currentToken;
+ read();
+ if (currentTokenType == DOT) {
+ setTokenIndex(index);
+ return null;
+ }
+ String original = upperName(originalCase);
+ switch (original) {
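+ // Normalize multi-word type names, e.g. CHARACTER VARYING,
+ // BINARY LARGE OBJECT, DOUBLE PRECISION, NATIONAL CHAR VARYING.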
+ case "BINARY":
+ if (readIf("VARYING")) {
+ original = "BINARY VARYING";
+ } else if (readIf("LARGE")) {
+ read("OBJECT");
+ original = "BINARY LARGE OBJECT";
+ } else if (variableBinary) {
+ original = "VARBINARY";
+ }
+ break;
+ case "CHAR":
+ if (readIf("VARYING")) {
+ original = "CHAR VARYING";
+ } else if (readIf("LARGE")) {
+ read("OBJECT");
+ original = "CHAR LARGE OBJECT";
+ }
+ break;
+ case "CHARACTER":
+ if (readIf("VARYING")) {
+ original = "CHARACTER VARYING";
+ } else if (readIf("LARGE")) {
+ read("OBJECT");
+ original = "CHARACTER LARGE OBJECT";
+ }
+ break;
+ case "DATETIME":
+ case "DATETIME2":
+ return parseDateTimeType(false);
+ case "DEC":
+ case "DECIMAL":
+ return parseNumericType(true);
+ case "DECFLOAT":
+ return parseDecfloatType();
+ case "DOUBLE":
+ if (readIf("PRECISION")) {
+ original = "DOUBLE PRECISION";
+ }
+ break;
+ case "ENUM":
+ return parseEnumType();
+ case "FLOAT":
+ return parseFloatType();
+ case "GEOMETRY":
+ return parseGeometryType();
+ case "LONG":
+ if (readIf("RAW")) {
+ original = "LONG RAW";
+ }
+ break;
+ case "NATIONAL":
+ if (readIf("CHARACTER")) {
+ if (readIf("VARYING")) {
+ original = "NATIONAL CHARACTER VARYING";
+ } else if (readIf("LARGE")) {
+ read("OBJECT");
+ original = "NATIONAL CHARACTER LARGE OBJECT";
} else {
- if (lastType == CHAR_NAME || lastType == CHAR_VALUE) {
- // $ inside an identifier is supported
- type = CHAR_NAME;
- } else {
- // but not at the start, to support PostgreSQL $1
- type = CHAR_SPECIAL_1;
- }
- }
- break;
- case '(':
- case ')':
- case '{':
- case '}':
- case '*':
- case ',':
- case ';':
- case '+':
- case '%':
- case '?':
- case '@':
- case ']':
- type = CHAR_SPECIAL_1;
- break;
- case '!':
- case '<':
- case '>':
- case '|':
- case '=':
- case ':':
- case '&':
- case '~':
- type = CHAR_SPECIAL_2;
- break;
- case '.':
- type = CHAR_DOT;
- break;
- case '\'':
- type = types[i] = CHAR_STRING;
- startLoop = i;
- while (command[++i] != '\'') {
- checkRunOver(i, len, startLoop);
+ original = "NATIONAL CHARACTER";
}
- break;
- case '[':
- if (database.getMode().squareBracketQuotedNames) {
- // SQL Server alias for "
- command[i] = '"';
- changed = true;
- type = types[i] = CHAR_QUOTED;
- startLoop = i;
- while (command[++i] != ']') {
- checkRunOver(i, len, startLoop);
- }
- command[i] = '"';
+ } else {
+ read("CHAR");
+ if (readIf("VARYING")) {
+ original = "NATIONAL CHAR VARYING";
} else {
- type = CHAR_SPECIAL_1;
- }
- break;
- case '`':
- // MySQL alias for ", but not case sensitive
- command[i] = '"';
- changed = true;
- type = types[i] = CHAR_QUOTED;
- startLoop = i;
- while (command[++i] != '`') {
- checkRunOver(i, len, startLoop);
- c = command[i];
- command[i] = Character.toUpperCase(c);
- }
- command[i] = '"';
- break;
- case '\"':
- type = types[i] = CHAR_QUOTED;
- startLoop = i;
- while (command[++i] != '\"') {
- checkRunOver(i, len, startLoop);
+ original = "NATIONAL CHAR";
}
- break;
- case '_':
- type = CHAR_NAME;
- break;
- default:
- if (c >= 'a' && c <= 'z') {
- if (identifiersToUpper) {
- command[i] = (char) (c - ('a' - 'A'));
- changed = true;
+ }
+ break;
+ case "NCHAR":
+ if (readIf("VARYING")) {
+ original = "NCHAR VARYING";
+ } else if (readIf("LARGE")) {
+ read("OBJECT");
+ original = "NCHAR LARGE OBJECT";
+ }
+ break;
+ case "NUMBER":
+ if (database.getMode().disallowedTypes.contains("NUMBER")) {
+ throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, "NUMBER");
+ }
+ if (!isToken(OPEN_PAREN)) {
+ return TypeInfo.getTypeInfo(Value.DECFLOAT, 40, -1, null);
+ }
+ //$FALL-THROUGH$
+ case "NUMERIC":
+ return parseNumericType(false);
+ case "SMALLDATETIME":
+ return parseDateTimeType(true);
+ case "TIME":
+ return parseTimeType();
+ case "TIMESTAMP":
+ return parseTimestampType();
+ }
+ // Domain names can't have multiple words without quotes
+ if (originalCase.length() == original.length()) {
+ Domain domain = database.getSchema(session.getCurrentSchemaName()).findDomain(originalCase);
+ if (domain != null) {
+ setTokenIndex(index);
+ return null;
+ }
+ }
+ Mode mode = database.getMode();
+ DataType dataType = DataType.getTypeByName(original, mode);
+ if (dataType == null || mode.disallowedTypes.contains(original)) {
+ throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1, original);
+ }
+ long precision;
+ int scale;
+ if (dataType.specialPrecisionScale) {
+ precision = dataType.defaultPrecision;
+ scale = dataType.defaultScale;
+ } else {
+ precision = -1L;
+ scale = -1;
+ }
+ int t = dataType.type;
+ if (database.getIgnoreCase() && t == Value.VARCHAR && !equalsToken("VARCHAR_CASESENSITIVE", original)) {
+ dataType = DataType.getDataType(t = Value.VARCHAR_IGNORECASE);
+ }
+ if ((dataType.supportsPrecision || dataType.supportsScale) && readIf(OPEN_PAREN)) {
+ if (!readIf("MAX")) {
+ if (dataType.supportsPrecision) {
+ precision = readPrecision(t);
+ if (precision < dataType.minPrecision) {
+ throw getInvalidPrecisionException(dataType, precision);
+ } else if (precision > dataType.maxPrecision)
+ badPrecision: {
+ if (session.isQuirksMode() || session.isTruncateLargeLength()) {
+ switch (dataType.type) {
+ case Value.CHAR:
+ case Value.VARCHAR:
+ case Value.VARCHAR_IGNORECASE:
+ case Value.BINARY:
+ case Value.VARBINARY:
+ case Value.JAVA_OBJECT:
+ case Value.JSON:
+ precision = dataType.maxPrecision;
+ break badPrecision;
+ }
+ }
+ throw getInvalidPrecisionException(dataType, precision);
}
- type = CHAR_NAME;
- } else if (c >= 'A' && c <= 'Z') {
- type = CHAR_NAME;
- } else if (c >= '0' && c <= '9') {
- type = CHAR_VALUE;
- } else {
- if (c <= ' ' || Character.isSpaceChar(c)) {
- // whitespace
- } else if (Character.isJavaIdentifierPart(c)) {
- type = CHAR_NAME;
- if (identifiersToUpper) {
- char u = Character.toUpperCase(c);
- if (u != c) {
- command[i] = u;
- changed = true;
+ if (dataType.supportsScale) {
+ if (readIf(COMMA)) {
+ scale = readInt();
+ if (scale < dataType.minScale || scale > dataType.maxScale) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale),
+ Integer.toString(dataType.minScale), Integer.toString(dataType.maxScale));
}
}
- } else {
- type = CHAR_SPECIAL_1;
+ }
+ } else {
+ scale = readInt();
+ if (scale < dataType.minScale || scale > dataType.maxScale) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale),
+ Integer.toString(dataType.minScale), Integer.toString(dataType.maxScale));
}
}
}
- types[i] = type;
- lastType = type;
- }
- sqlCommandChars = command;
- types[len] = CHAR_END;
- characterTypes = types;
- if (changed) {
- sqlCommand = new String(command);
- }
- parseIndex = 0;
- }
-
- private void checkRunOver(int i, int len, int startLoop) {
- if (i >= len) {
- parseIndex = startLoop;
- throw getSyntaxError();
- }
- }
-
- private int getSpecialType(String s) {
- char c0 = s.charAt(0);
- if (s.length() == 1) {
- switch (c0) {
- case '?':
- case '$':
- return PARAMETER;
- case '@':
- return AT;
- case '+':
- return PLUS;
- case '-':
- return MINUS;
- case '{':
- case '}':
- case '*':
- case '/':
- case '%':
- case ';':
- case ',':
- case ':':
- case '[':
- case ']':
- case '~':
- return KEYWORD;
- case '(':
- return OPEN;
- case ')':
- return CLOSE;
- case '<':
- return SMALLER;
- case '>':
- return BIGGER;
- case '=':
- return EQUAL;
- default:
- break;
- }
- } else if (s.length() == 2) {
- switch (c0) {
- case ':':
- if ("::".equals(s)) {
- return KEYWORD;
- } else if (":=".equals(s)) {
- return KEYWORD;
- }
- break;
- case '>':
- if (">=".equals(s)) {
- return BIGGER_EQUAL;
- }
- break;
- case '<':
- if ("<=".equals(s)) {
- return SMALLER_EQUAL;
- } else if ("<>".equals(s)) {
- return NOT_EQUAL;
- }
- break;
- case '!':
- if ("!=".equals(s)) {
- return NOT_EQUAL;
- } else if ("!~".equals(s)) {
- return KEYWORD;
- }
- break;
- case '|':
- if ("||".equals(s)) {
- return STRING_CONCAT;
- }
- break;
- case '&':
- if ("&&".equals(s)) {
- return SPATIAL_INTERSECTS;
- }
- break;
+ read(CLOSE_PAREN);
+ }
+ if (mode.allNumericTypesHavePrecision && DataType.isNumericType(dataType.type)) {
+ if (readIf(OPEN_PAREN)) {
+ // Support for MySQL: INT(11), MEDIUMINT(8) and so on.
+ // Just ignore the precision.
+ readNonNegativeInt();
+ read(CLOSE_PAREN);
}
+ readIf("UNSIGNED");
}
- throw getSyntaxError();
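+ // FOR BIT DATA (DB2 syntax) turns a character string type into a binary one.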
+ if (mode.forBitData && DataType.isStringType(t)) {
+ if (readIf(FOR)) {
+ read("BIT");
+ read("DATA");
+ dataType = DataType.getDataType(t = Value.VARBINARY);
+ }
+ }
+ return TypeInfo.getTypeInfo(t, precision, scale, null);
}
- private int getTokenType(String s) {
- int len = s.length();
- if (len == 0) {
- throw getSyntaxError();
- }
- if (!identifiersToUpper) {
- // if not yet converted to uppercase, do it now
- s = StringUtils.toUpperEnglish(s);
- }
- return getSaveTokenType(s, database.getMode().supportOffsetFetch);
+ private static DbException getInvalidPrecisionException(DataType dataType, long precision) {
+ return DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision),
+ Long.toString(dataType.minPrecision), Long.toString(dataType.maxPrecision));
}
- private boolean isKeyword(String s) {
- if (!identifiersToUpper) {
- // if not yet converted to uppercase, do it now
- s = StringUtils.toUpperEnglish(s);
- }
- return isKeyword(s, false);
+ private static Column getColumnWithDomain(String columnName, Domain domain) {
+ Column column = new Column(columnName, domain.getDataType());
+ column.setComment(domain.getComment());
+ column.setDomain(domain);
+ return column;
}
- /**
- * Checks if this string is a SQL keyword.
- *
- * @param s the token to check
- * @param supportOffsetFetch if OFFSET and FETCH are keywords
- * @return true if it is a keyword
- */
- public static boolean isKeyword(String s, boolean supportOffsetFetch) {
- if (s == null || s.length() == 0) {
- return false;
+ private TypeInfo parseFloatType() {
+ int type = Value.DOUBLE;
+ int precision;
+ if (readIf(OPEN_PAREN)) {
+ precision = readNonNegativeInt();
+ read(CLOSE_PAREN);
+ if (precision < 1 || precision > 53) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "1", "53");
+ }
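+ // FLOAT(1..24) becomes single-precision REAL, FLOAT(25..53) stays DOUBLE,
+ // mirroring the 24- and 53-bit IEEE 754 significand widths.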
+ if (precision <= 24) {
+ type = Value.REAL;
+ }
+ } else {
+ precision = 0;
}
- return getSaveTokenType(s, supportOffsetFetch) != IDENTIFIER;
+ return TypeInfo.getTypeInfo(type, precision, -1, null);
}
- private static int getSaveTokenType(String s, boolean supportOffsetFetch) {
- switch (s.charAt(0)) {
- case 'C':
- if (s.equals("CURRENT_TIMESTAMP")) {
- return CURRENT_TIMESTAMP;
- } else if (s.equals("CURRENT_TIME")) {
- return CURRENT_TIME;
- } else if (s.equals("CURRENT_DATE")) {
- return CURRENT_DATE;
- }
- return getKeywordOrIdentifier(s, "CROSS", KEYWORD);
- case 'D':
- return getKeywordOrIdentifier(s, "DISTINCT", KEYWORD);
- case 'E':
- if ("EXCEPT".equals(s)) {
- return KEYWORD;
- }
- return getKeywordOrIdentifier(s, "EXISTS", KEYWORD);
- case 'F':
- if ("FROM".equals(s)) {
- return KEYWORD;
- } else if ("FOR".equals(s)) {
- return KEYWORD;
- } else if ("FULL".equals(s)) {
- return KEYWORD;
- } else if (supportOffsetFetch && "FETCH".equals(s)) {
- return KEYWORD;
- }
- return getKeywordOrIdentifier(s, "FALSE", FALSE);
- case 'G':
- return getKeywordOrIdentifier(s, "GROUP", KEYWORD);
- case 'H':
- return getKeywordOrIdentifier(s, "HAVING", KEYWORD);
- case 'I':
- if ("INNER".equals(s)) {
- return KEYWORD;
- } else if ("INTERSECT".equals(s)) {
- return KEYWORD;
- }
- return getKeywordOrIdentifier(s, "IS", KEYWORD);
- case 'J':
- return getKeywordOrIdentifier(s, "JOIN", KEYWORD);
- case 'L':
- if ("LIMIT".equals(s)) {
- return KEYWORD;
- }
- return getKeywordOrIdentifier(s, "LIKE", KEYWORD);
- case 'M':
- return getKeywordOrIdentifier(s, "MINUS", KEYWORD);
- case 'N':
- if ("NOT".equals(s)) {
- return KEYWORD;
- } else if ("NATURAL".equals(s)) {
- return KEYWORD;
- }
- return getKeywordOrIdentifier(s, "NULL", NULL);
- case 'O':
- if ("ON".equals(s)) {
- return KEYWORD;
- } else if (supportOffsetFetch && "OFFSET".equals(s)) {
- return KEYWORD;
- }
- return getKeywordOrIdentifier(s, "ORDER", KEYWORD);
- case 'P':
- return getKeywordOrIdentifier(s, "PRIMARY", KEYWORD);
- case 'R':
- return getKeywordOrIdentifier(s, "ROWNUM", ROWNUM);
- case 'S':
- if (s.equals("SYSTIMESTAMP")) {
- return CURRENT_TIMESTAMP;
- } else if (s.equals("SYSTIME")) {
- return CURRENT_TIME;
- } else if (s.equals("SYSDATE")) {
- return CURRENT_TIMESTAMP;
- }
- return getKeywordOrIdentifier(s, "SELECT", KEYWORD);
- case 'T':
- if ("TODAY".equals(s)) {
- return CURRENT_DATE;
- }
- return getKeywordOrIdentifier(s, "TRUE", TRUE);
- case 'U':
- if ("UNIQUE".equals(s)) {
- return KEYWORD;
+ private TypeInfo parseNumericType(boolean decimal) {
+ long precision = -1L;
+ int scale = -1;
+ if (readIf(OPEN_PAREN)) {
+ precision = readPrecision(Value.NUMERIC);
+ if (precision < 1) {
+ throw getInvalidNumericPrecisionException(precision);
+ } else if (precision > Constants.MAX_NUMERIC_PRECISION) {
+ if (session.isQuirksMode() || session.isTruncateLargeLength()) {
+ precision = Constants.MAX_NUMERIC_PRECISION;
+ } else {
+ throw getInvalidNumericPrecisionException(precision);
+ }
}
- return getKeywordOrIdentifier(s, "UNION", KEYWORD);
- case 'W':
- if ("WITH".equals(s)) {
- return KEYWORD;
+ if (readIf(COMMA)) {
+ scale = readInt();
+ if (scale < 0 || scale > ValueNumeric.MAXIMUM_SCALE) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale),
+ "0", "" + ValueNumeric.MAXIMUM_SCALE);
+ }
}
- return getKeywordOrIdentifier(s, "WHERE", KEYWORD);
- default:
- return IDENTIFIER;
+ read(CLOSE_PAREN);
}
+ return TypeInfo.getTypeInfo(Value.NUMERIC, precision, scale, decimal ? ExtTypeInfoNumeric.DECIMAL : null);
}
- private static int getKeywordOrIdentifier(String s1, String s2,
- int keywordType) {
- if (s1.equals(s2)) {
- return keywordType;
+ private TypeInfo parseDecfloatType() {
+ long precision = -1L;
+ if (readIf(OPEN_PAREN)) {
+ precision = readPrecision(Value.DECFLOAT);
+ if (precision < 1 || precision > Constants.MAX_NUMERIC_PRECISION) {
+ throw getInvalidNumericPrecisionException(precision);
+ }
+ read(CLOSE_PAREN);
}
- return IDENTIFIER;
+ return TypeInfo.getTypeInfo(Value.DECFLOAT, precision, -1, null);
}
- private Column parseColumnForTable(String columnName,
- boolean defaultNullable) {
- Column column;
- boolean isIdentity = false;
- if (readIf("IDENTITY") || readIf("BIGSERIAL")) {
- column = new Column(columnName, Value.LONG);
- column.setOriginalSQL("IDENTITY");
- parseAutoIncrement(column);
- // PostgreSQL compatibility
- if (!database.getMode().serialColumnIsNotPK) {
- column.setPrimaryKey(true);
- }
- } else if (readIf("SERIAL")) {
- column = new Column(columnName, Value.INT);
- column.setOriginalSQL("SERIAL");
- parseAutoIncrement(column);
- // PostgreSQL compatibility
- if (!database.getMode().serialColumnIsNotPK) {
- column.setPrimaryKey(true);
+ private static DbException getInvalidNumericPrecisionException(long precision) {
+ return DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Long.toString(precision), "1",
+ "" + Constants.MAX_NUMERIC_PRECISION);
+ }
+
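+ // Parses e.g. TIME, TIME(9), TIME WITH TIME ZONE or TIME(3) WITHOUT TIME ZONE;
+ // WITHOUT TIME ZONE is the default.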
+ private TypeInfo parseTimeType() {
+ int scale = -1;
+ if (readIf(OPEN_PAREN)) {
+ scale = readNonNegativeInt();
+ if (scale > ValueTime.MAXIMUM_SCALE) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0",
+ /* Folds to a constant */ "" + ValueTime.MAXIMUM_SCALE);
}
- } else {
- column = parseColumnWithType(columnName);
+ read(CLOSE_PAREN);
}
- if (readIf("NOT")) {
- read("NULL");
- column.setNullable(false);
- } else if (readIf("NULL")) {
- column.setNullable(true);
- } else {
- // domains may be defined as not nullable
- column.setNullable(defaultNullable & column.isNullable());
- }
- if (readIf("AS")) {
- if (isIdentity) {
- getSyntaxError();
- }
- Expression expr = readExpression();
- column.setComputedExpression(expr);
- } else if (readIf("DEFAULT")) {
- Expression defaultExpression = readExpression();
- column.setDefaultExpression(session, defaultExpression);
- } else if (readIf("GENERATED")) {
- if (!readIf("ALWAYS")) {
- read("BY");
- read("DEFAULT");
- }
- read("AS");
- read("IDENTITY");
- long start = 1, increment = 1;
- if (readIf("(")) {
- read("START");
- readIf("WITH");
- start = readLong();
- readIf(",");
- if (readIf("INCREMENT")) {
- readIf("BY");
- increment = readLong();
- }
- read(")");
- }
- column.setPrimaryKey(true);
- column.setAutoIncrement(true, start, increment);
+ int type = Value.TIME;
+ if (readIf(WITH)) {
+ read("TIME");
+ read("ZONE");
+ type = Value.TIME_TZ;
+ } else if (readIf("WITHOUT")) {
+ read("TIME");
+ read("ZONE");
}
- if (readIf("NOT")) {
- read("NULL");
- column.setNullable(false);
+ return TypeInfo.getTypeInfo(type, -1L, scale, null);
+ }
+
+ private TypeInfo parseTimestampType() {
+ int scale = -1;
+ if (readIf(OPEN_PAREN)) {
+ scale = readNonNegativeInt();
+ // Allow non-standard TIMESTAMP(..., ...) syntax
+ if (readIf(COMMA)) {
+ scale = readNonNegativeInt();
+ }
+ if (scale > ValueTimestamp.MAXIMUM_SCALE) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0",
+ /* Folds to a constant */ "" + ValueTimestamp.MAXIMUM_SCALE);
+ }
+ read(CLOSE_PAREN);
+ }
+ int type = Value.TIMESTAMP;
+ if (readIf(WITH)) {
+ read("TIME");
+ read("ZONE");
+ type = Value.TIMESTAMP_TZ;
+ } else if (readIf("WITHOUT")) {
+ read("TIME");
+ read("ZONE");
+ }
+ return TypeInfo.getTypeInfo(type, -1L, scale, null);
+ }
+
+ private TypeInfo parseDateTimeType(boolean smallDateTime) {
+ int scale;
+ if (smallDateTime) {
+ scale = 0;
} else {
- readIf("NULL");
+ scale = -1;
+ if (readIf(OPEN_PAREN)) {
+ scale = readNonNegativeInt();
+ if (scale > ValueTimestamp.MAXIMUM_SCALE) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0",
+ /* Folds to a constant */ "" + ValueTimestamp.MAXIMUM_SCALE);
+ }
+ read(CLOSE_PAREN);
+ }
}
- if (readIf("AUTO_INCREMENT") || readIf("BIGSERIAL") || readIf("SERIAL")) {
- parseAutoIncrement(column);
- if (readIf("NOT")) {
- read("NULL");
+ return TypeInfo.getTypeInfo(Value.TIMESTAMP, -1L, scale, null);
+ }
+
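+ // Reads a standard interval qualifier such as YEAR, MONTH, DAY(3) TO SECOND(6)
+ // or MINUTE TO SECOND; returns null if the current token does not start one.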
+ private TypeInfo readIntervalQualifier() {
+ IntervalQualifier qualifier;
+ int precision = -1, scale = -1;
+ switch (currentTokenType) {
+ case YEAR:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ precision = readNonNegativeInt();
+ read(CLOSE_PAREN);
}
- } else if (readIf("IDENTITY")) {
- parseAutoIncrement(column);
- column.setPrimaryKey(true);
- if (readIf("NOT")) {
- read("NULL");
+ if (readIf(TO)) {
+ read(MONTH);
+ qualifier = IntervalQualifier.YEAR_TO_MONTH;
+ } else {
+ qualifier = IntervalQualifier.YEAR;
+ }
+ break;
+ case MONTH:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ precision = readNonNegativeInt();
+ read(CLOSE_PAREN);
}
+ qualifier = IntervalQualifier.MONTH;
+ break;
+ case DAY:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ precision = readNonNegativeInt();
+ read(CLOSE_PAREN);
+ }
+ if (readIf(TO)) {
+ switch (currentTokenType) {
+ case HOUR:
+ read();
+ qualifier = IntervalQualifier.DAY_TO_HOUR;
+ break;
+ case MINUTE:
+ read();
+ qualifier = IntervalQualifier.DAY_TO_MINUTE;
+ break;
+ case SECOND:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ scale = readNonNegativeInt();
+ read(CLOSE_PAREN);
+ }
+ qualifier = IntervalQualifier.DAY_TO_SECOND;
+ break;
+ default:
+ throw intervalDayError();
+ }
+ } else {
+ qualifier = IntervalQualifier.DAY;
+ }
+ break;
+ case HOUR:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ precision = readNonNegativeInt();
+ read(CLOSE_PAREN);
+ }
+ if (readIf(TO)) {
+ switch (currentTokenType) {
+ case MINUTE:
+ read();
+ qualifier = IntervalQualifier.HOUR_TO_MINUTE;
+ break;
+ case SECOND:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ scale = readNonNegativeInt();
+ read(CLOSE_PAREN);
+ }
+ qualifier = IntervalQualifier.HOUR_TO_SECOND;
+ break;
+ default:
+ throw intervalHourError();
+ }
+ } else {
+ qualifier = IntervalQualifier.HOUR;
+ }
+ break;
+ case MINUTE:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ precision = readNonNegativeInt();
+ read(CLOSE_PAREN);
+ }
+ if (readIf(TO)) {
+ read(SECOND);
+ if (readIf(OPEN_PAREN)) {
+ scale = readNonNegativeInt();
+ read(CLOSE_PAREN);
+ }
+ qualifier = IntervalQualifier.MINUTE_TO_SECOND;
+ } else {
+ qualifier = IntervalQualifier.MINUTE;
+ }
+ break;
+ case SECOND:
+ read();
+ if (readIf(OPEN_PAREN)) {
+ precision = readNonNegativeInt();
+ if (readIf(COMMA)) {
+ scale = readNonNegativeInt();
+ }
+ read(CLOSE_PAREN);
+ }
+ qualifier = IntervalQualifier.SECOND;
+ break;
+ default:
+ return null;
}
- if (readIf("NULL_TO_DEFAULT")) {
- column.setConvertNullToDefault(true);
+ if (precision >= 0) {
+ if (precision == 0 || precision > ValueInterval.MAXIMUM_PRECISION) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "1",
+ /* Folds to a constant */ "" + ValueInterval.MAXIMUM_PRECISION);
+ }
}
- if (readIf("SEQUENCE")) {
- Sequence sequence = readSequence();
- column.setSequence(sequence);
+ if (scale >= 0) {
+ if (scale > ValueInterval.MAXIMUM_SCALE) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_SCALE, Integer.toString(scale), "0",
+ /* Folds to a constant */ "" + ValueInterval.MAXIMUM_SCALE);
+ }
}
- if (readIf("SELECTIVITY")) {
- int value = readPositiveInt();
- column.setSelectivity(value);
+ return TypeInfo.getTypeInfo(qualifier.ordinal() + Value.INTERVAL_YEAR, precision, scale, null);
+ }
+
+ private DbException intervalQualifierError() {
+ if (expectedList != null) {
+ addMultipleExpected(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND);
}
- String comment = readCommentIf();
- if (comment != null) {
- column.setComment(comment);
+ return getSyntaxError();
+ }
+
+ private DbException intervalDayError() {
+ if (expectedList != null) {
+ addMultipleExpected(HOUR, MINUTE, SECOND);
}
- return column;
+ return getSyntaxError();
}
- private void parseAutoIncrement(Column column) {
- long start = 1, increment = 1;
- if (readIf("(")) {
- start = readLong();
- if (readIf(",")) {
- increment = readLong();
- }
- read(")");
+ private DbException intervalHourError() {
+ if (expectedList != null) {
+ addMultipleExpected(MINUTE, SECOND);
}
- column.setAutoIncrement(true, start, increment);
+ return getSyntaxError();
}
- private String readCommentIf() {
- if (readIf("COMMENT")) {
- readIf("IS");
- return readString();
+ private TypeInfo parseArrayType(TypeInfo componentType) {
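+ // e.g. INTEGER ARRAY[10]; a bare ARRAY leaves the maximum cardinality at the default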
+ int precision = -1;
+ if (readIf(OPEN_BRACKET)) {
+ // Maximum cardinality may be zero
+ precision = readNonNegativeInt();
+ if (precision > Constants.MAX_ARRAY_CARDINALITY) {
+ throw DbException.get(ErrorCode.INVALID_VALUE_PRECISION, Integer.toString(precision), "0",
+ /* Folds to a constant */ "" + Constants.MAX_ARRAY_CARDINALITY);
+ }
+ read(CLOSE_BRACKET);
}
- return null;
+ return TypeInfo.getTypeInfo(Value.ARRAY, precision, -1, componentType);
}
- private Column parseColumnWithType(String columnName) {
- String original = currentToken;
- boolean regular = false;
- if (readIf("LONG")) {
- if (readIf("RAW")) {
- original += " RAW";
+ private TypeInfo parseEnumType() {
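+ // e.g. ENUM('S', 'M', 'L'); the enumerators are read as string literals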
+ read(OPEN_PAREN);
+ ArrayList<String> enumeratorList = new ArrayList<>();
+ do {
+ enumeratorList.add(readString());
+ } while (readIfMore());
+ return TypeInfo.getTypeInfo(Value.ENUM, -1L, -1, new ExtTypeInfoEnum(enumeratorList.toArray(new String[0])));
+ }
+
+ private TypeInfo parseGeometryType() {
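+ // e.g. GEOMETRY, GEOMETRY(POINT) or GEOMETRY(POINT Z, 4326) with an SRID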
+ ExtTypeInfoGeometry extTypeInfo;
+ if (readIf(OPEN_PAREN)) {
+ int type = 0;
+ if (currentTokenType != IDENTIFIER || token.isQuoted()) {
+ throw getSyntaxError();
}
- } else if (readIf("DOUBLE")) {
- if (readIf("PRECISION")) {
- original += " PRECISION";
+ if (!readIf("GEOMETRY")) {
+ try {
+ type = EWKTUtils.parseGeometryType(currentToken);
+ read();
+ if (type / 1_000 == 0 && currentTokenType == IDENTIFIER && !token.isQuoted()) {
+ type += EWKTUtils.parseDimensionSystem(currentToken) * 1_000;
+ read();
+ }
+ } catch (IllegalArgumentException ex) {
+ throw getSyntaxError();
+ }
}
- } else if (readIf("CHARACTER")) {
- if (readIf("VARYING")) {
- original += " VARYING";
+ Integer srid = null;
+ if (readIf(COMMA)) {
+ srid = readInt();
}
+ read(CLOSE_PAREN);
+ extTypeInfo = new ExtTypeInfoGeometry(type, srid);
} else {
- regular = true;
+ extTypeInfo = null;
}
- long precision = -1;
- int displaySize = -1;
- int scale = -1;
- String comment = null;
- Column templateColumn = null;
- DataType dataType;
- if (!identifiersToUpper) {
- original = StringUtils.toUpperEnglish(original);
- }
- UserDataType userDataType = database.findUserDataType(original);
- if (userDataType != null) {
- templateColumn = userDataType.getColumn();
- dataType = DataType.getDataType(templateColumn.getType());
- comment = templateColumn.getComment();
- original = templateColumn.getOriginalSQL();
- precision = templateColumn.getPrecision();
- displaySize = templateColumn.getDisplaySize();
- scale = templateColumn.getScale();
- } else {
- dataType = DataType.getTypeByName(original);
- if (dataType == null) {
- throw DbException.get(ErrorCode.UNKNOWN_DATA_TYPE_1,
- currentToken);
+ return TypeInfo.getTypeInfo(Value.GEOMETRY, -1L, -1, extTypeInfo);
+ }
+
+ private TypeInfo parseRowType() {
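+ // e.g. ROW(ID INT, NAME VARCHAR); duplicate field names are rejected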
+ read(OPEN_PAREN);
+ LinkedHashMap<String, TypeInfo> fields = new LinkedHashMap<>();
+ do {
+ String name = readIdentifier();
+ if (fields.putIfAbsent(name, parseDataType()) != null) {
+ throw DbException.get(ErrorCode.DUPLICATE_COLUMN_NAME_1, name);
}
- }
- if (database.getIgnoreCase() && dataType.type == Value.STRING &&
- !equalsToken("VARCHAR_CASESENSITIVE", original)) {
- original = "VARCHAR_IGNORECASE";
- dataType = DataType.getTypeByName(original);
- }
- if (regular) {
+ } while (readIfMore());
+ return TypeInfo.getTypeInfo(Value.ROW, -1L, -1, new ExtTypeInfoRow(fields));
+ }
+
+ private long readPrecision(int valueType) {
+ long p = readPositiveLong();
+ if (currentTokenType != IDENTIFIER || token.isQuoted()) {
+ return p;
+ }
+ if ((valueType == Value.BLOB || valueType == Value.CLOB) && currentToken.length() == 1) {
+ long mul;
+ /*
+ * Convert a-z to A-Z. This method is safe, because only A-Z
+ * characters are considered below.
+ */
+ switch (currentToken.charAt(0) & 0xffdf) {
+ case 'K':
+ mul = 1L << 10;
+ break;
+ case 'M':
+ mul = 1L << 20;
+ break;
+ case 'G':
+ mul = 1L << 30;
+ break;
+ case 'T':
+ mul = 1L << 40;
+ break;
+ case 'P':
+ mul = 1L << 50;
+ break;
+ default:
+ throw getSyntaxError();
+ }
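+ // reject the suffix when the multiplication would overflow a signed 64-bit long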
+ if (p > Long.MAX_VALUE / mul) {
+ throw DbException.getInvalidValueException("precision", p + currentToken);
+ }
+ p *= mul;
read();
+ if (currentTokenType != IDENTIFIER || token.isQuoted()) {
+ return p;
+ }
}
- precision = precision == -1 ? dataType.defaultPrecision : precision;
- displaySize = displaySize == -1 ? dataType.defaultDisplaySize
- : displaySize;
- scale = scale == -1 ? dataType.defaultScale : scale;
- if (dataType.supportsPrecision || dataType.supportsScale) {
- if (readIf("(")) {
- if (!readIf("MAX")) {
- long p = readLong();
- if (readIf("K")) {
- p *= 1024;
- } else if (readIf("M")) {
- p *= 1024 * 1024;
- } else if (readIf("G")) {
- p *= 1024 * 1024 * 1024;
- }
- if (p > Long.MAX_VALUE) {
- p = Long.MAX_VALUE;
- }
- original += "(" + p;
- // Oracle syntax
- readIf("CHAR");
- if (dataType.supportsScale) {
- if (readIf(",")) {
- scale = readInt();
- original += ", " + scale;
- } else {
- // special case: TIMESTAMP(5) actually means
- // TIMESTAMP(23, 5)
- if (dataType.type == Value.TIMESTAMP) {
- scale = MathUtils.convertLongToInt(p);
- p = precision;
- } else {
- scale = 0;
- }
- }
- }
- precision = p;
- displaySize = MathUtils.convertLongToInt(precision);
- original += ")";
- }
- read(")");
- }
- } else if (readIf("(")) {
- // Support for MySQL: INT(11), MEDIUMINT(8) and so on.
- // Just ignore the precision.
- readPositiveInt();
- read(")");
- }
- if (readIf("FOR")) {
- read("BIT");
- read("DATA");
- if (dataType.type == Value.STRING) {
- dataType = DataType.getTypeByName("BINARY");
- }
- }
- // MySQL compatibility
- readIf("UNSIGNED");
- int type = dataType.type;
- if (scale > precision) {
- throw DbException.get(ErrorCode.INVALID_VALUE_2,
- Integer.toString(scale), "scale (precision = " + precision +
- ")");
- }
- Column column = new Column(columnName, type, precision, scale,
- displaySize);
- if (templateColumn != null) {
- column.setNullable(templateColumn.isNullable());
- column.setDefaultExpression(session,
- templateColumn.getDefaultExpression());
- int selectivity = templateColumn.getSelectivity();
- if (selectivity != Constants.SELECTIVITY_DEFAULT) {
- column.setSelectivity(selectivity);
- }
- Expression checkConstraint = templateColumn.getCheckConstraint(
- session, columnName);
- column.addCheckConstraint(session, checkConstraint);
- }
- column.setComment(comment);
- column.setOriginalSQL(original);
- return column;
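+ // Character string types may carry a length unit, e.g. VARCHAR(10 CHARACTERS)
+ // or VARCHAR(10 OCTETS); some compatibility modes also accept CHAR and BYTE.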
+ switch (valueType) {
+ case Value.VARCHAR:
+ case Value.VARCHAR_IGNORECASE:
+ case Value.CLOB:
+ case Value.CHAR:
+ if (!readIf("CHARACTERS") && !readIf("OCTETS")) {
+ if (database.getMode().charAndByteLengthUnits && !readIf("CHAR")) {
+ readIf("BYTE");
+ }
+ }
+ }
+ return p;
}
private Prepared parseCreate() {
boolean orReplace = false;
- if (readIf("OR")) {
+ if (readIf(OR)) {
read("REPLACE");
orReplace = true;
}
@@ -4104,7 +6728,7 @@ private Prepared parseCreate() {
return parseCreateFunctionAlias(force);
} else if (readIf("SEQUENCE")) {
return parseCreateSequence();
- } else if (readIf("USER")) {
+ } else if (readIf(USER)) {
return parseCreateUser();
} else if (readIf("TRIGGER")) {
return parseCreateTrigger(force);
@@ -4114,12 +6738,8 @@ private Prepared parseCreate() {
return parseCreateSchema();
} else if (readIf("CONSTANT")) {
return parseCreateConstant();
- } else if (readIf("DOMAIN")) {
- return parseCreateUserDataType();
- } else if (readIf("TYPE")) {
- return parseCreateUserDataType();
- } else if (readIf("DATATYPE")) {
- return parseCreateUserDataType();
+ } else if (readIf("DOMAIN") || readIf("TYPE") || readIf("DATATYPE")) {
+ return parseCreateDomain();
} else if (readIf("AGGREGATE")) {
return parseCreateAggregate(force);
} else if (readIf("LINKED")) {
@@ -4137,77 +6757,114 @@ private Prepared parseCreate() {
if (readIf("LINKED")) {
return parseCreateLinkedTable(true, false, force);
}
- read("TABLE");
+ read(TABLE);
return parseCreateTable(true, false, cached);
} else if (readIf("GLOBAL")) {
read("TEMPORARY");
if (readIf("LINKED")) {
return parseCreateLinkedTable(true, true, force);
}
- read("TABLE");
+ read(TABLE);
return parseCreateTable(true, true, cached);
} else if (readIf("TEMP") || readIf("TEMPORARY")) {
if (readIf("LINKED")) {
return parseCreateLinkedTable(true, true, force);
}
- read("TABLE");
+ read(TABLE);
return parseCreateTable(true, true, cached);
- } else if (readIf("TABLE")) {
+ } else if (readIf(TABLE)) {
if (!cached && !memory) {
cached = database.getDefaultTableType() == Table.TYPE_CACHED;
}
return parseCreateTable(false, false, cached);
+ } else if (readIf("SYNONYM")) {
+ return parseCreateSynonym(orReplace);
} else {
boolean hash = false, primaryKey = false;
boolean unique = false, spatial = false;
String indexName = null;
Schema oldSchema = null;
boolean ifNotExists = false;
- if (readIf("PRIMARY")) {
- read("KEY");
+ if (session.isQuirksMode() && readIf(PRIMARY)) {
+ read(KEY);
if (readIf("HASH")) {
hash = true;
}
primaryKey = true;
- if (!isToken("ON")) {
- ifNotExists = readIfNoExists();
+ if (!isToken(ON)) {
+ ifNotExists = readIfNotExists();
indexName = readIdentifierWithSchema(null);
oldSchema = getSchema();
}
} else {
- if (readIf("UNIQUE")) {
+ if (readIf(UNIQUE)) {
unique = true;
}
if (readIf("HASH")) {
hash = true;
- }
- if (readIf("SPATIAL")) {
+ } else if (!unique && readIf("SPATIAL")) {
spatial = true;
}
- if (readIf("INDEX")) {
- if (!isToken("ON")) {
- ifNotExists = readIfNoExists();
- indexName = readIdentifierWithSchema(null);
- oldSchema = getSchema();
- }
- } else {
- throw getSyntaxError();
+ read("INDEX");
+ if (!isToken(ON)) {
+ ifNotExists = readIfNotExists();
+ indexName = readIdentifierWithSchema(null);
+ oldSchema = getSchema();
}
}
- read("ON");
+ read(ON);
String tableName = readIdentifierWithSchema();
checkSchema(oldSchema);
+ String comment = readCommentIf();
+ if (!readIf(OPEN_PAREN)) {
+ // PostgreSQL compatibility
+ if (hash || spatial) {
+ throw getSyntaxError();
+ }
+ read(USING);
+ if (readIf("BTREE")) {
+ // default
+ } else if (readIf("HASH")) {
+ hash = true;
+ } else {
+ read("RTREE");
+ spatial = true;
+ }
+ read(OPEN_PAREN);
+ }
CreateIndex command = new CreateIndex(session, getSchema());
command.setIfNotExists(ifNotExists);
- command.setHash(hash);
- command.setSpatial(spatial);
command.setPrimaryKey(primaryKey);
command.setTableName(tableName);
- command.setUnique(unique);
+ command.setHash(hash);
+ command.setSpatial(spatial);
command.setIndexName(indexName);
- command.setComment(readCommentIf());
- read("(");
- command.setIndexColumns(parseIndexColumnList());
+ command.setComment(comment);
+ IndexColumn[] columns;
+ int uniqueColumnCount = 0;
+ if (spatial) {
+ columns = new IndexColumn[] { new IndexColumn(readIdentifier()) };
+ if (unique) {
+ uniqueColumnCount = 1;
+ }
+ read(CLOSE_PAREN);
+ } else {
+ columns = parseIndexColumnList();
+ if (unique) {
+ uniqueColumnCount = columns.length;
+ if (readIf("INCLUDE")) {
+ read(OPEN_PAREN);
+ IndexColumn[] columnsToInclude = parseIndexColumnList();
+ int nonUniqueCount = columnsToInclude.length;
+ columns = Arrays.copyOf(columns, uniqueColumnCount + nonUniqueCount);
+ System.arraycopy(columnsToInclude, 0, columns, uniqueColumnCount, nonUniqueCount);
+ }
+ } else if (primaryKey) {
+ uniqueColumnCount = columns.length;
+ }
+ }
+ command.setIndexColumns(columns);
+ command.setUniqueColumnCount(uniqueColumnCount);
return command;
}
}
@@ -4216,7 +6873,7 @@ private Prepared parseCreate() {
* @return true if we expect to see a TABLE clause
*/
private boolean addRoleOrRight(GrantRevoke command) {
- if (readIf("SELECT")) {
+ if (readIf(SELECT)) {
command.addRight(Right.SELECT);
return true;
} else if (readIf("DELETE")) {
@@ -4228,15 +6885,6 @@ private boolean addRoleOrRight(GrantRevoke command) {
} else if (readIf("UPDATE")) {
command.addRight(Right.UPDATE);
return true;
- } else if (readIf("ALL")) {
- command.addRight(Right.ALL);
- return true;
- } else if (readIf("ALTER")) {
- read("ANY");
- read("SCHEMA");
- command.addRight(Right.ALTER_ANY_SCHEMA);
- command.addTable(null);
- return false;
} else if (readIf("CONNECT")) {
// ignore this right
return true;
@@ -4244,7 +6892,7 @@ private boolean addRoleOrRight(GrantRevoke command) {
// ignore this right
return true;
} else {
- command.addRoleName(readUniqueIdentifier());
+ command.addRoleName(readIdentifier());
return false;
}
}
@@ -4252,217 +6900,181 @@ private boolean addRoleOrRight(GrantRevoke command) {
private GrantRevoke parseGrantRevoke(int operationType) {
GrantRevoke command = new GrantRevoke(session);
command.setOperationType(operationType);
- boolean tableClauseExpected = addRoleOrRight(command);
- while (readIf(",")) {
- addRoleOrRight(command);
- if (command.isRightMode() && command.isRoleMode()) {
- throw DbException
- .get(ErrorCode.ROLES_AND_RIGHT_CANNOT_BE_MIXED);
+ boolean tableClauseExpected;
+ if (readIf(ALL)) {
+ readIf("PRIVILEGES");
+ command.addRight(Right.ALL);
+ tableClauseExpected = true;
+ } else if (readIf("ALTER")) {
+ read(ANY);
+ read("SCHEMA");
+ command.addRight(Right.ALTER_ANY_SCHEMA);
+ command.addTable(null);
+ tableClauseExpected = false;
+ } else {
+ tableClauseExpected = addRoleOrRight(command);
+ while (readIf(COMMA)) {
+ if (addRoleOrRight(command) != tableClauseExpected) {
+ throw DbException.get(ErrorCode.ROLES_AND_RIGHT_CANNOT_BE_MIXED);
+ }
}
}
if (tableClauseExpected) {
- if (readIf("ON")) {
- do {
- Table table = readTableOrView();
- command.addTable(table);
- } while (readIf(","));
+ if (readIf(ON)) {
+ if (readIf("SCHEMA")) {
+ command.setSchema(database.getSchema(readIdentifier()));
+ } else {
+ readIf(TABLE);
+ do {
+ Table table = readTableOrView();
+ command.addTable(table);
+ } while (readIf(COMMA));
+ }
}
}
- if (operationType == CommandInterface.GRANT) {
- read("TO");
- } else {
- read("FROM");
- }
- command.setGranteeName(readUniqueIdentifier());
- return command;
- }
-
- private Select parseValues() {
- Select command = new Select(session);
- currentSelect = command;
- TableFilter filter = parseValuesTable();
- ArrayList<Expression> list = New.arrayList();
- list.add(new Wildcard(null, null));
- command.setExpressions(list);
- command.addTableFilter(filter, true);
- command.init();
+ read(operationType == CommandInterface.GRANT ? TO : FROM);
+ command.setGranteeName(readIdentifier());
return command;
}
- private TableFilter parseValuesTable() {
- Schema mainSchema = database.getSchema(Constants.SCHEMA_MAIN);
- TableFunction tf = (TableFunction) Function.getFunction(database,
- "TABLE");
- ArrayList<Column> columns = New.arrayList();
- ArrayList<ArrayList<Expression>> rows = New.arrayList();
- do {
- int i = 0;
- ArrayList<Expression> row = New.arrayList();
- boolean multiColumn = readIf("(");
- do {
- Expression expr = readExpression();
- expr = expr.optimize(session);
- int type = expr.getType();
- long prec;
- int scale, displaySize;
- Column column;
- String columnName = "C" + (i + 1);
- if (rows.size() == 0) {
- if (type == Value.UNKNOWN) {
- type = Value.STRING;
- }
- DataType dt = DataType.getDataType(type);
- prec = dt.defaultPrecision;
- scale = dt.defaultScale;
- displaySize = dt.defaultDisplaySize;
- column = new Column(columnName, type, prec, scale,
- displaySize);
- columns.add(column);
- }
- prec = expr.getPrecision();
- scale = expr.getScale();
- displaySize = expr.getDisplaySize();
- if (i >= columns.size()) {
- throw DbException
- .get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH);
- }
- Column c = columns.get(i);
- type = Value.getHigherOrder(c.getType(), type);
- prec = Math.max(c.getPrecision(), prec);
- scale = Math.max(c.getScale(), scale);
- displaySize = Math.max(c.getDisplaySize(), displaySize);
- column = new Column(columnName, type, prec, scale, displaySize);
- columns.set(i, column);
- row.add(expr);
- i++;
- } while (multiColumn && readIf(","));
- if (multiColumn) {
- read(")");
- }
- rows.add(row);
- } while (readIf(","));
- int columnCount = columns.size();
- int rowCount = rows.size();
- for (int i = 0; i < rowCount; i++) {
- if (rows.get(i).size() != columnCount) {
+ private TableValueConstructor parseValues() {
+ ArrayList<ArrayList<Expression>> rows = Utils.newSmallArrayList();
+ ArrayList<Expression> row = parseValuesRow(Utils.newSmallArrayList());
+ rows.add(row);
+ int columnCount = row.size();
+ while (readIf(COMMA)) {
+ row = parseValuesRow(new ArrayList<>(columnCount));
+ if (row.size() != columnCount) {
throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH);
}
+ rows.add(row);
}
- for (int i = 0; i < columnCount; i++) {
- Column c = columns.get(i);
- if (c.getType() == Value.UNKNOWN) {
- c = new Column(c.getName(), Value.STRING, 0, 0, 0);
- columns.set(i, c);
- }
- Expression[] array = new Expression[rowCount];
- for (int j = 0; j < rowCount; j++) {
- array[j] = rows.get(j).get(i);
- }
- ExpressionList list = new ExpressionList(array);
- tf.setParameter(i, list);
+ return new TableValueConstructor(session, rows);
+ }
+
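+ // Reads one row of a table value constructor; accepted forms include
+ // VALUES 1, 2, 3 (three single-column rows), VALUES (1, 'a'), (2, 'b')
+ // and the standard VALUES ROW (1, 'a') syntax.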
+ private ArrayList<Expression> parseValuesRow(ArrayList<Expression> row) {
+ if (readIf(ROW)) {
+ read(OPEN_PAREN);
+ } else if (!readIf(OPEN_PAREN)) {
+ row.add(readExpression());
+ return row;
}
- tf.setColumns(columns);
- tf.doneWithParameters();
- Table table = new FunctionTable(mainSchema, session, tf, tf);
- TableFilter filter = new TableFilter(session, table, null,
- rightsChecked, currentSelect);
- return filter;
+ do {
+ row.add(readExpression());
+ } while (readIfMore());
+ return row;
}
private Call parseCall() {
Call command = new Call(session);
currentPrepared = command;
- command.setExpression(readExpression());
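+ // CALL may target either a scalar expression or a table function; the token
+ // index is saved so the statement can be re-parsed as a table function when
+ // expression parsing fails with FUNCTION_NOT_FOUND.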
+ int index = tokenIndex;
+ boolean canBeFunction;
+ switch (currentTokenType) {
+ case IDENTIFIER:
+ canBeFunction = true;
+ break;
+ case TABLE:
+ read();
+ read(OPEN_PAREN);
+ command.setTableFunction(readTableFunction(ArrayTableFunction.TABLE));
+ return command;
+ default:
+ canBeFunction = false;
+ }
+ try {
+ command.setExpression(readExpression());
+ } catch (DbException e) {
+ if (canBeFunction && e.getErrorCode() == ErrorCode.FUNCTION_NOT_FOUND_1) {
+ setTokenIndex(index);
+ String schemaName = null, name = readIdentifier();
+ if (readIf(DOT)) {
+ schemaName = name;
+ name = readIdentifier();
+ if (readIf(DOT)) {
+ checkDatabaseName(schemaName);
+ schemaName = name;
+ name = readIdentifier();
+ }
+ }
+ read(OPEN_PAREN);
+ Schema schema = schemaName != null ? database.getSchema(schemaName) : null;
+ command.setTableFunction(readTableFunction(name, schema));
+ return command;
+ }
+ throw e;
+ }
return command;
}
private CreateRole parseCreateRole() {
CreateRole command = new CreateRole(session);
- command.setIfNotExists(readIfNoExists());
- command.setRoleName(readUniqueIdentifier());
+ command.setIfNotExists(readIfNotExists());
+ command.setRoleName(readIdentifier());
return command;
}
private CreateSchema parseCreateSchema() {
CreateSchema command = new CreateSchema(session);
- command.setIfNotExists(readIfNoExists());
- command.setSchemaName(readUniqueIdentifier());
- if (readIf("AUTHORIZATION")) {
- command.setAuthorization(readUniqueIdentifier());
+ command.setIfNotExists(readIfNotExists());
+ String authorization;
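+ // CREATE SCHEMA AUTHORIZATION owner creates a schema named after its owner;
+ // otherwise an explicit name with an optional AUTHORIZATION clause is read.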
+ if (readIf(AUTHORIZATION)) {
+ authorization = readIdentifier();
+ command.setSchemaName(authorization);
+ command.setAuthorization(authorization);
} else {
- command.setAuthorization(session.getUser().getName());
+ command.setSchemaName(readIdentifier());
+ if (readIf(AUTHORIZATION)) {
+ authorization = readIdentifier();
+ } else {
+ authorization = session.getUser().getName();
+ }
+ }
+ command.setAuthorization(authorization);
+ if (readIf(WITH)) {
+ command.setTableEngineParams(readTableEngineParams());
}
return command;
}
+ private ArrayList<String> readTableEngineParams() {
+ ArrayList<String> tableEngineParams = Utils.newSmallArrayList();
+ do {
+ tableEngineParams.add(readIdentifier());
+ } while (readIf(COMMA));
+ return tableEngineParams;
+ }
+
private CreateSequence parseCreateSequence() {
- boolean ifNotExists = readIfNoExists();
+ boolean ifNotExists = readIfNotExists();
String sequenceName = readIdentifierWithSchema();
CreateSequence command = new CreateSequence(session, getSchema());
command.setIfNotExists(ifNotExists);
command.setSequenceName(sequenceName);
- while (true) {
- if (readIf("START")) {
- readIf("WITH");
- command.setStartWith(readExpression());
- } else if (readIf("INCREMENT")) {
- readIf("BY");
- command.setIncrement(readExpression());
- } else if (readIf("MINVALUE")) {
- command.setMinValue(readExpression());
- } else if (readIf("NOMINVALUE")) {
- command.setMinValue(null);
- } else if (readIf("MAXVALUE")) {
- command.setMaxValue(readExpression());
- } else if (readIf("NOMAXVALUE")) {
- command.setMaxValue(null);
- } else if (readIf("CYCLE")) {
- command.setCycle(true);
- } else if (readIf("NOCYCLE")) {
- command.setCycle(false);
- } else if (readIf("NO")) {
- if (readIf("MINVALUE")) {
- command.setMinValue(null);
- } else if (readIf("MAXVALUE")) {
- command.setMaxValue(null);
- } else if (readIf("CYCLE")) {
- command.setCycle(false);
- } else if (readIf("CACHE")) {
- command.setCacheSize(ValueExpression.get(ValueLong.get(1)));
- } else {
- break;
- }
- } else if (readIf("CACHE")) {
- command.setCacheSize(readExpression());
- } else if (readIf("NOCACHE")) {
- command.setCacheSize(ValueExpression.get(ValueLong.get(1)));
- } else if (readIf("BELONGS_TO_TABLE")) {
- command.setBelongsToTable(true);
- } else {
- break;
- }
- }
+ SequenceOptions options = new SequenceOptions();
+ parseSequenceOptions(options, command, true, false);
+ command.setOptions(options);
return command;
}
- private boolean readIfNoExists() {
- if (readIf("IF")) {
- read("NOT");
- read("EXISTS");
+ private boolean readIfNotExists() {
+ if (readIf(IF)) {
+ read(NOT);
+ read(EXISTS);
return true;
}
return false;
}
private CreateConstant parseCreateConstant() {
- boolean ifNotExists = readIfNoExists();
+ boolean ifNotExists = readIfNotExists();
String constantName = readIdentifierWithSchema();
Schema schema = getSchema();
if (isKeyword(constantName)) {
throw DbException.get(ErrorCode.CONSTANT_ALREADY_EXISTS_1,
constantName);
}
- read("VALUE");
+ read(VALUE);
Expression expr = readExpression();
CreateConstant command = new CreateConstant(session, schema);
command.setConstantName(constantName);
@@ -4472,41 +7084,77 @@ private CreateConstant parseCreateConstant() {
}
private CreateAggregate parseCreateAggregate(boolean force) {
- boolean ifNotExists = readIfNoExists();
- CreateAggregate command = new CreateAggregate(session);
- command.setForce(force);
- String name = readIdentifierWithSchema();
- if (isKeyword(name) || Function.getFunction(database, name) != null ||
- getAggregateType(name) >= 0) {
- throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1,
- name);
+ boolean ifNotExists = readIfNotExists();
+ String name = readIdentifierWithSchema(), upperName;
+ if (isKeyword(name) || BuiltinFunctions.isBuiltinFunction(database, upperName = upperName(name))
+ || Aggregate.getAggregateType(upperName) != null) {
+ throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, name);
}
+ CreateAggregate command = new CreateAggregate(session, getSchema());
+ command.setForce(force);
command.setName(name);
- command.setSchema(getSchema());
command.setIfNotExists(ifNotExists);
- read("FOR");
- command.setJavaClassMethod(readUniqueIdentifier());
+ read(FOR);
+ command.setJavaClassMethod(readStringOrIdentifier());
return command;
}
- private CreateUserDataType parseCreateUserDataType() {
- boolean ifNotExists = readIfNoExists();
- CreateUserDataType command = new CreateUserDataType(session);
- command.setTypeName(readUniqueIdentifier());
- read("AS");
- Column col = parseColumnForTable("VALUE", true);
- if (readIf("CHECK")) {
- Expression expr = readExpression();
- col.addCheckConstraint(session, expr);
- }
- col.rename(null);
- command.setColumn(col);
+ private CreateDomain parseCreateDomain() {
+ boolean ifNotExists = readIfNotExists();
+ String domainName = readIdentifierWithSchema();
+ Schema schema = getSchema();
+ CreateDomain command = new CreateDomain(session, schema);
command.setIfNotExists(ifNotExists);
+ command.setTypeName(domainName);
+ readIf(AS);
+ TypeInfo dataType = readIfDataType();
+ if (dataType != null) {
+ command.setDataType(dataType);
+ } else {
+ String parentDomainName = readIdentifierWithSchema();
+ command.setParentDomain(getSchema().getDomain(parentDomainName));
+ }
+ if (readIf(DEFAULT)) {
+ command.setDefaultExpression(readExpression());
+ }
+ if (readIf(ON)) {
+ read("UPDATE");
+ command.setOnUpdateExpression(readExpression());
+ }
+ // Compatibility with 1.4.200 and older versions
+ if (readIf("SELECTIVITY")) {
+ readNonNegativeInt();
+ }
+ String comment = readCommentIf();
+ if (comment != null) {
+ command.setComment(comment);
+ }
+ for (;;) {
+ String constraintName;
+ if (readIf(CONSTRAINT)) {
+ constraintName = readIdentifier();
+ read(CHECK);
+ } else if (readIf(CHECK)) {
+ constraintName = null;
+ } else {
+ break;
+ }
+ AlterDomainAddConstraint constraint = new AlterDomainAddConstraint(session, schema, ifNotExists);
+ constraint.setConstraintName(constraintName);
+ constraint.setDomainName(domainName);
+ parseDomainConstraint = true;
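+ // while this flag is set, VALUE inside the CHECK expression refers to the
+ // value being assigned to the domain, e.g. CHECK (VALUE > 0)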
+ try {
+ constraint.setCheckExpression(readExpression());
+ } finally {
+ parseDomainConstraint = false;
+ }
+ command.addConstraintCommand(constraint);
+ }
return command;
}
private CreateTrigger parseCreateTrigger(boolean force) {
- boolean ifNotExists = readIfNoExists();
+ boolean ifNotExists = readIfNotExists();
String triggerName = readIdentifierWithSchema(null);
Schema schema = getSchema();
boolean insteadOf, isBefore;
@@ -4524,6 +7172,7 @@ private CreateTrigger parseCreateTrigger(boolean force) {
}
int typeMask = 0;
boolean onRollback = false;
+ boolean allowOr = database.getMode().getEnum() == ModeEnum.PostgreSQL;
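+ // PostgreSQL chains trigger events with OR, as in INSERT OR UPDATE,
+ // where H2 normally uses commas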
do {
if (readIf("INSERT")) {
typeMask |= Trigger.INSERT;
@@ -4531,15 +7180,15 @@ private CreateTrigger parseCreateTrigger(boolean force) {
typeMask |= Trigger.UPDATE;
} else if (readIf("DELETE")) {
typeMask |= Trigger.DELETE;
- } else if (readIf("SELECT")) {
+ } else if (readIf(SELECT)) {
typeMask |= Trigger.SELECT;
} else if (readIf("ROLLBACK")) {
onRollback = true;
} else {
throw getSyntaxError();
}
- } while (readIf(","));
- read("ON");
+ } while (readIf(COMMA) || allowOr && readIf(OR));
+ read(ON);
String tableName = readIdentifierWithSchema();
checkSchema(schema);
CreateTrigger command = new CreateTrigger(session, getSchema());
@@ -4551,30 +7200,31 @@ private CreateTrigger parseCreateTrigger(boolean force) {
command.setOnRollback(onRollback);
command.setTypeMask(typeMask);
command.setTableName(tableName);
- if (readIf("FOR")) {
+ if (readIf(FOR)) {
read("EACH");
- read("ROW");
- command.setRowBased(true);
- } else {
- command.setRowBased(false);
+ if (readIf(ROW)) {
+ command.setRowBased(true);
+ } else {
+ read("STATEMENT");
+ }
}
if (readIf("QUEUE")) {
- command.setQueueSize(readPositiveInt());
+ command.setQueueSize(readNonNegativeInt());
}
command.setNoWait(readIf("NOWAIT"));
- if (readIf("AS")) {
+ if (readIf(AS)) {
command.setTriggerSource(readString());
} else {
read("CALL");
- command.setTriggerClassName(readUniqueIdentifier());
+ command.setTriggerClassName(readStringOrIdentifier());
}
return command;
}
private CreateUser parseCreateUser() {
CreateUser command = new CreateUser(session);
- command.setIfNotExists(readIfNoExists());
- command.setUserName(readUniqueIdentifier());
+ command.setIfNotExists(readIfNotExists());
+ command.setUserName(readIdentifier());
command.setComment(readCommentIf());
if (readIf("PASSWORD")) {
command.setPassword(readExpression());
@@ -4585,8 +7235,7 @@ private CreateUser parseCreateUser() {
} else if (readIf("IDENTIFIED")) {
read("BY");
// uppercase if not quoted
- command.setPassword(ValueExpression.get(ValueString
- .get(readColumnIdentifier())));
+ command.setPassword(ValueExpression.get(ValueVarchar.get(readIdentifier())));
} else {
throw getSyntaxError();
}
@@ -4597,90 +7246,261 @@ private CreateUser parseCreateUser() {
}
private CreateFunctionAlias parseCreateFunctionAlias(boolean force) {
- boolean ifNotExists = readIfNoExists();
- String aliasName = readIdentifierWithSchema();
- if (isKeyword(aliasName) ||
- Function.getFunction(database, aliasName) != null ||
- getAggregateType(aliasName) >= 0) {
- throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1,
- aliasName);
- }
- CreateFunctionAlias command = new CreateFunctionAlias(session,
- getSchema());
+ boolean ifNotExists = readIfNotExists();
+ String aliasName;
+ if (currentTokenType != IDENTIFIER) {
+ aliasName = currentToken;
+ read();
+ schemaName = session.getCurrentSchemaName();
+ } else {
+ aliasName = readIdentifierWithSchema();
+ }
+ String upperName = upperName(aliasName);
+ if (isReservedFunctionName(upperName)) {
+ throw DbException.get(ErrorCode.FUNCTION_ALIAS_ALREADY_EXISTS_1, aliasName);
+ }
+ CreateFunctionAlias command = new CreateFunctionAlias(session, getSchema());
command.setForce(force);
command.setAliasName(aliasName);
command.setIfNotExists(ifNotExists);
command.setDeterministic(readIf("DETERMINISTIC"));
- command.setBufferResultSetToLocalTemp(!readIf("NOBUFFER"));
- if (readIf("AS")) {
+ // Compatibility with old versions of H2
+ readIf("NOBUFFER");
+ if (readIf(AS)) {
command.setSource(readString());
} else {
- read("FOR");
- command.setJavaClassMethod(readUniqueIdentifier());
+ read(FOR);
+ command.setJavaClassMethod(readStringOrIdentifier());
}
return command;
}
- private Query parseWith() {
+ private String readStringOrIdentifier() {
+ return currentTokenType != IDENTIFIER ? readString() : readIdentifier();
+ }
+
+ private boolean isReservedFunctionName(String name) {
+ int tokenType = ParserUtil.getTokenType(name, false, false);
+ if (tokenType != ParserUtil.IDENTIFIER) {
+ if (database.isAllowBuiltinAliasOverride()) {
+ switch (tokenType) {
+ case CURRENT_DATE:
+ case CURRENT_TIME:
+ case CURRENT_TIMESTAMP:
+ case DAY:
+ case HOUR:
+ case LOCALTIME:
+ case LOCALTIMESTAMP:
+ case MINUTE:
+ case MONTH:
+ case SECOND:
+ case YEAR:
+ return false;
+ }
+ }
+ return true;
+ }
+ return Aggregate.getAggregateType(name) != null
+ || BuiltinFunctions.isBuiltinFunction(database, name) && !database.isAllowBuiltinAliasOverride();
+ }
+
+ private Prepared parseWith() {
+ List<TableView> viewsCreated = new ArrayList<>();
+ try {
+ return parseWith1(viewsCreated);
+ } catch (Throwable t) {
+ CommandContainer.clearCTE(session, viewsCreated);
+ throw t;
+ }
+ }
+
+ private Prepared parseWith1(List<TableView> viewsCreated) {
readIf("RECURSIVE");
- String tempViewName = readIdentifierWithSchema();
+
+ // When this WITH statement is part of a persistent view, as in
+ // CREATE VIEW abc AS WITH my_cte, its CTEs are not temporary views;
+ // this auto-detects that condition.
+ final boolean isTemporary = !session.isParsingCreateView();
+
+ do {
+ viewsCreated.add(parseSingleCommonTableExpression(isTemporary));
+ } while (readIf(COMMA));
+
+ Prepared p;
+ // Reverse the order of the constructed CTE views: a later view may depend
+ // on an earlier one, so the cleanup list used in setCteCleanups must
+ // destroy them in reverse creation order to preserve that dependency order.
+ Collections.reverse(viewsCreated);
+
+ int start = tokenIndex;
+ if (isQueryQuick()) {
+ p = parseWithQuery();
+ } else if (readIf("INSERT")) {
+ p = parseInsert(start);
+ p.setPrepareAlways(true);
+ } else if (readIf("UPDATE")) {
+ p = parseUpdate(start);
+ p.setPrepareAlways(true);
+ } else if (readIf("MERGE")) {
+ p = parseMerge(start);
+ p.setPrepareAlways(true);
+ } else if (readIf("DELETE")) {
+ p = parseDelete(start);
+ p.setPrepareAlways(true);
+ } else if (readIf("CREATE")) {
+ if (!isToken(TABLE)) {
+ throw DbException.get(ErrorCode.SYNTAX_ERROR_1,
+ WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS);
+ }
+ p = parseCreate();
+ p.setPrepareAlways(true);
+ } else {
+ throw DbException.get(ErrorCode.SYNTAX_ERROR_1,
+ WITH_STATEMENT_SUPPORTS_LIMITED_SUB_STATEMENTS);
+ }
+
+ // Clean up temporary views starting with last to first (in case of
+ // dependencies) - but only if they are not persistent.
+ if (isTemporary) {
+ if (cteCleanups == null) {
+ cteCleanups = new ArrayList<>(viewsCreated.size());
+ }
+ cteCleanups.addAll(viewsCreated);
+ }
+ return p;
+ }
+
+ private Prepared parseWithQuery() {
+ Query query = parseQueryExpressionBodyAndEndOfQuery();
+ query.setPrepareAlways(true);
+ query.setNeverLazy(true);
+ return query;
+ }
+
+ private TableView parseSingleCommonTableExpression(boolean isTemporary) {
+ String cteViewName = readIdentifierWithSchema();
Schema schema = getSchema();
- Table recursiveTable;
- read("(");
- ArrayList<Column> columns = New.arrayList();
- String[] cols = parseColumnList();
- for (String c : cols) {
- columns.add(new Column(c, Value.STRING));
- }
- Table old = session.findLocalTempTable(tempViewName);
- if (old != null) {
- if (!(old instanceof TableView)) {
+ ArrayList<Column> columns = Utils.newSmallArrayList();
+ String[] cols = null;
+
+ // column names are now optional - if not supplied by the user, they
+ // can be inferred from the named query
+ if (readIf(OPEN_PAREN)) {
+ cols = parseColumnList();
+ for (String c : cols) {
+ // we don't really know the type of the column, so VARCHAR will
+ // have to do; UNKNOWN does not work here
+ columns.add(new Column(c, TypeInfo.TYPE_VARCHAR));
+ }
+ }
+
+ Table oldViewFound;
+ if (!isTemporary) {
+ oldViewFound = getSchema().findTableOrView(session, cteViewName);
+ } else {
+ oldViewFound = session.findLocalTempTable(cteViewName);
+ }
+ // this check for the persistent case conflicts with the check about 10 lines down
+ if (oldViewFound != null) {
+ if (!(oldViewFound instanceof TableView)) {
throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1,
- tempViewName);
+ cteViewName);
}
- TableView tv = (TableView) old;
+ TableView tv = (TableView) oldViewFound;
if (!tv.isTableExpression()) {
throw DbException.get(ErrorCode.TABLE_OR_VIEW_ALREADY_EXISTS_1,
- tempViewName);
- }
- session.removeLocalTempTable(old);
- }
- CreateTableData data = new CreateTableData();
- data.id = database.allocateObjectId();
- data.columns = columns;
- data.tableName = tempViewName;
- data.temporary = true;
- data.persistData = true;
- data.persistIndexes = false;
- data.create = true;
- data.session = session;
- recursiveTable = schema.createTable(data);
- session.addLocalTempTable(recursiveTable);
- String querySQL;
+ cteViewName);
+ }
+ if (!isTemporary) {
+ oldViewFound.lock(session, Table.EXCLUSIVE_LOCK);
+ database.removeSchemaObject(session, oldViewFound);
+
+ } else {
+ session.removeLocalTempTable(oldViewFound);
+ }
+ }
+ /*
+ * This table is created as a workaround, because recursive table
+ * expressions need to reference something that looks like themselves
+ * to work (it is removed after creation in this method). Only create
+ * the table data and table if we don't have a working CTE already.
+ */
+ Table recursiveTable = TableView.createShadowTableForRecursiveTableExpression(
+ isTemporary, session, cteViewName, schema, columns, database);
+ List<Column> columnTemplateList;
+ String[] querySQLOutput = new String[1];
try {
- read("AS");
- read("(");
- Query withQuery = parseSelect();
- read(")");
- withQuery.prepare();
- querySQL = StringUtils.fromCacheOrNew(withQuery.getPlanSQL());
+ read(AS);
+ read(OPEN_PAREN);
+ Query withQuery = parseQuery();
+ if (!isTemporary) {
+ withQuery.session = session;
+ }
+ read(CLOSE_PAREN);
+ columnTemplateList = TableView.createQueryColumnTemplateList(cols, withQuery, querySQLOutput);
+
} finally {
- session.removeLocalTempTable(recursiveTable);
+ TableView.destroyShadowTableForRecursiveExpression(isTemporary, session, recursiveTable);
}
+
+ return createCTEView(cteViewName,
+ querySQLOutput[0], columnTemplateList,
+ true/* allowRecursiveQueryDetection */,
+ true/* add to session */,
+ isTemporary);
+ }
+
+ private TableView createCTEView(String cteViewName, String querySQL,
+ List columnTemplateList, boolean allowRecursiveQueryDetection,
+ boolean addViewToSession, boolean isTemporary) {
+ Schema schema = getSchemaWithDefault();
int id = database.allocateObjectId();
- TableView view = new TableView(schema, id, tempViewName, querySQL,
- null, cols, session, true);
+ Column[] columnTemplateArray = columnTemplateList.toArray(new Column[0]);
+
+ // No easy way to determine whether this is a recursive query up front, so we
+ // just compile it twice: first with recursive detection allowed and then,
+ // if no recursive term was actually seen, once more with it disabled.
+ TableView view;
+ synchronized (session) {
+ view = new TableView(schema, id, cteViewName, querySQL,
+ parameters, columnTemplateArray, session,
+ allowRecursiveQueryDetection, false /* literalsChecked */, true /* isTableExpression */,
+ isTemporary);
+ if (!view.isRecursiveQueryDetected() && allowRecursiveQueryDetection) {
+ if (!isTemporary) {
+ database.addSchemaObject(session, view);
+ view.lock(session, Table.EXCLUSIVE_LOCK);
+ database.removeSchemaObject(session, view);
+ } else {
+ session.removeLocalTempTable(view);
+ }
+ view = new TableView(schema, id, cteViewName, querySQL, parameters,
+ columnTemplateArray, session,
+ false/* assume recursive */, false /* literalsChecked */, true /* isTableExpression */,
+ isTemporary);
+ }
+ // both removeSchemaObject and removeLocalTempTable hold meta locks
+ database.unlockMeta(session);
+ }
view.setTableExpression(true);
- view.setTemporary(true);
- session.addLocalTempTable(view);
- view.setOnCommitDrop(true);
- Query q = parseSelect();
- q.setPrepareAlways(true);
- return q;
+ view.setTemporary(isTemporary);
+ view.setHidden(true);
+ view.setOnCommitDrop(false);
+ if (addViewToSession) {
+ if (!isTemporary) {
+ database.addSchemaObject(session, view);
+ view.unlock(session);
+ database.unlockMeta(session);
+ } else {
+ session.addLocalTempTable(view);
+ }
+ }
+ return view;
}
private CreateView parseCreateView(boolean force, boolean orReplace) {
- boolean ifNotExists = readIfNoExists();
+ boolean ifNotExists = readIfNotExists();
+ boolean isTableExpression = readIf("TABLE_EXPRESSION");
String viewName = readIdentifierWithSchema();
CreateView command = new CreateView(session, getSchema());
this.createView = command;
@@ -4689,21 +7509,27 @@ private CreateView parseCreateView(boolean force, boolean orReplace) {
command.setComment(readCommentIf());
command.setOrReplace(orReplace);
command.setForce(force);
- if (readIf("(")) {
+ command.setTableExpression(isTableExpression);
+ if (readIf(OPEN_PAREN)) {
String[] cols = parseColumnList();
command.setColumnNames(cols);
}
- String select = StringUtils.fromCacheOrNew(sqlCommand
- .substring(parseIndex));
- read("AS");
+ read(AS);
+ String select = StringUtils.cache(sqlCommand.substring(token.start()));
try {
- Query query = parseSelect();
- query.prepare();
+ Query query;
+ session.setParsingCreateView(true);
+ try {
+ query = parseQuery();
+ query.prepare();
+ } finally {
+ session.setParsingCreateView(false);
+ }
command.setSelect(query);
} catch (DbException e) {
if (force) {
command.setSelectSQL(select);
- while (currentTokenType != END) {
+ while (currentTokenType != END_OF_INPUT) {
read();
}
} else {
@@ -4726,9 +7552,9 @@ private TransactionCommand parseCheckpoint() {
}
private Prepared parseAlter() {
- if (readIf("TABLE")) {
+ if (readIf(TABLE)) {
return parseAlterTable();
- } else if (readIf("USER")) {
+ } else if (readIf(USER)) {
return parseAlterUser();
} else if (readIf("INDEX")) {
return parseAlterIndex();
@@ -4738,6 +7564,8 @@ private Prepared parseAlter() {
return parseAlterSequence();
} else if (readIf("VIEW")) {
return parseAlterView();
+ } else if (readIf("DOMAIN")) {
+ return parseAlterDomain();
}
throw getSyntaxError();
}
@@ -4749,94 +7577,274 @@ private void checkSchema(Schema old) {
}
private AlterIndexRename parseAlterIndex() {
+ boolean ifExists = readIfExists(false);
String indexName = readIdentifierWithSchema();
Schema old = getSchema();
AlterIndexRename command = new AlterIndexRename(session);
- command.setOldIndex(getSchema().getIndex(indexName));
+ command.setOldSchema(old);
+ command.setOldName(indexName);
+ command.setIfExists(ifExists);
read("RENAME");
- read("TO");
+ read(TO);
String newName = readIdentifierWithSchema(old.getName());
checkSchema(old);
command.setNewName(newName);
return command;
}
- private AlterView parseAlterView() {
- AlterView command = new AlterView(session);
+ private DefineCommand parseAlterDomain() {
+ boolean ifDomainExists = readIfExists(false);
+ String domainName = readIdentifierWithSchema();
+ Schema schema = getSchema();
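+ // Dispatches on the accepted forms, e.g. (illustrative names):
+ //   ALTER DOMAIN d ADD CONSTRAINT c CHECK (VALUE > 0)
+ //   ALTER DOMAIN d DROP { CONSTRAINT c | DEFAULT | ON UPDATE }
+ //   ALTER DOMAIN d RENAME TO d2 / RENAME CONSTRAINT c TO c2
+ //   ALTER DOMAIN d SET { DEFAULT 0 | ON UPDATE 0 }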
+ if (readIf("ADD")) {
+ boolean ifNotExists = false;
+ String constraintName = null;
+ String comment = null;
+ if (readIf(CONSTRAINT)) {
+ ifNotExists = readIfNotExists();
+ constraintName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
+ comment = readCommentIf();
+ }
+ read(CHECK);
+ AlterDomainAddConstraint command = new AlterDomainAddConstraint(session, schema, ifNotExists);
+ command.setDomainName(domainName);
+ command.setConstraintName(constraintName);
+ parseDomainConstraint = true;
+ try {
+ command.setCheckExpression(readExpression());
+ } finally {
+ parseDomainConstraint = false;
+ }
+ command.setIfDomainExists(ifDomainExists);
+ command.setComment(comment);
+ if (readIf("NOCHECK")) {
+ command.setCheckExisting(false);
+ } else {
+ readIf(CHECK);
+ command.setCheckExisting(true);
+ }
+ return command;
+ } else if (readIf("DROP")) {
+ if (readIf(CONSTRAINT)) {
+ boolean ifConstraintExists = readIfExists(false);
+ String constraintName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
+ AlterDomainDropConstraint command = new AlterDomainDropConstraint(session, getSchema(),
+ ifConstraintExists);
+ command.setConstraintName(constraintName);
+ command.setDomainName(domainName);
+ command.setIfDomainExists(ifDomainExists);
+ return command;
+ } else if (readIf(DEFAULT)) {
+ AlterDomainExpressions command = new AlterDomainExpressions(session, schema,
+ CommandInterface.ALTER_DOMAIN_DEFAULT);
+ command.setDomainName(domainName);
+ command.setIfDomainExists(ifDomainExists);
+ command.setExpression(null);
+ return command;
+ } else if (readIf(ON)) {
+ read("UPDATE");
+ AlterDomainExpressions command = new AlterDomainExpressions(session, schema,
+ CommandInterface.ALTER_DOMAIN_ON_UPDATE);
+ command.setDomainName(domainName);
+ command.setIfDomainExists(ifDomainExists);
+ command.setExpression(null);
+ return command;
+ }
+ } else if (readIf("RENAME")) {
+ if (readIf(CONSTRAINT)) {
+ String constraintName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
+ read(TO);
+ AlterDomainRenameConstraint command = new AlterDomainRenameConstraint(session, schema);
+ command.setDomainName(domainName);
+ command.setIfDomainExists(ifDomainExists);
+ command.setConstraintName(constraintName);
+ command.setNewConstraintName(readIdentifier());
+ return command;
+ }
+ read(TO);
+ String newName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
+ AlterDomainRename command = new AlterDomainRename(session, getSchema());
+ command.setDomainName(domainName);
+ command.setIfDomainExists(ifDomainExists);
+ command.setNewDomainName(newName);
+ return command;
+ } else {
+ read(SET);
+ if (readIf(DEFAULT)) {
+ AlterDomainExpressions command = new AlterDomainExpressions(session, schema,
+ CommandInterface.ALTER_DOMAIN_DEFAULT);
+ command.setDomainName(domainName);
+ command.setIfDomainExists(ifDomainExists);
+ command.setExpression(readExpression());
+ return command;
+ } else if (readIf(ON)) {
+ read("UPDATE");
+ AlterDomainExpressions command = new AlterDomainExpressions(session, schema,
+ CommandInterface.ALTER_DOMAIN_ON_UPDATE);
+ command.setDomainName(domainName);
+ command.setIfDomainExists(ifDomainExists);
+ command.setExpression(readExpression());
+ return command;
+ }
+ }
+ throw getSyntaxError();
+ }
+
+ private DefineCommand parseAlterView() {
+ boolean ifExists = readIfExists(false);
String viewName = readIdentifierWithSchema();
- Table tableView = getSchema().findTableOrView(session, viewName);
- if (!(tableView instanceof TableView)) {
+ Schema schema = getSchema();
+ Table tableView = schema.findTableOrView(session, viewName);
+ if (!(tableView instanceof TableView) && !ifExists) {
throw DbException.get(ErrorCode.VIEW_NOT_FOUND_1, viewName);
}
- TableView view = (TableView) tableView;
- command.setView(view);
- read("RECOMPILE");
- return command;
+ if (readIf("RENAME")) {
+ read(TO);
+ String newName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
+ AlterTableRename command = new AlterTableRename(session, getSchema());
+ command.setTableName(viewName);
+ command.setNewTableName(newName);
+ command.setIfTableExists(ifExists);
+ return command;
+ } else {
+ read("RECOMPILE");
+ TableView view = (TableView) tableView;
+ AlterView command = new AlterView(session);
+ command.setIfExists(ifExists);
+ command.setView(view);
+ return command;
+ }
}
- private AlterSchemaRename parseAlterSchema() {
+ private Prepared parseAlterSchema() {
+ boolean ifExists = readIfExists(false);
String schemaName = readIdentifierWithSchema();
Schema old = getSchema();
- AlterSchemaRename command = new AlterSchemaRename(session);
- command.setOldSchema(getSchema(schemaName));
read("RENAME");
- read("TO");
+ read(TO);
String newName = readIdentifierWithSchema(old.getName());
+ Schema schema = findSchema(schemaName);
+ if (schema == null) {
+ if (ifExists) {
+ return new NoOperation(session);
+ }
+ throw DbException.get(ErrorCode.SCHEMA_NOT_FOUND_1, schemaName);
+ }
+ AlterSchemaRename command = new AlterSchemaRename(session);
+ command.setOldSchema(schema);
checkSchema(old);
command.setNewName(newName);
return command;
}
private AlterSequence parseAlterSequence() {
+ boolean ifExists = readIfExists(false);
String sequenceName = readIdentifierWithSchema();
- Sequence sequence = getSchema().getSequence(sequenceName);
- AlterSequence command = new AlterSequence(session, sequence.getSchema());
- command.setSequence(sequence);
- while (true) {
- if (readIf("RESTART")) {
- read("WITH");
- command.setStartWith(readExpression());
- } else if (readIf("INCREMENT")) {
- read("BY");
- command.setIncrement(readExpression());
- } else if (readIf("MINVALUE")) {
- command.setMinValue(readExpression());
- } else if (readIf("NOMINVALUE")) {
- command.setMinValue(null);
- } else if (readIf("MAXVALUE")) {
- command.setMaxValue(readExpression());
- } else if (readIf("NOMAXVALUE")) {
- command.setMaxValue(null);
- } else if (readIf("CYCLE")) {
- command.setCycle(true);
- } else if (readIf("NOCYCLE")) {
- command.setCycle(false);
- } else if (readIf("NO")) {
- if (readIf("MINVALUE")) {
- command.setMinValue(null);
- } else if (readIf("MAXVALUE")) {
- command.setMaxValue(null);
- } else if (readIf("CYCLE")) {
- command.setCycle(false);
- } else if (readIf("CACHE")) {
- command.setCacheSize(ValueExpression.get(ValueLong.get(1)));
+ AlterSequence command = new AlterSequence(session, getSchema());
+ command.setSequenceName(sequenceName);
+ command.setIfExists(ifExists);
+ SequenceOptions options = new SequenceOptions();
+ parseSequenceOptions(options, null, false, false);
+ command.setOptions(options);
+ return command;
+ }
+
+ private boolean parseSequenceOptions(SequenceOptions options, CreateSequence command, boolean allowDataType,
+ boolean forAlterColumn) {
+ boolean result = false;
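+ // Loops over options such as START WITH 1, RESTART [WITH n], INCREMENT BY 1,
+ // MINVALUE/MAXVALUE, [NO] CYCLE and CACHE n (illustrative values; see the helpers below).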
+ for (;;) {
+ if (allowDataType && readIf(AS)) {
+ TypeInfo dataType = parseDataType();
+ if (!DataType.isNumericType(dataType.getValueType())) {
+ throw DbException.getUnsupportedException(dataType
+ .getSQL(new StringBuilder("CREATE SEQUENCE AS "), HasSQL.TRACE_SQL_FLAGS).toString());
+ }
+ options.setDataType(dataType);
+ } else if (readIf("START")) {
+ read(WITH);
+ options.setStartValue(readExpression());
+ } else if (readIf("RESTART")) {
+ options.setRestartValue(readIf(WITH) ? readExpression() : ValueExpression.DEFAULT);
+ } else if (command != null && parseCreateSequenceOption(command)) {
+ //
+ } else if (forAlterColumn) {
+ int index = tokenIndex;
+ if (readIf(SET)) {
+ if (!parseBasicSequenceOption(options)) {
+ setTokenIndex(index);
+ break;
+ }
} else {
break;
}
+ } else if (!parseBasicSequenceOption(options)) {
+ break;
+ }
+ result = true;
+ }
+ return result;
+ }
+
+ private boolean parseCreateSequenceOption(CreateSequence command) {
+ if (readIf("BELONGS_TO_TABLE")) {
+ command.setBelongsToTable(true);
+ } else if (readIf(ORDER)) {
+ // Oracle compatibility
+ } else {
+ return false;
+ }
+ return true;
+ }
+
+ private boolean parseBasicSequenceOption(SequenceOptions options) {
+ if (readIf("INCREMENT")) {
+ readIf("BY");
+ options.setIncrement(readExpression());
+ } else if (readIf("MINVALUE")) {
+ options.setMinValue(readExpression());
+ } else if (readIf("MAXVALUE")) {
+ options.setMaxValue(readExpression());
+ } else if (readIf("CYCLE")) {
+ options.setCycle(Sequence.Cycle.CYCLE);
+ } else if (readIf("NO")) {
+ if (readIf("MINVALUE")) {
+ options.setMinValue(ValueExpression.NULL);
+ } else if (readIf("MAXVALUE")) {
+ options.setMaxValue(ValueExpression.NULL);
+ } else if (readIf("CYCLE")) {
+ options.setCycle(Sequence.Cycle.NO_CYCLE);
} else if (readIf("CACHE")) {
- command.setCacheSize(readExpression());
- } else if (readIf("NOCACHE")) {
- command.setCacheSize(ValueExpression.get(ValueLong.get(1)));
+ options.setCacheSize(ValueExpression.get(ValueBigint.get(1)));
} else {
- break;
+ throw getSyntaxError();
}
+ } else if (readIf("EXHAUSTED")) {
+ options.setCycle(Sequence.Cycle.EXHAUSTED);
+ } else if (readIf("CACHE")) {
+ options.setCacheSize(readExpression());
+ // Various compatibility options
+ } else if (readIf("NOMINVALUE")) {
+ options.setMinValue(ValueExpression.NULL);
+ } else if (readIf("NOMAXVALUE")) {
+ options.setMaxValue(ValueExpression.NULL);
+ } else if (readIf("NOCYCLE")) {
+ options.setCycle(Sequence.Cycle.NO_CYCLE);
+ } else if (readIf("NOCACHE")) {
+ options.setCacheSize(ValueExpression.get(ValueBigint.get(1)));
+ } else {
+ return false;
}
- return command;
+ return true;
}
private AlterUser parseAlterUser() {
- String userName = readUniqueIdentifier();
- if (readIf("SET")) {
+ String userName = readIdentifier();
+ if (readIf(SET)) {
AlterUser command = new AlterUser(session);
command.setType(CommandInterface.ALTER_USER_SET_PASSWORD);
command.setUser(database.getUser(userName));
@@ -4851,21 +7859,20 @@ private AlterUser parseAlterUser() {
}
return command;
} else if (readIf("RENAME")) {
- read("TO");
+ read(TO);
AlterUser command = new AlterUser(session);
command.setType(CommandInterface.ALTER_USER_RENAME);
command.setUser(database.getUser(userName));
- String newName = readUniqueIdentifier();
- command.setNewName(newName);
+ command.setNewName(readIdentifier());
return command;
} else if (readIf("ADMIN")) {
AlterUser command = new AlterUser(session);
command.setType(CommandInterface.ALTER_USER_ADMIN);
User user = database.getUser(userName);
command.setUser(user);
- if (readIf("TRUE")) {
+ if (readIf(TRUE)) {
command.setAdmin(true);
- } else if (readIf("FALSE")) {
+ } else if (readIf(FALSE)) {
command.setAdmin(false);
} else {
throw getSyntaxError();
@@ -4876,30 +7883,22 @@ private AlterUser parseAlterUser() {
}
private void readIfEqualOrTo() {
- if (!readIf("=")) {
- readIf("TO");
+ if (!readIf(EQUAL)) {
+ readIf(TO);
}
}
private Prepared parseSet() {
- if (readIf("@")) {
+ if (readIf(AT)) {
Set command = new Set(session, SetTypes.VARIABLE);
- command.setString(readAliasIdentifier());
+ command.setString(readIdentifier());
readIfEqualOrTo();
command.setExpression(readExpression());
return command;
} else if (readIf("AUTOCOMMIT")) {
readIfEqualOrTo();
- boolean value = readBooleanSetting();
- int setting = value ? CommandInterface.SET_AUTOCOMMIT_TRUE
- : CommandInterface.SET_AUTOCOMMIT_FALSE;
- return new TransactionCommand(session, setting);
- } else if (readIf("MVCC")) {
- readIfEqualOrTo();
- boolean value = readBooleanSetting();
- Set command = new Set(session, SetTypes.MVCC);
- command.setInt(value ? 1 : 0);
- return command;
+ return new TransactionCommand(session, readBooleanSetting() ? CommandInterface.SET_AUTOCOMMIT_TRUE
+ : CommandInterface.SET_AUTOCOMMIT_FALSE);
} else if (readIf("EXCLUSIVE")) {
readIfEqualOrTo();
Set command = new Set(session, SetTypes.EXCLUSIVE);
@@ -4907,9 +7906,8 @@ private Prepared parseSet() {
return command;
} else if (readIf("IGNORECASE")) {
readIfEqualOrTo();
- boolean value = readBooleanSetting();
Set command = new Set(session, SetTypes.IGNORECASE);
- command.setInt(value ? 1 : 0);
+ command.setInt(readBooleanSetting() ? 1 : 0);
return command;
} else if (readIf("PASSWORD")) {
readIfEqualOrTo();
@@ -4930,16 +7928,7 @@ private Prepared parseSet() {
} else if (readIf("MODE")) {
readIfEqualOrTo();
Set command = new Set(session, SetTypes.MODE);
- command.setString(readAliasIdentifier());
- return command;
- } else if (readIf("COMPRESS_LOB")) {
- readIfEqualOrTo();
- Set command = new Set(session, SetTypes.COMPRESS_LOB);
- if (currentTokenType == VALUE) {
- command.setString(readString());
- } else {
- command.setString(readUniqueIdentifier());
- }
+ command.setString(readIdentifier());
return command;
} else if (readIf("DATABASE")) {
readIfEqualOrTo();
@@ -4948,9 +7937,6 @@ private Prepared parseSet() {
} else if (readIf("COLLATION")) {
readIfEqualOrTo();
return parseSetCollation();
- } else if (readIf("BINARY_COLLATION")) {
- readIfEqualOrTo();
- return parseSetBinaryCollation();
} else if (readIf("CLUSTER")) {
readIfEqualOrTo();
Set command = new Set(session, SetTypes.CLUSTER);
@@ -4964,156 +7950,174 @@ private Prepared parseSet() {
} else if (readIf("ALLOW_LITERALS")) {
readIfEqualOrTo();
Set command = new Set(session, SetTypes.ALLOW_LITERALS);
- if (readIf("NONE")) {
- command.setInt(Constants.ALLOW_LITERALS_NONE);
- } else if (readIf("ALL")) {
- command.setInt(Constants.ALLOW_LITERALS_ALL);
+ int v;
+ if (readIf(ALL)) {
+ v = Constants.ALLOW_LITERALS_ALL;
+ } else if (readIf("NONE")) {
+ v = Constants.ALLOW_LITERALS_NONE;
} else if (readIf("NUMBERS")) {
- command.setInt(Constants.ALLOW_LITERALS_NUMBERS);
+ v = Constants.ALLOW_LITERALS_NUMBERS;
} else {
- command.setInt(readPositiveInt());
+ v = readNonNegativeInt();
}
+ command.setInt(v);
return command;
} else if (readIf("DEFAULT_TABLE_TYPE")) {
readIfEqualOrTo();
Set command = new Set(session, SetTypes.DEFAULT_TABLE_TYPE);
+ int v;
if (readIf("MEMORY")) {
- command.setInt(Table.TYPE_MEMORY);
+ v = Table.TYPE_MEMORY;
} else if (readIf("CACHED")) {
- command.setInt(Table.TYPE_CACHED);
+ v = Table.TYPE_CACHED;
} else {
- command.setInt(readPositiveInt());
+ v = readNonNegativeInt();
}
+ command.setInt(v);
return command;
- } else if (readIf("CREATE")) {
- readIfEqualOrTo();
- // Derby compatibility (CREATE=TRUE in the database URL)
- read();
- return new NoOperation(session);
- } else if (readIf("HSQLDB.DEFAULT_TABLE_TYPE")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("PAGE_STORE")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("CACHE_TYPE")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("FILE_LOCK")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("DB_CLOSE_ON_EXIT")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("AUTO_SERVER")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("AUTO_SERVER_PORT")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("AUTO_RECONNECT")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("ASSERT")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("ACCESS_MODE_DATA")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("OPEN_NEW")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("JMX")) {
- readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("PAGE_SIZE")) {
+ } else if (readIf("SCHEMA")) {
readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("RECOVER")) {
+ Set command = new Set(session, SetTypes.SCHEMA);
+ command.setExpression(readExpressionOrIdentifier());
+ return command;
+ } else if (readIf("CATALOG")) {
readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("NAMES")) {
- // Quercus PHP MySQL driver compatibility
+ Set command = new Set(session, SetTypes.CATALOG);
+ command.setExpression(readExpressionOrIdentifier());
+ return command;
+ } else if (readIf(SetTypes.getTypeName(SetTypes.SCHEMA_SEARCH_PATH))) {
readIfEqualOrTo();
- read();
- return new NoOperation(session);
- } else if (readIf("SCHEMA")) {
+ Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH);
+ ArrayList<String> list = Utils.newSmallArrayList();
+ do {
+ list.add(readIdentifier());
+ } while (readIf(COMMA));
+ command.setStringArray(list.toArray(new String[0]));
+ return command;
+ } else if (readIf("JAVA_OBJECT_SERIALIZER")) {
readIfEqualOrTo();
- Set command = new Set(session, SetTypes.SCHEMA);
- command.setString(readAliasIdentifier());
+ Set command = new Set(session, SetTypes.JAVA_OBJECT_SERIALIZER);
+ command.setString(readString());
return command;
- } else if (readIf("DATESTYLE")) {
- // PostgreSQL compatibility
+ } else if (readIf("IGNORE_CATALOGS")) {
readIfEqualOrTo();
- if (!readIf("ISO")) {
- String s = readString();
- if (!equalsToken(s, "ISO")) {
- throw getSyntaxError();
- }
+ Set command = new Set(session, SetTypes.IGNORE_CATALOGS);
+ command.setInt(readBooleanSetting() ? 1 : 0);
+ return command;
+ } else if (readIf("SESSION")) {
+ read("CHARACTERISTICS");
+ read(AS);
+ read("TRANSACTION");
+ return parseSetTransactionMode();
+ } else if (readIf("TRANSACTION")) {
+ // TODO should affect only the current transaction
+ return parseSetTransactionMode();
+ } else if (readIf("TIME")) {
+ read("ZONE");
+ Set command = new Set(session, SetTypes.TIME_ZONE);
+ if (!readIf("LOCAL")) {
+ command.setExpression(readExpression());
}
- return new NoOperation(session);
- } else if (readIf("SEARCH_PATH") ||
- readIf(SetTypes.getTypeName(SetTypes.SCHEMA_SEARCH_PATH))) {
+ return command;
+ } else if (readIf("NON_KEYWORDS")) {
readIfEqualOrTo();
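+ // e.g. SET NON_KEYWORDS KEY, VALUE - lets the listed keywords be parsed as identifiers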
- Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH);
- ArrayList<String> list = New.arrayList();
- list.add(readAliasIdentifier());
- while (readIf(",")) {
- list.add(readAliasIdentifier());
- }
- String[] schemaNames = new String[list.size()];
- list.toArray(schemaNames);
- command.setStringArray(schemaNames);
+ Set command = new Set(session, SetTypes.NON_KEYWORDS);
+ ArrayList<String> list = Utils.newSmallArrayList();
+ if (currentTokenType != END_OF_INPUT && currentTokenType != SEMICOLON) {
+ do {
+ if (currentTokenType < IDENTIFIER || currentTokenType > LAST_KEYWORD) {
+ throw getSyntaxError();
+ }
+ list.add(StringUtils.toUpperEnglish(currentToken));
+ read();
+ } while (readIf(COMMA));
+ }
+ command.setStringArray(list.toArray(new String[0]));
return command;
- } else if (readIf("JAVA_OBJECT_SERIALIZER")) {
+ } else if (readIf("DEFAULT_NULL_ORDERING")) {
readIfEqualOrTo();
- return parseSetJavaObjectSerializer();
+ Set command = new Set(session, SetTypes.DEFAULT_NULL_ORDERING);
+ command.setString(readIdentifier());
+ return command;
+ } else if (readIf("LOG")) {
+ throw DbException.getUnsupportedException("LOG");
} else {
- if (isToken("LOGSIZE")) {
- // HSQLDB compatibility
- currentToken = SetTypes.getTypeName(SetTypes.MAX_LOG_SIZE);
+ String upperName = upperName(currentToken);
+ if (ConnectionInfo.isIgnoredByParser(upperName)) {
+ read();
+ readIfEqualOrTo();
+ read();
+ return new NoOperation(session);
}
- if (isToken("FOREIGN_KEY_CHECKS")) {
- // MySQL compatibility
- currentToken = SetTypes
- .getTypeName(SetTypes.REFERENTIAL_INTEGRITY);
+ int type = SetTypes.getType(upperName);
+ if (type >= 0) {
+ read();
+ readIfEqualOrTo();
+ Set command = new Set(session, type);
+ command.setExpression(readExpression());
+ return command;
}
- int type = SetTypes.getType(currentToken);
- if (type < 0) {
- throw getSyntaxError();
+ ModeEnum modeEnum = database.getMode().getEnum();
+ if (modeEnum != ModeEnum.REGULAR) {
+ Prepared command = readSetCompatibility(modeEnum);
+ if (command != null) {
+ return command;
+ }
}
- read();
- readIfEqualOrTo();
- Set command = new Set(session, type);
- command.setExpression(readExpression());
- return command;
+ if (session.isQuirksMode()) {
+ switch (upperName) {
+ case "BINARY_COLLATION":
+ case "UUID_COLLATION":
+ read();
+ readIfEqualOrTo();
+ readIdentifier();
+ return new NoOperation(session);
+ }
+ }
+ throw getSyntaxError();
+ }
+ }
+
+ private Prepared parseSetTransactionMode() {
+ IsolationLevel isolationLevel;
+ read("ISOLATION");
+ read("LEVEL");
+ if (readIf("READ")) {
+ if (readIf("UNCOMMITTED")) {
+ isolationLevel = IsolationLevel.READ_UNCOMMITTED;
+ } else {
+ read("COMMITTED");
+ isolationLevel = IsolationLevel.READ_COMMITTED;
+ }
+ } else if (readIf("REPEATABLE")) {
+ read("READ");
+ isolationLevel = IsolationLevel.REPEATABLE_READ;
+ } else if (readIf("SNAPSHOT")) {
+ isolationLevel = IsolationLevel.SNAPSHOT;
+ } else {
+ read("SERIALIZABLE");
+ isolationLevel = IsolationLevel.SERIALIZABLE;
+ }
+ return new SetSessionCharacteristics(session, isolationLevel);
+ }
+
+ private Expression readExpressionOrIdentifier() {
+ if (isIdentifier()) {
+ return ValueExpression.get(ValueVarchar.get(readIdentifier()));
}
+ return readExpression();
}
private Prepared parseUse() {
readIfEqualOrTo();
Set command = new Set(session, SetTypes.SCHEMA);
- command.setString(readAliasIdentifier());
+ command.setExpression(ValueExpression.get(ValueVarchar.get(readIdentifier())));
return command;
}
private Set parseSetCollation() {
Set command = new Set(session, SetTypes.COLLATION);
- String name = readAliasIdentifier();
+ String name = readIdentifier();
command.setString(name);
if (equalsToken(name, CompareMode.OFF)) {
return command;
@@ -5123,7 +8127,7 @@ private Set parseSetCollation() {
throw DbException.getInvalidValueException("collation", name);
}
if (readIf("STRENGTH")) {
- if (readIf("PRIMARY")) {
+ if (readIf(PRIMARY)) {
command.setInt(Collator.PRIMARY);
} else if (readIf("SECONDARY")) {
command.setInt(Collator.SECONDARY);
@@ -5138,33 +8142,99 @@ private Set parseSetCollation() {
return command;
}
- private Set parseSetBinaryCollation() {
- Set command = new Set(session, SetTypes.BINARY_COLLATION);
- String name = readAliasIdentifier();
- command.setString(name);
- if (equalsToken(name, CompareMode.UNSIGNED) ||
- equalsToken(name, CompareMode.SIGNED)) {
- return command;
+ private Prepared readSetCompatibility(ModeEnum modeEnum) {
+ switch (modeEnum) {
+ case Derby:
+ if (readIf("CREATE")) {
+ readIfEqualOrTo();
+ // (CREATE=TRUE in the database URL)
+ read();
+ return new NoOperation(session);
+ }
+ break;
+ case HSQLDB:
+ if (readIf("LOGSIZE")) {
+ readIfEqualOrTo();
+ Set command = new Set(session, SetTypes.MAX_LOG_SIZE);
+ command.setExpression(readExpression());
+ return command;
+ }
+ break;
+ case MySQL:
+ if (readIf("FOREIGN_KEY_CHECKS")) {
+ readIfEqualOrTo();
+ Set command = new Set(session, SetTypes.REFERENTIAL_INTEGRITY);
+ command.setExpression(readExpression());
+ return command;
+ } else if (readIf("NAMES")) {
+ // Quercus PHP MySQL driver compatibility
+ readIfEqualOrTo();
+ read();
+ return new NoOperation(session);
+ }
+ break;
+ case PostgreSQL:
+ if (readIf("STATEMENT_TIMEOUT")) {
+ readIfEqualOrTo();
+ Set command = new Set(session, SetTypes.QUERY_TIMEOUT);
+ command.setInt(readNonNegativeInt());
+ return command;
+ } else if (readIf("CLIENT_ENCODING") || readIf("CLIENT_MIN_MESSAGES") || readIf("JOIN_COLLAPSE_LIMIT")) {
+ readIfEqualOrTo();
+ read();
+ return new NoOperation(session);
+ } else if (readIf("DATESTYLE")) {
+ readIfEqualOrTo();
+ if (!readIf("ISO")) {
+ String s = readString();
+ if (!equalsToken(s, "ISO")) {
+ throw getSyntaxError();
+ }
+ }
+ return new NoOperation(session);
+ } else if (readIf("SEARCH_PATH")) {
+ readIfEqualOrTo();
+ Set command = new Set(session, SetTypes.SCHEMA_SEARCH_PATH);
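+ // e.g. SET SEARCH_PATH TO "$user", public (PostgreSQL form; "$user" is skipped below)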
+ ArrayList<String> list = Utils.newSmallArrayList();
+ String pgCatalog = database.sysIdentifier("PG_CATALOG");
+ boolean hasPgCatalog = false;
+ do {
+ // some PG clients send a single-quoted alias
+ String s = currentTokenType == LITERAL ? readString() : readIdentifier();
+ if ("$user".equals(s)) {
+ continue;
+ }
+ if (pgCatalog.equals(s)) {
+ hasPgCatalog = true;
+ }
+ list.add(s);
+ } while (readIf(COMMA));
+ // If "pg_catalog" is not in the path then it will be searched before
+ // searching any of the path items. See
+ // https://www.postgresql.org/docs/8.2/runtime-config-client.html
+ if (!hasPgCatalog) {
+ if (database.findSchema(pgCatalog) != null) {
+ list.add(0, pgCatalog);
+ }
+ }
+ command.setStringArray(list.toArray(new String[0]));
+ return command;
+ }
+ break;
+ default:
}
- throw DbException.getInvalidValueException("BINARY_COLLATION", name);
- }
-
- private Set parseSetJavaObjectSerializer() {
- Set command = new Set(session, SetTypes.JAVA_OBJECT_SERIALIZER);
- String name = readString();
- command.setString(name);
- return command;
+ return null;
}
private RunScriptCommand parseRunScript() {
RunScriptCommand command = new RunScriptCommand(session);
- read("FROM");
+ read(FROM);
command.setFileNameExpr(readExpression());
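+ // e.g. RUNSCRIPT FROM 'backup.sql' COMPRESSION DEFLATE CIPHER AES PASSWORD 's3cret'
+ // (illustrative; the file name and password are assumptions)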
if (readIf("COMPRESSION")) {
- command.setCompressionAlgorithm(readUniqueIdentifier());
+ command.setCompressionAlgorithm(readIdentifier());
}
if (readIf("CIPHER")) {
- command.setCipher(readUniqueIdentifier());
+ command.setCipher(readIdentifier());
if (readIf("PASSWORD")) {
command.setPassword(readExpression());
}
@@ -5172,18 +8242,32 @@ private RunScriptCommand parseRunScript() {
if (readIf("CHARSET")) {
command.setCharset(Charset.forName(readString()));
}
+ if (readIf("FROM_1X")) {
+ command.setFrom1X();
+ } else {
+ if (readIf("QUIRKS_MODE")) {
+ command.setQuirksMode(true);
+ }
+ if (readIf("VARIABLE_BINARY")) {
+ command.setVariableBinary(true);
+ }
+ }
return command;
}
private ScriptCommand parseScript() {
ScriptCommand command = new ScriptCommand(session);
- boolean data = true, passwords = true, settings = true;
- boolean dropTables = false, simple = false;
- if (readIf("SIMPLE")) {
- simple = true;
- }
+ boolean data = true, passwords = true, settings = true, version = true;
+ boolean dropTables = false, simple = false, withColumns = false;
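+ // e.g. SCRIPT SIMPLE COLUMNS NOPASSWORDS NOVERSION DROP TO 'dump.sql' (illustrative)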
if (readIf("NODATA")) {
data = false;
+ } else {
+ if (readIf("SIMPLE")) {
+ simple = true;
+ }
+ if (readIf("COLUMNS")) {
+ withColumns = true;
+ }
}
if (readIf("NOPASSWORDS")) {
passwords = false;
@@ -5191,6 +8275,9 @@ private ScriptCommand parseScript() {
if (readIf("NOSETTINGS")) {
settings = false;
}
+ if (readIf("NOVERSION")) {
+ version = false;
+ }
if (readIf("DROP")) {
dropTables = true;
}
@@ -5201,15 +8288,17 @@ private ScriptCommand parseScript() {
command.setData(data);
command.setPasswords(passwords);
command.setSettings(settings);
+ command.setVersion(version);
command.setDrop(dropTables);
command.setSimple(simple);
- if (readIf("TO")) {
+ command.setWithColumns(withColumns);
+ if (readIf(TO)) {
command.setFileNameExpr(readExpression());
if (readIf("COMPRESSION")) {
- command.setCompressionAlgorithm(readUniqueIdentifier());
+ command.setCompressionAlgorithm(readIdentifier());
}
if (readIf("CIPHER")) {
- command.setCipher(readUniqueIdentifier());
+ command.setCipher(readIdentifier());
if (readIf("PASSWORD")) {
command.setPassword(readExpression());
}
@@ -5219,60 +8308,143 @@ private ScriptCommand parseScript() {
}
}
if (readIf("SCHEMA")) {
- HashSet<String> schemaNames = New.hashSet();
+ HashSet<String> schemaNames = new HashSet<>();
do {
- schemaNames.add(readUniqueIdentifier());
- } while (readIf(","));
+ schemaNames.add(readIdentifier());
+ } while (readIf(COMMA));
command.setSchemaNames(schemaNames);
- } else if (readIf("TABLE")) {
- ArrayList<Table> tables = New.arrayList();
+ } else if (readIf(TABLE)) {
+ ArrayList<Table> tables = Utils.newSmallArrayList();
do {
tables.add(readTableOrView());
- } while (readIf(","));
+ } while (readIf(COMMA));
command.setTables(tables);
}
return command;
}
+ /**
+ * Is this the Oracle DUAL table or the IBM/DB2 SYSIBM.SYSDUMMY1 table?
+ *
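+ * Examples: {@code SELECT 1 FROM DUAL} (Oracle) and {@code SELECT 1 FROM SYSIBM.SYSDUMMY1} (DB2).
+ *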
+ * @param tableName table name.
+ * @return {@code true} if the table is the DUAL special table, {@code false} otherwise.
+ * @see <a href="https://en.wikipedia.org/wiki/DUAL_table">Wikipedia: DUAL table</a>
+ */
+ private boolean isDualTable(String tableName) {
+ return ((schemaName == null || equalsToken(schemaName, "SYS")) && equalsToken("DUAL", tableName))
+ || (database.getMode().sysDummy1 && (schemaName == null || equalsToken(schemaName, "SYSIBM"))
+ && equalsToken("SYSDUMMY1", tableName));
+ }
+
private Table readTableOrView() {
return readTableOrView(readIdentifierWithSchema(null));
}
private Table readTableOrView(String tableName) {
- // same algorithm than readSequence
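+ // Resolution order: explicit schema if given, then the session's current schema,
+ // then each schema on the search path, and finally the DUAL/SYSDUMMY1 compatibility tables.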
if (schemaName != null) {
- return getSchema().getTableOrView(session, tableName);
+ Table table = getSchema().resolveTableOrView(session, tableName);
+ if (table != null) {
+ return table;
+ }
+ } else {
+ Table table = database.getSchema(session.getCurrentSchemaName())
+ .resolveTableOrView(session, tableName);
+ if (table != null) {
+ return table;
+ }
+ String[] schemaNames = session.getSchemaSearchPath();
+ if (schemaNames != null) {
+ for (String name : schemaNames) {
+ Schema s = database.getSchema(name);
+ table = s.resolveTableOrView(session, tableName);
+ if (table != null) {
+ return table;
+ }
+ }
+ }
}
- Table table = database.getSchema(session.getCurrentSchemaName())
- .findTableOrView(session, tableName);
- if (table != null) {
- return table;
+ if (isDualTable(tableName)) {
+ return new DualTable(database);
}
- String[] schemaNames = session.getSchemaSearchPath();
- if (schemaNames != null) {
- for (String name : schemaNames) {
- Schema s = database.getSchema(name);
- table = s.findTableOrView(session, tableName);
- if (table != null) {
- return table;
- }
+
+ throw getTableOrViewNotFoundDbException(tableName);
+ }
+
+ private DbException getTableOrViewNotFoundDbException(String tableName) {
+ if (schemaName != null) {
+ return getTableOrViewNotFoundDbException(schemaName, tableName);
+ }
+
+ String currentSchemaName = session.getCurrentSchemaName();
+ String[] schemaSearchPath = session.getSchemaSearchPath();
+ if (schemaSearchPath == null) {
+ return getTableOrViewNotFoundDbException(Collections.singleton(currentSchemaName), tableName);
+ }
+
+ LinkedHashSet<String> schemaNames = new LinkedHashSet<>();
+ schemaNames.add(currentSchemaName);
+ schemaNames.addAll(Arrays.asList(schemaSearchPath));
+ return getTableOrViewNotFoundDbException(schemaNames, tableName);
+ }
+
+ private DbException getTableOrViewNotFoundDbException(String schemaName, String tableName) {
+ return getTableOrViewNotFoundDbException(Collections.singleton(schemaName), tableName);
+ }
+
+ private DbException getTableOrViewNotFoundDbException(
+ java.util.Set<String> schemaNames, String tableName) {
+ if (database == null || database.getFirstUserTable() == null) {
+ return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_DATABASE_EMPTY_1, tableName);
+ }
+
+ if (database.getSettings().caseInsensitiveIdentifiers) {
+ return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName);
+ }
+
+ java.util.Set<String> candidates = new TreeSet<>();
+ for (String schemaName : schemaNames) {
+ findTableNameCandidates(schemaName, tableName, candidates);
+ }
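+ // e.g. a lookup of "users" can suggest an existing table "Users" when identifiers are case-sensitive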
+
+ if (candidates.isEmpty()) {
+ return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName);
+ }
+
+ return DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_WITH_CANDIDATES_2,
+ tableName,
+ String.join(", ", candidates));
+ }
+
+ private void findTableNameCandidates(String schemaName, String tableName, java.util.Set<String> candidates) {
+ Schema schema = database.getSchema(schemaName);
+ String ucTableName = StringUtils.toUpperEnglish(tableName);
+ Collection<Table> allTablesAndViews = schema.getAllTablesAndViews(session);
+ for (Table candidate : allTablesAndViews) {
+ String candidateName = candidate.getName();
+ if (ucTableName.equals(StringUtils.toUpperEnglish(candidateName))) {
+ candidates.add(candidateName);
}
}
- throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName);
}
- private FunctionAlias findFunctionAlias(String schema, String aliasName) {
- FunctionAlias functionAlias = database.getSchema(schema).findFunction(
- aliasName);
- if (functionAlias != null) {
- return functionAlias;
+ private UserDefinedFunction findUserDefinedFunctionWithinPath(Schema schema, String name) {
+ if (schema != null) {
+ return schema.findFunctionOrAggregate(name);
+ }
+ schema = database.getSchema(session.getCurrentSchemaName());
+ UserDefinedFunction userDefinedFunction = schema.findFunctionOrAggregate(name);
+ if (userDefinedFunction != null) {
+ return userDefinedFunction;
}
String[] schemaNames = session.getSchemaSearchPath();
if (schemaNames != null) {
- for (String n : schemaNames) {
- functionAlias = database.getSchema(n).findFunction(aliasName);
- if (functionAlias != null) {
- return functionAlias;
+ for (String schemaName : schemaNames) {
+ Schema schemaFromPath = database.getSchema(schemaName);
+ if (schemaFromPath != schema) {
+ userDefinedFunction = schemaFromPath.findFunctionOrAggregate(name);
+ if (userDefinedFunction != null) {
+ return userDefinedFunction;
+ }
}
}
}
@@ -5312,342 +8484,651 @@ private Sequence readSequence() {
}
private Prepared parseAlterTable() {
- Table table = readTableOrView();
+ boolean ifTableExists = readIfExists(false);
+ String tableName = readIdentifierWithSchema();
+ Schema schema = getSchema();
if (readIf("ADD")) {
- Prepared command = parseAlterTableAddConstraintIf(table.getName(),
- table.getSchema());
+ Prepared command = parseTableConstraintIf(tableName, schema, ifTableExists);
if (command != null) {
return command;
}
- return parseAlterTableAddColumn(table);
- } else if (readIf("SET")) {
- read("REFERENTIAL_INTEGRITY");
- int type = CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY;
- boolean value = readBooleanSetting();
- AlterTableSet command = new AlterTableSet(session,
- table.getSchema(), type, value);
- command.setTableName(table.getName());
- if (readIf("CHECK")) {
- command.setCheckExisting(true);
- } else if (readIf("NOCHECK")) {
- command.setCheckExisting(false);
+ return parseAlterTableAddColumn(tableName, schema, ifTableExists);
+ } else if (readIf(SET)) {
+ return parseAlterTableSet(schema, tableName, ifTableExists);
+ } else if (readIf("RENAME")) {
+ return parseAlterTableRename(schema, tableName, ifTableExists);
+ } else if (readIf("DROP")) {
+ return parseAlterTableDrop(schema, tableName, ifTableExists);
+ } else if (readIf("ALTER")) {
+ return parseAlterTableAlter(schema, tableName, ifTableExists);
+ } else {
+ Mode mode = database.getMode();
+ if (mode.alterTableExtensionsMySQL || mode.alterTableModifyColumn) {
+ return parseAlterTableCompatibility(schema, tableName, ifTableExists, mode);
}
+ }
+ throw getSyntaxError();
+ }
+
+ private Prepared parseAlterTableAlter(Schema schema, String tableName, boolean ifTableExists) {
+ readIf("COLUMN");
+ boolean ifExists = readIfExists(false);
+ String columnName = readIdentifier();
+ Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists);
+ if (readIf("RENAME")) {
+ read(TO);
+ AlterTableRenameColumn command = new AlterTableRenameColumn(
+ session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setIfExists(ifExists);
+ command.setOldColumnName(columnName);
+ String newName = readIdentifier();
+ command.setNewColumnName(newName);
return command;
- } else if (readIf("RENAME")) {
- read("TO");
- String newName = readIdentifierWithSchema(table.getSchema()
- .getName());
- checkSchema(table.getSchema());
+ } else if (readIf("DROP")) {
+ if (readIf(DEFAULT)) {
+ if (readIf(ON)) {
+ read(NULL);
+ AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setOldColumn(column);
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL);
+ command.setBooleanFlag(false);
+ return command;
+ }
+ return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column,
+ CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT);
+ } else if (readIf("EXPRESSION")) {
+ return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column,
+ CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION);
+ } else if (readIf("IDENTITY")) {
+ return getAlterTableAlterColumnDropDefaultExpression(schema, tableName, ifTableExists, column,
+ CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY);
+ }
+ if (readIf(ON)) {
+ read("UPDATE");
+ AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setOldColumn(column);
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE);
+ command.setDefaultExpression(null);
+ return command;
+ }
+ read(NOT);
+ read(NULL);
+ AlterTableAlterColumn command = new AlterTableAlterColumn(
+ session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setOldColumn(column);
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL);
+ return command;
+ } else if (readIf("TYPE")) {
+ // PostgreSQL compatibility
+ return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists, ifExists);
+ } else if (readIf("SELECTIVITY")) {
+ AlterTableAlterColumn command = new AlterTableAlterColumn(
+ session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY);
+ command.setOldColumn(column);
+ command.setSelectivity(readExpression());
+ return command;
+ }
+ Prepared command = parseAlterTableAlterColumnIdentity(schema, tableName, ifTableExists, column);
+ if (command != null) {
+ return command;
+ }
+ if (readIf(SET)) {
+ return parseAlterTableAlterColumnSet(schema, tableName, ifTableExists, ifExists, columnName, column);
+ }
+ return parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists, ifExists, true);
+ }
+
+ private Prepared getAlterTableAlterColumnDropDefaultExpression(Schema schema, String tableName,
+ boolean ifTableExists, Column column, int type) {
+ AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setOldColumn(column);
+ command.setType(type);
+ command.setDefaultExpression(null);
+ return command;
+ }
+
+ private Prepared parseAlterTableAlterColumnIdentity(Schema schema, String tableName, boolean ifTableExists,
+ Column column) {
+ int index = tokenIndex;
+ Boolean always = null;
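+ // Speculatively read SET GENERATED { ALWAYS | BY DEFAULT }; if it is not present,
+ // roll the tokenizer back to the saved index and look only for sequence options.
+ // e.g. ALTER TABLE t ALTER COLUMN id SET GENERATED ALWAYS RESTART WITH 100 (illustrative)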
+ if (readIf(SET) && readIf("GENERATED")) {
+ if (readIf("ALWAYS")) {
+ always = true;
+ } else {
+ read("BY");
+ read(DEFAULT);
+ always = false;
+ }
+ } else {
+ setTokenIndex(index);
+ }
+ SequenceOptions options = new SequenceOptions();
+ if (!parseSequenceOptions(options, null, false, true) && always == null) {
+ return null;
+ }
+ if (column == null) {
+ return new NoOperation(session);
+ }
+ if (!column.isIdentity()) {
+ AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema);
+ parseAlterColumnUsingIf(command);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE);
+ command.setOldColumn(column);
+ Column newColumn = column.getClone();
+ newColumn.setIdentityOptions(options, always != null && always);
+ command.setNewColumn(newColumn);
+ return command;
+ }
+ AlterSequence command = new AlterSequence(session, schema);
+ command.setColumn(column, always);
+ command.setOptions(options);
+ return commandIfTableExists(schema, tableName, ifTableExists, command);
+ }
+
+ private Prepared parseAlterTableAlterColumnSet(Schema schema, String tableName, boolean ifTableExists,
+ boolean ifExists, String columnName, Column column) {
+ if (readIf("DATA")) {
+ read("TYPE");
+ return parseAlterTableAlterColumnDataType(schema, tableName, columnName, ifTableExists, ifExists);
+ }
+ AlterTableAlterColumn command = new AlterTableAlterColumn(
+ session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setOldColumn(column);
+ NullConstraintType nullConstraint = parseNotNullConstraint();
+ switch (nullConstraint) {
+ case NULL_IS_ALLOWED:
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL);
+ break;
+ case NULL_IS_NOT_ALLOWED:
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL);
+ break;
+ case NO_NULL_CONSTRAINT_FOUND:
+ if (readIf(DEFAULT)) {
+ if (readIf(ON)) {
+ read(NULL);
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL);
+ command.setBooleanFlag(true);
+ break;
+ }
+ Expression defaultExpression = readExpression();
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT);
+ command.setDefaultExpression(defaultExpression);
+ } else if (readIf(ON)) {
+ read("UPDATE");
+ Expression onUpdateExpression = readExpression();
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE);
+ command.setDefaultExpression(onUpdateExpression);
+ } else if (readIf("INVISIBLE")) {
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY);
+ command.setBooleanFlag(false);
+ } else if (readIf("VISIBLE")) {
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY);
+ command.setBooleanFlag(true);
+ }
+ break;
+ default:
+ throw DbException.get(ErrorCode.UNKNOWN_MODE_1,
+ "Internal Error - unhandled case: " + nullConstraint.name());
+ }
+ return command;
+ }
+
+ private Prepared parseAlterTableDrop(Schema schema, String tableName, boolean ifTableExists) {
+ if (readIf(CONSTRAINT)) {
+ boolean ifExists = readIfExists(false);
+ String constraintName = readIdentifierWithSchema(schema.getName());
+ ifExists = readIfExists(ifExists);
+ checkSchema(schema);
+ AlterTableDropConstraint command = new AlterTableDropConstraint(session, getSchema(), ifExists);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setConstraintName(constraintName);
+ ConstraintActionType dropAction = parseCascadeOrRestrict();
+ if (dropAction != null) {
+ command.setDropAction(dropAction);
+ }
+ return command;
+ } else if (readIf(PRIMARY)) {
+ read(KEY);
+ Table table = tableIfTableExists(schema, tableName, ifTableExists);
+ if (table == null) {
+ return new NoOperation(session);
+ }
+ Index idx = table.getPrimaryKey();
+ DropIndex command = new DropIndex(session, schema);
+ command.setIndexName(idx.getName());
+ return command;
+ } else if (database.getMode().alterTableExtensionsMySQL) {
+ Prepared command = parseAlterTableDropCompatibility(schema, tableName, ifTableExists);
+ if (command != null) {
+ return command;
+ }
+ }
+ readIf("COLUMN");
+ boolean ifExists = readIfExists(false);
+ ArrayList<Column> columnsToRemove = new ArrayList<>();
+ Table table = tableIfTableExists(schema, tableName, ifTableExists);
+ // For Oracle compatibility - the opening bracket is optional here (Oracle requires one around a column list)
+ boolean openingBracketDetected = readIf(OPEN_PAREN);
+ do {
+ String columnName = readIdentifier();
+ if (table != null) {
+ Column column = table.getColumn(columnName, ifExists);
+ if (column != null) {
+ columnsToRemove.add(column);
+ }
+ }
+ } while (readIf(COMMA));
+ if (openingBracketDetected) {
+ // For Oracle compatibility - close bracket
+ read(CLOSE_PAREN);
+ }
+ if (table == null || columnsToRemove.isEmpty()) {
+ return new NoOperation(session);
+ }
+ AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema);
+ command.setType(CommandInterface.ALTER_TABLE_DROP_COLUMN);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setColumnsToRemove(columnsToRemove);
+ return command;
+ }
+
+ private Prepared parseAlterTableDropCompatibility(Schema schema, String tableName, boolean ifTableExists) {
+ if (readIf(FOREIGN)) {
+ read(KEY);
+ // For MariaDB
+ boolean ifExists = readIfExists(false);
+ String constraintName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
+ AlterTableDropConstraint command = new AlterTableDropConstraint(session, getSchema(), ifExists);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setConstraintName(constraintName);
+ return command;
+ } else if (readIf("INDEX")) {
+ // For MariaDB
+ boolean ifExists = readIfExists(false);
+ String indexOrConstraintName = readIdentifierWithSchema(schema.getName());
+ if (schema.findIndex(session, indexOrConstraintName) != null) {
+ DropIndex dropIndexCommand = new DropIndex(session, getSchema());
+ dropIndexCommand.setIndexName(indexOrConstraintName);
+ return commandIfTableExists(schema, tableName, ifTableExists, dropIndexCommand);
+ } else {
+ AlterTableDropConstraint dropCommand = new AlterTableDropConstraint(session, getSchema(), ifExists);
+ dropCommand.setTableName(tableName);
+ dropCommand.setIfTableExists(ifTableExists);
+ dropCommand.setConstraintName(indexOrConstraintName);
+ return dropCommand;
+ }
+ }
+ return null;
+ }
+
+ private Prepared parseAlterTableRename(Schema schema, String tableName, boolean ifTableExists) {
+ if (readIf("COLUMN")) {
+ // PostgreSQL syntax
+ String columnName = readIdentifier();
+ read(TO);
+ AlterTableRenameColumn command = new AlterTableRenameColumn(
+ session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setOldColumnName(columnName);
+ command.setNewColumnName(readIdentifier());
+ return command;
+ } else if (readIf(CONSTRAINT)) {
+ String constraintName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
+ read(TO);
+ AlterTableRenameConstraint command = new AlterTableRenameConstraint(session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setConstraintName(constraintName);
+ command.setNewConstraintName(readIdentifier());
+ return command;
+ } else {
+ read(TO);
+ String newName = readIdentifierWithSchema(schema.getName());
+ checkSchema(schema);
AlterTableRename command = new AlterTableRename(session,
getSchema());
- command.setOldTable(table);
+ command.setTableName(tableName);
command.setNewTableName(newName);
+ command.setIfTableExists(ifTableExists);
command.setHidden(readIf("HIDDEN"));
return command;
- } else if (readIf("DROP")) {
- if (readIf("CONSTRAINT")) {
- boolean ifExists = readIfExists(false);
- String constraintName = readIdentifierWithSchema(table
- .getSchema().getName());
- ifExists = readIfExists(ifExists);
- checkSchema(table.getSchema());
- AlterTableDropConstraint command = new AlterTableDropConstraint(
- session, getSchema(), ifExists);
- command.setConstraintName(constraintName);
- return command;
- } else if (readIf("FOREIGN")) {
- // MySQL compatibility
- read("KEY");
- String constraintName = readIdentifierWithSchema(table
- .getSchema().getName());
- checkSchema(table.getSchema());
- AlterTableDropConstraint command = new AlterTableDropConstraint(
- session, getSchema(), false);
- command.setConstraintName(constraintName);
- return command;
- } else if (readIf("INDEX")) {
- // MySQL compatibility
- String indexName = readIdentifierWithSchema();
- DropIndex command = new DropIndex(session, getSchema());
- command.setIndexName(indexName);
- return command;
- } else if (readIf("PRIMARY")) {
- read("KEY");
- Index idx = table.getPrimaryKey();
- DropIndex command = new DropIndex(session, table.getSchema());
- command.setIndexName(idx.getName());
- return command;
- } else {
- readIf("COLUMN");
- boolean ifExists = readIfExists(false);
- AlterTableAlterColumn command = new AlterTableAlterColumn(
- session, table.getSchema());
- command.setType(CommandInterface.ALTER_TABLE_DROP_COLUMN);
- String columnName = readColumnIdentifier();
- command.setTable(table);
- if (ifExists && !table.doesColumnExist(columnName)) {
+ }
+ }
+
+ private Prepared parseAlterTableSet(Schema schema, String tableName, boolean ifTableExists) {
+ read("REFERENTIAL_INTEGRITY");
+ int type = CommandInterface.ALTER_TABLE_SET_REFERENTIAL_INTEGRITY;
+ boolean value = readBooleanSetting();
+ AlterTableSet command = new AlterTableSet(session,
+ schema, type, value);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ if (readIf(CHECK)) {
+ command.setCheckExisting(true);
+ } else if (readIf("NOCHECK")) {
+ command.setCheckExisting(false);
+ }
+ return command;
+ }
+
+ private Prepared parseAlterTableCompatibility(Schema schema, String tableName, boolean ifTableExists, Mode mode) {
+ if (mode.alterTableExtensionsMySQL) {
+ if (readIf("AUTO_INCREMENT")) {
+ readIf(EQUAL);
+ Expression restart = readExpression();
+ Table table = tableIfTableExists(schema, tableName, ifTableExists);
+ if (table == null) {
return new NoOperation(session);
}
- command.setOldColumn(table.getColumn(columnName));
- return command;
+ Index idx = table.findPrimaryKey();
+ if (idx != null) {
+ for (IndexColumn ic : idx.getIndexColumns()) {
+ Column column = ic.column;
+ if (column.isIdentity()) {
+ AlterSequence command = new AlterSequence(session, schema);
+ command.setColumn(column, null);
+ SequenceOptions options = new SequenceOptions();
+ options.setRestartValue(restart);
+ command.setOptions(options);
+ return command;
+ }
+ }
+ }
+ throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "AUTO_INCREMENT PRIMARY KEY");
+ } else if (readIf("CHANGE")) {
+ readIf("COLUMN");
+ String columnName = readIdentifier();
+ String newColumnName = readIdentifier();
+ Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, false);
+ boolean nullable = column == null ? true : column.isNullable();
+ // new column type ignored. RENAME and MODIFY are
+ // a single command in MySQL but two different commands in H2.
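+ // e.g. ALTER TABLE t CHANGE COLUMN a b INT only renames a to b here;
+ // the parsed column type is discarded (illustrative example)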
+ parseColumnForTable(newColumnName, nullable);
+ AlterTableRenameColumn command = new AlterTableRenameColumn(session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setOldColumnName(columnName);
+ command.setNewColumnName(newColumnName);
+ return command;
+ } else if (readIf("CONVERT")) {
+ readIf(TO);
+ readIf("CHARACTER");
+ readIf(SET);
+ readMySQLCharset();
+
+ if (readIf("COLLATE")) {
+ readMySQLCharset();
+ }
+
+ return new NoOperation(session);
}
- } else if (readIf("CHANGE")) {
- // MySQL compatibility
- readIf("COLUMN");
- String columnName = readColumnIdentifier();
- Column column = table.getColumn(columnName);
- String newColumnName = readColumnIdentifier();
- // new column type ignored. RENAME and MODIFY are
- // a single command in MySQL but two different commands in H2.
- parseColumnForTable(newColumnName, column.isNullable());
- AlterTableRenameColumn command = new AlterTableRenameColumn(session);
- command.setTable(table);
- command.setColumn(column);
- command.setNewColumnName(newColumnName);
- return command;
- } else if (readIf("MODIFY")) {
- // MySQL compatibility
- readIf("COLUMN");
- String columnName = readColumnIdentifier();
- Column column = table.getColumn(columnName);
- return parseAlterTableAlterColumnType(table, columnName, column);
- } else if (readIf("ALTER")) {
+ }
+ if (mode.alterTableModifyColumn && readIf("MODIFY")) {
+ // MySQL compatibility (optional)
readIf("COLUMN");
- String columnName = readColumnIdentifier();
- Column column = table.getColumn(columnName);
- if (readIf("RENAME")) {
- read("TO");
- AlterTableRenameColumn command = new AlterTableRenameColumn(
- session);
- command.setTable(table);
- command.setColumn(column);
- String newName = readColumnIdentifier();
- command.setNewColumnName(newName);
- return command;
- } else if (readIf("DROP")) {
- // PostgreSQL compatibility
- if (readIf("DEFAULT")) {
- AlterTableAlterColumn command = new AlterTableAlterColumn(
- session, table.getSchema());
- command.setTable(table);
- command.setOldColumn(column);
- command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT);
- command.setDefaultExpression(null);
- return command;
- }
- read("NOT");
- read("NULL");
- AlterTableAlterColumn command = new AlterTableAlterColumn(
- session, table.getSchema());
- command.setTable(table);
- command.setOldColumn(column);
- command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NULL);
- return command;
- } else if (readIf("TYPE")) {
- // PostgreSQL compatibility
- return parseAlterTableAlterColumnType(table, columnName, column);
- } else if (readIf("SET")) {
- if (readIf("DATA")) {
- // Derby compatibility
- read("TYPE");
- return parseAlterTableAlterColumnType(table, columnName,
- column);
- }
- AlterTableAlterColumn command = new AlterTableAlterColumn(
- session, table.getSchema());
- command.setTable(table);
+ // Oracle specifies (but does not require) an opening parenthesis
+ boolean hasOpeningBracket = readIf(OPEN_PAREN);
+ String columnName = readIdentifier();
+ AlterTableAlterColumn command;
+ NullConstraintType nullConstraint = parseNotNullConstraint();
+ switch (nullConstraint) {
+ case NULL_IS_ALLOWED:
+ case NULL_IS_NOT_ALLOWED:
+ command = new AlterTableAlterColumn(session, schema);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ Column column = columnIfTableExists(schema, tableName, columnName, ifTableExists, false);
command.setOldColumn(column);
- if (readIf("NULL")) {
- command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NULL);
- return command;
- } else if (readIf("NOT")) {
- read("NULL");
+ if (nullConstraint == NullConstraintType.NULL_IS_ALLOWED) {
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL);
+ } else {
command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL);
- return command;
- } else if (readIf("DEFAULT")) {
- Expression defaultExpression = readExpression();
- command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT);
- command.setDefaultExpression(defaultExpression);
- return command;
}
- } else if (readIf("RESTART")) {
- readIf("WITH");
- Expression start = readExpression();
- AlterSequence command = new AlterSequence(session,
- table.getSchema());
- command.setColumn(column);
- command.setStartWith(start);
- return command;
- } else if (readIf("SELECTIVITY")) {
- AlterTableAlterColumn command = new AlterTableAlterColumn(
- session, table.getSchema());
- command.setTable(table);
- command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY);
- command.setOldColumn(column);
- command.setSelectivity(readExpression());
- return command;
- } else {
- return parseAlterTableAlterColumnType(table, columnName, column);
+ break;
+ case NO_NULL_CONSTRAINT_FOUND:
+ command = parseAlterTableAlterColumnType(schema, tableName, columnName, ifTableExists, false,
+ mode.getEnum() != ModeEnum.MySQL);
+ break;
+ default:
+ throw DbException.get(ErrorCode.UNKNOWN_MODE_1,
+ "Internal Error - unhandled case: " + nullConstraint.name());
}
+ if (hasOpeningBracket) {
+ read(CLOSE_PAREN);
+ }
+ return command;
}
throw getSyntaxError();
}
- private AlterTableAlterColumn parseAlterTableAlterColumnType(Table table,
- String columnName, Column column) {
- Column newColumn = parseColumnForTable(columnName, column.isNullable());
- AlterTableAlterColumn command = new AlterTableAlterColumn(session,
- table.getSchema());
- command.setTable(table);
+ private Table tableIfTableExists(Schema schema, String tableName, boolean ifTableExists) {
+ Table table = schema.resolveTableOrView(session, tableName);
+ if (table == null && !ifTableExists) {
+ throw getTableOrViewNotFoundDbException(schema.getName(), tableName);
+ }
+ return table;
+ }
+
+ private Column columnIfTableExists(Schema schema, String tableName,
+ String columnName, boolean ifTableExists, boolean ifExists) {
+ Table table = tableIfTableExists(schema, tableName, ifTableExists);
+ if (table == null) {
+ return null;
+ }
+ return table.getColumn(columnName, ifExists);
+ }
+
+ private Prepared commandIfTableExists(Schema schema, String tableName,
+ boolean ifTableExists, Prepared commandIfTableExists) {
+ return tableIfTableExists(schema, tableName, ifTableExists) == null
+ ? new NoOperation(session)
+ : commandIfTableExists;
+ }
+
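The three helpers above centralize the new IF [TABLE] EXISTS handling: when the table is absent and the caller opted in, resolution returns null, and commandIfTableExists substitutes a NoOperation command instead of throwing. A minimal standalone sketch of that pattern, using hypothetical names rather than H2's API:

    // Hypothetical sketch, not H2 API: resolve an object that may be absent.
    static <T> T resolveIfExists(T found, boolean ifExists, String name) {
        if (found == null && !ifExists) {
            // without IF EXISTS, absence is an error
            throw new IllegalStateException("not found: " + name);
        }
        return found; // may be null; the caller then substitutes a no-op
    }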
+ private AlterTableAlterColumn parseAlterTableAlterColumnType(Schema schema,
+ String tableName, String columnName, boolean ifTableExists, boolean ifExists, boolean preserveNotNull) {
+ Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists);
+ Column newColumn = parseColumnForTable(columnName,
+ !preserveNotNull || oldColumn == null || oldColumn.isNullable());
+ AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema);
+ parseAlterColumnUsingIf(command);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE);
- command.setOldColumn(column);
+ command.setOldColumn(oldColumn);
+ command.setNewColumn(newColumn);
+ return command;
+ }
+
+ private AlterTableAlterColumn parseAlterTableAlterColumnDataType(Schema schema,
+ String tableName, String columnName, boolean ifTableExists, boolean ifExists) {
+ Column oldColumn = columnIfTableExists(schema, tableName, columnName, ifTableExists, ifExists);
+ Column newColumn = parseColumnWithType(columnName);
+ if (oldColumn != null) {
+ if (!oldColumn.isNullable()) {
+ newColumn.setNullable(false);
+ }
+ if (!oldColumn.getVisible()) {
+ newColumn.setVisible(false);
+ }
+ Expression e = oldColumn.getDefaultExpression();
+ if (e != null) {
+ if (oldColumn.isGenerated()) {
+ newColumn.setGeneratedExpression(e);
+ } else {
+ newColumn.setDefaultExpression(session, e);
+ }
+ }
+ e = oldColumn.getOnUpdateExpression();
+ if (e != null) {
+ newColumn.setOnUpdateExpression(session, e);
+ }
+ Sequence s = oldColumn.getSequence();
+ if (s != null) {
+ newColumn.setIdentityOptions(new SequenceOptions(s, newColumn.getType()),
+ oldColumn.isGeneratedAlways());
+ }
+ String c = oldColumn.getComment();
+ if (c != null) {
+ newColumn.setComment(c);
+ }
+ }
+ AlterTableAlterColumn command = new AlterTableAlterColumn(session, schema);
+ parseAlterColumnUsingIf(command);
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ command.setType(CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE);
+ command.setOldColumn(oldColumn);
command.setNewColumn(newColumn);
return command;
}
- private AlterTableAlterColumn parseAlterTableAddColumn(Table table) {
+ private AlterTableAlterColumn parseAlterTableAddColumn(String tableName,
+ Schema schema, boolean ifTableExists) {
readIf("COLUMN");
- Schema schema = table.getSchema();
AlterTableAlterColumn command = new AlterTableAlterColumn(session,
schema);
command.setType(CommandInterface.ALTER_TABLE_ADD_COLUMN);
- command.setTable(table);
- ArrayList<Column> columnsToAdd = New.arrayList();
- if (readIf("(")) {
+ command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
+ if (readIf(OPEN_PAREN)) {
command.setIfNotExists(false);
do {
- String columnName = readColumnIdentifier();
- Column column = parseColumnForTable(columnName, true);
- columnsToAdd.add(column);
- } while (readIf(","));
- read(")");
- command.setNewColumns(columnsToAdd);
+ parseTableColumnDefinition(command, schema, tableName, false);
+ } while (readIfMore());
} else {
- boolean ifNotExists = readIfNoExists();
+ boolean ifNotExists = readIfNotExists();
command.setIfNotExists(ifNotExists);
- String columnName = readColumnIdentifier();
- Column column = parseColumnForTable(columnName, true);
- columnsToAdd.add(column);
- if (readIf("BEFORE")) {
- command.setAddBefore(readColumnIdentifier());
- } else if (readIf("AFTER")) {
- command.setAddAfter(readColumnIdentifier());
- }
+ parseTableColumnDefinition(command, schema, tableName, false);
+ parseAlterColumnUsingIf(command);
+ }
+ if (readIf("BEFORE")) {
+ command.setAddBefore(readIdentifier());
+ } else if (readIf("AFTER")) {
+ command.setAddAfter(readIdentifier());
+ } else if (readIf("FIRST")) {
+ command.setAddFirst();
}
- command.setNewColumns(columnsToAdd);
return command;
}
- private int parseAction() {
- Integer result = parseCascadeOrRestrict();
+ private void parseAlterColumnUsingIf(AlterTableAlterColumn command) {
+ if (readIf(USING)) {
+ command.setUsingExpression(readExpression());
+ }
+ }
+
+ private ConstraintActionType parseAction() {
+ ConstraintActionType result = parseCascadeOrRestrict();
if (result != null) {
return result;
}
if (readIf("NO")) {
read("ACTION");
- return ConstraintReferential.RESTRICT;
+ return ConstraintActionType.RESTRICT;
}
- read("SET");
- if (readIf("NULL")) {
- return ConstraintReferential.SET_NULL;
+ read(SET);
+ if (readIf(NULL)) {
+ return ConstraintActionType.SET_NULL;
}
- read("DEFAULT");
- return ConstraintReferential.SET_DEFAULT;
+ read(DEFAULT);
+ return ConstraintActionType.SET_DEFAULT;
}
- private Integer parseCascadeOrRestrict() {
+ private ConstraintActionType parseCascadeOrRestrict() {
if (readIf("CASCADE")) {
- return ConstraintReferential.CASCADE;
+ return ConstraintActionType.CASCADE;
} else if (readIf("RESTRICT")) {
- return ConstraintReferential.RESTRICT;
+ return ConstraintActionType.RESTRICT;
} else {
return null;
}
}
- private DefineCommand parseAlterTableAddConstraintIf(String tableName,
- Schema schema) {
+ private DefineCommand parseTableConstraintIf(String tableName, Schema schema, boolean ifTableExists) {
String constraintName = null, comment = null;
boolean ifNotExists = false;
- boolean allowIndexDefinition = database.getMode().indexDefinitionInCreateTable;
- if (readIf("CONSTRAINT")) {
- ifNotExists = readIfNoExists();
+ if (readIf(CONSTRAINT)) {
+ ifNotExists = readIfNotExists();
constraintName = readIdentifierWithSchema(schema.getName());
checkSchema(schema);
comment = readCommentIf();
- allowIndexDefinition = true;
}
- if (readIf("PRIMARY")) {
- read("KEY");
- AlterTableAddConstraint command = new AlterTableAddConstraint(
- session, schema, ifNotExists);
- command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY);
- command.setComment(comment);
- command.setConstraintName(constraintName);
- command.setTableName(tableName);
+ AlterTableAddConstraint command;
+ switch (currentTokenType) {
+ case PRIMARY:
+ read();
+ read(KEY);
+ command = new AlterTableAddConstraint(session, schema,
+ CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, ifNotExists);
if (readIf("HASH")) {
command.setPrimaryKeyHash(true);
}
- read("(");
+ read(OPEN_PAREN);
command.setIndexColumns(parseIndexColumnList());
if (readIf("INDEX")) {
String indexName = readIdentifierWithSchema();
command.setIndex(getSchema().findIndex(session, indexName));
}
- return command;
- } else if (allowIndexDefinition && (isToken("INDEX") || isToken("KEY"))) {
- // MySQL
- // need to read ahead, as it could be a column name
- int start = lastParseIndex;
+ break;
+ case UNIQUE:
read();
- if (DataType.getTypeByName(currentToken) != null) {
- // known data type
- parseIndex = start;
- read();
- return null;
- }
- CreateIndex command = new CreateIndex(session, schema);
- command.setComment(comment);
- command.setTableName(tableName);
- if (!readIf("(")) {
- command.setIndexName(readUniqueIdentifier());
- read("(");
- }
- command.setIndexColumns(parseIndexColumnList());
// MySQL compatibility
- if (readIf("USING")) {
- read("BTREE");
+ boolean compatibility = database.getMode().indexDefinitionInCreateTable;
+ if (compatibility) {
+ if (!readIf(KEY)) {
+ readIf("INDEX");
+ }
+ if (!isToken(OPEN_PAREN)) {
+ constraintName = readIdentifier();
+ }
}
- return command;
- }
- AlterTableAddConstraint command;
- if (readIf("CHECK")) {
- command = new AlterTableAddConstraint(session, schema, ifNotExists);
- command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK);
- command.setCheckExpression(readExpression());
- } else if (readIf("UNIQUE")) {
- readIf("KEY");
- readIf("INDEX");
- command = new AlterTableAddConstraint(session, schema, ifNotExists);
- command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE);
- if (!readIf("(")) {
- constraintName = readUniqueIdentifier();
- read("(");
+ read(OPEN_PAREN);
+ command = new AlterTableAddConstraint(session, schema, CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE,
+ ifNotExists);
+ if (readIf(VALUE)) {
+ read(CLOSE_PAREN);
+ command.setIndexColumns(null);
+ } else {
+ command.setIndexColumns(parseIndexColumnList());
}
- command.setIndexColumns(parseIndexColumnList());
if (readIf("INDEX")) {
String indexName = readIdentifierWithSchema();
command.setIndex(getSchema().findIndex(session, indexName));
}
- // MySQL compatibility
- if (readIf("USING")) {
+ if (compatibility && readIf(USING)) {
read("BTREE");
}
- } else if (readIf("FOREIGN")) {
- command = new AlterTableAddConstraint(session, schema, ifNotExists);
- command.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL);
- read("KEY");
- read("(");
+ break;
+ case FOREIGN:
+ read();
+ command = new AlterTableAddConstraint(session, schema,
+ CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL, ifNotExists);
+ read(KEY);
+ read(OPEN_PAREN);
command.setIndexColumns(parseIndexColumnList());
if (readIf("INDEX")) {
String indexName = readIdentifierWithSchema();
@@ -5655,19 +9136,60 @@ private DefineCommand parseAlterTableAddConstraintIf(String tableName,
}
read("REFERENCES");
parseReferences(command, schema, tableName);
- } else {
- if (constraintName != null) {
+ break;
+ case CHECK:
+ read();
+ command = new AlterTableAddConstraint(session, schema, CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK,
+ ifNotExists);
+ command.setCheckExpression(readExpression());
+ break;
+ default:
+ if (constraintName == null) {
+ Mode mode = database.getMode();
+ if (mode.indexDefinitionInCreateTable) {
+ int start = tokenIndex;
+ if (readIf(KEY) || readIf("INDEX")) {
+ // MySQL
+ // need to read ahead, as it could be a column name
+ if (DataType.getTypeByName(currentToken, mode) == null) {
+ CreateIndex createIndex = new CreateIndex(session, schema);
+ createIndex.setComment(comment);
+ createIndex.setTableName(tableName);
+ createIndex.setIfTableExists(ifTableExists);
+ if (!readIf(OPEN_PAREN)) {
+ createIndex.setIndexName(readIdentifier());
+ read(OPEN_PAREN);
+ }
+ createIndex.setIndexColumns(parseIndexColumnList());
+ // MySQL compatibility
+ if (readIf(USING)) {
+ read("BTREE");
+ }
+ return createIndex;
+ } else {
+ // known data type
+ setTokenIndex(start);
+ }
+ }
+ }
+ return null;
+ } else {
+ if (expectedList != null) {
+ addMultipleExpected(PRIMARY, UNIQUE, FOREIGN, CHECK);
+ }
throw getSyntaxError();
}
- return null;
}
- if (readIf("NOCHECK")) {
- command.setCheckExisting(false);
- } else {
- readIf("CHECK");
- command.setCheckExisting(true);
+ if (command.getType() != CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY) {
+ if (readIf("NOCHECK")) {
+ command.setCheckExisting(false);
+ } else {
+ readIf(CHECK);
+ command.setCheckExisting(true);
+ }
}
command.setTableName(tableName);
+ command.setIfTableExists(ifTableExists);
command.setConstraintName(constraintName);
command.setComment(comment);
return command;
@@ -5675,13 +9197,13 @@ private DefineCommand parseAlterTableAddConstraintIf(String tableName,
private void parseReferences(AlterTableAddConstraint command,
Schema schema, String tableName) {
- if (readIf("(")) {
+ if (readIf(OPEN_PAREN)) {
command.setRefTableName(schema, tableName);
command.setRefIndexColumns(parseIndexColumnList());
} else {
String refTableName = readIdentifierWithSchema(schema.getName());
command.setRefTableName(getSchema(), refTableName);
- if (readIf("(")) {
+ if (readIf(OPEN_PAREN)) {
command.setRefIndexColumns(parseIndexColumnList());
}
}
@@ -5689,7 +9211,7 @@ private void parseReferences(AlterTableAddConstraint command,
String indexName = readIdentifierWithSchema();
command.setRefIndex(getSchema().findIndex(session, indexName));
}
- while (readIf("ON")) {
+ while (readIf(ON)) {
if (readIf("DELETE")) {
command.setDeleteAction(parseAction());
} else {
@@ -5697,7 +9219,7 @@ private void parseReferences(AlterTableAddConstraint command,
command.setUpdateAction(parseAction());
}
}
- if (readIf("NOT")) {
+ if (readIf(NOT)) {
read("DEFERRABLE");
} else {
readIf("DEFERRABLE");
@@ -5706,8 +9228,8 @@ private void parseReferences(AlterTableAddConstraint command,
private CreateLinkedTable parseCreateLinkedTable(boolean temp,
boolean globalTemp, boolean force) {
- read("TABLE");
- boolean ifNotExists = readIfNoExists();
+ read(TABLE);
+ boolean ifNotExists = readIfNotExists();
String tableName = readIdentifierWithSchema();
CreateLinkedTable command = new CreateLinkedTable(session, getSchema());
command.setTemporary(temp);
@@ -5716,34 +9238,45 @@ private CreateLinkedTable parseCreateLinkedTable(boolean temp,
command.setIfNotExists(ifNotExists);
command.setTableName(tableName);
command.setComment(readCommentIf());
- read("(");
+ read(OPEN_PAREN);
command.setDriver(readString());
- read(",");
+ read(COMMA);
command.setUrl(readString());
- read(",");
+ read(COMMA);
command.setUser(readString());
- read(",");
+ read(COMMA);
command.setPassword(readString());
- read(",");
+ read(COMMA);
String originalTable = readString();
- if (readIf(",")) {
+ if (readIf(COMMA)) {
command.setOriginalSchema(originalTable);
originalTable = readString();
}
command.setOriginalTable(originalTable);
- read(")");
+ read(CLOSE_PAREN);
if (readIf("EMIT")) {
read("UPDATES");
command.setEmitUpdates(true);
} else if (readIf("READONLY")) {
command.setReadOnly(true);
}
+ if (readIf("FETCH_SIZE")) {
+ command.setFetchSize(readNonNegativeInt());
+ }
+ if(readIf("AUTOCOMMIT")){
+ if(readIf("ON")) {
+ command.setAutoCommit(true);
+ }
+ else if(readIf("OFF")){
+ command.setAutoCommit(false);
+ }
+ }
return command;
}
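The two optional clauses added above extend CREATE LINKED TABLE with a fetch size and an auto-commit switch for the linked connection. An illustrative JDBC call, where stmt is assumed to be an open java.sql.Statement and the driver, URL and credentials are placeholders:

    // Illustrative only; the connection parameters are placeholders.
    stmt.execute("CREATE LINKED TABLE T('org.h2.Driver', 'jdbc:h2:mem:other',"
            + " 'sa', '', 'TEST') FETCH_SIZE 100 AUTOCOMMIT OFF");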
private CreateTable parseCreateTable(boolean temp, boolean globalTemp,
boolean persistIndexes) {
- boolean ifNotExists = readIfNoExists();
+ boolean ifNotExists = readIfNotExists();
String tableName = readIdentifierWithSchema();
if (temp && globalTemp && equalsToken("SESSION", schemaName)) {
// support weird syntax: declare global temporary table session.xy
@@ -5759,130 +9292,24 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp,
command.setIfNotExists(ifNotExists);
command.setTableName(tableName);
command.setComment(readCommentIf());
- if (readIf("(")) {
- if (!readIf(")")) {
+ if (readIf(OPEN_PAREN)) {
+ if (!readIf(CLOSE_PAREN)) {
do {
- DefineCommand c = parseAlterTableAddConstraintIf(tableName,
- schema);
- if (c != null) {
- command.addConstraintCommand(c);
- } else {
- String columnName = readColumnIdentifier();
- Column column = parseColumnForTable(columnName, true);
- if (column.isAutoIncrement() && column.isPrimaryKey()) {
- column.setPrimaryKey(false);
- IndexColumn[] cols = { new IndexColumn() };
- cols[0].columnName = column.getName();
- AlterTableAddConstraint pk = new AlterTableAddConstraint(
- session, schema, false);
- pk.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY);
- pk.setTableName(tableName);
- pk.setIndexColumns(cols);
- command.addConstraintCommand(pk);
- }
- command.addColumn(column);
- String constraintName = null;
- if (readIf("CONSTRAINT")) {
- constraintName = readColumnIdentifier();
- }
- if (readIf("PRIMARY")) {
- read("KEY");
- boolean hash = readIf("HASH");
- IndexColumn[] cols = { new IndexColumn() };
- cols[0].columnName = column.getName();
- AlterTableAddConstraint pk = new AlterTableAddConstraint(
- session, schema, false);
- pk.setPrimaryKeyHash(hash);
- pk.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY);
- pk.setTableName(tableName);
- pk.setIndexColumns(cols);
- command.addConstraintCommand(pk);
- if (readIf("AUTO_INCREMENT")) {
- parseAutoIncrement(column);
- }
- } else if (readIf("UNIQUE")) {
- AlterTableAddConstraint unique = new AlterTableAddConstraint(
- session, schema, false);
- unique.setConstraintName(constraintName);
- unique.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE);
- IndexColumn[] cols = { new IndexColumn() };
- cols[0].columnName = columnName;
- unique.setIndexColumns(cols);
- unique.setTableName(tableName);
- command.addConstraintCommand(unique);
- }
- if (readIf("NOT")) {
- read("NULL");
- column.setNullable(false);
- } else {
- readIf("NULL");
- }
- if (readIf("CHECK")) {
- Expression expr = readExpression();
- column.addCheckConstraint(session, expr);
- }
- if (readIf("REFERENCES")) {
- AlterTableAddConstraint ref = new AlterTableAddConstraint(
- session, schema, false);
- ref.setConstraintName(constraintName);
- ref.setType(CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL);
- IndexColumn[] cols = { new IndexColumn() };
- cols[0].columnName = columnName;
- ref.setIndexColumns(cols);
- ref.setTableName(tableName);
- parseReferences(ref, schema, tableName);
- command.addConstraintCommand(ref);
- }
- }
+ parseTableColumnDefinition(command, schema, tableName, true);
} while (readIfMore());
}
}
- // Allows "COMMENT='comment'" in DDL statements (MySQL syntax)
- if (readIf("COMMENT")) {
- if (readIf("=")) {
- // read the complete string comment, but nothing with it for now
- readString();
- }
+ if (database.getMode().getEnum() == ModeEnum.MySQL) {
+ parseCreateTableMySQLTableOptions(command);
}
if (readIf("ENGINE")) {
- if (readIf("=")) {
- // map MySQL engine types onto H2 behavior
- String tableEngine = readUniqueIdentifier();
- if ("InnoDb".equalsIgnoreCase(tableEngine)) {
- // ok
- } else if (!"MyISAM".equalsIgnoreCase(tableEngine)) {
- throw DbException.getUnsupportedException(tableEngine);
- }
- } else {
- command.setTableEngine(readUniqueIdentifier());
- if (readIf("WITH")) {
- ArrayList<String> tableEngineParams = New.arrayList();
- do {
- tableEngineParams.add(readUniqueIdentifier());
- } while (readIf(","));
- command.setTableEngineParams(tableEngineParams);
- }
- }
- } else if (database.getSettings().defaultTableEngine != null) {
- command.setTableEngine(database.getSettings().defaultTableEngine);
- }
- // MySQL compatibility
- if (readIf("AUTO_INCREMENT")) {
- read("=");
- if (currentTokenType != VALUE ||
- currentValue.getType() != Value.INT) {
- throw DbException.getSyntaxError(sqlCommand, parseIndex,
- "integer");
- }
- read();
+ command.setTableEngine(readIdentifier());
}
- readIf("DEFAULT");
- if (readIf("CHARSET")) {
- read("=");
- read("UTF8");
+ if (readIf(WITH)) {
+ command.setTableEngineParams(readTableEngineParams());
}
if (temp) {
- if (readIf("ON")) {
+ if (readIf(ON)) {
read("COMMIT");
if (readIf("DROP")) {
command.setOnCommitDrop();
@@ -5890,7 +9317,7 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp,
read("ROWS");
command.setOnCommitTruncate();
}
- } else if (readIf("NOT")) {
+ } else if (readIf(NOT)) {
if (readIf("PERSISTENT")) {
command.setPersistData(false);
} else {
@@ -5900,25 +9327,268 @@ private CreateTable parseCreateTable(boolean temp, boolean globalTemp,
if (readIf("TRANSACTIONAL")) {
command.setTransactional(true);
}
- } else if (!persistIndexes && readIf("NOT")) {
+ } else if (!persistIndexes && readIf(NOT)) {
read("PERSISTENT");
command.setPersistData(false);
}
if (readIf("HIDDEN")) {
command.setHidden(true);
}
- if (readIf("AS")) {
- if (readIf("SORTED")) {
- command.setSortedInsertMode(true);
+ if (readIf(AS)) {
+ readIf("SORTED");
+ command.setQuery(parseQuery());
+ if (readIf(WITH)) {
+ command.setWithNoData(readIf("NO"));
+ read("DATA");
+ }
+ }
+ return command;
+ }
+
+ private void parseTableColumnDefinition(CommandWithColumns command, Schema schema, String tableName,
+ boolean forCreateTable) {
+ DefineCommand c = parseTableConstraintIf(tableName, schema, false);
+ if (c != null) {
+ command.addConstraintCommand(c);
+ return;
+ }
+ String columnName = readIdentifier();
+ if (forCreateTable && (currentTokenType == COMMA || currentTokenType == CLOSE_PAREN)) {
+ command.addColumn(new Column(columnName, TypeInfo.TYPE_UNKNOWN));
+ return;
+ }
+ Column column = parseColumnForTable(columnName, true);
+ if (column.hasIdentityOptions() && column.isPrimaryKey()) {
+ command.addConstraintCommand(newPrimaryKeyConstraintCommand(session, schema, tableName, column));
+ }
+ command.addColumn(column);
+ readColumnConstraints(command, schema, tableName, column);
+ }
+
+ /**
+ * Create a new alter table command.
+ *
+ * @param session the session
+ * @param schema the schema
+ * @param tableName the table
+ * @param column the column
+ * @return the command
+ */
+ public static AlterTableAddConstraint newPrimaryKeyConstraintCommand(SessionLocal session, Schema schema,
+ String tableName, Column column) {
+ column.setPrimaryKey(false);
+ AlterTableAddConstraint pk = new AlterTableAddConstraint(session, schema,
+ CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, false);
+ pk.setTableName(tableName);
+ pk.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) });
+ return pk;
+ }
+
+ private void readColumnConstraints(CommandWithColumns command, Schema schema, String tableName, Column column) {
+ String comment = column.getComment();
+ boolean hasPrimaryKey = false, hasNotNull = false;
+ NullConstraintType nullType;
+ Mode mode = database.getMode();
+ for (;;) {
+ String constraintName;
+ if (readIf(CONSTRAINT)) {
+ constraintName = readIdentifier();
+ } else if (comment == null && (comment = readCommentIf()) != null) {
+ // Compatibility: a COMMENT may appear after some constraints
+ column.setComment(comment);
+ continue;
+ } else {
+ constraintName = null;
+ }
+ if (!hasPrimaryKey && readIf(PRIMARY)) {
+ read(KEY);
+ hasPrimaryKey = true;
+ boolean hash = readIf("HASH");
+ AlterTableAddConstraint pk = new AlterTableAddConstraint(session, schema,
+ CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY, false);
+ pk.setConstraintName(constraintName);
+ pk.setPrimaryKeyHash(hash);
+ pk.setTableName(tableName);
+ pk.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) });
+ command.addConstraintCommand(pk);
+ } else if (readIf(UNIQUE)) {
+ AlterTableAddConstraint unique = new AlterTableAddConstraint(session, schema,
+ CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE, false);
+ unique.setConstraintName(constraintName);
+ unique.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) });
+ unique.setTableName(tableName);
+ command.addConstraintCommand(unique);
+ } else if (!hasNotNull
+ && (nullType = parseNotNullConstraint()) != NullConstraintType.NO_NULL_CONSTRAINT_FOUND) {
+ hasNotNull = true;
+ if (nullType == NullConstraintType.NULL_IS_NOT_ALLOWED) {
+ column.setNullable(false);
+ } else if (nullType == NullConstraintType.NULL_IS_ALLOWED) {
+ if (column.isIdentity()) {
+ throw DbException.get(ErrorCode.COLUMN_MUST_NOT_BE_NULLABLE_1, column.getName());
+ }
+ column.setNullable(true);
+ }
+ } else if (readIf(CHECK)) {
+ AlterTableAddConstraint check = new AlterTableAddConstraint(session, schema,
+ CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK, false);
+ check.setConstraintName(constraintName);
+ check.setTableName(tableName);
+ check.setCheckExpression(readExpression());
+ command.addConstraintCommand(check);
+ } else if (readIf("REFERENCES")) {
+ AlterTableAddConstraint ref = new AlterTableAddConstraint(session, schema,
+ CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL, false);
+ ref.setConstraintName(constraintName);
+ ref.setIndexColumns(new IndexColumn[] { new IndexColumn(column.getName()) });
+ ref.setTableName(tableName);
+ parseReferences(ref, schema, tableName);
+ command.addConstraintCommand(ref);
+ } else if (constraintName == null) {
+ if (column.getIdentityOptions() != null || !parseCompatibilityIdentity(column, mode)) {
+ return;
+ }
+ } else {
+ throw getSyntaxError();
+ }
+ }
+ }
+
+ private boolean parseCompatibilityIdentity(Column column, Mode mode) {
+ if (mode.autoIncrementClause && readIf("AUTO_INCREMENT")) {
+ parseCompatibilityIdentityOptions(column);
+ return true;
+ }
+ if (mode.identityClause && readIf("IDENTITY")) {
+ parseCompatibilityIdentityOptions(column);
+ return true;
+ }
+ return false;
+ }
+
+ private void parseCreateTableMySQLTableOptions(CreateTable command) {
+ boolean requireNext = false;
+ for (;;) {
+ if (readIf("AUTO_INCREMENT")) {
+ readIf(EQUAL);
+ Expression value = readExpression();
+ set: {
+ AlterTableAddConstraint primaryKey = command.getPrimaryKey();
+ if (primaryKey != null) {
+ for (IndexColumn ic : primaryKey.getIndexColumns()) {
+ String columnName = ic.columnName;
+ for (Column column : command.getColumns()) {
+ if (database.equalsIdentifiers(column.getName(), columnName)) {
+ SequenceOptions options = column.getIdentityOptions();
+ if (options != null) {
+ options.setStartValue(value);
+ break set;
+ }
+ }
+ }
+ }
+ }
+ throw DbException.get(ErrorCode.COLUMN_NOT_FOUND_1, "AUTO_INCREMENT PRIMARY KEY");
+ }
+ } else if (readIf(DEFAULT)) {
+ if (readIf("CHARACTER")) {
+ read(SET);
+ } else {
+ readIf("CHARSET");
+ readIf("COLLATE");
+ }
+ readMySQLCharset();
+ } else if (readIf("CHARACTER")) {
+ read(SET);
+ readMySQLCharset();
+ } else if (readIf("COLLATE")) {
+ readMySQLCharset();
+ } else if (readIf("CHARSET")) {
+ readMySQLCharset();
+ } else if (readIf("COMMENT")) {
+ readIf(EQUAL);
+ command.setComment(readString());
+ } else if (readIf("ENGINE")) {
+ readIf(EQUAL);
+ readIdentifier();
+ } else if (readIf("ROW_FORMAT")) {
+ readIf(EQUAL);
+ readIdentifier();
+ } else if (requireNext) {
+ throw getSyntaxError();
+ } else {
+ break;
}
- command.setQuery(parseSelect());
+ requireNext = readIf(COMMA);
+ }
+ }
+
+ private void readMySQLCharset() {
+ readIf(EQUAL);
+ readIdentifier();
+ }
+
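parseCreateTableMySQLTableOptions and readMySQLCharset above accept (and for the most part deliberately ignore) the usual MySQL table-option tail, with options separated by whitespace or optional commas. For illustration, a statement that should now parse in MySQL compatibility mode (names and values are arbitrary):

    // Illustrative only; assumes a session in MySQL compatibility mode.
    stmt.execute("CREATE TABLE T(ID INT AUTO_INCREMENT PRIMARY KEY)"
            + " ENGINE=InnoDB AUTO_INCREMENT=100 DEFAULT CHARSET=utf8"
            + " COMMENT='demo' ROW_FORMAT=DYNAMIC");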
+ /**
+ * Enumeration describing null constraints
+ */
+ private enum NullConstraintType {
+ NULL_IS_ALLOWED, NULL_IS_NOT_ALLOWED, NO_NULL_CONSTRAINT_FOUND
+ }
+
+ private NullConstraintType parseNotNullConstraint(NullConstraintType nullConstraint) {
+ if (nullConstraint == NullConstraintType.NO_NULL_CONSTRAINT_FOUND) {
+ nullConstraint = parseNotNullConstraint();
+ }
+ return nullConstraint;
+ }
+
+ private NullConstraintType parseNotNullConstraint() {
+ NullConstraintType nullConstraint;
+ if (readIf(NOT)) {
+ read(NULL);
+ nullConstraint = NullConstraintType.NULL_IS_NOT_ALLOWED;
+ } else if (readIf(NULL)) {
+ nullConstraint = NullConstraintType.NULL_IS_ALLOWED;
+ } else {
+ return NullConstraintType.NO_NULL_CONSTRAINT_FOUND;
+ }
+ if (database.getMode().getEnum() == ModeEnum.Oracle) {
+ nullConstraint = parseNotNullCompatibility(nullConstraint);
}
- // for MySQL compatibility
- if (readIf("ROW_FORMAT")) {
- if (readIf("=")) {
- readColumnIdentifier();
+ return nullConstraint;
+ }
+
+ private NullConstraintType parseNotNullCompatibility(NullConstraintType nullConstraint) {
+ if (readIf("ENABLE")) {
+ if (!readIf("VALIDATE") && readIf("NOVALIDATE")) {
+ // Turn off constraint, allow NULLs
+ nullConstraint = NullConstraintType.NULL_IS_ALLOWED;
+ }
+ } else if (readIf("DISABLE")) {
+ // Turn off constraint, allow NULLs
+ nullConstraint = NullConstraintType.NULL_IS_ALLOWED;
+ if (!readIf("VALIDATE")) {
+ readIf("NOVALIDATE");
}
}
+ return nullConstraint;
+ }
+
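parseNotNullConstraint and parseNotNullCompatibility reduce the clause variants to the three enum values above; in Oracle mode, DISABLE (or ENABLE NOVALIDATE) downgrades the constraint so that NULLs remain allowed. A self-contained sketch of the same decision table, operating on a plain token list instead of H2's tokenizer and reusing the enum values above (hypothetical, not H2 API):

    // Hypothetical sketch of the decision table, not H2 API.
    static NullConstraintType parseNullClause(java.util.List<String> t, boolean oracleMode) {
        NullConstraintType result;
        if (t.isEmpty()) {
            return NullConstraintType.NO_NULL_CONSTRAINT_FOUND;
        } else if (t.get(0).equals("NOT")) {  // NOT NULL
            result = NullConstraintType.NULL_IS_NOT_ALLOWED;
        } else if (t.get(0).equals("NULL")) { // NULL
            result = NullConstraintType.NULL_IS_ALLOWED;
        } else {
            return NullConstraintType.NO_NULL_CONSTRAINT_FOUND;
        }
        // Oracle mode: DISABLE or ENABLE NOVALIDATE means "do not enforce"
        if (oracleMode && (t.contains("DISABLE")
                || (t.contains("ENABLE") && t.contains("NOVALIDATE")))) {
            result = NullConstraintType.NULL_IS_ALLOWED;
        }
        return result;
    }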
+ private CreateSynonym parseCreateSynonym(boolean orReplace) {
+ boolean ifNotExists = readIfNotExists();
+ String name = readIdentifierWithSchema();
+ Schema synonymSchema = getSchema();
+ read(FOR);
+ String tableName = readIdentifierWithSchema();
+
+ Schema targetSchema = getSchema();
+ CreateSynonym command = new CreateSynonym(session, synonymSchema);
+ command.setName(name);
+ command.setSynonymFor(tableName);
+ command.setSynonymForSchema(targetSchema);
+ command.setComment(readCommentIf());
+ command.setIfNotExists(ifNotExists);
+ command.setOrReplace(orReplace);
return command;
}
@@ -5947,34 +9617,31 @@ private static int getCompareType(int tokenType) {
* Add double quotes around an identifier if required.
*
* @param s the identifier
+ * @param sqlFlags formatting flags
* @return the quoted identifier
*/
- public static String quoteIdentifier(String s) {
- if (s == null || s.length() == 0) {
+ public static String quoteIdentifier(String s, int sqlFlags) {
+ if (s == null) {
return "\"\"";
}
- char c = s.charAt(0);
- // lowercase a-z is quoted as well
- if ((!Character.isLetter(c) && c != '_') || Character.isLowerCase(c)) {
- return StringUtils.quoteIdentifier(s);
- }
- for (int i = 1, length = s.length(); i < length; i++) {
- c = s.charAt(i);
- if ((!Character.isLetterOrDigit(c) && c != '_') ||
- Character.isLowerCase(c)) {
- return StringUtils.quoteIdentifier(s);
- }
- }
- if (isKeyword(s, true)) {
- return StringUtils.quoteIdentifier(s);
+ if ((sqlFlags & HasSQL.QUOTE_ONLY_WHEN_REQUIRED) != 0 && ParserUtil.isSimpleIdentifier(s, false, false)) {
+ return s;
}
- return s;
+ return StringUtils.quoteIdentifier(s);
+ }
+
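The rewritten quoteIdentifier delegates the character-level check to ParserUtil.isSimpleIdentifier and skips quoting only when the QUOTE_ONLY_WHEN_REQUIRED flag is set. A standalone approximation of the rule (simplified: the real check also rejects keywords):

    // Simplified sketch, not H2's exact rule (keywords are not checked here).
    static String quote(String s, boolean onlyWhenRequired) {
        if (s == null) {
            return "\"\"";
        }
        if (onlyWhenRequired && s.matches("[A-Z_][A-Z0-9_]*")) {
            return s; // already a simple identifier, leave it unquoted
        }
        return '"' + s.replace("\"", "\"\"") + '"';
    }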
+ public void setLiteralsChecked(boolean literalsChecked) {
+ this.literalsChecked = literalsChecked;
}
public void setRightsChecked(boolean rightsChecked) {
this.rightsChecked = rightsChecked;
}
+ public void setSuppliedParameters(ArrayList<Parameter> suppliedParameters) {
+ this.suppliedParameters = suppliedParameters;
+ }
+
/**
* Parse a SQL code snippet that represents an expression.
*
@@ -5982,12 +9649,30 @@ public void setRightsChecked(boolean rightsChecked) {
* @return the expression object
*/
public Expression parseExpression(String sql) {
- parameters = New.arrayList();
- initialize(sql);
+ parameters = Utils.newSmallArrayList();
+ initialize(sql, null, false);
read();
return readExpression();
}
+ /**
+ * Parse a SQL code snippet that represents an expression for a domain constraint.
+ *
+ * @param sql the code snippet
+ * @return the expression object
+ */
+ public Expression parseDomainConstraintExpression(String sql) {
+ parameters = Utils.newSmallArrayList();
+ initialize(sql, null, false);
+ read();
+ try {
+ parseDomainConstraint = true;
+ return readExpression();
+ } finally {
+ parseDomainConstraint = false;
+ }
+ }
+
/**
* Parse a SQL code snippet that represents a table name.
*
@@ -5995,9 +9680,70 @@ public Expression parseExpression(String sql) {
* @return the table object
*/
public Table parseTableName(String sql) {
- parameters = New.arrayList();
- initialize(sql);
+ parameters = Utils.newSmallArrayList();
+ initialize(sql, null, false);
read();
return readTableOrView();
}
+
+ /**
+ * Parses a list of column names or numbers in parentheses.
+ *
+ * @param sql the source SQL
+ * @param offset the initial offset
+ * @return the array of column names ({@code String[]}) or numbers
+ * ({@code int[]})
+ * @throws DbException on syntax error
+ */
+ public Object parseColumnList(String sql, int offset) {
+ initialize(sql, null, true);
+ for (int i = 0, l = tokens.size(); i < l; i++) {
+ if (tokens.get(i).start() >= offset) {
+ setTokenIndex(i);
+ break;
+ }
+ }
+ read(OPEN_PAREN);
+ if (readIf(CLOSE_PAREN)) {
+ return Utils.EMPTY_INT_ARRAY;
+ }
+ if (isIdentifier()) {
+ ArrayList<String> list = Utils.newSmallArrayList();
+ do {
+ if (!isIdentifier()) {
+ throw getSyntaxError();
+ }
+ list.add(currentToken);
+ read();
+ } while (readIfMore());
+ return list.toArray(new String[0]);
+ } else if (currentTokenType == LITERAL) {
+ ArrayList<Integer> list = Utils.newSmallArrayList();
+ do {
+ list.add(readInt());
+ } while (readIfMore());
+ int count = list.size();
+ int[] array = new int[count];
+ for (int i = 0; i < count; i++) {
+ array[i] = list.get(i);
+ }
+ return array;
+ } else {
+ throw getSyntaxError();
+ }
+ }
+
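The return type of parseColumnList is intentionally loose: a String[] when identifiers follow the parenthesis, an int[] when numbers do, so the caller has to check the runtime type. Illustrative use, where parser is an already-constructed Parser (construction elided):

    // Illustrative; the offset points at the opening parenthesis.
    String sql = "REFERENCES T(A, B)";
    Object cols = parser.parseColumnList(sql, sql.indexOf('('));
    if (cols instanceof String[]) {
        String[] names = (String[]) cols; // {"A", "B"}
    } else {
        int[] ordinals = (int[]) cols;    // for lists like (1, 2)
    }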
+ /**
+ * Returns the last parse index.
+ *
+ * @return the last parse index
+ */
+ public int getLastParseIndex() {
+ return token.start();
+ }
+
+ @Override
+ public String toString() {
+ return StringUtils.addAsterisk(sqlCommand, token.start());
+ }
}
diff --git a/h2/src/main/org/h2/command/Prepared.java b/h2/src/main/org/h2/command/Prepared.java
index 89b0693a7f..f9a88835d9 100644
--- a/h2/src/main/org/h2/command/Prepared.java
+++ b/h2/src/main/org/h2/command/Prepared.java
@@ -1,22 +1,25 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command;
import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
import org.h2.api.DatabaseEventListener;
import org.h2.api.ErrorCode;
import org.h2.engine.Database;
-import org.h2.engine.Session;
+import org.h2.engine.DbObject;
+import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.Parameter;
import org.h2.message.DbException;
import org.h2.message.Trace;
import org.h2.result.ResultInterface;
-import org.h2.util.StatementBuilder;
-import org.h2.value.Value;
+import org.h2.table.TableView;
+import org.h2.util.HasSQL;
/**
* A prepared statement.
@@ -26,13 +29,18 @@ public abstract class Prepared {
/**
* The session.
*/
- protected Session session;
+ protected SessionLocal session;
/**
* The SQL string.
*/
protected String sqlStatement;
+ /**
+ * The SQL tokens.
+ */
+ protected ArrayList<Token> sqlTokens;
+
/**
* Whether to create a new object (for indexes).
*/
@@ -52,16 +60,26 @@ public abstract class Prepared {
private long modificationMetaId;
private Command command;
- private int objectId;
- private int currentRowNumber;
+ /**
+ * Used to preserve object identities on database startup. {@code 0} if
+ * the object is not stored, {@code <0} if the object is stored and its ID
+ * has already been read, {@code >0} if the object is stored and its ID
+ * has not been read yet.
+ */
+ private int persistedObjectId;
+ private long currentRowNumber;
private int rowScanCount;
+ /**
+ * Common table expressions (CTEs) in queries require us to create temporary views,
+ * which need to be cleaned up once a command is done executing.
+ */
+ private List<TableView> cteCleanups;
/**
* Create a new object.
*
* @param session the session
*/
- public Prepared(Session session) {
+ public Prepared(SessionLocal session) {
this.session = session;
modificationMetaId = session.getDatabase().getModificationMetaId();
}
@@ -158,9 +176,13 @@ public ArrayList getParameters() {
* @throws DbException if any parameter has not been set
*/
protected void checkParameters() {
+ if (persistedObjectId < 0) {
+ // restore original persistedObjectId on Command re-run
+ // i.e. due to concurrent update
+ persistedObjectId = ~persistedObjectId;
+ }
if (parameters != null) {
- for (int i = 0, size = parameters.size(); i < size; i++) {
- Parameter param = parameters.get(i);
+ for (Parameter param : parameters) {
param.checkSet();
}
}
@@ -197,7 +219,7 @@ public void prepare() {
* @return the update count
* @throws DbException if it is a query
*/
- public int update() {
+ public long update() {
throw DbException.get(ErrorCode.METHOD_NOT_ALLOWED_FOR_QUERY);
}
@@ -208,7 +230,8 @@ public int update() {
* @return the result set
* @throws DbException if it is not a query
*/
- public ResultInterface query(int maxrows) {
+ @SuppressWarnings("unused")
+ public ResultInterface query(long maxrows) {
throw DbException.get(ErrorCode.METHOD_ONLY_ALLOWED_FOR_QUERY);
}
@@ -216,9 +239,11 @@ public ResultInterface query(int maxrows) {
* Set the SQL statement.
*
* @param sql the SQL statement
+ * @param sqlTokens the SQL tokens
*/
- public void setSQL(String sql) {
+ public final void setSQL(String sql, ArrayList<Token> sqlTokens) {
this.sqlStatement = sql;
+ this.sqlTokens = sqlTokens;
}
/**
@@ -226,43 +251,56 @@ public void setSQL(String sql) {
*
* @return the SQL statement
*/
- public String getSQL() {
+ public final String getSQL() {
return sqlStatement;
}
+ /**
+ * Get the SQL tokens.
+ *
+ * @return the SQL tokens
+ */
+ public final ArrayList<Token> getSQLTokens() {
+ return sqlTokens;
+ }
+
/**
* Get the object id to use for the database object that is created in this
- * statement. This id is only set when the object is persistent.
+ * statement. This id is only set when the object is already persisted.
* If not set, this method returns 0.
*
* @return the object id or 0 if not set
*/
- protected int getCurrentObjectId() {
- return objectId;
+ public int getPersistedObjectId() {
+ int id = persistedObjectId;
+ return id >= 0 ? id : 0;
}
/**
* Get the current object id, or get a new id from the database. The object
- * id is used when creating new database object (CREATE statement).
+ * id is used when creating a new database object (CREATE statement). This
+ * method may be called only once.
*
* @return the object id
*/
protected int getObjectId() {
- int id = objectId;
+ int id = persistedObjectId;
if (id == 0) {
id = session.getDatabase().allocateObjectId();
- } else {
- objectId = 0;
+ } else if (id < 0) {
+ throw DbException.getInternalError("Prepared.getObjectId() was called before");
}
+ persistedObjectId = ~persistedObjectId; // while negative, it can be restored later
return id;
}
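The one's-complement flip above is what makes the id restorable: for any non-negative id, ~id is negative, and applying ~ again (as checkParameters does when a command is re-run) yields the original value. In isolation:

    int id = 42;
    int consumed = ~id;       // -43: marked as "already read"
    int restored = ~consumed; // 42: recovered on command re-run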
/**
* Get the SQL statement with the execution plan.
*
+ * @param sqlFlags formatting flags
* @return the execution plan
*/
- public String getPlanSQL() {
+ public String getPlanSQL(int sqlFlags) {
return null;
}
@@ -280,12 +318,12 @@ public void checkCanceled() {
}
/**
- * Set the object id for this statement.
+ * Set the persisted object id for this statement.
*
* @param i the object id
*/
- public void setObjectId(int i) {
- this.objectId = i;
+ public void setPersistedObjectId(int i) {
+ this.persistedObjectId = i;
this.create = false;
}
@@ -294,7 +332,7 @@ public void setObjectId(int i) {
*
* @param currentSession the new session
*/
- public void setSession(Session currentSession) {
+ public void setSession(SessionLocal currentSession) {
this.session = currentSession;
}
@@ -302,19 +340,20 @@ public void setSession(Session currentSession) {
* Print information about the statement executed if info trace level is
* enabled.
*
- * @param startTime when the statement was started
+ * @param startTimeNanos when the statement was started
* @param rowCount the query or update row count
*/
- void trace(long startTime, int rowCount) {
- if (session.getTrace().isInfoEnabled() && startTime > 0) {
- long deltaTime = System.currentTimeMillis() - startTime;
+ void trace(long startTimeNanos, long rowCount) {
+ if (session.getTrace().isInfoEnabled() && startTimeNanos > 0) {
+ long deltaTimeNanos = System.nanoTime() - startTimeNanos;
String params = Trace.formatParams(parameters);
- session.getTrace().infoSQL(sqlStatement, params, rowCount, deltaTime);
+ session.getTrace().infoSQL(sqlStatement, params, rowCount, deltaTimeNanos / 1_000_000L);
}
- if (session.getDatabase().getQueryStatistics()) {
- long deltaTime = System.currentTimeMillis() - startTime;
- session.getDatabase().getQueryStatisticsData().
- update(toString(), deltaTime, rowCount);
+ // startTimeNanos can be zero for the command that actually turns on
+ // statistics
+ if (session.getDatabase().getQueryStatistics() && startTimeNanos != 0) {
+ long deltaTimeNanos = System.nanoTime() - startTimeNanos;
+ session.getDatabase().getQueryStatisticsData().update(toString(), deltaTimeNanos, rowCount);
}
}
@@ -333,7 +372,7 @@ public void setPrepareAlways(boolean prepareAlways) {
*
* @param rowNumber the row number
*/
- protected void setCurrentRowNumber(int rowNumber) {
+ public void setCurrentRowNumber(long rowNumber) {
if ((++rowScanCount & 127) == 0) {
checkCanceled();
}
@@ -346,7 +385,7 @@ protected void setCurrentRowNumber(int rowNumber) {
*
* @return the row number
*/
- public int getCurrentRowNumber() {
+ public long getCurrentRowNumber() {
return currentRowNumber;
}
@@ -355,9 +394,8 @@ public int getCurrentRowNumber() {
*/
private void setProgress() {
if ((currentRowNumber & 127) == 0) {
- session.getDatabase().setProgress(
- DatabaseEventListener.STATE_STATEMENT_PROGRESS,
- sqlStatement, currentRowNumber, 0);
+ session.getDatabase().setProgress(DatabaseEventListener.STATE_STATEMENT_PROGRESS, sqlStatement,
+ currentRowNumber, 0L);
}
}
@@ -371,38 +409,14 @@ public String toString() {
return sqlStatement;
}
- /**
- * Get the SQL snippet of the value list.
- *
- * @param values the value list
- * @return the SQL snippet
- */
- protected static String getSQL(Value[] values) {
- StatementBuilder buff = new StatementBuilder();
- for (Value v : values) {
- buff.appendExceptFirst(", ");
- if (v != null) {
- buff.append(v.getSQL());
- }
- }
- return buff.toString();
- }
-
/**
* Get the SQL snippet of the expression list.
*
* @param list the expression list
* @return the SQL snippet
*/
- protected static String getSQL(Expression[] list) {
- StatementBuilder buff = new StatementBuilder();
- for (Expression e : list) {
- buff.appendExceptFirst(", ");
- if (e != null) {
- buff.append(e.getSQL());
- }
- }
- return buff.toString();
+ public static String getSimpleSQL(Expression[] list) {
+ return Expression.writeExpressions(new StringBuilder(), list, HasSQL.TRACE_SQL_FLAGS).toString();
}
/**
@@ -413,7 +427,7 @@ protected static String getSQL(Expression[] list) {
* @param values the values of the row
* @return the exception
*/
- protected DbException setRow(DbException e, int rowId, String values) {
+ protected DbException setRow(DbException e, long rowId, String values) {
StringBuilder buff = new StringBuilder();
if (sqlStatement != null) {
buff.append(sqlStatement);
@@ -430,4 +444,30 @@ public boolean isCacheable() {
return false;
}
+ /**
+ * @return the temporary views created for CTEs.
+ */
+ public List<TableView> getCteCleanups() {
+ return cteCleanups;
+ }
+
+ /**
+ * Set the temporary views created for CTEs.
+ *
+ * @param cteCleanups the temporary views
+ */
+ public void setCteCleanups(List<TableView> cteCleanups) {
+ this.cteCleanups = cteCleanups;
+ }
+
+ public final SessionLocal getSession() {
+ return session;
+ }
+
+ /**
+ * Find and collect all DbObjects, this Prepared depends on.
+ *
+ * @param dependencies collection of dependencies to populate
+ */
+ public void collectDependencies(HashSet<DbObject> dependencies) {}
}
diff --git a/h2/src/main/org/h2/command/Token.java b/h2/src/main/org/h2/command/Token.java
new file mode 100644
index 0000000000..888a7e776a
--- /dev/null
+++ b/h2/src/main/org/h2/command/Token.java
@@ -0,0 +1,757 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command;
+
+import static org.h2.util.ParserUtil.IDENTIFIER;
+import static org.h2.util.ParserUtil.LAST_KEYWORD;
+
+import org.h2.engine.CastDataProvider;
+import org.h2.message.DbException;
+import org.h2.util.StringUtils;
+import org.h2.value.Value;
+import org.h2.value.ValueBigint;
+import org.h2.value.ValueInteger;
+import org.h2.value.ValueVarbinary;
+import org.h2.value.ValueVarchar;
+
+/**
+ * Token.
+ */
+public abstract class Token implements Cloneable {
+
+ /**
+ * Token with parameter.
+ */
+ static final int PARAMETER = LAST_KEYWORD + 1;
+
+ /**
+ * End of input.
+ */
+ static final int END_OF_INPUT = PARAMETER + 1;
+
+ /**
+ * Token with literal.
+ */
+ static final int LITERAL = END_OF_INPUT + 1;
+
+ /**
+ * The token "=".
+ */
+ static final int EQUAL = LITERAL + 1;
+
+ /**
+ * The token ">=".
+ */
+ static final int BIGGER_EQUAL = EQUAL + 1;
+
+ /**
+ * The token ">".
+ */
+ static final int BIGGER = BIGGER_EQUAL + 1;
+
+ /**
+ * The token "<".
+ */
+ static final int SMALLER = BIGGER + 1;
+
+ /**
+ * The token "<=".
+ */
+ static final int SMALLER_EQUAL = SMALLER + 1;
+
+ /**
+ * The token "<>" or "!=".
+ */
+ static final int NOT_EQUAL = SMALLER_EQUAL + 1;
+
+ /**
+ * The token "@".
+ */
+ static final int AT = NOT_EQUAL + 1;
+
+ /**
+ * The token "-".
+ */
+ static final int MINUS_SIGN = AT + 1;
+
+ /**
+ * The token "+".
+ */
+ static final int PLUS_SIGN = MINUS_SIGN + 1;
+
+ /**
+ * The token "||".
+ */
+ static final int CONCATENATION = PLUS_SIGN + 1;
+
+ /**
+ * The token "(".
+ */
+ static final int OPEN_PAREN = CONCATENATION + 1;
+
+ /**
+ * The token ")".
+ */
+ static final int CLOSE_PAREN = OPEN_PAREN + 1;
+
+ /**
+ * The token "&&".
+ */
+ static final int SPATIAL_INTERSECTS = CLOSE_PAREN + 1;
+
+ /**
+ * The token "*".
+ */
+ static final int ASTERISK = SPATIAL_INTERSECTS + 1;
+
+ /**
+ * The token ",".
+ */
+ static final int COMMA = ASTERISK + 1;
+
+ /**
+ * The token ".".
+ */
+ static final int DOT = COMMA + 1;
+
+ /**
+ * The token "{".
+ */
+ static final int OPEN_BRACE = DOT + 1;
+
+ /**
+ * The token "}".
+ */
+ static final int CLOSE_BRACE = OPEN_BRACE + 1;
+
+ /**
+ * The token "/".
+ */
+ static final int SLASH = CLOSE_BRACE + 1;
+
+ /**
+ * The token "%".
+ */
+ static final int PERCENT = SLASH + 1;
+
+ /**
+ * The token ";".
+ */
+ static final int SEMICOLON = PERCENT + 1;
+
+ /**
+ * The token ":".
+ */
+ static final int COLON = SEMICOLON + 1;
+
+ /**
+ * The token "[".
+ */
+ static final int OPEN_BRACKET = COLON + 1;
+
+ /**
+ * The token "]".
+ */
+ static final int CLOSE_BRACKET = OPEN_BRACKET + 1;
+
+ /**
+ * The token "~".
+ */
+ static final int TILDE = CLOSE_BRACKET + 1;
+
+ /**
+ * The token "::".
+ */
+ static final int COLON_COLON = TILDE + 1;
+
+ /**
+ * The token ":=".
+ */
+ static final int COLON_EQ = COLON_COLON + 1;
+
+ /**
+ * The token "!~".
+ */
+ static final int NOT_TILDE = COLON_EQ + 1;
+
+ static final String[] TOKENS = {
+ // Unused
+ null,
+ // KEYWORD
+ null,
+ // IDENTIFIER
+ null,
+ // ALL
+ "ALL",
+ // AND
+ "AND",
+ // ANY
+ "ANY",
+ // ARRAY
+ "ARRAY",
+ // AS
+ "AS",
+ // ASYMMETRIC
+ "ASYMMETRIC",
+ // AUTHORIZATION
+ "AUTHORIZATION",
+ // BETWEEN
+ "BETWEEN",
+ // CASE
+ "CASE",
+ // CAST
+ "CAST",
+ // CHECK
+ "CHECK",
+ // CONSTRAINT
+ "CONSTRAINT",
+ // CROSS
+ "CROSS",
+ // CURRENT_CATALOG
+ "CURRENT_CATALOG",
+ // CURRENT_DATE
+ "CURRENT_DATE",
+ // CURRENT_PATH
+ "CURRENT_PATH",
+ // CURRENT_ROLE
+ "CURRENT_ROLE",
+ // CURRENT_SCHEMA
+ "CURRENT_SCHEMA",
+ // CURRENT_TIME
+ "CURRENT_TIME",
+ // CURRENT_TIMESTAMP
+ "CURRENT_TIMESTAMP",
+ // CURRENT_USER
+ "CURRENT_USER",
+ // DAY
+ "DAY",
+ // DEFAULT
+ "DEFAULT",
+ // DISTINCT
+ "DISTINCT",
+ // ELSE
+ "ELSE",
+ // END
+ "END",
+ // EXCEPT
+ "EXCEPT",
+ // EXISTS
+ "EXISTS",
+ // FALSE
+ "FALSE",
+ // FETCH
+ "FETCH",
+ // FOR
+ "FOR",
+ // FOREIGN
+ "FOREIGN",
+ // FROM
+ "FROM",
+ // FULL
+ "FULL",
+ // GROUP
+ "GROUP",
+ // HAVING
+ "HAVING",
+ // HOUR
+ "HOUR",
+ // IF
+ "IF",
+ // IN
+ "IN",
+ // INNER
+ "INNER",
+ // INTERSECT
+ "INTERSECT",
+ // INTERVAL
+ "INTERVAL",
+ // IS
+ "IS",
+ // JOIN
+ "JOIN",
+ // KEY
+ "KEY",
+ // LEFT
+ "LEFT",
+ // LIKE
+ "LIKE",
+ // LIMIT
+ "LIMIT",
+ // LOCALTIME
+ "LOCALTIME",
+ // LOCALTIMESTAMP
+ "LOCALTIMESTAMP",
+ // MINUS
+ "MINUS",
+ // MINUTE
+ "MINUTE",
+ // MONTH
+ "MONTH",
+ // NATURAL
+ "NATURAL",
+ // NOT
+ "NOT",
+ // NULL
+ "NULL",
+ // OFFSET
+ "OFFSET",
+ // ON
+ "ON",
+ // OR
+ "OR",
+ // ORDER
+ "ORDER",
+ // PRIMARY
+ "PRIMARY",
+ // QUALIFY
+ "QUALIFY",
+ // RIGHT
+ "RIGHT",
+ // ROW
+ "ROW",
+ // ROWNUM
+ "ROWNUM",
+ // SECOND
+ "SECOND",
+ // SELECT
+ "SELECT",
+ // SESSION_USER
+ "SESSION_USER",
+ // SET
+ "SET",
+ // SOME
+ "SOME",
+ // SYMMETRIC
+ "SYMMETRIC",
+ // SYSTEM_USER
+ "SYSTEM_USER",
+ // TABLE
+ "TABLE",
+ // TO
+ "TO",
+ // TRUE
+ "TRUE",
+ // UESCAPE
+ "UESCAPE",
+ // UNION
+ "UNION",
+ // UNIQUE
+ "UNIQUE",
+ // UNKNOWN
+ "UNKNOWN",
+ // USER
+ "USER",
+ // USING
+ "USING",
+ // VALUE
+ "VALUE",
+ // VALUES
+ "VALUES",
+ // WHEN
+ "WHEN",
+ // WHERE
+ "WHERE",
+ // WINDOW
+ "WINDOW",
+ // WITH
+ "WITH",
+ // YEAR
+ "YEAR",
+ // _ROWID_
+ "_ROWID_",
+ // PARAMETER
+ "?",
+ // END_OF_INPUT
+ null,
+ // LITERAL
+ null,
+ // EQUAL
+ "=",
+ // BIGGER_EQUAL
+ ">=",
+ // BIGGER
+ ">",
+ // SMALLER
+ "<",
+ // SMALLER_EQUAL
+ "<=",
+ // NOT_EQUAL
+ "<>",
+ // AT
+ "@",
+ // MINUS_SIGN
+ "-",
+ // PLUS_SIGN
+ "+",
+ // CONCATENATION
+ "||",
+ // OPEN_PAREN
+ "(",
+ // CLOSE_PAREN
+ ")",
+ // SPATIAL_INTERSECTS
+ "&&",
+ // ASTERISK
+ "*",
+ // COMMA
+ ",",
+ // DOT
+ ".",
+ // OPEN_BRACE
+ "{",
+ // CLOSE_BRACE
+ "}",
+ // SLASH
+ "/",
+ // PERCENT
+ "%",
+ // SEMICOLON
+ ";",
+ // COLON
+ ":",
+ // OPEN_BRACKET
+ "[",
+ // CLOSE_BRACKET
+ "]",
+ // TILDE
+ "~",
+ // COLON_COLON
+ "::",
+ // COLON_EQ
+ ":=",
+ // NOT_TILDE
+ "!~",
+ // End
+ };
+
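Because every constant above is defined as the previous one plus one, a token type doubles directly as an index into the TOKENS array that follows; for example TOKENS[EQUAL] is "=". A tiny self-contained analogue of the scheme (names shortened to avoid clashing with the real constants):

    // Self-contained analogue of the numbering scheme above.
    static final int T_EQ = 0, T_GE = T_EQ + 1, T_GT = T_GE + 1;
    static final String[] T_NAMES = { "=", ">=", ">" };
    // T_NAMES[T_GE] -> ">="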
+ static class IdentifierToken extends Token {
+
+ private String identifier;
+
+ private final boolean quoted;
+
+ private boolean unicode;
+
+ IdentifierToken(int start, String identifier, boolean quoted, boolean unicode) {
+ super(start);
+ this.identifier = identifier;
+ this.quoted = quoted;
+ this.unicode = unicode;
+ }
+
+ @Override
+ int tokenType() {
+ return IDENTIFIER;
+ }
+
+ @Override
+ String asIdentifier() {
+ return identifier;
+ }
+
+ @Override
+ boolean isQuoted() {
+ return quoted;
+ }
+
+ @Override
+ boolean needsUnicodeConversion() {
+ return unicode;
+ }
+
+ @Override
+ void convertUnicode(int uescape) {
+ if (unicode) {
+ identifier = StringUtils.decodeUnicodeStringSQL(identifier, uescape);
+ unicode = false;
+ } else {
+ throw DbException.getInternalError();
+ }
+ }
+
+ @Override
+ public String toString() {
+ return quoted ? StringUtils.quoteIdentifier(identifier) : identifier;
+ }
+
+ }
+
+ static final class KeywordToken extends Token {
+
+ private final int type;
+
+ KeywordToken(int start, int type) {
+ super(start);
+ this.type = type;
+ }
+
+ @Override
+ int tokenType() {
+ return type;
+ }
+
+ @Override
+ String asIdentifier() {
+ return TOKENS[type];
+ }
+
+ @Override
+ public String toString() {
+ return TOKENS[type];
+ }
+
+ }
+
+ static final class KeywordOrIdentifierToken extends Token {
+
+ private final int type;
+
+ private final String identifier;
+
+ KeywordOrIdentifierToken(int start, int type, String identifier) {
+ super(start);
+ this.type = type;
+ this.identifier = identifier;
+ }
+
+ @Override
+ int tokenType() {
+ return type;
+ }
+
+ @Override
+ String asIdentifier() {
+ return identifier;
+ }
+
+ @Override
+ public String toString() {
+ return identifier;
+ }
+
+ }
+
+ static abstract class LiteralToken extends Token {
+
+ Value value;
+
+ LiteralToken(int start) {
+ super(start);
+ }
+
+ @Override
+ final int tokenType() {
+ return LITERAL;
+ }
+
+ @Override
+ public final String toString() {
+ return value(null).getTraceSQL();
+ }
+
+ }
+
+ static final class BinaryStringToken extends LiteralToken {
+
+ private final byte[] string;
+
+ BinaryStringToken(int start, byte[] string) {
+ super(start);
+ this.string = string;
+ }
+
+ @Override
+ Value value(CastDataProvider provider) {
+ if (value == null) {
+ value = ValueVarbinary.getNoCopy(string);
+ }
+ return value;
+ }
+
+ }
+
+ static final class CharacterStringToken extends LiteralToken {
+
+ String string;
+
+ private boolean unicode;
+
+ CharacterStringToken(int start, String string, boolean unicode) {
+ super(start);
+ this.string = string;
+ this.unicode = unicode;
+ }
+
+ @Override
+ Value value(CastDataProvider provider) {
+ if (value == null) {
+ value = ValueVarchar.get(string, provider);
+ }
+ return value;
+ }
+
+ @Override
+ boolean needsUnicodeConversion() {
+ return unicode;
+ }
+
+ @Override
+ void convertUnicode(int uescape) {
+ if (unicode) {
+ string = StringUtils.decodeUnicodeStringSQL(string, uescape);
+ unicode = false;
+ } else {
+ throw DbException.getInternalError();
+ }
+ }
+
+ }
+
+ static final class IntegerToken extends LiteralToken {
+
+ private final int number;
+
+ IntegerToken(int start, int number) {
+ super(start);
+ this.number = number;
+ }
+
+ @Override
+ Value value(CastDataProvider provider) {
+ if (value == null) {
+ value = ValueInteger.get(number);
+ }
+ return value;
+ }
+
+ }
+
+ static final class BigintToken extends LiteralToken {
+
+ private final long number;
+
+ BigintToken(int start, long number) {
+ super(start);
+ this.number = number;
+ }
+
+ @Override
+ Value value(CastDataProvider provider) {
+ if (value == null) {
+ value = ValueBigint.get(number);
+ }
+ return value;
+ }
+
+ }
+
+ static final class ValueToken extends LiteralToken {
+
+ ValueToken(int start, Value value) {
+ super(start);
+ this.value = value;
+ }
+
+ @Override
+ Value value(CastDataProvider provider) {
+ return value;
+ }
+
+ }
+
+ static final class ParameterToken extends Token {
+
+ int index;
+
+ ParameterToken(int start, int index) {
+ super(start);
+ this.index = index;
+ }
+
+ @Override
+ int tokenType() {
+ return PARAMETER;
+ }
+
+ @Override
+ String asIdentifier() {
+ return "?";
+ }
+
+ int index() {
+ return index;
+ }
+
+ @Override
+ public String toString() {
+ return index == 0 ? "?" : "?" + index;
+ }
+
+ }
+
+ static final class EndOfInputToken extends Token {
+
+ EndOfInputToken(int start) {
+ super(start);
+ }
+
+ @Override
+ int tokenType() {
+ return END_OF_INPUT;
+ }
+
+ }
+
+ private int start;
+
+ Token(int start) {
+ this.start = start;
+ }
+
+ final int start() {
+ return start;
+ }
+
+ final void setStart(int offset) {
+ start = offset;
+ }
+
+ final void subtractFromStart(int offset) {
+ start -= offset;
+ }
+
+ abstract int tokenType();
+
+ String asIdentifier() {
+ return null;
+ }
+
+ boolean isQuoted() {
+ return false;
+ }
+
+ Value value(CastDataProvider provider) {
+ return null;
+ }
+
+ boolean needsUnicodeConversion() {
+ return false;
+ }
+
+ void convertUnicode(int uescape) {
+ throw DbException.getInternalError();
+ }
+
+ @Override
+ protected Token clone() {
+ try {
+ return (Token) super.clone();
+ } catch (CloneNotSupportedException e) {
+ throw DbException.getInternalError();
+ }
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/Tokenizer.java b/h2/src/main/org/h2/command/Tokenizer.java
new file mode 100644
index 0000000000..f0c413e546
--- /dev/null
+++ b/h2/src/main/org/h2/command/Tokenizer.java
@@ -0,0 +1,1400 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command;
+
+import static org.h2.command.Token.ASTERISK;
+import static org.h2.command.Token.AT;
+import static org.h2.command.Token.BIGGER;
+import static org.h2.command.Token.BIGGER_EQUAL;
+import static org.h2.command.Token.CLOSE_BRACE;
+import static org.h2.command.Token.CLOSE_BRACKET;
+import static org.h2.command.Token.CLOSE_PAREN;
+import static org.h2.command.Token.COLON;
+import static org.h2.command.Token.COLON_COLON;
+import static org.h2.command.Token.COLON_EQ;
+import static org.h2.command.Token.COMMA;
+import static org.h2.command.Token.CONCATENATION;
+import static org.h2.command.Token.DOT;
+import static org.h2.command.Token.EQUAL;
+import static org.h2.command.Token.MINUS_SIGN;
+import static org.h2.command.Token.NOT_EQUAL;
+import static org.h2.command.Token.NOT_TILDE;
+import static org.h2.command.Token.OPEN_BRACE;
+import static org.h2.command.Token.OPEN_BRACKET;
+import static org.h2.command.Token.OPEN_PAREN;
+import static org.h2.command.Token.PERCENT;
+import static org.h2.command.Token.PLUS_SIGN;
+import static org.h2.command.Token.SEMICOLON;
+import static org.h2.command.Token.SLASH;
+import static org.h2.command.Token.SMALLER;
+import static org.h2.command.Token.SMALLER_EQUAL;
+import static org.h2.command.Token.SPATIAL_INTERSECTS;
+import static org.h2.command.Token.TILDE;
+import static org.h2.util.ParserUtil.ALL;
+import static org.h2.util.ParserUtil.AND;
+import static org.h2.util.ParserUtil.ANY;
+import static org.h2.util.ParserUtil.ARRAY;
+import static org.h2.util.ParserUtil.AS;
+import static org.h2.util.ParserUtil.ASYMMETRIC;
+import static org.h2.util.ParserUtil.AUTHORIZATION;
+import static org.h2.util.ParserUtil.BETWEEN;
+import static org.h2.util.ParserUtil.CASE;
+import static org.h2.util.ParserUtil.CAST;
+import static org.h2.util.ParserUtil.CHECK;
+import static org.h2.util.ParserUtil.CONSTRAINT;
+import static org.h2.util.ParserUtil.CROSS;
+import static org.h2.util.ParserUtil.CURRENT_CATALOG;
+import static org.h2.util.ParserUtil.CURRENT_DATE;
+import static org.h2.util.ParserUtil.CURRENT_PATH;
+import static org.h2.util.ParserUtil.CURRENT_ROLE;
+import static org.h2.util.ParserUtil.CURRENT_SCHEMA;
+import static org.h2.util.ParserUtil.CURRENT_TIME;
+import static org.h2.util.ParserUtil.CURRENT_TIMESTAMP;
+import static org.h2.util.ParserUtil.CURRENT_USER;
+import static org.h2.util.ParserUtil.DAY;
+import static org.h2.util.ParserUtil.DEFAULT;
+import static org.h2.util.ParserUtil.DISTINCT;
+import static org.h2.util.ParserUtil.ELSE;
+import static org.h2.util.ParserUtil.END;
+import static org.h2.util.ParserUtil.EXCEPT;
+import static org.h2.util.ParserUtil.EXISTS;
+import static org.h2.util.ParserUtil.FALSE;
+import static org.h2.util.ParserUtil.FETCH;
+import static org.h2.util.ParserUtil.FOR;
+import static org.h2.util.ParserUtil.FOREIGN;
+import static org.h2.util.ParserUtil.FROM;
+import static org.h2.util.ParserUtil.FULL;
+import static org.h2.util.ParserUtil.GROUP;
+import static org.h2.util.ParserUtil.HAVING;
+import static org.h2.util.ParserUtil.HOUR;
+import static org.h2.util.ParserUtil.IDENTIFIER;
+import static org.h2.util.ParserUtil.IF;
+import static org.h2.util.ParserUtil.IN;
+import static org.h2.util.ParserUtil.INNER;
+import static org.h2.util.ParserUtil.INTERSECT;
+import static org.h2.util.ParserUtil.INTERVAL;
+import static org.h2.util.ParserUtil.IS;
+import static org.h2.util.ParserUtil.JOIN;
+import static org.h2.util.ParserUtil.KEY;
+import static org.h2.util.ParserUtil.LEFT;
+import static org.h2.util.ParserUtil.LIKE;
+import static org.h2.util.ParserUtil.LIMIT;
+import static org.h2.util.ParserUtil.LOCALTIME;
+import static org.h2.util.ParserUtil.LOCALTIMESTAMP;
+import static org.h2.util.ParserUtil.MINUS;
+import static org.h2.util.ParserUtil.MINUTE;
+import static org.h2.util.ParserUtil.MONTH;
+import static org.h2.util.ParserUtil.NATURAL;
+import static org.h2.util.ParserUtil.NOT;
+import static org.h2.util.ParserUtil.NULL;
+import static org.h2.util.ParserUtil.OFFSET;
+import static org.h2.util.ParserUtil.ON;
+import static org.h2.util.ParserUtil.OR;
+import static org.h2.util.ParserUtil.ORDER;
+import static org.h2.util.ParserUtil.PRIMARY;
+import static org.h2.util.ParserUtil.QUALIFY;
+import static org.h2.util.ParserUtil.RIGHT;
+import static org.h2.util.ParserUtil.ROW;
+import static org.h2.util.ParserUtil.ROWNUM;
+import static org.h2.util.ParserUtil.SECOND;
+import static org.h2.util.ParserUtil.SELECT;
+import static org.h2.util.ParserUtil.SESSION_USER;
+import static org.h2.util.ParserUtil.SET;
+import static org.h2.util.ParserUtil.SOME;
+import static org.h2.util.ParserUtil.SYMMETRIC;
+import static org.h2.util.ParserUtil.SYSTEM_USER;
+import static org.h2.util.ParserUtil.TABLE;
+import static org.h2.util.ParserUtil.TO;
+import static org.h2.util.ParserUtil.TRUE;
+import static org.h2.util.ParserUtil.UESCAPE;
+import static org.h2.util.ParserUtil.UNION;
+import static org.h2.util.ParserUtil.UNIQUE;
+import static org.h2.util.ParserUtil.UNKNOWN;
+import static org.h2.util.ParserUtil.USER;
+import static org.h2.util.ParserUtil.USING;
+import static org.h2.util.ParserUtil.VALUE;
+import static org.h2.util.ParserUtil.VALUES;
+import static org.h2.util.ParserUtil.WHEN;
+import static org.h2.util.ParserUtil.WHERE;
+import static org.h2.util.ParserUtil.WINDOW;
+import static org.h2.util.ParserUtil.WITH;
+import static org.h2.util.ParserUtil.YEAR;
+import static org.h2.util.ParserUtil._ROWID_;
+
+import java.io.ByteArrayOutputStream;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.ListIterator;
+
+import org.h2.api.ErrorCode;
+import org.h2.engine.CastDataProvider;
+import org.h2.message.DbException;
+import org.h2.util.StringUtils;
+import org.h2.value.ValueBigint;
+import org.h2.value.ValueDecfloat;
+import org.h2.value.ValueNumeric;
+
+/**
+ * Tokenizer: converts a SQL string into a list of tokens.
+ */
+public final class Tokenizer {
+
+ private final CastDataProvider provider;
+
+ private final boolean identifiersToUpper;
+
+ private final boolean identifiersToLower;
+
+ private final BitSet nonKeywords;
+
+ Tokenizer(CastDataProvider provider, boolean identifiersToUpper, boolean identifiersToLower, BitSet nonKeywords) {
+ this.provider = provider;
+ this.identifiersToUpper = identifiersToUpper;
+ this.identifiersToLower = identifiersToLower;
+ this.nonKeywords = nonKeywords;
+ }
+
+ ArrayList<Token> tokenize(String sql, boolean stopOnCloseParen) {
+ ArrayList<Token> tokens = new ArrayList<>();
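+ // 'end' is the inclusive index of the last character of the SQL text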
+ int end = sql.length() - 1;
+ boolean foundUnicode = false;
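+ // index of the last assigned ordinal parameter, or -1 once an
+ // explicitly indexed parameter has been seen (styles must not be mixed)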
+ int lastParameter = 0;
+ loop: for (int i = 0; i <= end;) {
+ int tokenStart = i;
+ char c = sql.charAt(i);
+ Token token;
+ switch (c) {
+ case '!':
+ if (i < end) {
+ char c2 = sql.charAt(++i);
+ if (c2 == '=') {
+ token = new Token.KeywordToken(tokenStart, NOT_EQUAL);
+ break;
+ }
+ if (c2 == '~') {
+ token = new Token.KeywordToken(tokenStart, NOT_TILDE);
+ break;
+ }
+ }
+ throw DbException.getSyntaxError(sql, tokenStart);
+ case '"':
+ case '`':
+ i = readQuotedIdentifier(sql, end, tokenStart, i, c, false, tokens);
+ continue loop;
+ case '#':
+ if (provider.getMode().supportPoundSymbolForColumnNames) {
+ i = readIdentifier(sql, end, tokenStart, i, c, tokens);
+ continue loop;
+ }
+ throw DbException.getSyntaxError(sql, tokenStart);
+ case '$':
+ if (i < end) {
+ char c2 = sql.charAt(i + 1);
+ if (c2 == '$') {
+ i += 2;
+ int stringEnd = sql.indexOf("$$", i);
+ if (stringEnd < 0) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ token = new Token.CharacterStringToken(tokenStart, sql.substring(i, stringEnd), false);
+ i = stringEnd + 1;
+ } else {
+ i = parseParameterIndex(sql, end, i, tokens);
+ lastParameter = assignParameterIndex(tokens, lastParameter);
+ continue loop;
+ }
+ } else {
+ token = new Token.ParameterToken(tokenStart, 0);
+ }
+ break;
+ case '%':
+ token = new Token.KeywordToken(tokenStart, PERCENT);
+ break;
+ case '&':
+ if (i < end && sql.charAt(i + 1) == '&') {
+ i++;
+ token = new Token.KeywordToken(tokenStart, SPATIAL_INTERSECTS);
+ break;
+ }
+ throw DbException.getSyntaxError(sql, tokenStart);
+ case '\'':
+ i = readCharacterString(sql, tokenStart, end, i, false, tokens);
+ continue loop;
+ case '(':
+ token = new Token.KeywordToken(tokenStart, OPEN_PAREN);
+ break;
+ case ')':
+ token = new Token.KeywordToken(tokenStart, CLOSE_PAREN);
+ if (stopOnCloseParen) {
+ tokens.add(token);
+ end = skipWhitespace(sql, end, i + 1) - 1;
+ break loop;
+ }
+ break;
+ case '*':
+ token = new Token.KeywordToken(tokenStart, ASTERISK);
+ break;
+ case '+':
+ token = new Token.KeywordToken(tokenStart, PLUS_SIGN);
+ break;
+ case ',':
+ token = new Token.KeywordToken(tokenStart, COMMA);
+ break;
+ case '-':
+ if (i < end && sql.charAt(i + 1) == '-') {
+ i = skipSimpleComment(sql, end, i);
+ continue loop;
+ } else {
+ token = new Token.KeywordToken(tokenStart, MINUS_SIGN);
+ }
+ break;
+ case '.':
+ if (i < end) {
+ char c2 = sql.charAt(i + 1);
+ if (c2 >= '0' && c2 <= '9') {
+ i = readNumeric(sql, tokenStart, end, i + 1, c2, false, false, tokens);
+ continue loop;
+ }
+ }
+ token = new Token.KeywordToken(tokenStart, DOT);
+ break;
+ case '/':
+ if (i < end) {
+ char c2 = sql.charAt(i + 1);
+ if (c2 == '*') {
+ i = skipBracketedComment(sql, tokenStart, end, i);
+ continue loop;
+ } else if (c2 == '/') {
+ i = skipSimpleComment(sql, end, i);
+ continue loop;
+ }
+ }
+ token = new Token.KeywordToken(tokenStart, SLASH);
+ break;
+ case '0':
+ if (i < end) {
+ char c2 = sql.charAt(i + 1);
+ if (c2 == 'X' || c2 == 'x') {
+ i = readHexNumber(sql, provider, tokenStart, end, i + 2, tokens);
+ continue loop;
+ }
+ }
+ //$FALL-THROUGH$
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ i = readNumeric(sql, tokenStart, end, i + 1, c, tokens);
+ continue loop;
+ case ':':
+ if (i < end) {
+ char c2 = sql.charAt(i + 1);
+ if (c2 == ':') {
+ i++;
+ token = new Token.KeywordToken(tokenStart, COLON_COLON);
+ break;
+ } else if (c2 == '=') {
+ i++;
+ token = new Token.KeywordToken(tokenStart, COLON_EQ);
+ break;
+ }
+ }
+ token = new Token.KeywordToken(tokenStart, COLON);
+ break;
+ case ';':
+ token = new Token.KeywordToken(tokenStart, SEMICOLON);
+ break;
+ case '<':
+ if (i < end) {
+ char c2 = sql.charAt(i + 1);
+ if (c2 == '=') {
+ i++;
+ token = new Token.KeywordToken(tokenStart, SMALLER_EQUAL);
+ break;
+ }
+ if (c2 == '>') {
+ i++;
+ token = new Token.KeywordToken(tokenStart, NOT_EQUAL);
+ break;
+ }
+ }
+ token = new Token.KeywordToken(tokenStart, SMALLER);
+ break;
+ case '=':
+ token = new Token.KeywordToken(tokenStart, EQUAL);
+ break;
+ case '>':
+ if (i < end && sql.charAt(i + 1) == '=') {
+ i++;
+ token = new Token.KeywordToken(tokenStart, BIGGER_EQUAL);
+ break;
+ }
+ token = new Token.KeywordToken(tokenStart, BIGGER);
+ break;
+ case '?': {
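+ // '??(' and '??)' are the SQL standard alternative tokens for '[' and ']'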
+ if (i + 1 < end && sql.charAt(i + 1) == '?') {
+ char c3 = sql.charAt(i + 2);
+ if (c3 == '(') {
+ i += 2;
+ token = new Token.KeywordToken(tokenStart, OPEN_BRACKET);
+ break;
+ }
+ if (c3 == ')') {
+ i += 2;
+ token = new Token.KeywordToken(tokenStart, CLOSE_BRACKET);
+ break;
+ }
+ }
+ i = parseParameterIndex(sql, end, i, tokens);
+ lastParameter = assignParameterIndex(tokens, lastParameter);
+ continue loop;
+ }
+ case '@':
+ token = new Token.KeywordToken(tokenStart, AT);
+ break;
+ case 'A':
+ case 'a':
+ i = readA(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'B':
+ case 'b':
+ i = readB(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'C':
+ case 'c':
+ i = readC(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'D':
+ case 'd':
+ i = readD(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'E':
+ case 'e':
+ i = readE(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'F':
+ case 'f':
+ i = readF(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'G':
+ case 'g':
+ i = readG(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'H':
+ case 'h':
+ i = readH(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'I':
+ case 'i':
+ i = readI(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'J':
+ case 'j':
+ i = readJ(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'K':
+ case 'k':
+ i = readK(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'L':
+ case 'l':
+ i = readL(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'M':
+ case 'm':
+ i = readM(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'N':
+ case 'n':
+ if (i < end && sql.charAt(i + 1) == '\'') {
+ i = readCharacterString(sql, tokenStart, end, i + 1, false, tokens);
+ } else {
+ i = readN(sql, end, tokenStart, i, tokens);
+ }
+ continue loop;
+ case 'O':
+ case 'o':
+ i = readO(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'P':
+ case 'p':
+ i = readP(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'Q':
+ case 'q':
+ i = readQ(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'R':
+ case 'r':
+ i = readR(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'S':
+ case 's':
+ i = readS(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'T':
+ case 't':
+ i = readT(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'U':
+ case 'u':
+ if (i + 1 < end && sql.charAt(i + 1) == '&') {
+ char c3 = sql.charAt(i + 2);
+ if (c3 == '"') {
+ i = readQuotedIdentifier(sql, end, tokenStart, i + 2, '"', true, tokens);
+ foundUnicode = true;
+ continue loop;
+ } else if (c3 == '\'') {
+ i = readCharacterString(sql, tokenStart, end, i + 2, true, tokens);
+ foundUnicode = true;
+ continue loop;
+ }
+ }
+ i = readU(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'V':
+ case 'v':
+ i = readV(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'W':
+ case 'w':
+ i = readW(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'X':
+ case 'x':
+ if (i < end && sql.charAt(i + 1) == '\'') {
+ i = readBinaryString(sql, tokenStart, end, i + 1, tokens);
+ } else {
+ i = readIdentifier(sql, end, tokenStart, i, c, tokens);
+ }
+ continue loop;
+ case 'Y':
+ case 'y':
+ i = readY(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case 'Z':
+ case 'z':
+ i = readIdentifier(sql, end, tokenStart, i, c, tokens);
+ continue loop;
+ case '[':
+ if (provider.getMode().squareBracketQuotedNames) {
+ int identifierEnd = sql.indexOf(']', ++i);
+ if (identifierEnd < 0) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ token = new Token.IdentifierToken(tokenStart, sql.substring(i, identifierEnd), true, false);
+ i = identifierEnd;
+ } else {
+ token = new Token.KeywordToken(tokenStart, OPEN_BRACKET);
+ }
+ break;
+ case ']':
+ token = new Token.KeywordToken(tokenStart, CLOSE_BRACKET);
+ break;
+ case '_':
+ i = read_(sql, end, tokenStart, i, tokens);
+ continue loop;
+ case '{':
+ token = new Token.KeywordToken(tokenStart, OPEN_BRACE);
+ break;
+ case '|':
+ if (i < end && sql.charAt(++i) == '|') {
+ token = new Token.KeywordToken(tokenStart, CONCATENATION);
+ break;
+ }
+ throw DbException.getSyntaxError(sql, tokenStart);
+ case '}':
+ token = new Token.KeywordToken(tokenStart, CLOSE_BRACE);
+ break;
+ case '~':
+ token = new Token.KeywordToken(tokenStart, TILDE);
+ break;
+ default:
+ if (c <= ' ') {
+ i++;
+ continue loop;
+ } else {
+ int cp = Character.isHighSurrogate(c) ? sql.codePointAt(i) : c;
+ if (Character.isSpaceChar(cp)) {
+ // skip both chars of a supplementary space character
+ i += Character.charCount(cp);
+ continue loop;
+ }
+ if (Character.isJavaIdentifierStart(cp)) {
+ i = readIdentifier(sql, end, tokenStart, i, cp, tokens);
+ continue loop;
+ }
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ }
+ tokens.add(token);
+ i++;
+ }
+ if (foundUnicode) {
+ processUescape(sql, tokens);
+ }
+ tokens.add(new Token.EndOfInputToken(end + 1));
+ return tokens;
+ }
+
+ private int readIdentifier(String sql, int end, int tokenStart, int i, int cp, ArrayList<Token> tokens) {
+ if (cp >= Character.MIN_SUPPLEMENTARY_CODE_POINT) {
+ // skip the second char of the surrogate pair
+ i++;
+ }
+ int endIndex = findIdentifierEnd(sql, end, i);
+ tokens.add(new Token.IdentifierToken(tokenStart, extractIdentifier(sql, tokenStart, endIndex), false, false));
+ return endIndex;
+ }
+
+ private int readA(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (length == 2) {
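+ // '& 0xffdf' clears the 0x20 bit, folding lower-case ASCII to upper case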
+ type = (sql.charAt(tokenStart + 1) & 0xffdf) == 'S' ? AS : IDENTIFIER;
+ } else {
+ if (eq("ALL", sql, tokenStart, length)) {
+ type = ALL;
+ } else if (eq("AND", sql, tokenStart, length)) {
+ type = AND;
+ } else if (eq("ANY", sql, tokenStart, length)) {
+ type = ANY;
+ } else if (eq("ARRAY", sql, tokenStart, length)) {
+ type = ARRAY;
+ } else if (eq("ASYMMETRIC", sql, tokenStart, length)) {
+ type = ASYMMETRIC;
+ } else if (eq("AUTHORIZATION", sql, tokenStart, length)) {
+ type = AUTHORIZATION;
+ } else {
+ type = IDENTIFIER;
+ }
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readB(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type = eq("BETWEEN", sql, tokenStart, length) ? BETWEEN : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readC(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("CASE", sql, tokenStart, length)) {
+ type = CASE;
+ } else if (eq("CAST", sql, tokenStart, length)) {
+ type = CAST;
+ } else if (eq("CHECK", sql, tokenStart, length)) {
+ type = CHECK;
+ } else if (eq("CONSTRAINT", sql, tokenStart, length)) {
+ type = CONSTRAINT;
+ } else if (eq("CROSS", sql, tokenStart, length)) {
+ type = CROSS;
+ } else if (length >= 12 && eq("CURRENT_", sql, tokenStart, 8)) {
+ type = getTokenTypeCurrent(sql, tokenStart, length);
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
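+ /**
+ * Resolves keywords that begin with "CURRENT_". The 8-character prefix
+ * was already matched by the caller, so only the tail is compared.
+ */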
+ private static int getTokenTypeCurrent(String s, int tokenStart, int length) {
+ tokenStart += 8;
+ switch (length) {
+ case 12:
+ if (eqCurrent("CURRENT_DATE", s, tokenStart, length)) {
+ return CURRENT_DATE;
+ } else if (eqCurrent("CURRENT_PATH", s, tokenStart, length)) {
+ return CURRENT_PATH;
+ } else if (eqCurrent("CURRENT_ROLE", s, tokenStart, length)) {
+ return CURRENT_ROLE;
+ } else if (eqCurrent("CURRENT_TIME", s, tokenStart, length)) {
+ return CURRENT_TIME;
+ } else if (eqCurrent("CURRENT_USER", s, tokenStart, length)) {
+ return CURRENT_USER;
+ }
+ break;
+ case 14:
+ if (eqCurrent("CURRENT_SCHEMA", s, tokenStart, length)) {
+ return CURRENT_SCHEMA;
+ }
+ break;
+ case 15:
+ if (eqCurrent("CURRENT_CATALOG", s, tokenStart, length)) {
+ return CURRENT_CATALOG;
+ }
+ break;
+ case 17:
+ if (eqCurrent("CURRENT_TIMESTAMP", s, tokenStart, length)) {
+ return CURRENT_TIMESTAMP;
+ }
+ }
+ return IDENTIFIER;
+ }
+
+ private static boolean eqCurrent(String expected, String s, int start, int length) {
+ for (int i = 8; i < length; i++) {
+ if (expected.charAt(i) != (s.charAt(start++) & 0xffdf)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ private int readD(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("DAY", sql, tokenStart, length)) {
+ type = DAY;
+ } else if (eq("DEFAULT", sql, tokenStart, length)) {
+ type = DEFAULT;
+ } else if (eq("DISTINCT", sql, tokenStart, length)) {
+ type = DISTINCT;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readE(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("ELSE", sql, tokenStart, length)) {
+ type = ELSE;
+ } else if (eq("END", sql, tokenStart, length)) {
+ type = END;
+ } else if (eq("EXCEPT", sql, tokenStart, length)) {
+ type = EXCEPT;
+ } else if (eq("EXISTS", sql, tokenStart, length)) {
+ type = EXISTS;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readF(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("FETCH", sql, tokenStart, length)) {
+ type = FETCH;
+ } else if (eq("FROM", sql, tokenStart, length)) {
+ type = FROM;
+ } else if (eq("FOR", sql, tokenStart, length)) {
+ type = FOR;
+ } else if (eq("FOREIGN", sql, tokenStart, length)) {
+ type = FOREIGN;
+ } else if (eq("FULL", sql, tokenStart, length)) {
+ type = FULL;
+ } else if (eq("FALSE", sql, tokenStart, length)) {
+ type = FALSE;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readG(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type = eq("GROUP", sql, tokenStart, length) ? GROUP : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readH(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("HAVING", sql, tokenStart, length)) {
+ type = HAVING;
+ } else if (eq("HOUR", sql, tokenStart, length)) {
+ type = HOUR;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readI(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (length == 2) {
+ switch ((sql.charAt(tokenStart + 1) & 0xffdf)) {
+ case 'F':
+ type = IF;
+ break;
+ case 'N':
+ type = IN;
+ break;
+ case 'S':
+ type = IS;
+ break;
+ default:
+ type = IDENTIFIER;
+ }
+ } else {
+ if (eq("INNER", sql, tokenStart, length)) {
+ type = INNER;
+ } else if (eq("INTERSECT", sql, tokenStart, length)) {
+ type = INTERSECT;
+ } else if (eq("INTERVAL", sql, tokenStart, length)) {
+ type = INTERVAL;
+ } else {
+ type = IDENTIFIER;
+ }
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readJ(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type = eq("JOIN", sql, tokenStart, length) ? JOIN : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readK(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type = eq("KEY", sql, tokenStart, length) ? KEY : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readL(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("LEFT", sql, tokenStart, length)) {
+ type = LEFT;
+ } else if (eq("LIMIT", sql, tokenStart, length)) {
+ type = provider.getMode().limit ? LIMIT : IDENTIFIER;
+ } else if (eq("LIKE", sql, tokenStart, length)) {
+ type = LIKE;
+ } else if (eq("LOCALTIME", sql, tokenStart, length)) {
+ type = LOCALTIME;
+ } else if (eq("LOCALTIMESTAMP", sql, tokenStart, length)) {
+ type = LOCALTIMESTAMP;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readM(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("MINUS", sql, tokenStart, length)) {
+ type = provider.getMode().minusIsExcept ? MINUS : IDENTIFIER;
+ } else if (eq("MINUTE", sql, tokenStart, length)) {
+ type = MINUTE;
+ } else if (eq("MONTH", sql, tokenStart, length)) {
+ type = MONTH;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readN(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("NOT", sql, tokenStart, length)) {
+ type = NOT;
+ } else if (eq("NATURAL", sql, tokenStart, length)) {
+ type = NATURAL;
+ } else if (eq("NULL", sql, tokenStart, length)) {
+ type = NULL;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readO(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (length == 2) {
+ switch ((sql.charAt(tokenStart + 1) & 0xffdf)) {
+ case 'N':
+ type = ON;
+ break;
+ case 'R':
+ type = OR;
+ break;
+ default:
+ type = IDENTIFIER;
+ }
+ } else {
+ if (eq("OFFSET", sql, tokenStart, length)) {
+ type = OFFSET;
+ } else if (eq("ORDER", sql, tokenStart, length)) {
+ type = ORDER;
+ } else {
+ type = IDENTIFIER;
+ }
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readP(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type = eq("PRIMARY", sql, tokenStart, length) ? PRIMARY : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readQ(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type = eq("QUALIFY", sql, tokenStart, length) ? QUALIFY : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readR(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("RIGHT", sql, tokenStart, length)) {
+ type = RIGHT;
+ } else if (eq("ROW", sql, tokenStart, length)) {
+ type = ROW;
+ } else if (eq("ROWNUM", sql, tokenStart, length)) {
+ type = ROWNUM;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readS(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("SECOND", sql, tokenStart, length)) {
+ type = SECOND;
+ } else if (eq("SELECT", sql, tokenStart, length)) {
+ type = SELECT;
+ } else if (eq("SESSION_USER", sql, tokenStart, length)) {
+ type = SESSION_USER;
+ } else if (eq("SET", sql, tokenStart, length)) {
+ type = SET;
+ } else if (eq("SOME", sql, tokenStart, length)) {
+ type = SOME;
+ } else if (eq("SYMMETRIC", sql, tokenStart, length)) {
+ type = SYMMETRIC;
+ } else if (eq("SYSTEM_USER", sql, tokenStart, length)) {
+ type = SYSTEM_USER;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readT(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (length == 2) {
+ type = (sql.charAt(tokenStart + 1) & 0xffdf) == 'O' ? TO : IDENTIFIER;
+ } else {
+ if (eq("TABLE", sql, tokenStart, length)) {
+ type = TABLE;
+ } else if (eq("TRUE", sql, tokenStart, length)) {
+ type = TRUE;
+ } else {
+ type = IDENTIFIER;
+ }
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readU(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("UESCAPE", sql, tokenStart, length)) {
+ type = UESCAPE;
+ } else if (eq("UNION", sql, tokenStart, length)) {
+ type = UNION;
+ } else if (eq("UNIQUE", sql, tokenStart, length)) {
+ type = UNIQUE;
+ } else if (eq("UNKNOWN", sql, tokenStart, length)) {
+ type = UNKNOWN;
+ } else if (eq("USER", sql, tokenStart, length)) {
+ type = USER;
+ } else if (eq("USING", sql, tokenStart, length)) {
+ type = USING;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readV(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("VALUE", sql, tokenStart, length)) {
+ type = VALUE;
+ } else if (eq("VALUES", sql, tokenStart, length)) {
+ type = VALUES;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readW(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type;
+ if (eq("WHEN", sql, tokenStart, length)) {
+ type = WHEN;
+ } else if (eq("WHERE", sql, tokenStart, length)) {
+ type = WHERE;
+ } else if (eq("WINDOW", sql, tokenStart, length)) {
+ type = WINDOW;
+ } else if (eq("WITH", sql, tokenStart, length)) {
+ type = WITH;
+ } else {
+ type = IDENTIFIER;
+ }
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int readY(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int length = endIndex - tokenStart;
+ int type = eq("YEAR", sql, tokenStart, length) ? YEAR : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
+ private int read_(String sql, int end, int tokenStart, int i, ArrayList<Token> tokens) {
+ int endIndex = findIdentifierEnd(sql, end, i);
+ int type = endIndex - tokenStart == 7 && "_ROWID_".regionMatches(true, 1, sql, tokenStart + 1, 6) ? _ROWID_
+ : IDENTIFIER;
+ return readIdentifierOrKeyword(sql, tokenStart, tokens, endIndex, type);
+ }
+
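+ /**
+ * Adds a keyword token, a plain identifier token, or, when the keyword
+ * is registered as a non-keyword, a token that the parser may treat as
+ * either one.
+ */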
+ private int readIdentifierOrKeyword(String sql, int tokenStart, ArrayList<Token> tokens, int endIndex, int type) {
+ Token token;
+ if (type == IDENTIFIER) {
+ token = new Token.IdentifierToken(tokenStart, extractIdentifier(sql, tokenStart, endIndex), false, false);
+ } else if (nonKeywords != null && nonKeywords.get(type)) {
+ token = new Token.KeywordOrIdentifierToken(tokenStart, type, extractIdentifier(sql, tokenStart, endIndex));
+ } else {
+ token = new Token.KeywordToken(tokenStart, type);
+ }
+ tokens.add(token);
+ return endIndex;
+ }
+
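+ /**
+ * Case-insensitively compares the region s[start, start + length) with
+ * the expected keyword. The first character was already matched by the
+ * caller's dispatch switch, so comparison starts at index 1.
+ */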
+ private static boolean eq(String expected, String s, int start, int length) {
+ if (length != expected.length()) {
+ return false;
+ }
+ for (int i = 1; i < length; i++) {
+ if (expected.charAt(i) != (s.charAt(++start) & 0xffdf)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
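+ /**
+ * Scans forward from the given position and returns the exclusive end
+ * index of the current identifier.
+ */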
+ private int findIdentifierEnd(String sql, int end, int i) {
+ i++;
+ for (;;) {
+ int cp;
+ if (i > end || (!Character.isJavaIdentifierPart(cp = sql.codePointAt(i))
+ && (cp != '#' || !provider.getMode().supportPoundSymbolForColumnNames))) {
+ break;
+ }
+ i += Character.charCount(cp);
+ }
+ return i;
+ }
+
+ private String extractIdentifier(String sql, int beginIndex, int endIndex) {
+ return convertCase(sql.substring(beginIndex, endIndex));
+ }
+
+ private int readQuotedIdentifier(String sql, int end, int tokenStart, int i, char c, boolean unicode,
+ ArrayList<Token> tokens) {
+ int identifierEnd = sql.indexOf(c, ++i);
+ if (identifierEnd < 0) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ String s = sql.substring(i, identifierEnd);
+ i = identifierEnd + 1;
+ if (i <= end && sql.charAt(i) == c) {
+ StringBuilder builder = new StringBuilder(s);
+ do {
+ identifierEnd = sql.indexOf(c, i + 1);
+ if (identifierEnd < 0) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ builder.append(sql, i, identifierEnd);
+ i = identifierEnd + 1;
+ } while (i <= end && sql.charAt(i) == c);
+ s = builder.toString();
+ }
+ if (c == '`') {
+ s = convertCase(s);
+ }
+ tokens.add(new Token.IdentifierToken(tokenStart, s, true, unicode));
+ return i;
+ }
+
+ private String convertCase(String s) {
+ if (identifiersToUpper) {
+ s = StringUtils.toUpperEnglish(s);
+ } else if (identifiersToLower) {
+ s = StringUtils.toLowerEnglish(s);
+ }
+ return s;
+ }
+
+ private static int readBinaryString(String sql, int tokenStart, int end, int i, ArrayList<Token> tokens) {
+ ByteArrayOutputStream result = new ByteArrayOutputStream();
+ int stringEnd;
+ do {
+ stringEnd = sql.indexOf('\'', ++i);
+ if (stringEnd < 0 || stringEnd < end && sql.charAt(stringEnd + 1) == '\'') {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ StringUtils.convertHexWithSpacesToBytes(result, sql, i, stringEnd);
+ i = skipWhitespace(sql, end, stringEnd + 1);
+ } while (i <= end && sql.charAt(i) == '\'');
+ tokens.add(new Token.BinaryStringToken(tokenStart, result.toByteArray()));
+ return i;
+ }
+
+ private static int readCharacterString(String sql, int tokenStart, int end, int i, boolean unicode,
+ ArrayList<Token> tokens) {
+ String s = null;
+ StringBuilder builder = null;
+ int stringEnd;
+ do {
+ stringEnd = sql.indexOf('\'', ++i);
+ if (stringEnd < 0) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ if (s == null) {
+ s = sql.substring(i, stringEnd);
+ } else {
+ if (builder == null) {
+ builder = new StringBuilder(s);
+ }
+ builder.append(sql, i, stringEnd);
+ }
+ i = stringEnd + 1;
+ if (i <= end && sql.charAt(i) == '\'') {
+ if (builder == null) {
+ builder = new StringBuilder(s);
+ }
+ do {
+ stringEnd = sql.indexOf('\'', i + 1);
+ if (stringEnd < 0) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ builder.append(sql, i, stringEnd);
+ i = stringEnd + 1;
+ } while (i <= end && sql.charAt(i) == '\'');
+ }
+ i = skipWhitespace(sql, end, i);
+ } while (i <= end && sql.charAt(i) == '\'');
+ if (builder != null) {
+ s = builder.toString();
+ }
+ tokens.add(new Token.CharacterStringToken(tokenStart, s, unicode));
+ return i;
+ }
+
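+ /**
+ * Skips whitespace and comments, returning the index of the next
+ * significant character.
+ */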
+ private static int skipWhitespace(String sql, int end, int i) {
+ while (i <= end) {
+ int cp = sql.codePointAt(i);
+ if (!Character.isWhitespace(cp)) {
+ if (cp == '/' && i < end) {
+ char c2 = sql.charAt(i + 1);
+ if (c2 == '*') {
+ i = skipBracketedComment(sql, i, end, i);
+ continue;
+ } else if (c2 == '/') {
+ i = skipSimpleComment(sql, end, i);
+ continue;
+ }
+ }
+ break;
+ }
+ i += Character.charCount(cp);
+ }
+ return i;
+ }
+
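+ /**
+ * Reads a 0x... literal: a binary string in compatibility modes where
+ * zeroExLiteralsAreBinaryStrings is set, otherwise a hexadecimal
+ * INTEGER, BIGINT, or NUMERIC value.
+ */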
+ private static int readHexNumber(String sql, CastDataProvider provider, int tokenStart, int end, int i,
+ ArrayList<Token> tokens) {
+ if (provider.getMode().zeroExLiteralsAreBinaryStrings) {
+ int start = i;
+ for (char c; i <= end
+ && (((c = sql.charAt(i)) >= '0' && c <= '9') || ((c &= 0xffdf) >= 'A' && c <= 'F'));) {
+ i++;
+ }
+ if (i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) {
+ throw DbException.get(ErrorCode.HEX_STRING_WRONG_1, sql.substring(start, i + 1));
+ }
+ tokens.add(new Token.BinaryStringToken(start, StringUtils.convertHexToBytes(sql.substring(start, i))));
+ return i;
+ } else {
+ if (i > end) {
+ throw DbException.getSyntaxError(sql, tokenStart, "Hex number");
+ }
+ int start = i;
+ long number = 0;
+ char c;
+ do {
+ c = sql.charAt(i);
+ if (c >= '0' && c <= '9') {
+ number = (number << 4) + c - '0';
+ // Convert a-z to A-Z
+ } else if ((c &= 0xffdf) >= 'A' && c <= 'F') {
+ number = (number << 4) + c - ('A' - 10);
+ } else if (i == start) {
+ throw DbException.getSyntaxError(sql, tokenStart, "Hex number");
+ } else {
+ break;
+ }
+ if (number > Integer.MAX_VALUE) {
+ while (++i <= end
+ && (((c = sql.charAt(i)) >= '0' && c <= '9') || ((c &= 0xffdf) >= 'A' && c <= 'F'))) {
+ }
+ return finishBigInteger(sql, tokenStart, end, i, start, i <= end && c == 'L', 16, tokens);
+ }
+ } while (++i <= end);
+
+ boolean bigint = i <= end && c == 'L';
+ if (bigint) {
+ i++;
+ }
+ if (i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) {
+ throw DbException.getSyntaxError(sql, tokenStart, "Hex number");
+ }
+ tokens.add(bigint ? new Token.BigintToken(start, number) : new Token.IntegerToken(start, (int) number));
+ return i;
+ }
+ }
+
+ private static int readNumeric(String sql, int tokenStart, int end, int i, char c, ArrayList<Token> tokens) {
+ long number = c - '0';
+ for (; i <= end; i++) {
+ c = sql.charAt(i);
+ if (c < '0' || c > '9') {
+ switch (c) {
+ case '.':
+ return readNumeric(sql, tokenStart, end, i, c, false, false, tokens);
+ case 'E':
+ case 'e':
+ return readNumeric(sql, tokenStart, end, i, c, false, true, tokens);
+ case 'L':
+ case 'l':
+ return finishBigInteger(sql, tokenStart, end, i, tokenStart, true, 10, tokens);
+ }
+ break;
+ }
+ number = number * 10 + (c - '0');
+ if (number > Integer.MAX_VALUE) {
+ return readNumeric(sql, tokenStart, end, i, c, true, false, tokens);
+ }
+ }
+ tokens.add(new Token.IntegerToken(tokenStart, (int) number));
+ return i;
+ }
+
+ private static int readNumeric(String sql, int tokenStart, int end, int i, char c, boolean integer,
+ boolean approximate, ArrayList<Token> tokens) {
+ if (!approximate) {
+ while (++i <= end) {
+ c = sql.charAt(i);
+ if (c == '.') {
+ integer = false;
+ } else if (c < '0' || c > '9') {
+ break;
+ }
+ }
+ }
+ if (i <= end && (c == 'E' || c == 'e')) {
+ integer = false;
+ approximate = true;
+ if (i == end) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ c = sql.charAt(++i);
+ if (c == '+' || c == '-') {
+ if (i == end) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ c = sql.charAt(++i);
+ }
+ if (c < '0' || c > '9') {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ while (++i <= end && (c = sql.charAt(i)) >= '0' && c <= '9') {
+ // go until the first non-number
+ }
+ }
+ if (integer) {
+ return finishBigInteger(sql, tokenStart, end, i, tokenStart, i <= end && (c == 'L' || c == 'l'), 10, tokens);
+ }
+ BigDecimal bd;
+ String string = sql.substring(tokenStart, i);
+ try {
+ bd = new BigDecimal(string);
+ } catch (NumberFormatException e) {
+ throw DbException.get(ErrorCode.DATA_CONVERSION_ERROR_1, e, string);
+ }
+ tokens.add(new Token.ValueToken(tokenStart, approximate ? ValueDecfloat.get(bd) : ValueNumeric.get(bd)));
+ return i;
+ }
+
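+ /**
+ * Finishes an integer literal that does not fit into INT: values in
+ * BIGINT range become BIGINT tokens, larger values become NUMERIC
+ * values, and an 'L' suffix demands BIGINT range.
+ */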
+ private static int finishBigInteger(String sql, int tokenStart, int end, int i, int start, boolean asBigint,
+ int radix, ArrayList<Token> tokens) {
+ int endIndex = i;
+ if (asBigint) {
+ i++;
+ }
+ if (radix == 16 && i <= end && Character.isJavaIdentifierPart(sql.codePointAt(i))) {
+ throw DbException.getSyntaxError(sql, tokenStart, "Hex number");
+ }
+ BigInteger bigInteger = new BigInteger(sql.substring(start, endIndex), radix);
+ Token token;
+ if (bigInteger.compareTo(ValueBigint.MAX_BI) > 0) {
+ if (asBigint) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ token = new Token.ValueToken(tokenStart, ValueNumeric.get(bigInteger));
+ } else {
+ token = new Token.BigintToken(start, bigInteger.longValue());
+ }
+ tokens.add(token);
+ return i;
+ }
+
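+ /**
+ * Skips a bracketed comment; such comments may be nested.
+ */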
+ private static int skipBracketedComment(String sql, int tokenStart, int end, int i) {
+ i += 2;
+ for (int level = 1; level > 0;) {
+ for (;;) {
+ if (i >= end) {
+ throw DbException.getSyntaxError(sql, tokenStart);
+ }
+ char c = sql.charAt(i++);
+ if (c == '*') {
+ if (sql.charAt(i) == '/') {
+ level--;
+ i++;
+ break;
+ }
+ } else if (c == '/' && sql.charAt(i) == '*') {
+ level++;
+ i++;
+ }
+ }
+ }
+ return i;
+ }
+
+ private static int skipSimpleComment(String sql, int end, int i) {
+ i += 2;
+ for (char c; i <= end && (c = sql.charAt(i)) != '\n' && c != '\r'; i++) {
+ //
+ }
+ return i;
+ }
+
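+ /**
+ * Reads the optional numeric index after a '?' or '$' parameter marker;
+ * index 0 denotes an ordinal parameter.
+ */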
+ private static int parseParameterIndex(String sql, int end, int i, ArrayList<Token> tokens) {
+ int tokenStart = i;
+ long number = 0;
+ for (char c; ++i <= end && (c = sql.charAt(i)) >= '0' && c <= '9';) {
+ number = number * 10 + (c - '0');
+ if (number > Integer.MAX_VALUE) {
+ throw DbException.getInvalidValueException("parameter index", number);
+ }
+ }
+ if (i > tokenStart + 1 && number == 0) {
+ throw DbException.getInvalidValueException("parameter index", number);
+ }
+ tokens.add(new Token.ParameterToken(tokenStart, (int) number));
+ return i;
+ }
+
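+ /**
+ * Assigns the 1-based index to the parameter token that was just added.
+ * Ordinal ('?') and explicitly indexed ('?1') parameters must not be
+ * mixed; a negative lastParameter marks the indexed mode.
+ */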
+ private static int assignParameterIndex(ArrayList<Token> tokens, int lastParameter) {
+ Token.ParameterToken parameter = (Token.ParameterToken) tokens.get(tokens.size() - 1);
+ if (parameter.index == 0) {
+ if (lastParameter < 0) {
+ throw DbException.get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS);
+ }
+ parameter.index = ++lastParameter;
+ } else if (lastParameter > 0) {
+ throw DbException.get(ErrorCode.CANNOT_MIX_INDEXED_AND_UNINDEXED_PARAMS);
+ } else {
+ lastParameter = -1;
+ }
+ return lastParameter;
+ }
+
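+ /**
+ * Applies Unicode escape processing to U&'...' and U&"..." tokens, e.g.
+ * U&'\0041' or U&'#0041' UESCAPE '#'; the default escape character is a
+ * backslash.
+ */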
+ private static void processUescape(String sql, ArrayList<Token> tokens) {
+ ListIterator<Token> i = tokens.listIterator();
+ while (i.hasNext()) {
+ Token token = i.next();
+ if (token.needsUnicodeConversion()) {
+ int uescape = '\\';
+ condition: if (i.hasNext()) {
+ Token t2 = i.next();
+ if (t2.tokenType() == UESCAPE) {
+ i.remove();
+ if (i.hasNext()) {
+ Token t3 = i.next();
+ i.remove();
+ if (t3 instanceof Token.CharacterStringToken) {
+ String s = ((Token.CharacterStringToken) t3).string;
+ if (s.codePointCount(0, s.length()) == 1) {
+ int escape = s.codePointAt(0);
+ if (!Character.isWhitespace(escape) && (escape < '0' || escape > '9')
+ && (escape < 'A' || escape > 'F') && (escape < 'a' || escape > 'f')) {
+ switch (escape) {
+ default:
+ uescape = escape;
+ break condition;
+ case '"':
+ case '\'':
+ case '+':
+ }
+ }
+ }
+ }
+ }
+ throw DbException.getSyntaxError(sql, t2.start() + 7, "''");
+ }
+ }
+ token.convertUnicode(uescape);
+ }
+ }
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterDomain.java b/h2/src/main/org/h2/command/ddl/AlterDomain.java
new file mode 100644
index 0000000000..4b96f6828d
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterDomain.java
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import java.util.function.BiPredicate;
+
+import org.h2.api.ErrorCode;
+import org.h2.engine.Database;
+import org.h2.engine.SessionLocal;
+import org.h2.message.DbException;
+import org.h2.schema.Domain;
+import org.h2.schema.Schema;
+import org.h2.table.Column;
+import org.h2.table.Table;
+
+/**
+ * The base class for ALTER DOMAIN commands.
+ */
+public abstract class AlterDomain extends SchemaOwnerCommand {
+
+ /**
+ * Processes all columns and domains that use the specified domain.
+ *
+ * @param session
+ * the session
+ * @param domain
+ * the domain to process
+ * @param columnProcessor
+ * column handler
+ * @param domainProcessor
+ * domain handler
+ * @param recompileExpressions
+ * whether processed expressions need to be recompiled
+ */
+ public static void forAllDependencies(SessionLocal session, Domain domain,
+ BiPredicate<Domain, Column> columnProcessor, BiPredicate<Domain, Domain> domainProcessor,
+ boolean recompileExpressions) {
+ Database db = session.getDatabase();
+ for (Schema schema : db.getAllSchemasNoMeta()) {
+ for (Domain targetDomain : schema.getAllDomains()) {
+ if (targetDomain.getDomain() == domain) {
+ if (domainProcessor == null || domainProcessor.test(domain, targetDomain)) {
+ if (recompileExpressions) {
+ domain.prepareExpressions(session);
+ }
+ db.updateMeta(session, targetDomain);
+ }
+ }
+ }
+ for (Table t : schema.getAllTablesAndViews(null)) {
+ if (forTable(session, domain, columnProcessor, recompileExpressions, t)) {
+ db.updateMeta(session, t);
+ }
+ }
+ }
+ for (Table t : session.getLocalTempTables()) {
+ forTable(session, domain, columnProcessor, recompileExpressions, t);
+ }
+ }
+
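+ /**
+ * Applies the column processor to all columns of the table that use the
+ * specified domain and reports whether any column was modified.
+ */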
+ private static boolean forTable(SessionLocal session, Domain domain, BiPredicate<Domain, Column> columnProcessor,
+ boolean recompileExpressions, Table t) {
+ boolean modified = false;
+ for (Column targetColumn : t.getColumns()) {
+ if (targetColumn.getDomain() == domain) {
+ boolean m = columnProcessor == null || columnProcessor.test(domain, targetColumn);
+ if (m) {
+ if (recompileExpressions) {
+ targetColumn.prepareExpressions(session);
+ }
+ modified = true;
+ }
+ }
+ }
+ return modified;
+ }
+
+ String domainName;
+
+ boolean ifDomainExists;
+
+ AlterDomain(SessionLocal session, Schema schema) {
+ super(session, schema);
+ }
+
+ public final void setDomainName(String domainName) {
+ this.domainName = domainName;
+ }
+
+ public final void setIfDomainExists(boolean b) {
+ ifDomainExists = b;
+ }
+
+ @Override
+ final long update(Schema schema) {
+ Domain domain = getSchema().findDomain(domainName);
+ if (domain == null) {
+ if (ifDomainExists) {
+ return 0;
+ }
+ throw DbException.get(ErrorCode.DOMAIN_NOT_FOUND_1, domainName);
+ }
+ return update(schema, domain);
+ }
+
+ abstract long update(Schema schema, Domain domain);
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java
new file mode 100644
index 0000000000..d8b8bcef52
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterDomainAddConstraint.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import org.h2.api.ErrorCode;
+import org.h2.command.CommandInterface;
+import org.h2.constraint.ConstraintDomain;
+import org.h2.engine.Database;
+import org.h2.engine.SessionLocal;
+import org.h2.expression.Expression;
+import org.h2.message.DbException;
+import org.h2.schema.Domain;
+import org.h2.schema.Schema;
+
+/**
+ * This class represents the statement ALTER DOMAIN ADD CONSTRAINT
+ */
+public class AlterDomainAddConstraint extends AlterDomain {
+
+ private String constraintName;
+ private Expression checkExpression;
+ private String comment;
+ private boolean checkExisting;
+ private final boolean ifNotExists;
+
+ public AlterDomainAddConstraint(SessionLocal session, Schema schema, boolean ifNotExists) {
+ super(session, schema);
+ this.ifNotExists = ifNotExists;
+ }
+
+ private String generateConstraintName(Domain domain) {
+ if (constraintName == null) {
+ constraintName = getSchema().getUniqueDomainConstraintName(session, domain);
+ }
+ return constraintName;
+ }
+
+ @Override
+ long update(Schema schema, Domain domain) {
+ try {
+ return tryUpdate(schema, domain);
+ } finally {
+ getSchema().freeUniqueName(constraintName);
+ }
+ }
+
+ /**
+ * Try to execute the statement.
+ *
+ * @param schema the schema
+ * @param domain the domain
+ * @return the update count
+ */
+ private int tryUpdate(Schema schema, Domain domain) {
+ if (constraintName != null && schema.findConstraint(session, constraintName) != null) {
+ if (ifNotExists) {
+ return 0;
+ }
+ throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraintName);
+ }
+ Database db = session.getDatabase();
+ db.lockMeta(session);
+
+ int id = getObjectId();
+ String name = generateConstraintName(domain);
+ ConstraintDomain constraint = new ConstraintDomain(schema, id, name, domain);
+ constraint.setExpression(session, checkExpression);
+ if (checkExisting) {
+ constraint.checkExistingData(session);
+ }
+ constraint.setComment(comment);
+ db.addSchemaObject(session, constraint);
+ domain.addConstraint(constraint);
+ return 0;
+ }
+
+ public void setConstraintName(String constraintName) {
+ this.constraintName = constraintName;
+ }
+
+ public String getConstraintName() {
+ return constraintName;
+ }
+
+ @Override
+ public int getType() {
+ return CommandInterface.ALTER_DOMAIN_ADD_CONSTRAINT;
+ }
+
+ public void setCheckExpression(Expression expression) {
+ this.checkExpression = expression;
+ }
+
+ public void setComment(String comment) {
+ this.comment = comment;
+ }
+
+ public void setCheckExisting(boolean b) {
+ this.checkExisting = b;
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java
new file mode 100644
index 0000000000..df9efaa5a8
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterDomainDropConstraint.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import org.h2.api.ErrorCode;
+import org.h2.command.CommandInterface;
+import org.h2.constraint.Constraint;
+import org.h2.constraint.Constraint.Type;
+import org.h2.constraint.ConstraintDomain;
+import org.h2.engine.SessionLocal;
+import org.h2.message.DbException;
+import org.h2.schema.Domain;
+import org.h2.schema.Schema;
+
+/**
+ * This class represents the statement ALTER DOMAIN DROP CONSTRAINT
+ */
+public class AlterDomainDropConstraint extends AlterDomain {
+
+ private String constraintName;
+ private final boolean ifConstraintExists;
+
+ public AlterDomainDropConstraint(SessionLocal session, Schema schema, boolean ifConstraintExists) {
+ super(session, schema);
+ this.ifConstraintExists = ifConstraintExists;
+ }
+
+ public void setConstraintName(String string) {
+ constraintName = string;
+ }
+
+ @Override
+ long update(Schema schema, Domain domain) {
+ Constraint constraint = schema.findConstraint(session, constraintName);
+ if (constraint == null || constraint.getConstraintType() != Type.DOMAIN
+ || ((ConstraintDomain) constraint).getDomain() != domain) {
+ if (!ifConstraintExists) {
+ throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName);
+ }
+ } else {
+ session.getDatabase().removeSchemaObject(session, constraint);
+ }
+ return 0;
+ }
+
+ @Override
+ public int getType() {
+ return CommandInterface.ALTER_DOMAIN_DROP_CONSTRAINT;
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java b/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java
new file mode 100644
index 0000000000..a5d519e379
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterDomainExpressions.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import org.h2.command.CommandInterface;
+import org.h2.engine.SessionLocal;
+import org.h2.expression.Expression;
+import org.h2.message.DbException;
+import org.h2.schema.Domain;
+import org.h2.schema.Schema;
+import org.h2.table.Column;
+import org.h2.table.ColumnTemplate;
+
+/**
+ * This class represents the statements
+ * ALTER DOMAIN SET DEFAULT
+ * ALTER DOMAIN DROP DEFAULT
+ * ALTER DOMAIN SET ON UPDATE
+ * ALTER DOMAIN DROP ON UPDATE
+ */
+public class AlterDomainExpressions extends AlterDomain {
+
+ private final int type;
+
+ private Expression expression;
+
+ public AlterDomainExpressions(SessionLocal session, Schema schema, int type) {
+ super(session, schema);
+ this.type = type;
+ }
+
+ public void setExpression(Expression expression) {
+ this.expression = expression;
+ }
+
+ @Override
+ long update(Schema schema, Domain domain) {
+ switch (type) {
+ case CommandInterface.ALTER_DOMAIN_DEFAULT:
+ domain.setDefaultExpression(session, expression);
+ break;
+ case CommandInterface.ALTER_DOMAIN_ON_UPDATE:
+ domain.setOnUpdateExpression(session, expression);
+ break;
+ default:
+ throw DbException.getInternalError("type=" + type);
+ }
+ if (expression != null) {
+ forAllDependencies(session, domain, this::copyColumn, this::copyDomain, true);
+ }
+ session.getDatabase().updateMeta(session, domain);
+ return 0;
+ }
+
+ private boolean copyColumn(Domain domain, Column targetColumn) {
+ return copyExpressions(session, domain, targetColumn);
+ }
+
+ private boolean copyDomain(Domain domain, Domain targetDomain) {
+ return copyExpressions(session, domain, targetDomain);
+ }
+
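+ /**
+ * Copies the domain's new DEFAULT or ON UPDATE expression to a dependent
+ * column or domain that does not define its own expression.
+ */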
+ private boolean copyExpressions(SessionLocal session, Domain domain, ColumnTemplate targetColumn) {
+ switch (type) {
+ case CommandInterface.ALTER_DOMAIN_DEFAULT: {
+ Expression e = domain.getDefaultExpression();
+ if (e != null && targetColumn.getDefaultExpression() == null) {
+ targetColumn.setDefaultExpression(session, e);
+ return true;
+ }
+ break;
+ }
+ case CommandInterface.ALTER_DOMAIN_ON_UPDATE: {
+ Expression e = domain.getOnUpdateExpression();
+ if (e != null && targetColumn.getOnUpdateExpression() == null) {
+ targetColumn.setOnUpdateExpression(session, e);
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public int getType() {
+ return type;
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainRename.java b/h2/src/main/org/h2/command/ddl/AlterDomainRename.java
new file mode 100644
index 0000000000..f0b65e9705
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterDomainRename.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import org.h2.api.ErrorCode;
+import org.h2.command.CommandInterface;
+import org.h2.engine.SessionLocal;
+import org.h2.message.DbException;
+import org.h2.schema.Domain;
+import org.h2.schema.Schema;
+
+/**
+ * This class represents the statement
+ * ALTER DOMAIN RENAME
+ */
+public class AlterDomainRename extends AlterDomain {
+
+ private String newDomainName;
+
+ public AlterDomainRename(SessionLocal session, Schema schema) {
+ super(session, schema);
+ }
+
+ public void setNewDomainName(String name) {
+ newDomainName = name;
+ }
+
+ @Override
+ long update(Schema schema, Domain domain) {
+ Domain d = schema.findDomain(newDomainName);
+ if (d != null) {
+ if (domain != d) {
+ throw DbException.get(ErrorCode.DOMAIN_ALREADY_EXISTS_1, newDomainName);
+ }
+ if (newDomainName.equals(domain.getName())) {
+ return 0;
+ }
+ }
+ session.getDatabase().renameSchemaObject(session, domain, newDomainName);
+ forAllDependencies(session, domain, null, null, false);
+ return 0;
+ }
+
+ @Override
+ public int getType() {
+ return CommandInterface.ALTER_DOMAIN_RENAME;
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java b/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java
new file mode 100644
index 0000000000..3f4cfbad23
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterDomainRenameConstraint.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import org.h2.api.ErrorCode;
+import org.h2.command.CommandInterface;
+import org.h2.constraint.Constraint;
+import org.h2.constraint.Constraint.Type;
+import org.h2.constraint.ConstraintDomain;
+import org.h2.engine.SessionLocal;
+import org.h2.message.DbException;
+import org.h2.schema.Domain;
+import org.h2.schema.Schema;
+
+/**
+ * This class represents the statement
+ * ALTER DOMAIN RENAME CONSTRAINT
+ */
+public class AlterDomainRenameConstraint extends AlterDomain {
+
+ private String constraintName;
+ private String newConstraintName;
+
+ public AlterDomainRenameConstraint(SessionLocal session, Schema schema) {
+ super(session, schema);
+ }
+
+ public void setConstraintName(String string) {
+ constraintName = string;
+ }
+
+ public void setNewConstraintName(String newName) {
+ this.newConstraintName = newName;
+ }
+
+ @Override
+ long update(Schema schema, Domain domain) {
+ Constraint constraint = getSchema().findConstraint(session, constraintName);
+ if (constraint == null || constraint.getConstraintType() != Type.DOMAIN
+ || ((ConstraintDomain) constraint).getDomain() != domain) {
+ throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, constraintName);
+ }
+ if (getSchema().findConstraint(session, newConstraintName) != null
+ || newConstraintName.equals(constraintName)) {
+ throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, newConstraintName);
+ }
+ session.getDatabase().renameSchemaObject(session, constraint, newConstraintName);
+ return 0;
+ }
+
+ @Override
+ public int getType() {
+ return CommandInterface.ALTER_DOMAIN_RENAME_CONSTRAINT;
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterIndexRename.java b/h2/src/main/org/h2/command/ddl/AlterIndexRename.java
index e09bed3924..a09d820ce2 100644
--- a/h2/src/main/org/h2/command/ddl/AlterIndexRename.java
+++ b/h2/src/main/org/h2/command/ddl/AlterIndexRename.java
@@ -1,6 +1,6 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command.ddl;
@@ -9,7 +9,7 @@
import org.h2.command.CommandInterface;
import org.h2.engine.Database;
import org.h2.engine.Right;
-import org.h2.engine.Session;
+import org.h2.engine.SessionLocal;
import org.h2.index.Index;
import org.h2.message.DbException;
import org.h2.schema.Schema;
@@ -20,15 +20,25 @@
*/
public class AlterIndexRename extends DefineCommand {
- private Index oldIndex;
+ private boolean ifExists;
+ private Schema oldSchema;
+ private String oldIndexName;
private String newIndexName;
- public AlterIndexRename(Session session) {
+ public AlterIndexRename(SessionLocal session) {
super(session);
}
- public void setOldIndex(Index index) {
- oldIndex = index;
+ public void setIfExists(boolean b) {
+ ifExists = b;
+ }
+
+ public void setOldSchema(Schema old) {
+ oldSchema = old;
+ }
+
+ public void setOldName(String name) {
+ oldIndexName = name;
}
public void setNewName(String name) {
@@ -36,16 +46,22 @@ public void setNewName(String name) {
}
@Override
- public int update() {
- session.commit(true);
+ public long update() {
Database db = session.getDatabase();
- Schema schema = oldIndex.getSchema();
- if (schema.findIndex(session, newIndexName) != null ||
- newIndexName.equals(oldIndex.getName())) {
+ Index oldIndex = oldSchema.findIndex(session, oldIndexName);
+ if (oldIndex == null) {
+ if (!ifExists) {
+ throw DbException.get(ErrorCode.INDEX_NOT_FOUND_1,
+ oldIndexName);
+ }
+ return 0;
+ }
+ if (oldSchema.findIndex(session, newIndexName) != null ||
+ newIndexName.equals(oldIndexName)) {
throw DbException.get(ErrorCode.INDEX_ALREADY_EXISTS_1,
newIndexName);
}
- session.getUser().checkRight(oldIndex.getTable(), Right.ALL);
+ session.getUser().checkTableRight(oldIndex.getTable(), Right.SCHEMA_OWNER);
db.renameSchemaObject(session, oldIndex, newIndexName);
return 0;
}
diff --git a/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java b/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java
index 13ca821104..3ce0b0fb3b 100644
--- a/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java
+++ b/h2/src/main/org/h2/command/ddl/AlterSchemaRename.java
@@ -1,20 +1,19 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command.ddl;
+import java.util.ArrayList;
import org.h2.api.ErrorCode;
import org.h2.command.CommandInterface;
import org.h2.engine.Database;
-import org.h2.engine.Session;
+import org.h2.engine.SessionLocal;
import org.h2.message.DbException;
import org.h2.schema.Schema;
import org.h2.schema.SchemaObject;
-import java.util.ArrayList;
-
/**
* This class represents the statement
* ALTER SCHEMA RENAME
@@ -24,7 +23,7 @@ public class AlterSchemaRename extends DefineCommand {
private Schema oldSchema;
private String newSchemaName;
- public AlterSchemaRename(Session session) {
+ public AlterSchemaRename(SessionLocal session) {
super(session);
}
@@ -37,23 +36,23 @@ public void setNewName(String name) {
}
@Override
- public int update() {
- session.commit(true);
+ public long update() {
+ session.getUser().checkSchemaAdmin();
Database db = session.getDatabase();
if (!oldSchema.canDrop()) {
- throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1,
- oldSchema.getName());
+ throw DbException.get(ErrorCode.SCHEMA_CAN_NOT_BE_DROPPED_1, oldSchema.getName());
}
- if (db.findSchema(newSchemaName) != null ||
- newSchemaName.equals(oldSchema.getName())) {
- throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1,
- newSchemaName);
+ if (db.findSchema(newSchemaName) != null || newSchemaName.equals(oldSchema.getName())) {
+ throw DbException.get(ErrorCode.SCHEMA_ALREADY_EXISTS_1, newSchemaName);
}
- session.getUser().checkSchemaAdmin();
db.renameDatabaseObject(session, oldSchema, newSchemaName);
- ArrayList<SchemaObject> all = db.getAllSchemaObjects();
- for (SchemaObject schemaObject : all) {
- db.updateMeta(session, schemaObject);
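+ // the stored definitions of schema objects reference the schema name,
+ // so the metadata of every object needs to be rewritten after the rename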
+ ArrayList<SchemaObject> all = new ArrayList<>();
+ for (Schema schema : db.getAllSchemas()) {
+ schema.getAll(all);
+ for (SchemaObject schemaObject : all) {
+ db.updateMeta(session, schemaObject);
+ }
+ all.clear();
}
return 0;
}
diff --git a/h2/src/main/org/h2/command/ddl/AlterSequence.java b/h2/src/main/org/h2/command/ddl/AlterSequence.java
new file mode 100644
index 0000000000..706672a7c1
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterSequence.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import org.h2.api.ErrorCode;
+import org.h2.command.CommandInterface;
+import org.h2.engine.Right;
+import org.h2.engine.SessionLocal;
+import org.h2.message.DbException;
+import org.h2.schema.Schema;
+import org.h2.schema.Sequence;
+import org.h2.table.Column;
+
+/**
+ * This class represents the statement ALTER SEQUENCE.
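+ * <p>
+ * For example: {@code ALTER SEQUENCE SEQ_ID RESTART WITH 100 INCREMENT BY 2},
+ * where {@code SEQ_ID} is a placeholder sequence name.
+ * </p>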
+ */
+public class AlterSequence extends SchemaOwnerCommand {
+
+ private boolean ifExists;
+
+ private Column column;
+
+ private Boolean always;
+
+ private String sequenceName;
+
+ private Sequence sequence;
+
+ private SequenceOptions options;
+
+ public AlterSequence(SessionLocal session, Schema schema) {
+ super(session, schema);
+ transactional = true;
+ }
+
+ public void setIfExists(boolean b) {
+ ifExists = b;
+ }
+
+ public void setSequenceName(String sequenceName) {
+ this.sequenceName = sequenceName;
+ }
+
+ public void setOptions(SequenceOptions options) {
+ this.options = options;
+ }
+
+ @Override
+ public boolean isTransactional() {
+ return true;
+ }
+
+ /**
+ * Set the column.
+ *
+ * @param column the column
+ * @param always whether value should be always generated, or null if "SET
+ * GENERATED" is not specified
+ */
+ public void setColumn(Column column, Boolean always) {
+ this.column = column;
+ this.always = always;
+ sequence = column.getSequence();
+ if (sequence == null && !ifExists) {
+ throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, column.getTraceSQL());
+ }
+ }
+
+ @Override
+ long update(Schema schema) {
+ if (sequence == null) {
+ sequence = schema.findSequence(sequenceName);
+ if (sequence == null) {
+ if (!ifExists) {
+ throw DbException.get(ErrorCode.SEQUENCE_NOT_FOUND_1, sequenceName);
+ }
+ return 0;
+ }
+ }
+ if (column != null) {
+ session.getUser().checkTableRight(column.getTable(), Right.SCHEMA_OWNER);
+ }
+ options.setDataType(sequence.getDataType());
+ Long startValue = options.getStartValue(session);
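+ // when RESTART is specified without a value, fall back to the new START
+ // WITH value, or to the existing one if none was specified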
+ sequence.modify(
+ options.getRestartValue(session, startValue != null ? startValue : sequence.getStartValue()),
+ startValue,
+ options.getMinValue(sequence, session), options.getMaxValue(sequence, session),
+ options.getIncrement(session), options.getCycle(), options.getCacheSize(session));
+ sequence.flush(session);
+ if (column != null && always != null) {
+ column.setSequence(sequence, always);
+ session.getDatabase().updateMeta(session, column.getTable());
+ }
+ return 0;
+ }
+
+ @Override
+ public int getType() {
+ return CommandInterface.ALTER_SEQUENCE;
+ }
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterTable.java b/h2/src/main/org/h2/command/ddl/AlterTable.java
new file mode 100644
index 0000000000..2cfbd7ff85
--- /dev/null
+++ b/h2/src/main/org/h2/command/ddl/AlterTable.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
+ * Initial Developer: H2 Group
+ */
+package org.h2.command.ddl;
+
+import org.h2.api.ErrorCode;
+import org.h2.engine.Right;
+import org.h2.engine.SessionLocal;
+import org.h2.message.DbException;
+import org.h2.schema.Schema;
+import org.h2.table.Table;
+
+/**
+ * The base class for ALTER TABLE commands.
+ */
+public abstract class AlterTable extends SchemaCommand {
+
+ String tableName;
+
+ boolean ifTableExists;
+
+ AlterTable(SessionLocal session, Schema schema) {
+ super(session, schema);
+ }
+
+ public final void setTableName(String tableName) {
+ this.tableName = tableName;
+ }
+
+ public final void setIfTableExists(boolean b) {
+ ifTableExists = b;
+ }
+
+ @Override
+ public final long update() {
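+ // resolve the table, honor IF EXISTS, check the required right,
+ // then delegate to the subclass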
+ Table table = getSchema().findTableOrView(session, tableName);
+ if (table == null) {
+ if (ifTableExists) {
+ return 0;
+ }
+ throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName);
+ }
+ session.getUser().checkTableRight(table, Right.SCHEMA_OWNER);
+ return update(table);
+ }
+
+ abstract long update(Table table);
+
+}
diff --git a/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java b/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java
index 1c74fdc000..05c425b2e0 100644
--- a/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java
+++ b/h2/src/main/org/h2/command/ddl/AlterTableAddConstraint.java
@@ -1,23 +1,23 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command.ddl;
import java.util.ArrayList;
-import java.util.HashSet;
import org.h2.api.ErrorCode;
import org.h2.command.CommandInterface;
import org.h2.constraint.Constraint;
+import org.h2.constraint.ConstraintActionType;
import org.h2.constraint.ConstraintCheck;
import org.h2.constraint.ConstraintReferential;
import org.h2.constraint.ConstraintUnique;
import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.Right;
-import org.h2.engine.Session;
+import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.index.Index;
import org.h2.index.IndexType;
@@ -27,20 +27,20 @@
import org.h2.table.IndexColumn;
import org.h2.table.Table;
import org.h2.table.TableFilter;
-import org.h2.util.New;
+import org.h2.util.HasSQL;
+import org.h2.value.DataType;
/**
* This class represents the statement
* ALTER TABLE ADD CONSTRAINT
*/
-public class AlterTableAddConstraint extends SchemaCommand {
+public class AlterTableAddConstraint extends AlterTable {
- private int type;
+ private final int type;
private String constraintName;
- private String tableName;
private IndexColumn[] indexColumns;
- private int deleteAction;
- private int updateAction;
+ private ConstraintActionType deleteAction = ConstraintActionType.RESTRICT;
+ private ConstraintActionType updateAction = ConstraintActionType.RESTRICT;
private Schema refSchema;
private String refTableName;
private IndexColumn[] refIndexColumns;
@@ -50,29 +50,38 @@ public class AlterTableAddConstraint extends SchemaCommand {
private boolean checkExisting;
private boolean primaryKeyHash;
private final boolean ifNotExists;
- private ArrayList<Index> createdIndexes = New.arrayList();
+ private final ArrayList<Index> createdIndexes = new ArrayList<>();
+ private ConstraintUnique createdUniqueConstraint;
- public AlterTableAddConstraint(Session session, Schema schema,
- boolean ifNotExists) {
+ public AlterTableAddConstraint(SessionLocal session, Schema schema, int type, boolean ifNotExists) {
super(session, schema);
this.ifNotExists = ifNotExists;
+ this.type = type;
}
private String generateConstraintName(Table table) {
if (constraintName == null) {
- constraintName = getSchema().getUniqueConstraintName(
- session, table);
+ constraintName = getSchema().getUniqueConstraintName(session, table);
}
return constraintName;
}
@Override
- public int update() {
+ public long update(Table table) {
try {
- return tryUpdate();
+ return tryUpdate(table);
} catch (DbException e) {
- for (Index index : createdIndexes) {
- session.getDatabase().removeSchemaObject(session, index);
+ try {
+ if (createdUniqueConstraint != null) {
+ Index index = createdUniqueConstraint.getIndex();
+ session.getDatabase().removeSchemaObject(session, createdUniqueConstraint);
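+ // the backing index is removed together with the constraint,
+ // so it must not be removed a second time below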
+ createdIndexes.remove(index);
+ }
+ for (Index index : createdIndexes) {
+ session.getDatabase().removeSchemaObject(session, index);
+ }
+ } catch (Throwable ex) {
+ e.addSuppressed(ex);
}
throw e;
} finally {
@@ -85,22 +94,25 @@ public int update() {
*
* @return the update count
*/
- private int tryUpdate() {
- if (!transactional) {
- session.commit(true);
- }
- Database db = session.getDatabase();
- Table table = getSchema().getTableOrView(session, tableName);
- if (getSchema().findConstraint(session, constraintName) != null) {
+ private int tryUpdate(Table table) {
+ if (constraintName != null && getSchema().findConstraint(session, constraintName) != null) {
if (ifNotExists) {
return 0;
}
- throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1,
- constraintName);
+ /*
+ * 1.4.200 and older databases don't always have a unique constraint
+ * for each referential constraint, so such constraints are created
+ * here and may reuse the generated name of some other not yet
+ * initialized constraint, which would lead to a name conflict.
+ */
+ if (!session.isQuirksMode()) {
+ throw DbException.get(ErrorCode.CONSTRAINT_ALREADY_EXISTS_1, constraintName);
+ }
+ constraintName = null;
}
- session.getUser().checkRight(table, Right.ALL);
+ Database db = session.getDatabase();
db.lockMeta(session);
- table.lock(session, true, true);
+ table.lock(session, Table.EXCLUSIVE_LOCK);
Constraint constraint;
switch (type) {
case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_PRIMARY_KEY: {
@@ -109,7 +121,7 @@ private int tryUpdate() {
ArrayList<Constraint> constraints = table.getConstraints();
for (int i = 0; constraints != null && i < constraints.size(); i++) {
Constraint c = constraints.get(i);
- if (Constraint.PRIMARY_KEY.equals(c.getConstraintType())) {
+ if (Constraint.Type.PRIMARY_KEY == c.getConstraintType()) {
throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY);
}
}
@@ -125,58 +137,57 @@ private int tryUpdate() {
throw DbException.get(ErrorCode.SECOND_PRIMARY_KEY);
}
}
- }
- if (index == null) {
+ } else {
IndexType indexType = IndexType.createPrimaryKey(
table.isPersistIndexes(), primaryKeyHash);
String indexName = table.getSchema().getUniqueIndexName(
session, table, Constants.PREFIX_PRIMARY_KEY);
- int id = getObjectId();
+ int indexId = session.getDatabase().allocateObjectId();
try {
- index = table.addIndex(session, indexName, id,
- indexColumns, indexType, true, null);
+ index = table.addIndex(session, indexName, indexId, indexColumns, indexColumns.length, indexType,
+ true, null);
} finally {
getSchema().freeUniqueName(indexName);
}
}
index.getIndexType().setBelongsToConstraint(true);
- int constraintId = getObjectId();
+ int id = getObjectId();
String name = generateConstraintName(table);
ConstraintUnique pk = new ConstraintUnique(getSchema(),
- constraintId, name, table, true);
+ id, name, table, true);
pk.setColumns(indexColumns);
pk.setIndex(index, true);
constraint = pk;
break;
}
- case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE: {
- IndexColumn.mapColumns(indexColumns, table);
- boolean isOwner = false;
- if (index != null && canUseUniqueIndex(index, table, indexColumns)) {
- isOwner = true;
- index.getIndexType().setBelongsToConstraint(true);
- } else {
- index = getUniqueIndex(table, indexColumns);
- if (index == null) {
- index = createIndex(table, indexColumns, true);
- isOwner = true;
+ case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_UNIQUE:
+ if (indexColumns == null) {
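+ // UNIQUE(VALUE): no column list was specified, use all visible columns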
+ Column[] columns = table.getColumns();
+ int columnCount = columns.length;
+ ArrayList<IndexColumn> list = new ArrayList<>(columnCount);
+ for (int i = 0; i < columnCount; i++) {
+ Column c = columns[i];
+ if (c.getVisible()) {
+ IndexColumn indexColumn = new IndexColumn(c.getName());
+ indexColumn.column = c;
+ list.add(indexColumn);
+ }
+ }
+ if (list.isEmpty()) {
+ throw DbException.get(ErrorCode.SYNTAX_ERROR_1, "UNIQUE(VALUE) on table without columns");
}
+ indexColumns = list.toArray(new IndexColumn[0]);
+ } else {
+ IndexColumn.mapColumns(indexColumns, table);
}
- int id = getObjectId();
- String name = generateConstraintName(table);
- ConstraintUnique unique = new ConstraintUnique(getSchema(), id,
- name, table, false);
- unique.setColumns(indexColumns);
- unique.setIndex(index, isOwner);
- constraint = unique;
+ constraint = createUniqueConstraint(table, index, indexColumns, false);
break;
- }
case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_CHECK: {
int id = getObjectId();
String name = generateConstraintName(table);
ConstraintCheck check = new ConstraintCheck(getSchema(), id, name, table);
- TableFilter filter = new TableFilter(session, table, null, false, null);
- checkExpression.mapColumns(filter, 0);
+ TableFilter filter = new TableFilter(session, table, null, false, null, 0, null);
+ checkExpression.mapColumns(filter, 0, Expression.MAP_INITIAL);
checkExpression = checkExpression.optimize(session);
check.setExpression(checkExpression);
check.setTableFilter(filter);
@@ -187,90 +198,154 @@ private int tryUpdate() {
break;
}
case CommandInterface.ALTER_TABLE_ADD_CONSTRAINT_REFERENTIAL: {
- Table refTable = refSchema.getTableOrView(session, refTableName);
- session.getUser().checkRight(refTable, Right.ALL);
+ Table refTable = refSchema.resolveTableOrView(session, refTableName);
+ if (refTable == null) {
+ throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, refTableName);
+ }
+ if (refTable != table) {
+ session.getUser().checkTableRight(refTable, Right.SCHEMA_OWNER);
+ }
if (!refTable.canReference()) {
- throw DbException.getUnsupportedException("Reference " +
- refTable.getSQL());
+ StringBuilder builder = new StringBuilder("Reference ");
+ refTable.getSQL(builder, HasSQL.TRACE_SQL_FLAGS);
+ throw DbException.getUnsupportedException(builder.toString());
}
boolean isOwner = false;
IndexColumn.mapColumns(indexColumns, table);
- if (index != null && canUseIndex(index, table, indexColumns, false)) {
- isOwner = true;
- index.getIndexType().setBelongsToConstraint(true);
- } else {
- if (db.isStarting()) {
- // before version 1.3.176, an existing index was used:
- // must do the same to avoid
- // Unique index or primary key violation:
- // "PRIMARY KEY ON """".PAGE_INDEX"
- index = getIndex(table, indexColumns, true);
- } else {
- index = getIndex(table, indexColumns, false);
- }
- if (index == null) {
- index = createIndex(table, indexColumns, false);
- isOwner = true;
- }
- }
if (refIndexColumns == null) {
- Index refIdx = refTable.getPrimaryKey();
- refIndexColumns = refIdx.getIndexColumns();
+ refIndexColumns = refTable.getPrimaryKey().getIndexColumns();
} else {
IndexColumn.mapColumns(refIndexColumns, refTable);
}
- if (refIndexColumns.length != indexColumns.length) {
+ int columnCount = indexColumns.length;
+ if (refIndexColumns.length != columnCount) {
throw DbException.get(ErrorCode.COLUMN_COUNT_DOES_NOT_MATCH);
}
- boolean isRefOwner = false;
- if (refIndex != null && refIndex.getTable() == refTable &&
- canUseIndex(refIndex, refTable, refIndexColumns, false)) {
- isRefOwner = true;
- refIndex.getIndexType().setBelongsToConstraint(true);
- } else {
- refIndex = null;
+ for (IndexColumn indexColumn : indexColumns) {
+ Column column = indexColumn.column;
+ if (column.isGeneratedAlways()) {
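+ // referential actions that assign a value cannot target a generated column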
+ switch (deleteAction) {
+ case SET_DEFAULT:
+ case SET_NULL:
+ throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2,
+ column.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString(),
+ "ON DELETE " + deleteAction.getSqlName());
+ default:
+ // All other actions are allowed
+ }
+ switch (updateAction) {
+ case CASCADE:
+ case SET_DEFAULT:
+ case SET_NULL:
+ throw DbException.get(ErrorCode.GENERATED_COLUMN_CANNOT_BE_UPDATABLE_BY_CONSTRAINT_2,
+ column.getSQLWithTable(new StringBuilder(), HasSQL.TRACE_SQL_FLAGS).toString(),
+ "ON UPDATE " + updateAction.getSqlName());
+ default:
+ // All other actions are allowed
+ }
+ }
}
- if (refIndex == null) {
- refIndex = getIndex(refTable, refIndexColumns, false);
- if (refIndex == null) {
- refIndex = createIndex(refTable, refIndexColumns, true);
- isRefOwner = true;
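+ // both sides of the relationship must have stably comparable data types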
+ for (int i = 0; i < columnCount; i++) {
+ Column column1 = indexColumns[i].column, column2 = refIndexColumns[i].column;
+ if (!DataType.areStableComparable(column1.getType(), column2.getType())) {
+ throw DbException.get(ErrorCode.UNCOMPARABLE_REFERENCED_COLUMN_2, column1.getCreateSQL(),
+ column2.getCreateSQL());
+ }
+ }
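+ // a foreign key must reference the columns of a PRIMARY KEY or UNIQUE
+ // constraint; quirks and compatibility modes create such a constraint on demand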
+ ConstraintUnique unique = getUniqueConstraint(refTable, refIndexColumns);
+ if (unique == null && !session.isQuirksMode()
+ && !session.getMode().createUniqueConstraintForReferencedColumns) {
+ throw DbException.get(ErrorCode.CONSTRAINT_NOT_FOUND_1, IndexColumn.writeColumns(
+ new StringBuilder("PRIMARY KEY | UNIQUE ("), refIndexColumns, HasSQL.TRACE_SQL_FLAGS)
+ .append(')').toString());
+ }
+ if (index != null && canUseIndex(index, table, indexColumns, false)) {
+ isOwner = true;
+ index.getIndexType().setBelongsToConstraint(true);
+ } else {
+ index = getIndex(table, indexColumns, false);
+ if (index == null) {
+ index = createIndex(table, indexColumns, false);
+ isOwner = true;
}
}
int id = getObjectId();
String name = generateConstraintName(table);
- ConstraintReferential ref = new ConstraintReferential(getSchema(),
+ ConstraintReferential refConstraint = new ConstraintReferential(getSchema(),
id, name, table);
- ref.setColumns(indexColumns);
- ref.setIndex(index, isOwner);
- ref.setRefTable(refTable);
- ref.setRefColumns(refIndexColumns);
- ref.setRefIndex(refIndex, isRefOwner);
+ refConstraint.setColumns(indexColumns);
+ refConstraint.setIndex(index, isOwner);
+ refConstraint.setRefTable(refTable);
+ refConstraint.setRefColumns(refIndexColumns);
+ if (unique == null) {
+ unique = createUniqueConstraint(refTable, refIndex, refIndexColumns, true);
+ addConstraintToTable(db, refTable, unique);
+ createdUniqueConstraint = unique;
+ }
+ refConstraint.setRefConstraint(unique);
if (checkExisting) {
- ref.checkExistingData(session);
+ refConstraint.checkExistingData(session);
}
- constraint = ref;
- refTable.addConstraint(constraint);
- ref.setDeleteAction(deleteAction);
- ref.setUpdateAction(updateAction);
+ refTable.addConstraint(refConstraint);
+ refConstraint.setDeleteAction(deleteAction);
+ refConstraint.setUpdateAction(updateAction);
+ constraint = refConstraint;
break;
}
default:
- throw DbException.throwInternalError("type=" + type);
+ throw DbException.getInternalError("type=" + type);
}
// parent relationship is already set with addConstraint
constraint.setComment(comment);
+ addConstraintToTable(db, table, constraint);
+ return 0;
+ }
+
+ private ConstraintUnique createUniqueConstraint(Table table, Index index, IndexColumn[] indexColumns,
+ boolean forForeignKey) {
+ boolean isOwner = false;
+ if (index != null && canUseIndex(index, table, indexColumns, true)) {
+ isOwner = true;
+ index.getIndexType().setBelongsToConstraint(true);
+ } else {
+ index = getIndex(table, indexColumns, true);
+ if (index == null) {
+ index = createIndex(table, indexColumns, true);
+ isOwner = true;
+ }
+ }
+ int id;
+ String name;
+ Schema tableSchema = table.getSchema();
+ if (forForeignKey) {
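+ // reserve the name of the referential constraint so that the generated
+ // name of this auxiliary constraint cannot collide with it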
+ id = session.getDatabase().allocateObjectId();
+ try {
+ tableSchema.reserveUniqueName(constraintName);
+ name = tableSchema.getUniqueConstraintName(session, table);
+ } finally {
+ tableSchema.freeUniqueName(constraintName);
+ }
+ } else {
+ id = getObjectId();
+ name = generateConstraintName(table);
+ }
+ ConstraintUnique unique = new ConstraintUnique(tableSchema, id, name, table, false);
+ unique.setColumns(indexColumns);
+ unique.setIndex(index, isOwner);
+ return unique;
+ }
+
+ private void addConstraintToTable(Database db, Table table, Constraint constraint) {
if (table.isTemporary() && !table.isGlobalTemporary()) {
session.addLocalTempTableConstraint(constraint);
} else {
db.addSchemaObject(session, constraint);
}
table.addConstraint(constraint);
- return 0;
}
private Index createIndex(Table t, IndexColumn[] cols, boolean unique) {
- int indexId = getObjectId();
+ int indexId = session.getDatabase().allocateObjectId();
IndexType indexType;
if (unique) {
// for unique constraints
@@ -284,8 +359,8 @@ private Index createIndex(Table t, IndexColumn[] cols, boolean unique) {
String indexName = t.getSchema().getUniqueIndexName(session, t,
prefix + "_INDEX_");
try {
- Index index = t.addIndex(session, indexName, indexId, cols,
- indexType, true, null);
+ Index index = t.addIndex(session, indexName, indexId, cols, unique ? cols.length : 0, indexType, true,
+ null);
createdIndexes.add(index);
return index;
} finally {
@@ -293,87 +368,66 @@ private Index createIndex(Table t, IndexColumn[] cols, boolean unique) {
}
}
- public void setDeleteAction(int action) {
+ public void setDeleteAction(ConstraintActionType action) {
this.deleteAction = action;
}
- public void setUpdateAction(int action) {
+ public void setUpdateAction(ConstraintActionType action) {
this.updateAction = action;
}
- private static Index getUniqueIndex(Table t, IndexColumn[] cols) {
- for (Index idx : t.getIndexes()) {
- if (canUseUniqueIndex(idx, t, cols)) {
- return idx;
- }
- }
- return null;
- }
-
- private static Index getIndex(Table t, IndexColumn[] cols, boolean moreColumnOk) {
- for (Index idx : t.getIndexes()) {
- if (canUseIndex(idx, t, cols, moreColumnOk)) {
- return idx;
+ private static ConstraintUnique getUniqueConstraint(Table t, IndexColumn[] cols) {
+ ArrayList<Constraint> constraints = t.getConstraints();
+ if (constraints != null) {
+ for (Constraint constraint : constraints) {
+ if (constraint.getTable() == t) {
+ Constraint.Type constraintType = constraint.getConstraintType();
+ if (constraintType == Constraint.Type.PRIMARY_KEY || constraintType == Constraint.Type.UNIQUE) {
+ if (canUseIndex(constraint.getIndex(), t, cols, true)) {
+ return (ConstraintUnique) constraint;
+ }
+ }
+ }
}
}
return null;
}
- private static boolean canUseUniqueIndex(Index idx, Table table,
- IndexColumn[] cols) {
- if (idx.getTable() != table || !idx.getIndexType().isUnique()) {
- return false;
- }
- Column[] indexCols = idx.getColumns();
- if (indexCols.length > cols.length) {
- return false;
- }
- HashSet<Column> set = New.hashSet();
- for (IndexColumn c : cols) {
- set.add(c.column);
- }
- for (Column c : indexCols) {
- // all columns of the index must be part of the list,
- // but not all columns of the list need to be part of the index
- if (!set.contains(c)) {
- return false;
+ private static Index getIndex(Table t, IndexColumn[] cols, boolean unique) {
+ ArrayList<Index> indexes = t.getIndexes();
+ Index index = null;
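+ // among all usable indexes prefer the one with the fewest columns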
+ if (indexes != null) {
+ for (Index idx : indexes) {
+ if (canUseIndex(idx, t, cols, unique)) {
+ if (index == null || idx.getIndexColumns().length < index.getIndexColumns().length) {
+ index = idx;
+ }
+ }
}
}
- return true;
+ return index;
}
- private static boolean canUseIndex(Index existingIndex, Table table,
- IndexColumn[] cols, boolean moreColumnsOk) {
- if (existingIndex.getTable() != table || existingIndex.getCreateSQL() == null) {
- // can't use the scan index or index of another table
+ private static boolean canUseIndex(Index index, Table table, IndexColumn[] cols, boolean unique) {
+ if (index.getTable() != table) {
return false;
}
- Column[] indexCols = existingIndex.getColumns();
-
- if (moreColumnsOk) {
- if (indexCols.length < cols.length) {
+ int allowedColumns;
+ if (unique) {
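+ // the unique columns of the index must be exactly the requested columns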
+ allowedColumns = index.getUniqueColumnCount();
+ if (allowedColumns != cols.length) {
return false;
}
- for (IndexColumn col : cols) {
- // all columns of the list must be part of the index,
- // but not all columns of the index need to be part of the list
- // holes are not allowed (index=a,b,c & list=a,b is ok;
- // but list=a,c is not)
- int idx = existingIndex.getColumnIndex(col.column);
- if (idx < 0 || idx >= cols.length) {
- return false;
- }
- }
} else {
- if (indexCols.length != cols.length) {
+ if (index.getCreateSQL() == null || (allowedColumns = index.getColumns().length) != cols.length) {
return false;
}
- for (IndexColumn col : cols) {
- // all columns of the list must be part of the index
- int idx = existingIndex.getColumnIndex(col.column);
- if (idx < 0) {
- return false;
- }
+ }
+ for (IndexColumn col : cols) {
+ // all columns of the list must be part of the index
+ int i = index.getColumnIndex(col.column);
+ if (i < 0 || i >= allowedColumns) {
+ return false;
}
}
return true;
@@ -383,8 +437,8 @@ public void setConstraintName(String constraintName) {
this.constraintName = constraintName;
}
- public void setType(int type) {
- this.type = type;
+ public String getConstraintName() {
+ return constraintName;
}
@Override
@@ -396,10 +450,6 @@ public void setCheckExpression(Expression expression) {
this.checkExpression = expression;
}
- public void setTableName(String tableName) {
- this.tableName = tableName;
- }
-
public void setIndexColumns(IndexColumn[] indexColumns) {
this.indexColumns = indexColumns;
}
diff --git a/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java b/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java
index 90f91965cc..ebb8baa2ef 100644
--- a/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java
+++ b/h2/src/main/org/h2/command/ddl/AlterTableAlterColumn.java
@@ -1,75 +1,102 @@
/*
- * Copyright 2004-2014 H2 Group. Multiple-Licensed under the MPL 2.0,
- * and the EPL 1.0 (http://h2database.com/html/license.html).
+ * Copyright 2004-2022 H2 Group. Multiple-Licensed under the MPL 2.0,
+ * and the EPL 1.0 (https://h2database.com/html/license.html).
* Initial Developer: H2 Group
*/
package org.h2.command.ddl;
import java.util.ArrayList;
import java.util.HashSet;
-
import org.h2.api.ErrorCode;
+import org.h2.command.CommandContainer;
import org.h2.command.CommandInterface;
import org.h2.command.Parser;
import org.h2.command.Prepared;
import org.h2.constraint.Constraint;
import org.h2.constraint.ConstraintReferential;
+import org.h2.constraint.ConstraintUnique;
+import org.h2.engine.Constants;
import org.h2.engine.Database;
import org.h2.engine.DbObject;
import org.h2.engine.Right;
-import org.h2.engine.Session;
+import org.h2.engine.SessionLocal;
import org.h2.expression.Expression;
import org.h2.expression.ExpressionVisitor;
import org.h2.index.Index;
import org.h2.index.IndexType;
import org.h2.message.DbException;
import org.h2.result.ResultInterface;
+import org.h2.result.SearchRow;
import org.h2.schema.Schema;
import org.h2.schema.SchemaObject;
import org.h2.schema.Sequence;
import org.h2.schema.TriggerObject;
import org.h2.table.Column;
import org.h2.table.Table;
+import org.h2.table.TableBase;
import org.h2.table.TableView;
-import org.h2.util.New;
+import org.h2.util.HasSQL;
+import org.h2.util.Utils;
/**
* This class represents the statements
* ALTER TABLE ADD,
* ALTER TABLE ADD IF NOT EXISTS,
* ALTER TABLE ALTER COLUMN,
- * ALTER TABLE ALTER COLUMN RESTART,
* ALTER TABLE ALTER COLUMN SELECTIVITY,
* ALTER TABLE ALTER COLUMN SET DEFAULT,
- * ALTER TABLE ALTER COLUMN SET NOT NULL,
+ * ALTER TABLE ALTER COLUMN DROP DEFAULT,
+ * ALTER TABLE ALTER COLUMN DROP EXPRESSION,
* ALTER TABLE ALTER COLUMN SET NULL,
+ * ALTER TABLE ALTER COLUMN DROP NULL,
+ * ALTER TABLE ALTER COLUMN SET VISIBLE,
+ * ALTER TABLE ALTER COLUMN SET INVISIBLE,
* ALTER TABLE DROP COLUMN
*/
-public class AlterTableAlterColumn extends SchemaCommand {
+public class AlterTableAlterColumn extends CommandWithColumns {
- private Table table;
+ private String tableName;
private Column oldColumn;
private Column newColumn;
private int type;
+ /**
+ * Default or on update expression.
+ */
private Expression defaultExpression;
private Expression newSelectivity;
+ private Expression usingExpression;
+ private boolean addFirst;
private String addBefore;
private String addAfter;
+ private boolean ifTableExists;
private boolean ifNotExists;
private ArrayList<Column> columnsToAdd;
+ private ArrayList<Column> columnsToRemove;
+ private boolean booleanFlag;
- public AlterTableAlterColumn(Session session, Schema schema) {
+ public AlterTableAlterColumn(SessionLocal session, Schema schema) {
super(session, schema);
}
- public void setTable(Table table) {
- this.table = table;
+ public void setIfTableExists(boolean b) {
+ ifTableExists = b;
+ }
+
+ public void setTableName(String tableName) {
+ this.tableName = tableName;
}
public void setOldColumn(Column oldColumn) {
this.oldColumn = oldColumn;
}
+ /**
+ * Add the column as the first column of the table.
+ */
+ public void setAddFirst() {
+ addFirst = true;
+ }
+
public void setAddBefore(String before) {
this.addBefore = before;
}
@@ -79,136 +106,219 @@ public void setAddAfter(String after) {
}
@Override
- public int update() {
- session.commit(true);
+ public long update() {
Database db = session.getDatabase();
- session.getUser().checkRight(table, Right.ALL);
+ Table table = getSchema().resolveTableOrView(session, tableName);
+ if (table == null) {
+ if (ifTableExists) {
+ return 0;
+ }
+ throw DbException.get(ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1, tableName);
+ }
+ session.getUser().checkTableRight(table, Right.SCHEMA_OWNER);
table.checkSupportAlter();
- table.lock(session, true, true);
- Sequence sequence = oldColumn == null ? null : oldColumn.getSequence();
+ table.lock(session, Table.EXCLUSIVE_LOCK);
if (newColumn != null) {
- checkDefaultReferencesTable(newColumn.getDefaultExpression());
+ checkDefaultReferencesTable(table, newColumn.getDefaultExpression());
+ checkClustering(newColumn);
}
if (columnsToAdd != null) {
for (Column column : columnsToAdd) {
- checkDefaultReferencesTable(column.getDefaultExpression());
+ checkDefaultReferencesTable(table, column.getDefaultExpression());
+ checkClustering(column);
}
}
switch (type) {
case CommandInterface.ALTER_TABLE_ALTER_COLUMN_NOT_NULL: {
- if (!oldColumn.isNullable()) {
+ if (oldColumn == null || !oldColumn.isNullable()) {
// no change
break;
}
- checkNoNullValues();
+ checkNoNullValues(table);
oldColumn.setNullable(false);
db.updateMeta(session, table);
break;
}
- case CommandInterface.ALTER_TABLE_ALTER_COLUMN_NULL: {
- if (oldColumn.isNullable()) {
+ case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_NOT_NULL: {
+ if (oldColumn == null || oldColumn.isNullable()) {
// no change
break;
}
- checkNullable();
+ checkNullable(table);
oldColumn.setNullable(true);
db.updateMeta(session, table);
break;
}
- case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT: {
- checkDefaultReferencesTable(defaultExpression);
- oldColumn.setSequence(null);
- oldColumn.setDefaultExpression(session, defaultExpression);
- removeSequence(sequence);
+ case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT:
+ case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION: {
+ if (oldColumn == null) {
+ break;
+ }
+ if (oldColumn.isIdentity()) {
+ break;
+ }
+ if (defaultExpression != null) {
+ if (oldColumn.isGenerated()) {
+ break;
+ }
+ checkDefaultReferencesTable(table, defaultExpression);
+ oldColumn.setDefaultExpression(session, defaultExpression);
+ } else {
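+ // DROP EXPRESSION is only valid for generated columns,
+ // DROP DEFAULT only for non-generated ones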
+ if ((type == CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_EXPRESSION) != oldColumn.isGenerated()) {
+ break;
+ }
+ oldColumn.setDefaultExpression(session, null);
+ }
+ db.updateMeta(session, table);
+ break;
+ }
+ case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DROP_IDENTITY: {
+ if (oldColumn == null) {
+ break;
+ }
+ Sequence sequence = oldColumn.getSequence();
+ if (sequence == null) {
+ break;
+ }
+ oldColumn.setSequence(null, false);
+ removeSequence(table, sequence);
+ db.updateMeta(session, table);
+ break;
+ }
+ case CommandInterface.ALTER_TABLE_ALTER_COLUMN_ON_UPDATE: {
+ if (oldColumn == null) {
+ break;
+ }
+ if (defaultExpression != null) {
+ if (oldColumn.isIdentity() || oldColumn.isGenerated()) {
+ break;
+ }
+ checkDefaultReferencesTable(table, defaultExpression);
+ oldColumn.setOnUpdateExpression(session, defaultExpression);
+ } else {
+ oldColumn.setOnUpdateExpression(session, null);
+ }
db.updateMeta(session, table);
break;
}
case CommandInterface.ALTER_TABLE_ALTER_COLUMN_CHANGE_TYPE: {
+ if (oldColumn == null) {
+ break;
+ }
// if the change is only increasing the precision, then we don't
// need to copy the table because the length is only a constraint,
// and does not affect the storage structure.
- if (oldColumn.isWideningConversion(newColumn)) {
- convertAutoIncrementColumn(newColumn);
+ if (oldColumn.isWideningConversion(newColumn) && usingExpression == null) {
+ convertIdentityColumn(table, newColumn);
oldColumn.copy(newColumn);
db.updateMeta(session, table);
} else {
- oldColumn.setSequence(null);
+ oldColumn.setSequence(null, false);
oldColumn.setDefaultExpression(session, null);
- oldColumn.setConvertNullToDefault(false);
if (oldColumn.isNullable() && !newColumn.isNullable()) {
- checkNoNullValues();
+ checkNoNullValues(table);
} else if (!oldColumn.isNullable() && newColumn.isNullable()) {
- checkNullable();
+ checkNullable(table);
}
- convertAutoIncrementColumn(newColumn);
- copyData();
+ if (oldColumn.getVisible() ^ newColumn.getVisible()) {
+ oldColumn.setVisible(newColumn.getVisible());
+ }
+ convertIdentityColumn(table, newColumn);
+ copyData(table, null, true);
}
+ table.setModified();
break;
}
case CommandInterface.ALTER_TABLE_ADD_COLUMN: {
// ifNotExists only supported for single column add
- if (ifNotExists && columnsToAdd.size() == 1 &&
+ if (ifNotExists && columnsToAdd != null && columnsToAdd.size() == 1 &&
table.doesColumnExist(columnsToAdd.get(0).getName())) {
break;
}
- for (Column column : columnsToAdd) {
- if (column.isAutoIncrement()) {
- int objId = getObjectId();
- column.convertAutoIncrementToSequence(session, getSchema(), objId,
- table.isTemporary());
- }
+ ArrayList<Sequence> sequences = generateSequences(columnsToAdd, false);
+ if (columnsToAdd != null) {
+ changePrimaryKeysToNotNull(columnsToAdd);
}
- copyData();
+ copyData(table, sequences, true);
break;
}
case CommandInterface.ALTER_TABLE_DROP_COLUMN: {
- if (table.getColumns().length == 1) {
- throw DbException.get(ErrorCode.CANNOT_DROP_LAST_COLUMN,
- oldColumn.getSQL());
+ if (table.getColumns().length - columnsToRemove.size() < 1) {
+ throw DbException.get(ErrorCode.CANNOT_DROP_LAST_COLUMN, columnsToRemove.get(0).getTraceSQL());
}
- table.dropSingleColumnConstraintsAndIndexes(session, oldColumn);
- copyData();
+ table.dropMultipleColumnsConstraintsAndIndexes(session, columnsToRemove);
+ copyData(table, null, false);
break;
}
case CommandInterface.ALTER_TABLE_ALTER_COLUMN_SELECTIVITY: {
+ if (oldColumn == null) {
+ break;
+ }
int value = newSelectivity.optimize(session).getValue(session).getInt();
oldColumn.setSelectivity(value);
db.updateMeta(session, table);
break;
}
+ case CommandInterface.ALTER_TABLE_ALTER_COLUMN_VISIBILITY:
+ if (oldColumn == null) {
+ break;
+ }
+ if (oldColumn.getVisible() != booleanFlag) {
+ oldColumn.setVisible(booleanFlag);
+ table.setModified();
+ db.updateMeta(session, table);
+ }
+ break;
+ case CommandInterface.ALTER_TABLE_ALTER_COLUMN_DEFAULT_ON_NULL:
+ if (oldColumn == null) {
+ break;
+ }
+ if (oldColumn.isDefaultOnNull() != booleanFlag) {
+ oldColumn.setDefaultOnNull(booleanFlag);
+ table.setModified();
+ db.updateMeta(session, table);
+ }
+ break;
default:
- DbException.throwInternalError("type=" + type);
+ throw DbException.getInternalError("type=" + type);
}
return 0;
}
- private void checkDefaultReferencesTable(Expression defaultExpression) {
+ private static void checkDefaultReferencesTable(Table table, Expression defaultExpression) {
if (defaultExpression == null) {
return;
}
- HashSet<DbObject> dependencies = New.hashSet();
+ HashSet