diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 00000000..51d30a6f --- /dev/null +++ b/.travis.yml @@ -0,0 +1,24 @@ +# Travis CI configuration +# see https://docs.travis-ci.com/user/languages/python + +language: python + +# unfortunately, Travis cannot be used to test with Python 2.4 and 2.5 +python: + - "2.6" + - "2.7" + +install: + - pip install . + - if [[ $TRAVIS_PYTHON_VERSION == 2.6 ]]; then pip install unittest2; fi + +script: python setup.py test + +addons: + postgresql: "9.1" + +services: + - postgresql + +before_script: + - psql -U postgres -c 'create database unittest' diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 00000000..22c1be9a --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,31 @@ +Written by D'Arcy J.M. Cain (darcy@druid.net) + +Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr) + +Copyright (c) 1995, Pascal Andre + +Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain +(darcy@PyGreSQL.org) + +Further modifications copyright (c) 2009-2016 by the PyGreSQL team. + +PyGreSQL is released under the PostgreSQL License, a liberal Open Source +license, similar to the BSD or MIT licenses: + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose, without fee, and without a written agreement +is hereby granted, provided that the above copyright notice and this +paragraph and the following two paragraphs appear in all copies. In +this license the term "AUTHORS" refers to anyone who has contributed code +to PyGreSQL. + +IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, +ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF +AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE +AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, +ENHANCEMENTS, OR MODIFICATIONS. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 00000000..ba95795b --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,26 @@ + +include pgmodule.c +include pgtypes.h +include pgfs.h + +include pg.py +include pgdb.py +include setup.py + +include setup.cfg + +include README.rst +include LICENSE.txt + +recursive-include tests *.py + +include docs/Makefile +include docs/make.bat +include docs/*.py +include docs/*.rst +recursive-include docs/community *.rst +recursive-include docs/contents *.rst +recursive-include docs/download *.rst +recursive-include docs/_static *.css_t *.ico *.png +recursive-include docs/_templates *.html +recursive-include docs/_build/html *.css *.gif *.html *.ico *.js *.png *.txt diff --git a/README.rst b/README.rst new file mode 100644 index 00000000..319c5d9b --- /dev/null +++ b/README.rst @@ -0,0 +1,27 @@ +PyGreSQL - Python interface for PostgreSQL +========================================== + +PyGreSQL is a Python module that interfaces to a PostgreSQL database. +It embeds the PostgreSQL query library to allow easy use of the powerful +PostgreSQL features from a Python script. + +PyGreSQL is developed and tested on a NetBSD system, but it should also +run on most other platforms where PostgreSQL and Python is running. +It is based on the PyGres95 code written by Pascal Andre. 
+D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with version 2.0 +and serves as the "BDFL" of PyGreSQL. + +Installation +------------ + +The simplest way to install PyGreSQL is to type:: + + $ pip install PyGreSQL + +For other ways of installing PyGreSQL and requirements, +see the documentation. + +Documentation +------------- + +The documentation is available at http://www.pygresql.org/. diff --git a/module/Setup.in.raw b/Setup.in.raw similarity index 100% rename from module/Setup.in.raw rename to Setup.in.raw diff --git a/buildhtml.py b/buildhtml.py deleted file mode 100755 index 110a2374..00000000 --- a/buildhtml.py +++ /dev/null @@ -1,226 +0,0 @@ -#! /usr/bin/python - -# Author: David Goodger -# Contact: goodger@users.sourceforge.net -# Revision: $Revision$ -# Date: $Date$ -# Copyright: This module has been placed in the public domain. - -""" -Generates .html from all the .txt files in a directory. - -Ordinary .txt files are understood to be standalone reStructuredText. -Files named ``pep-*.txt`` are interpreted as reStructuredText PEPs. -""" -# Once PySource is here, build .html from .py as well. - -__docformat__ = 'reStructuredText' - - -try: - import locale - locale.setlocale(locale.LC_ALL, '') -except: - pass - -import sys -import os -import os.path -import copy -import docutils -from docutils import ApplicationError -from docutils import core, frontend -from docutils.parsers import rst -from docutils.readers import standalone, pep -from docutils.writers import html4css1, pep_html - - -usage = '%prog [options] [ ...]' -description = ('Generates .html from all the reStructuredText .txt files ' - '(including PEPs) in each ' - '(default is the current directory).') - - -class SettingsSpec(docutils.SettingsSpec): - - """ - Runtime settings & command-line options for the front end. - """ - - # Can't be included in OptionParser below because we don't want to - # override the base class. - settings_spec = ( - 'Build-HTML Options', - None, - (('Recursively scan subdirectories for files to process. This is ' - 'the default.', - ['--recurse'], - {'action': 'store_true', 'default': 1, - 'validator': frontend.validate_boolean}), - ('Do not scan subdirectories for files to process.', - ['--local'], {'dest': 'recurse', 'action': 'store_false'}), - ('Do not process files in . This option may be used ' - 'more than once to specify multiple directories.', - ['--prune'], - {'metavar': '', 'action': 'append', - 'validator': frontend.validate_colon_separated_string_list}), - ('Work silently (no progress messages). Independent of "--quiet".', - ['--silent'], - {'action': 'store_true', 'validator': frontend.validate_boolean}),)) - - relative_path_settings = ('prune',) - config_section = 'buildhtml application' - config_section_dependencies = ('applications',) - - -class OptionParser(frontend.OptionParser): - - """ - Command-line option processing for the ``buildhtml.py`` front end. 
- """ - - def check_values(self, values, args): - frontend.OptionParser.check_values(self, values, args) - values._source = None - return values - - def check_args(self, args): - source = destination = None - if args: - self.values._directories = args - else: - self.values._directories = [os.getcwd()] - return source, destination - - -class Struct: - - """Stores data attributes for dotted-attribute access.""" - - def __init__(self, **keywordargs): - self.__dict__.update(keywordargs) - - -class Builder: - - def __init__(self): - self.publishers = { - '': Struct(components=(pep.Reader, rst.Parser, pep_html.Writer, - SettingsSpec)), - '.txt': Struct(components=(rst.Parser, standalone.Reader, - html4css1.Writer, SettingsSpec), - reader_name='standalone', - writer_name='html'), - 'PEPs': Struct(components=(rst.Parser, pep.Reader, - pep_html.Writer, SettingsSpec), - reader_name='pep', - writer_name='pep_html')} - """Publisher-specific settings. Key '' is for the front-end script - itself. ``self.publishers[''].components`` must contain a superset of - all components used by individual publishers.""" - - self.setup_publishers() - - def setup_publishers(self): - """ - Manage configurations for individual publishers. - - Each publisher (combination of parser, reader, and writer) may have - its own configuration defaults, which must be kept separate from those - of the other publishers. Setting defaults are combined with the - config file settings and command-line options by - `self.get_settings()`. - """ - for name, publisher in self.publishers.items(): - option_parser = OptionParser( - components=publisher.components, read_config_files=1, - usage=usage, description=description) - publisher.option_parser = option_parser - publisher.setting_defaults = option_parser.get_default_values() - frontend.make_paths_absolute(publisher.setting_defaults.__dict__, - option_parser.relative_path_settings) - publisher.config_settings = ( - option_parser.get_standard_config_settings()) - self.settings_spec = self.publishers[''].option_parser.parse_args( - values=frontend.Values()) # no defaults; just the cmdline opts - self.initial_settings = self.get_settings('') - - def get_settings(self, publisher_name, directory=None): - """ - Return a settings object, from multiple sources. - - Copy the setting defaults, overlay the startup config file settings, - then the local config file settings, then the command-line options. - Assumes the current directory has been set. 
- """ - publisher = self.publishers[publisher_name] - settings = frontend.Values(publisher.setting_defaults.__dict__) - settings.update(publisher.config_settings, publisher.option_parser) - if directory: - local_config = publisher.option_parser.get_config_file_settings( - os.path.join(directory, 'docutils.conf')) - frontend.make_paths_absolute( - local_config, publisher.option_parser.relative_path_settings, - directory) - settings.update(local_config, publisher.option_parser) - settings.update(self.settings_spec.__dict__, publisher.option_parser) - return settings - - def run(self, directory=None, recurse=1): - recurse = recurse and self.initial_settings.recurse - if directory: - self.directories = [directory] - elif self.settings_spec._directories: - self.directories = self.settings_spec._directories - else: - self.directories = [os.getcwd()] - for directory in self.directories: - os.path.walk(directory, self.visit, recurse) - - def visit(self, recurse, directory, names): - settings = self.get_settings('', directory) - if settings.prune and (os.path.abspath(directory) in settings.prune): - print >>sys.stderr, '/// ...Skipping directory (pruned):', directory - sys.stderr.flush() - names[:] = [] - return - if not self.initial_settings.silent: - print >>sys.stderr, '/// Processing directory:', directory - sys.stderr.flush() - prune = 0 - for name in names: - if name.endswith('.txt'): - prune = self.process_txt(directory, name) - if prune: - break - if not recurse: - del names[:] - - def process_txt(self, directory, name): - if name.startswith('pep-'): - publisher = 'PEPs' - else: - publisher = '.txt' - settings = self.get_settings(publisher, directory) - pub_struct = self.publishers[publisher] - if settings.prune and (directory in settings.prune): - return 1 - settings._source = os.path.normpath(os.path.join(directory, name)) - settings._destination = settings._source[:-4]+'.html' - if not self.initial_settings.silent: - print >>sys.stderr, ' ::: Processing:', name - sys.stderr.flush() - try: - core.publish_file(source_path=settings._source, - destination_path=settings._destination, - reader_name=pub_struct.reader_name, - parser_name='restructuredtext', - writer_name=pub_struct.writer_name, - settings=settings) - except ApplicationError, error: - print >>sys.stderr, (' Error (%s): %s' - % (error.__class__.__name__, error)) - - -if __name__ == "__main__": - Builder().run() diff --git a/docs/Makefile b/docs/Makefile index 3768edd4..ae3e5d2b 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -1,12 +1,17 @@ # Makefile for Sphinx documentation -# +# -*- Note: requires GNU Make -*- # You can set these variables from the command line. -SPHINXOPTS = -aE +SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter @@ -14,7 +19,7 @@ ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext +.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext help: @echo "Please use \`make ' where is one of" @@ -25,21 +30,26 @@ help: @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage to run coverage check of the documentation (if enabled)" clean: - -rm -rf $(BUILDDIR)/* + rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @@ -81,6 +91,14 @@ qthelp: @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyGreSQL.qhc" +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @@ -108,6 +126,12 @@ latexpdf: $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @@ -151,3 +175,18 @@ doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." + +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
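Note: the added coverage, xml and pseudoxml targets only wrap ``sphinx-build`` with the usual options. For reference, the following sketch shows roughly what ``make coverage`` runs; it is an illustration only and assumes ``sphinx-build`` is on the PATH and the working directory is ``docs/``::

    # Python equivalent of the new "coverage" target, for illustration only
    import subprocess

    subprocess.check_call([
        'sphinx-build', '-b', 'coverage',
        '-d', '_build/doctrees',   # doctree cache, as in ALLSPHINXOPTS
        '.', '_build/coverage',    # source directory and output directory
    ])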
diff --git a/docs/_static/favicon.ico b/docs/_static/favicon.ico new file mode 100644 index 00000000..40ea652f Binary files /dev/null and b/docs/_static/favicon.ico differ diff --git a/docs/_static/pygresql.css_t b/docs/_static/pygresql.css_t new file mode 100644 index 00000000..a3bc4de2 --- /dev/null +++ b/docs/_static/pygresql.css_t @@ -0,0 +1,86 @@ +{% macro experimental(keyword, value) %} + {% if value %} + -moz-{{keyword}}: {{value}}; + -webkit-{{keyword}}: {{value}}; + -o-{{keyword}}: {{value}}; + -ms-{{keyword}}: {{value}}; + {{keyword}}: {{value}}; + {% endif %} +{% endmacro %} + +{% macro border_radius(value) -%} + {{experimental("border-radius", value)}} +{% endmacro %} + +{% macro box_shadow(value) -%} + {{experimental("box-shadow", value)}} +{% endmacro %} + +.pageheader.related { + text-align: left; + padding: 10px 15px; + border: 1px solid #eeeeee; + margin-bottom: 10px; + {{border_radius("1em 1em 1em 1em")}} + {% if theme_borderless_decor | tobool %} + border-top: 0; + border-bottom: 0; + {% endif %} +} + +.pageheader.related .logo { + font-size: 36px; + font-style: italic; + letter-spacing: 5px; + margin-right: 2em; +} + +.pageheader.related .logo { + font-size: 36px; + font-style: italic; + letter-spacing: 5px; + margin-right: 2em; +} + +.pageheader.related .logo a, .pageheader.related .logo a:hover { + background: transparent; + color: {{ theme_relbarlinkcolor }}; + border: none; + text-decoration: none; + text-shadow: none; + {{box_shadow("none")}} +} + +.pageheader.related ul { + float: right; + margin: 2px 1em; +} + +.pageheader.related li { + float: left; + margin: 0 0 0 10px; +} + +.pageheader.related li a { + padding: 8px 12px; +} + +.norelbar .subtitle { + font-size: 14px; + line-height: 18px; + font-weight: bold; + letter-spacing: 4px; + text-align: right; + padding: 0 1em; + margin-top: -9px; +} + +.relbar-top .related.norelbar { + height: 22px; + border-bottom: 14px solid #eeeeee; +} + +.relbar-bottom .related.norelbar { + height: 22px; + border-top: 14px solid #eeeeee; +} diff --git a/docs/_static/pygresql.png b/docs/_static/pygresql.png new file mode 100644 index 00000000..706e855f Binary files /dev/null and b/docs/_static/pygresql.png differ diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html new file mode 100644 index 00000000..1cb2ddee --- /dev/null +++ b/docs/_templates/layout.html @@ -0,0 +1,58 @@ +{%- extends "cloud/layout.html" %} + +{% set css_files = css_files + ["_static/pygresql.css"] %} + +{# + This layout adds a page header above the standard layout. + It also removes the relbars from all pages that are not part + of the core documentation in the contents/ directory, + adapting the navigation bar (breadcrumb) appropriately. +#} + +{% set is_content = pagename.startswith(('contents/', 'genindex', 'modindex', 'py-', 'search')) %} +{% if is_content %} +{% set master_doc = 'contents/index' %} +{% set parents = parents[1:] %} +{% endif %} + +{% block header %} + + + +{% endblock %} + +{% block relbar1 -%} +{%- if is_content -%} + {{ super() }} +{% else %} +
+{%- endif -%} +{%- endblock %} + +{% block relbar2 -%} +{%- if is_content -%} + {{ super() }} +{%- else -%} +
+{%- endif -%} +{%- endblock %} + +{% block content -%} +{%- if is_content -%} +{{ super() }} +{%- else -%} +
{{ super() }}
+{%- endif -%} +{%- endblock %} diff --git a/docs/about.rst b/docs/about.rst new file mode 100644 index 00000000..3e61d030 --- /dev/null +++ b/docs/about.rst @@ -0,0 +1,4 @@ +About PyGreSQL +============== + +.. include:: about.txt \ No newline at end of file diff --git a/docs/introduction.rst b/docs/about.txt similarity index 69% rename from docs/introduction.rst rename to docs/about.txt index 5477f15f..03576b07 100644 --- a/docs/introduction.rst +++ b/docs/about.txt @@ -1,17 +1,15 @@ -Introduction -============ - **PyGreSQL** is an *open-source* `Python `_ module that interfaces to a `PostgreSQL `_ database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script. -| This software is copyright © 1995, Pascal Andre. -| Further modifications are copyright © 1997-2008 by D'Arcy J.M. Cain. -| Further modifications are copyright © 2009-2012 by the PyGreSQL team + | This software is copyright © 1995, Pascal Andre. + | Further modifications are copyright © 1997-2008 by D'Arcy J.M. Cain. + | Further modifications are copyright © 2009-2016 by the PyGreSQL team. + | For licensing details, see the full :doc:`copyright`. **PostgreSQL** is a highly scalable, SQL compliant, open source -object-relational database management system. With more than 15 years +object-relational database management system. With more than 20 years of development history, it is quickly becoming the de facto database for enterprise level open source solutions. Best of all, PostgreSQL's source code is available under the most liberal @@ -32,10 +30,11 @@ even for commercial use. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script. -PyGreSQL is developed and tested on a NetBSD system, but it should also -run on most other platforms where PostgreSQL and Python is running. It is -based on the PyGres95 code written by Pascal Andre (andre@chimay.via.ecp.fr). +PyGreSQL is developed and tested on a NetBSD system, but it also runs on +most other platforms where PostgreSQL and Python is running. It is based +on the PyGres95 code written by Pascal Andre (andre@chimay.via.ecp.fr). D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with version 2.0 and serves as the "BDFL" of PyGreSQL. -The current version PyGreSQL 4.2 needs PostgreSQL 8.3 and Python 2.5 or above. +The current version PyGreSQL 4.2 needs PostgreSQL 8.3 or newer and Python 2.5 +to 2.7. If you are using Python 3.x, you will need PyGreSQL 5.0 or newer. diff --git a/docs/announce.txt b/docs/announce.rst similarity index 85% rename from docs/announce.txt rename to docs/announce.rst index af4e1d8f..7fece981 100644 --- a/docs/announce.txt +++ b/docs/announce.rst @@ -22,10 +22,12 @@ for general information. This version has been built and unit tested on: - NetBSD - FreeBSD - - openSUSE 12.2 + - openSUSE + - Ubuntu - Windows 7 with both MinGW and Visual Studio - - PostgreSQL 8.4, 9.0 and 9.2 32 and 64bit - - Python 2.5, 2.6 and 2.7 32 and 64bit + - PostgreSQL 8.4 and 9.3 64bit + - Python 2.4, 2.5, 2.6 and 2.7 32 and 64bit | D'Arcy J.M. Cain | darcy@PyGreSQL.org +| and the PyGreSQL team diff --git a/docs/changelog.txt b/docs/changelog.txt deleted file mode 100644 index dd1764ab..00000000 --- a/docs/changelog.txt +++ /dev/null @@ -1,336 +0,0 @@ -================== -PyGreSQL ChangeLog -================== - -Version 4.2 ------------ - -Version 4.1.1 (2013-01-08) --------------------------- -- Add WhenNotified class and method. 
Replaces need for third party pgnotify. -- Sharpen test for inserting current_timestamp. -- Add more quote tests. False and 0 should evaluate to NULL. -- More tests - Any number other than 0 is True. -- Do not use positional parameters internally. - This restores backward compatibility with version 4.0. -- Add methods for changing the decimal point. - -Version 4.1 (2013-01-01) ------------------------- -- Dropped support for Python below 2.5 and PostgreSQL below 8.3. -- Added support for Python up to 2.7 and PostgreSQL up to 9.2. -- Particularly, support PQescapeLiteral() and PQescapeIdentifier(). -- The query method of the classic API now supports positional parameters. - This an effective way to pass arbitrary or unknown data without worrying - about SQL injection or syntax errors (contribution by Patrick TJ McPhee). -- The classic API now supports a method namedresult() in addition to - getresult() and dictresult(), which returns the rows of the result - as named tuples if these are supported (Python 2.6 or higher). -- The classic API has got the new methods begin(), commit(), rollback(), - savepoint() and release() for handling transactions. -- Both classic and DBAPI 2 connections can now be used as context - managers for encapsulating transactions. -- The execute() and executemany() methods now return the cursor object, - so you can now write statements like "for row in cursor.execute(...)" - (as suggested by Adam Frederick). -- Binary objects are now automatically escaped and unescaped. -- Bug in money quoting fixed. Amounts of $0.00 handled correctly. -- Proper handling of date and time objects as input. -- Proper handling of floats with 'nan' or 'inf' values as input. -- Fixed the set_decimal() function. -- All DatabaseError instances now have a sqlstate attribute. -- The getnotify() method can now also return payload strings (#15). -- Better support for notice processing with the new methods - set_notice_receiver() and get_notice_receiver() - (as suggested by Michael Filonenko, see #12 and #37). -- Open transactions are rolled back when pgdb connections are closed - (as suggested by Peter Harris, see #46). -- Connections and cursors can now be used with the "with" statement - (as suggested by Peter Harris, see #46). -- New method use_regtypes() that can be called to let getattnames() - return regular type names instead of the simplified classic types (#44). - - -Version 4.0 (2009-01-01) ------------------------- -- Dropped support for Python below 2.3 and PostgreSQL below 7.4. -- Added support for Python up to 2.6 and PostgreSQL up to 8.3. -- Improved performance of fetchall() for large result sets - by speeding up the type casts (as suggested by Peter Schuller). -- Exposed exceptions as attributes of the connection object. -- Exposed connection as attribute of the cursor object. -- Cursors now support the iteration protocol. -- Added new method to get parameter settings. -- Added customizable row_factory as suggested by Simon Pamies. -- Separated between mandatory and additional type objects. -- Added keyword args to insert, update and delete methods. -- Added exception handling for direct copy. -- Start transactions only when necessary, not after every commit(). -- Release the GIL while making a connection - (as suggested by Peter Schuller). -- If available, use decimal.Decimal for numeric types. -- Allow DB wrapper to be used with DB-API 2 connections - (as suggested by Chris Hilton). -- Made private attributes of DB wrapper accessible. 
-- Dropped dependence on mx.DateTime module. -- Support for PQescapeStringConn() and PQescapeByteaConn(); - these are now also used by the internal _quote() functions. -- Added 'int8' to INTEGER types. New SMALLINT type. -- Added a way to find the number of rows affected by a query() - with the classic pg module by returning it as a string. - For single inserts, query() still returns the oid as an integer. - The pgdb module already provides the "rowcount" cursor attribute - for the same purpose. -- Improved getnotify() by calling PQconsumeInput() instead of - submitting an empty command. -- Removed compatibility code for old OID munging style. -- The insert() and update() methods now use the "returning" clause - if possible to get all changed values, and they also check in advance - whether a subsequent select is possible, so that ongoing transactions - won't break if there is no select privilege. -- Added "protocol_version" and "server_version" attributes. -- Revived the "user" attribute. -- The pg module now works correctly with composite primary keys; - these are represented as frozensets. -- Removed the undocumented and actually unnecessary "view" parameter - from the get() method. -- get() raises a nicer ProgrammingError instead of a KeyError - if no primary key was found. -- delete() now also works based on the primary key if no oid available - and returns whether the row existed or not. - - -Version 3.8.1 (2006-06-05) --------------------------- -- Use string methods instead of deprecated string functions. -- Only use SQL-standard way of escaping quotes. -- Added the functions escape_string() and escape/unescape_bytea() - (as suggested by Charlie Dyson and Kavous Bojnourdi a long time ago). -- Reverted code in clear() method that set date to current. -- Added code for backwards compatibility in OID munging code. -- Reorder attnames tests so that "interval" is checked for before "int." -- If caller supplies key dictionary, make sure that all has a namespace. - -Version 3.8 (2006-02-17) ------------------------- -- Installed new favicon.ico from Matthew Sporleder -- Replaced snprintf by PyOS_snprintf. 
-- Removed NO_SNPRINTF switch which is not needed any longer -- Clean up some variable names and namespace -- Add get_relations() method to get any type of relation -- Rewrite get_tables() to use get_relations() -- Use new method in get_attnames method to get attributes of views as well -- Add Binary type -- Number of rows is now -1 after executing no-result statements -- Fix some number handling -- Non-simple types do not raise an error any more -- Improvements to documentation framework -- Take into account that nowadays not every table must have an oid column -- Simplification and improvement of the inserttable() function -- Fix up unit tests -- The usual assortment of minor fixes and enhancements - -Version 3.7 (2005-09-07) ------------------------- -Improvement of pgdb module: - -- Use Python standard `datetime` if `mxDateTime` is not available - -Major improvements and clean-up in classic pg module: - -- All members of the underlying connection directly available in `DB` -- Fixes to quoting function -- Add checks for valid database connection to methods -- Improved namespace support, handle `search_path` correctly -- Removed old dust and unnessesary imports, added docstrings -- Internal sql statements as one-liners, smoothed out ugly code - -Version 3.6.2 (2005-02-23) --------------------------- -- Further fixes to namespace handling - -Version 3.6.1 (2005-01-11) --------------------------- -- Fixes to namespace handling - -Version 3.6 (2004-12-17) ------------------------- -- Better DB-API 2.0 compliance -- Exception hierarchy moved into C module and made available to both APIs -- Fix error in update method that caused false exceptions -- Moved to standard exception hierarchy in classic API -- Added new method to get transaction state -- Use proper Python constants where appropriate -- Use Python versions of strtol, etc. Allows Win32 build. -- Bug fixes and cleanups - -Version 3.5 (2004-08-29) ------------------------- -Fixes and enhancements: - -- Add interval to list of data types -- fix up method wrapping especially close() -- retry pkeys once if table missing in case it was just added -- wrap query method separately to handle debug better -- use isinstance instead of type -- fix free/PQfreemem issue - finally -- miscellaneous cleanups and formatting - -Version 3.4 (2004-06-02) ------------------------- -Some cleanups and fixes. -This is the first version where PyGreSQL is moved back out of the -PostgreSQL tree. A lot of the changes mentioned below were actually -made while in the PostgreSQL tree since their last release. - -- Allow for larger integer returns -- Return proper strings for true and false -- Cleanup convenience method creation -- Enhance debugging method -- Add reopen method -- Allow programs to preload field names for speedup -- Move OID handling so that it returns long instead of int -- Miscellaneous cleanups and formatting - -Version 3.3 (2001-12-03) ------------------------- -A few cleanups. Mostly there was some confusion about the latest version -and so I am bumping the number to keep it straight. - -- Added NUMERICOID to list of returned types. This fixes a bug when - returning aggregates in the latest version of PostgreSQL. - -Version 3.2 (2001-06-20) ------------------------- -Note that there are very few changes to PyGreSQL between 3.1 and 3.2. -The main reason for the release is the move into the PostgreSQL -development tree. Even the WIN32 changes are pretty minor. 
- -- Add Win32 support (gerhard@bigfoot.de) -- Fix some DB-API quoting problems (niall.smart@ebeon.com) -- Moved development into PostgreSQL development tree. - -Version 3.1 (2000-11-06) ------------------------- -- Fix some quoting functions. In particular handle NULLs better. -- Use a method to add primary key information rather than direct - manipulation of the class structures -- Break decimal out in `_quote` (in pg.py) and treat it as float -- Treat timestamp like date for quoting purposes -- Remove a redundant SELECT from the `get` method speeding it, - and `insert` (since it calls `get`) up a little. -- Add test for BOOL type in typecast method to `pgdbTypeCache` class - (tv@beamnet.de) -- Fix pgdb.py to send port as integer to lower level function - (dildog@l0pht.com) -- Change pg.py to speed up some operations -- Allow updates on tables with no primary keys - -Version 3.0 (2000-05-30) ------------------------- -- Remove strlen() call from pglarge_write() and get size from object - (Richard@Bouska.cz) -- Add a little more error checking to the quote function in the wrapper -- Add extra checking in `_quote` function -- Wrap query in pg.py for debugging -- Add DB-API 2.0 support to pgmodule.c (andre@via.ecp.fr) -- Add DB-API 2.0 wrapper pgdb.py (andre@via.ecp.fr) -- Correct keyword clash (temp) in tutorial -- Clean up layout of tutorial -- Return NULL values as None (rlawrence@lastfoot.com) - (WARNING: This will cause backwards compatibility issues) -- Change None to NULL in insert and update -- Change hash-bang lines to use /usr/bin/env -- Clearing date should be blank (NULL) not TODAY -- Quote backslashes in strings in `_quote` (brian@CSUA.Berkeley.EDU) -- Expanded and clarified build instructions (tbryan@starship.python.net) -- Make code thread safe (Jerome.Alet@unice.fr) -- Add README.distutils (mwa@gate.net & jeremy@cnri.reston.va.us) -- Many fixes and increased DB-API compliance by chifungfan@yahoo.com, - tony@printra.net, jeremy@alum.mit.edu and others to get the final - version ready to release. - -Version 2.4 (1999-06-15) ------------------------- -- Insert returns None if the user doesn't have select permissions - on the table. It can (and does) happen that one has insert but - not select permissions on a table. 
-- Added ntuples() method to query object (brit@druid.net) -- Corrected a bug related to getresult() and the money type -- Corrected a bug related to negative money amounts -- Allow update based on primary key if munged oid not available and - table has a primary key -- Add many __doc__ strings (andre@via.ecp.fr) -- Get method works with views if key specified - -Version 2.3 (1999-04-17) ------------------------- -- connect.host returns "localhost" when connected to Unix socket - (torppa@tuhnu.cutery.fi) -- Use `PyArg_ParseTupleAndKeywords` in connect() (torppa@tuhnu.cutery.fi) -- fixes and cleanups (torppa@tuhnu.cutery.fi) -- Fixed memory leak in dictresult() (terekhov@emc.com) -- Deprecated pgext.py - functionality now in pg.py -- More cleanups to the tutorial -- Added fileno() method - terekhov@emc.com (Mikhail Terekhov) -- added money type to quoting function -- Compiles cleanly with more warnings turned on -- Returns PostgreSQL error message on error -- Init accepts keywords (Jarkko Torppa) -- Convenience functions can be overridden (Jarkko Torppa) -- added close() method - -Version 2.2 (1998-12-21) ------------------------- -- Added user and password support thanks to Ng Pheng Siong (ngps@post1.com) -- Insert queries return the inserted oid -- Add new `pg` wrapper (C module renamed to _pg) -- Wrapped database connection in a class -- Cleaned up some of the tutorial. (More work needed.) -- Added `version` and `__version__`. - Thanks to thilo@eevolute.com for the suggestion. - -Version 2.1 (1998-03-07) ------------------------- -- return fields as proper Python objects for field type -- Cleaned up pgext.py -- Added dictresult method - -Version 2.0 (1997-12-23) -------------------------- -- Updated code for PostgreSQL 6.2.1 and Python 1.5 -- Reformatted code and converted to use full ANSI style prototypes -- Changed name to PyGreSQL (from PyGres95) -- Changed order of arguments to connect function -- Created new type `pgqueryobject` and moved certain methods to it -- Added a print function for pgqueryobject -- Various code changes - mostly stylistic - -Version 1.0b (1995-11-04) -------------------------- -- Keyword support for connect function moved from library file to C code - and taken away from library -- Rewrote documentation -- Bug fix in connect function -- Enhancements in large objects interface methods - -Version 1.0a (1995-10-30) -------------------------- -A limited release. - -- Module adapted to standard Python syntax -- Keyword support for connect function in library file -- Rewrote default parameters interface (internal use of strings) -- Fixed minor bugs in module interface -- Redefinition of error messages - -Version 0.9b (1995-10-10) -------------------------- -The first public release. - -- Large objects implementation -- Many bug fixes, enhancements, ... - -Version 0.1a (1995-10-07) -------------------------- -- Basic libpq functions (SQL access) diff --git a/docs/classic.rst b/docs/classic.rst deleted file mode 100644 index edbf58b4..00000000 --- a/docs/classic.rst +++ /dev/null @@ -1,1594 +0,0 @@ -The classic PyGreSQL interface -============================== - -Introduction ------------- - -The `pg` module handles three types of objects, - -- the `pgobject`, which handles the connection - and all the requests to the database, -- the `pglarge` object, which handles - all the accesses to PostgreSQL large objects, -- the `pgqueryobject` that handles query results - -and it provides a convenient wrapper class `DB` for the `pgobject`. 
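Example (a minimal sketch of how these objects relate; the connection parameters and table contents are placeholders only)::

    import pg

    # a pgobject handling the connection
    con = pg.connect(dbname='testdb', host='localhost', user='bob')

    # query() returns a pgqueryobject for statements that produce rows
    q = con.query("select version()")
    print q.getresult()[0][0]

    # the DB wrapper class adds convenience methods on top of pgobject
    db = pg.DB(dbname='testdb', host='localhost', user='bob')
    print db.get_tables()

    con.close()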
- -If you want to see a simple example of the use of some of these functions, -see the examples page. - - -functions & constants ------------------------------- -The `pg` module defines a few functions that allow to connect -to a database and to define "default variables" that override -the environment variables used by PostgreSQL. - -These "default variables" were designed to allow you to handle general -connection parameters without heavy code in your programs. You can prompt the -user for a value, put it in the default variable, and forget it, without -having to modify your environment. The support for default variables can be -disabled by setting the -DNO_DEF_VAR option in the Python setup file. Methods -relative to this are specified by the tag [DV]. - -All variables are set to `None` at module initialization, specifying that -standard environment variables should be used. - -connect -------- -Opens a pg connection - -Syntax:: - - connect([dbname], [host], [port], [opt], [tty], [user], [passwd]) - -Parameters: - :dbname: name of connected database (string/None) - :host: name of the server host (string/None) - :port: port used by the database server (integer/-1) - :opt: connection options (string/None) - :tty: debug terminal (string/None) - :user: PostgreSQL user (string/None) - :passwd: password for user (string/None) - -Return type: - :pgobject: If successful, the `pgobject` handling the connection - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - :SyntaxError: duplicate argument definition - :pg.InternalError: some error occurred during pg connection definition - - (plus all exceptions relative to object allocation) - -Description: - This function opens a connection to a specified database on a given - PostgreSQL server. You can use keywords here, as described in the - Python tutorial. The names of the keywords are the name of the - parameters given in the syntax line. For a precise description - of the parameters, please refer to the PostgreSQL user manual. - -Examples:: - - import pg - - con1 = pg.connect('testdb', 'myhost', 5432, None, None, 'bob', None) - con2 = pg.connect(dbname='testdb', host='localhost', user='bob') - -get_defhost/set_defhost ------------------------ -default server host [DV] - -Syntax:: - - get_defhost() - -Parameters: - None - -Return type: - :string, None: default host specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default host specification, - or `None` if the environment variables should be used. - Environment variables won't be looked up. - -Syntax:: - - set_defhost(host) - -Parameters: - :host: new default host (string/None) - -Return type: - :string, None: previous default host specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This methods sets the default host value for new connections. - If `None` is supplied as parameter, environment variables will - be used in future connections. It returns the previous setting - for default host. - -get_defport/set_defport ------------------------ -default server port [DV] - -Syntax:: - - get_defport() - -Parameters: - None - -Return type: - :integer, None: default port specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default port specification, - or `None` if the environment variables should be used. - Environment variables won't be looked up. 
- -Syntax:: - - set_defport(port) - -Parameters: - :port: new default port (integer/-1) - -Return type: - :integer, None: previous default port specification - -Description: - This methods sets the default port value for new connections. If -1 is - supplied as parameter, environment variables will be used in future - connections. It returns the previous setting for default port. - -get_defopt/set_defopt ---------------------- -default connection options [DV] - -Syntax:: - - get_defopt() - -Parameters: - None - -Return type: - :string, None: default options specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default connection options specification, - or `None` if the environment variables should be used. Environment variables - won't be looked up. - -Syntax:: - - set_defopt(options) - -Parameters: - :options: new default connection options (string/None) - -Return type: - :string, None: previous default options specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This methods sets the default connection options value for new connections. - If `None` is supplied as parameter, environment variables will be used in - future connections. It returns the previous setting for default options. - -get_deftty/set_deftty ---------------------- -default debug tty [DV] - -Syntax:: - - get_deftty() - -Parameters: - None - -Return type: - :string, None: default debug terminal specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default debug terminal specification, or - `None` if the environment variables should be used. Environment variables - won't be looked up. - -Syntax:: - - set_deftty(terminal) - -Parameters: - :terminal: new default debug terminal (string/None) - -Return type: - :string, None: previous default debug terminal specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This methods sets the default debug terminal value for new connections. If - `None` is supplied as parameter, environment variables will be used in future - connections. It returns the previous setting for default terminal. - -get_defbase/set_defbase ------------------------ -default database name [DV] - -Syntax:: - - get_defbase() - -Parameters: - None - -Return type: - :string, None: default database name specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default database name specification, or - `None` if the environment variables should be used. Environment variables - won't be looked up. - -Syntax:: - - set_defbase(base) - -Parameters: - :base: new default base name (string/None) - -Return type: - :string, None: previous default database name specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This method sets the default database name value for new connections. If - `None` is supplied as parameter, environment variables will be used in - future connections. It returns the previous setting for default host. 
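Example (a sketch of the intended workflow for default variables; the host and database names are placeholders)::

    import pg

    # set application-wide defaults once instead of relying on environment
    # variables; each setter returns the previous setting
    pg.set_defhost('db.example.com')
    pg.set_defport(5432)
    pg.set_defbase('testdb')

    # connect() now falls back on these defaults for omitted parameters
    con = pg.connect(user='bob')

    # passing None re-enables the use of environment variables
    pg.set_defhost(None)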
- -escape_string -------------- -escape a string for use within SQL - -Syntax:: - - escape_string(string) - -Parameters: - :string: the string that is to be escaped - -Return type: - :str: the escaped string - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This function escapes a string for use within an SQL command. - This is useful when inserting data values as literal constants - in SQL commands. Certain characters (such as quotes and backslashes) - must be escaped to prevent them from being interpreted specially - by the SQL parser. `escape_string` performs this operation. - Note that there is also a `pgobject` method with the same name - which takes connection properties into account. - -.. caution:: It is especially important to do proper escaping when - handling strings that were received from an untrustworthy source. - Otherwise there is a security risk: you are vulnerable to "SQL injection" - attacks wherein unwanted SQL commands are fed to your database. - -Example:: - - name = raw_input("Name? ") - phone = con.query("select phone from employees" - " where name='%s'" % escape_string(name)).getresult() - -escape_bytea ------------- -escape binary data for use within SQL as type `bytea` - -Syntax:: - - escape_bytea(datastring) - -Parameters: - :datastring: string containing the binary data that is to be escaped - -Return type: - :str: the escaped string - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - Escapes binary data for use within an SQL command with the type `bytea`. - As with `escape_string`, this is only used when inserting data directly - into an SQL command string. - Note that there is also a `pgobject` method with the same name - which takes connection properties into account. - -Example:: - - picture = file('garfield.gif', 'rb').read() - con.query("update pictures set img='%s' where name='Garfield'" - % escape_bytea(picture)) - -unescape_bytea --------------- -unescape `bytea` data that has been retrieved as text - -Syntax:: - - unescape_bytea(string) - -Parameters: - :datastring: the `bytea` data string that has been retrieved as text - -Return type: - :str: string containing the binary data - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - Converts an escaped string representation of binary data into binary - data - the reverse of `escape_bytea`. This is needed when retrieving - `bytea` data with the `getresult()` or `dictresult()` method. - -Example:: - - picture = unescape_bytea(con.query( - "select img from pictures where name='Garfield'").getresult[0][0]) - file('garfield.gif', 'wb').write(picture) - -set_decimal ------------ -set a decimal type to be used for numeric values - -Syntax:: - - set_decimal(cls) - -Parameters: - :cls: the Python class to be used for PostgreSQL numeric values - -Description: - This function can be used to specify the Python class that shall be - used by PyGreSQL to hold PostgreSQL numeric values. The default class - is decimal.Decimal if available, otherwise the float type is used. - -set_namedresult ---------------- -set a function that will convert to named tuples - -Syntax:: - - set_namedresult(func) - -Parameters: - :func: the function to be used to convert results to named tuples - -Description: - You can use this if you want to create different kinds of named tuples. - - -Module constants ----------------- -Some constants are defined in the module dictionary. 
-They are intended to be used as parameters for methods calls. -You should refer to the libpq description in the PostgreSQL user manual -for more information about them. These constants are: - -:version, __version__: constants that give the current version. -:INV_READ, INV_WRITE: large objects access modes, - used by `(pgobject.)locreate` and `(pglarge.)open` -:SEEK_SET, SEEK_CUR, SEEK_END: positional flags, - used by `(pglarge.)seek` - - -pgobject --------- -Connection object - -This object handles a connection to a PostgreSQL database. It embeds and -hides all the parameters that define this connection, thus just leaving really -significant parameters in function calls. - -.. caution:: Some methods give direct access to the connection socket. - *Do not use them unless you really know what you are doing.* - If you prefer disabling them, - set the -DNO_DIRECT option in the Python setup file. - - **These methods are specified by the tag [DA].** - -.. note:: Some other methods give access to large objects - (refer to PostgreSQL user manual for more information about these). - If you want to forbid access to these from the module, - set the -DNO_LARGE option in the Python setup file. - - **These methods are specified by the tag [LO].** - -query ------ -executes a SQL command string - -Syntax:: - - query(command, [args]) - -Parameters: - :command: SQL command (string) - :args: optional positional arguments - -Return type: - :pgqueryobject, None: result values - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - :TypeError: invalid connection - :ValueError: empty SQL query or lost connection - :pg.ProgrammingError: error in query - :pg.InternalError: error during query processing - -Description: - This method simply sends a SQL query to the database. If the query is an - insert statement that inserted exactly one row into a table that has OIDs, the - return value is the OID of the newly inserted row. If the query is an update - or delete statement, or an insert statement that did not insert exactly one - row in a table with OIDs, then the numer of rows affected is returned as a - string. If it is a statement that returns rows as a result (usually a select - statement, but maybe also an "insert/update ... returning" statement), this - method returns a `pgqueryobject` that can be accessed via the `getresult()`, - `dictresult()` or `namedresult()` methods or simply printed. Otherwise, it - returns `None`. - - The query may optionally contain positional parameters of the form `$1`, - `$2`, etc instead of literal data, and the values supplied as a tuple. - The values are substituted by the database in such a way that they don't - need to be escaped, making this an effective way to pass arbitrary or - unknown data without worrying about SQL injection or syntax errors. - - When the database could not process the query, a `pg.ProgrammingError` or - a `pg.InternalError` is raised. You can check the "SQLSTATE" code of this - error by reading its `sqlstate` attribute. - -Example:: - - name = raw_input("Name? ") - phone = con.query("select phone from employees" - " where name=$1", (name, )).getresult() - -reset ------ -resets the connection - -Syntax:: - - reset() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: too many (any) arguments - :TypeError: invalid connection - -Description: - This method resets the current database connection. 
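Example (a sketch of the different return values of ``query()``; ``con`` is assumed to be a connection obtained with ``connect()`` and ``employees`` is an invented example table)::

    # single insert into a table with OIDs: the new row's OID is returned
    oid = con.query("insert into employees (name, phone) values ($1, $2)",
                    ('Alice', '555-0100'))

    # update/delete: the number of affected rows is returned as a string
    count = con.query("update employees set phone = $1 where name = $2",
                      ('555-0199', 'Alice'))

    # select: a pgqueryobject that can be read in several ways
    q = con.query("select name, phone from employees")
    for name, phone in q.getresult():      # list of tuples
        print name, phone
    print q.dictresult()[0]['phone']       # list of dictionaries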
- -cancel ------- -abandon processing of current SQL command - -Syntax:: - - cancel() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: too many (any) arguments - :TypeError: invalid connection - -Description: - This method requests that the server abandon processing - of the current SQL command. - -close ------ -close the database connection - -Syntax:: - - close() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: too many (any) arguments - -Description: - This method closes the database connection. The connection will - be closed in any case when the connection is deleted but this - allows you to explicitly close it. It is mainly here to allow - the DB-SIG API wrapper to implement a close function. - -fileno ------- -returns the socket used to connect to the database - -Syntax:: - - fileno() - -Parameters: - None - -Exceptions raised: - :TypeError: too many (any) arguments - :TypeError: invalid connection - -Description: - This method returns the underlying socket id used to connect - to the database. This is useful for use in select calls, etc. - -getnotify ---------- -gets the last notify from the server - -Syntax:: - - getnotify() - -Parameters: - None - -Return type: - :tuple, None: last notify from server - -Exceptions raised: - :TypeError: too many parameters - :TypeError: invalid connection - -Description: - This methods try to get a notify from the server (from the SQL statement - NOTIFY). If the server returns no notify, the methods returns None. - Otherwise, it returns a tuple (triplet) `(relname, pid, extra)`, where - `relname` is the name of the notify, `pid` is the process id of the - connection that triggered the notify, and `extra` is a payload string - that has been sent with the notification. Remember to do a listen query - first, otherwise getnotify() will always return `None`. - -inserttable ------------ -insert a list into a table - -Syntax:: - - inserttable(table, values) - -Parameters: - :table: the table name (string) - :values: list of rows values (list) - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad argument type, or too many arguments - :MemoryError: insert buffer could not be allocated - :ValueError: unsupported values - -Description: - This method allow to *quickly* insert large blocks of data in a table: - It inserts the whole values list into the given table. Internally, it - uses the COPY command of the PostgreSQL database. The list is a list - of tuples/lists that define the values for each inserted row. The rows - values may contain string, integer, long or double (real) values. - -.. caution:: *Be very careful*: - This method doesn't typecheck the fields according to the table definition; - it just look whether or not it knows how to handle such types. - -set_notice_receiver -------------------- -set a custom notice receiver - -Syntax:: - - set_notice_receiver(proc) - -Parameters: - :proc: the custom notice receiver callback function - -Return type: - None - -Exceptions raised: - :TypeError: the specified notice receiver is not callable - -Description: - This method allows setting a custom notice receiver callback function. - When a notice or warning message is received from the server, - or generated internally by libpq, and the message level is below - the one set with `client_min_messages`, the specified notice receiver - function will be called. 
This function must take one parameter, - the `pgnotice` object, which provides the following read-only attributes: - - :pgcnx: the connection - :message: the full message with a trailing newline - :severity: the level of the message, e.g. 'NOTICE' or 'WARNING' - :primary: the primary human-readable error message - :detail: an optional secondary error message - :hint: an optional suggestion what to do about the problem - -get_notice_receiver -------------------- -get the current notice receiver - -Syntax:: - - get_notice_receiver() - -Parameters: - None - -Return type: - :callable, None: the current notice receiver callable - -Exceptions raised: - :TypeError: too many (any) arguments - -Description: - This method gets the custom notice receiver callback function that has - been set with `set_notice_receiver()`, or `None` if no custom notice - receiver has ever been set on the connection. - -putline -------- -writes a line to the server socket [DA] - -Syntax:: - - putline(line) - -Parameters: - :line: line to be written (string) - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - -Description: - This method allows to directly write a string to the server socket. - -getline -------- -gets a line from server socket [DA] - -Syntax:: - - getline() - -Parameters: - None - -Return type: - :string: the line read - -Exceptions raised: - :TypeError: invalid connection - :TypeError: too many parameters - :MemoryError: buffer overflow - -Description: - This method allows to directly read a string from the server socket. - -endcopy -------- -synchronizes client and server [DA] - -Syntax:: - - endcopy() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection - :TypeError: too many parameters - -Description: - The use of direct access methods may desynchonize client and server. - This method ensure that client and server will be synchronized. - -locreate --------- -create a large object in the database [LO] - -Syntax:: - - locreate(mode) - -Parameters: - :mode: large object create mode - -Return type: - :pglarge: object handling the PostGreSQL large object - -Exceptions raised: - - :TypeError: invalid connection, bad parameter type, or too many parameters - :pg.OperationalError: creation error - -Description: - This method creates a large object in the database. The mode can be defined - by OR-ing the constants defined in the pg module (INV_READ, INV_WRITE and - INV_ARCHIVE). Please refer to PostgreSQL user manual for a description of - the mode values. - -getlo ------ -build a large object from given oid [LO] - -Syntax:: - - getlo(oid) - -Parameters: - :oid: OID of the existing large object (integer) - -Return type: - :pglarge: object handling the PostGreSQL large object - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :ValueError: bad OID value (0 is invalid_oid) - -Description: - This method allows to reuse a formerly created large object through the - `pglarge` interface, providing the user have its OID. 
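Example (a sketch of the large object round trip with ``locreate`` and ``getlo``; the ``pglarge`` methods used here, ``open``, ``write``, ``read``, ``size`` and ``close``, belong to the large object interface, and the data and parameters are placeholders; note that large object access normally has to happen inside a transaction)::

    import pg

    con = pg.connect(dbname='testdb')      # placeholder parameters
    con.query('begin')

    # create a large object and write some data into it
    lo = con.locreate(pg.INV_READ | pg.INV_WRITE)
    lo.open(pg.INV_WRITE)
    lo.write('some binary data')
    lo.close()

    # later, re-open the same object through its OID
    lo2 = con.getlo(lo.oid)
    lo2.open(pg.INV_READ)
    data = lo2.read(lo2.size())
    lo2.close()

    con.query('end')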
- -loimport --------- -import a file to a large object [LO] - -Syntax:: - - loimport(name) - -Parameters: - :name: the name of the file to be imported (string) - -Return type: - :pglarge: object handling the PostGreSQL large object - -Exceptions raised: - :TypeError: invalid connection, bad argument type, or too many arguments - :pg.OperationalError: error during file import - -Description: - This methods allows to create large objects in a very simple way. You just - give the name of a file containing the data to be use. - -Object attributes ------------------ -Every `pgobject` defines a set of read-only attributes that describe the -connection and its status. These attributes are: - - :host: the host name of the server (string) - :port: the port of the server (integer) - :db: the selected database (string) - :options: the connection options (string) - :tty: the connection debug terminal (string) - :user: user name on the database system (string) - :protocol_version: the frontend/backend protocol being used (integer) - :server_version: the backend version (integer, e.g. 80305 for 8.3.5) - :status: the status of the connection (integer: 1 - OK, 0 - bad) - :error: the last warning/error message from the server (string) - -DB --- -the DB wrapper class - -The `pgobject` methods are wrapped in the class `DB`. -The preferred way to use this module is as follows:: - - import pg - - db = pg.DB(...) # see below - - for r in db.query( # just for example - """SELECT foo,bar - FROM foo_bar_table - WHERE foo !~ bar""" - ).dictresult(): - - print '%(foo)s %(bar)s' % r - -This class can be subclassed as in this example:: - - import pg - - class DB_ride(pg.DB): - """This class encapsulates the database functions and the specific - methods for the ride database.""" - - def __init__(self): - """Opens a database connection to the rides database""" - - pg.DB.__init__(self, dbname = 'ride') - self.query("""SET DATESTYLE TO 'ISO'""") - - [Add or override methods here] - -The following describes the methods and variables of this class. - -Initialization --------------- -The DB class is initialized with the same arguments as the connect -function described in section 2. It also initializes a few -internal variables. The statement `db = DB()` will open the -local database with the name of the user just like connect() does. - -You can also initialize the DB class with an existing `_pg` or `pgdb` -connection. Pass this connection as a single unnamed parameter, or as a -single parameter named `db`. This allows you to use all of the methods -of the DB class with a DB-API 2 compliant connection. Note that the -`close()` and `reopen()` methods are inoperative in this case. - - - -pkey ----- -return the primary key of a table - -Syntax:: - - pkey(table) - -Parameters: - :table: name of table - -Return type: - :string: Name of the field which is the primary key of the table - -Description: - This method returns the primary key of a table. For composite primary - keys, the return value will be a frozenset. Note that this raises an - exception if the table does not have a primary key. - -get_databases -------------- -get list of databases in the system - -Syntax:: - - get_databases() - -Parameters: - None - -Return type: - :list: all databases in the system - -Description: - Although you can do this with a simple select, it is added here for - convenience. 
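For example, the methods described so far can be combined as in the following
minimal sketch (the connection parameters and the `employees` table are only
placeholders)::

    import pg

    db = pg.DB(dbname='testdb', host='localhost', user='scott')

    print db.get_databases()    # e.g. ['template0', 'template1', 'testdb']
    print db.pkey('employees')  # name of the primary key field, or a
                                # frozenset for a composite primary key
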
- -get_relations -------------- -get list of relations in connected database - -Syntax:: - - get_relations(kinds) - -Parameters: - :kinds: a string or sequence of type letters - -Description: - The type letters are `r` = ordinary table, `i` = index, `S` = sequence, - `v` = view, `c` = composite type, `s` = special, `t` = TOAST table. - If `kinds` is None or an empty string, all relations are returned (this is - also the default). Although you can do this with a simple select, it is - added here for convenience. - -get_tables ----------- -get list of tables in connected database - -Syntax:: - - get_tables() - -Parameters: - None - -Returns: - :list: all tables in connected database - -Description: - Although you can do this with a simple select, it is added here for - convenience. - -get_attnames ------------- -get the attribute names of a table - -Syntax:: - - get_attnames(table) - -Parameters: - :table: name of table - -Returns: - :dictionary: The keys are the attribute names, - the values are the type names of the attributes. - -Description: - Given the name of a table, digs out the set of attribute names. - -has_table_privilege -------------------- -check whether current user has specified table privilege - -Syntax:: - - has_table_privilege(table, privilege) - -Parameters: - :table: name of table - :privilege: privilege to be checked - default is 'select' - -Description: - Returns True if the current user has the specified privilege for the table. - -get ---- -get a row from a database table or view - -Syntax:: - - get(table, arg, [keyname]) - -Parameters: - :table: name of table or view - :arg: either a dictionary or the value to be looked up - :keyname: name of field to use as key (optional) - -Return type: - :dictionary: The keys are the attribute names, - the values are the row values. - -Description: - This method is the basic mechanism to get a single row. It assumes - that the key specifies a unique row. If `keyname` is not specified - then the primary key for the table is used. If `arg` is a dictionary - then the value for the key is taken from it and it is modified to - include the new values, replacing existing values where necessary. - For a composite key, `keyname` can also be a sequence of key names. - The OID is also put into the dictionary if the table has one, but in - order to allow the caller to work with multiple tables, it is munged - as `oid(schema.table)`. - -insert ------- -insert a row into a database table - -Syntax:: - - insert(table, [d,] [key = val, ...]) - -Parameters: - :table: name of table - :d: optional dictionary of values - -Return type: - :dictionary: The dictionary of values inserted - -Description: - This method inserts a row into a table. If the optional dictionary is - not supplied then the required values must be included as keyword/value - pairs. If a dictionary is supplied then any keywords provided will be - added to or replace the entry in the dictionary. - - The dictionary is then, if possible, reloaded with the values actually - inserted in order to pick up values modified by rules, triggers, etc. - - Note: The method currently doesn't support insert into views - although PostgreSQL does. - -update ------- -update a row in a database table - -Syntax:: - - update(table, [d,] [key = val, ...]) - -Parameters: - :table: name of table - :d: optional dictionary of values - -Return type: - :dictionary: the new row - -Description: - Similar to insert but updates an existing row. 
The update is based on the - OID value as munged by get or passed as keyword, or on the primary key of - the table. The dictionary is modified, if possible, to reflect any changes - caused by the update due to triggers, rules, default values, etc. - - Like insert, the dictionary is optional and updates will be performed - on the fields in the keywords. There must be an OID or primary key - either in the dictionary where the OID must be munged, or in the keywords - where it can be simply the string "oid". - -query ------ -executes a SQL command string - -Syntax:: - - query(command, [arg1, [arg2, ...]]) - -Parameters: - :command: SQL command (string) - :arg*: optional positional arguments - -Return type: - :pgqueryobject, None: result values - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - :TypeError: invalid connection - :ValueError: empty SQL query or lost connection - :pg.ProgrammingError: error in query - :pg.InternalError: error during query processing - -Description: - Similar to the pgobject function with the same name, except that positional - arguments can be passed either as a single list or tuple, or as individual - positional arguments - -Example:: - - name = raw_input("Name? ") - phone = raw_input("Phone? " - rows = db.query("update employees set phone=$2" - " where name=$1", (name, phone)).getresult()[0][0] - # or - rows = db.query("update employees set phone=$2" - " where name=$1", name, phone).getresult()[0][0] - -clear ------ -clears row values in memory - -Syntax:: - - clear(table, [a]) - -Parameters: - :table: name of table - :a: optional dictionary of values - -Return type: - :dictionary: an empty row - -Description: - This method clears all the attributes to values determined by the types. - Numeric types are set to 0, Booleans are set to 'f', dates are set - to 'now()' and everything else is set to the empty string. - If the array argument is present, it is used as the array and any entries - matching attribute names are cleared with everything else left unchanged. - - If the dictionary is not supplied a new one is created. - -delete ------- -delete a row from a database table - -Syntax:: - - delete(table, [d,] [key = val, ...]) - -Parameters: - :table: name of table - :d: optional dictionary of values - -Returns: - None - -Description: - This method deletes the row from a table. It deletes based on the OID value - as munged by get or passed as keyword, or on the primary key of the table. - The return value is the number of deleted rows (i.e. 0 if the row did not - exist and 1 if the row was deleted). - -escape_string -------------- -escape a string for use within SQL - -Syntax:: - - escape_string(string) - -Parameters: - :string: the string that is to be escaped - -Return type: - :str: the escaped string - -Description: - Similar to the module function with the same name, but the - behavior of this method is adjusted depending on the connection properties - (such as character encoding). - -escape_bytea ------------- -escape binary data for use within SQL as type `bytea` - -Syntax:: - - escape_bytea(datastring) - -Parameters: - :datastring: string containing the binary data that is to be escaped - -Return type: - :str: the escaped string - -Description: - Similar to the module function with the same name, but the - behavior of this method is adjusted depending on the connection properties - (in particular, whether standard-conforming strings are enabled). 
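To tie the row-level methods and the escaping helpers described above
together, here is a small sketch (the `employees` table, its fields and the
file name are only placeholders; error handling is omitted)::

    import pg

    db = pg.DB(dbname='testdb')

    row = db.insert('employees', name='John Doe', phone='555-1234')
    row['phone'] = '555-4321'
    db.update('employees', row)             # uses the munged OID or primary key
    john = db.get('employees', 'John Doe', 'name')
    db.delete('employees', john)

    # escaping values for a hand-written statement
    name = db.escape_string("O'Hara")
    photo = db.escape_bytea(open('photo.png', 'rb').read())
    db.query("UPDATE employees SET photo='%s' WHERE name='%s'"
             % (photo, name))
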
- -unescape_bytea --------------- -unescape `bytea` data that has been retrieved as text - -Syntax:: - - unescape_bytea(string) - -Parameters: - :datastring: the `bytea` data string that has been retrieved as text - -Return type: - :str: string containing the binary data - -Description: - See the module function with the same name. - - -pgqueryobject methods ---------------------- - -getresult ---------- -get query values as list of tuples - -Syntax:: - - getresult() - -Parameters: - None - -Return type: - :list: result values as a list of tuples - -Exceptions raised: - :TypeError: too many (any) parameters - :MemoryError: internal memory error - -Description: - This method returns the list of the values returned by the query. - More information about this result may be accessed using listfields(), - fieldname() and fieldnum() methods. - -dictresult ----------- -get query values as list of dictionaries - -Syntax:: - - dictresult() - -Parameters: - None - -Return type: - :list: result values as a list of dictionaries - -Exceptions raised: - :TypeError: too many (any) parameters - :MemoryError: internal memory error - -Description: - This method returns the list of the values returned by the query - with each tuple returned as a dictionary with the field names - used as the dictionary index. - -namedresult ------------ -get query values as list of named tuples - -Syntax:: - - namedresult() - -Parameters: - None - -Return type: - :list: result values as a list of named tuples - -Exceptions raised: - :TypeError: too many (any) parameters - :TypeError: named tuples not supported - :MemoryError: internal memory error - -Description: - This method returns the list of the values returned by the query - with each row returned as a named tuple with proper field names. - -listfields ----------- -lists fields names of previous query result - -Syntax:: - - listfields() - -Parameters: - None - -Return type: - :list: field names - -Exceptions raised: - :TypeError: too many parameters - -Description: - This method returns the list of names of the fields defined for the - query result. The fields are in the same order as the result values. - -fieldname/fieldnum ------------------- -field name/number conversion - -Syntax:: - - fieldname(i) - -Parameters: - :i: field number (integer) - -Return type: - :string: field name - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :ValueError: invalid field number - -Description: - This method allows to find a field name from its rank number. It can be - useful for displaying a result. The fields are in the same order as the - result values. - -Syntax:: - - fieldnum(name) - -Parameters: - :name: field name (string) - -Return type: - :integer: field number - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :ValueError: unknown field name - -Description: - This method returns a field number from its name. It can be used to - build a function that converts result list strings to their correct - type, using a hardcoded table definition. The number returned is the - field rank in the result values list. - -ntuples -------- -return number of tuples in query object - -Syntax:: - - ntuples() - -Parameters: - None - -Return type: - :integer: number of tuples in `pgqueryobject` - -Exceptions raised: - :TypeError: Too many arguments. - -Description: - This method returns the number of tuples found in a query. 
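The result access methods above can be combined as in the following sketch
(the `employees` table is only a placeholder)::

    q = db.query("SELECT name, phone FROM employees")

    print q.ntuples()          # number of rows in the result
    print q.listfields()       # the field names, in result order
    print q.fieldname(0)       # name of the first field
    print q.fieldnum('phone')  # rank of the 'phone' field

    for row in q.getresult():   # rows as tuples
        print row[0], row[1]
    for row in q.dictresult():  # rows as dictionaries
        print row['name'], row['phone']
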
- - -pglarge -------- -Large objects - -This object handles all the request concerning a PostgreSQL large object. It -embeds and hides all the "recurrent" variables (object OID and connection), -exactly in the same way `pgobjects` do, thus only keeping significant -parameters in function calls. It keeps a reference to the `pgobject` used for -its creation, sending requests though with its parameters. Any modification but -dereferencing the `pgobject` will thus affect the `pglarge` object. -Dereferencing the initial `pgobject` is not a problem since Python won't -deallocate it before the `pglarge` object dereference it. -All functions return a generic error message on call error, whatever the -exact error was. The `error` attribute of the object allows to get the exact -error message. - -See also the PostgreSQL programmer's guide for more information about the -large object interface. - -open ----- -opens a large object - -Syntax:: - - open(mode) - -Parameters: - :mode: open mode definition (integer) - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :IOError: already opened object, or open error - -Description: - This method opens a large object for reading/writing, in the same way than - the Unix open() function. The mode value can be obtained by OR-ing the - constants defined in the pgmodule (INV_READ, INV_WRITE). - -close ------ -closes a large object - -Syntax:: - - close() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection - :TypeError: too many parameters - :IOError: object is not opened, or close error - -Description: - This method closes a previously opened large object, in the same way than - the Unix close() function. - -read/write/tell/seek/unlink ---------------------------- -file like large object handling - -Syntax:: - - read(size) - -Parameters: - :size: maximal size of the buffer to be read - -Return type: - :sized string: the read buffer - -Exceptions raised: - :TypeError: invalid connection, invalid object, - bad parameter type, or too many parameters - :ValueError: if `size` is negative - :IOError: object is not opened, or read error - -Description: - This function allows to read data from a large object, starting at current - position. - -Syntax:: - - write(string) - -Parameters: - (sized) string - buffer to be written - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :IOError: object is not opened, or write error - -Description: - This function allows to write data to a large object, starting at current - position. - -Syntax:: - - seek(offset, whence) - -Parameters: - :offset: position offset - :whence: positional parameter - -Return type: - :integer: new position in object - -Exceptions raised: - :TypeError: invalid connection or invalid object, - bad parameter type, or too many parameters - :IOError: object is not opened, or seek error - -Description: - This method allows to move the position cursor in the large object. The - valid values for the whence parameter are defined as constants in the - `pg` module (`SEEK_SET`, `SEEK_CUR`, `SEEK_END`). 
- -Syntax:: - - tell() - -Parameters: - None - -Return type: - :integer: current position in large object - -Exceptions raised: - :TypeError: invalid connection or invalid object - :TypeError: too many parameters - :IOError: object is not opened, or seek error - -Description: - This method allows to get the current position in the large object. - -Syntax:: - - unlink() - -Parameter: - None - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection or invalid object - :TypeError: too many parameters - :IOError: object is not closed, or unlink error - -Description: - This methods unlinks (deletes) the PostgreSQL large object. - -size ----- -gives the large object size - -Syntax:: - - size() - -Parameters: - None - -Return type: - :integer: the large object size - -Exceptions raised: - :TypeError: invalid connection or invalid object - :TypeError: too many parameters - :IOError: object is not opened, or seek/tell error - -Description: - This (composite) method allows to get the size of a large object. It was - implemented because this function is very useful for a web interfaced - database. Currently, the large object needs to be opened first. - -export ------- -saves a large object to a file - -Syntax:: - - export(name) - -Parameters: - :name: file to be created - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection or invalid object, - bad parameter type, or too many parameters - :IOError: object is not closed, or export error - -Description: - This methods allows to dump the content of a large object in a very simple - way. The exported file is created on the host of the program, not the - server host. - -Object attributes ------------------ -`pglarge` objects define a read-only set of attributes that allow to get -some information about it. These attributes are: - - :oid: the OID associated with the object - :pgcnx: the `pgobject` associated with the object - :error: the last warning/error message of the connection - -.. caution:: *Be careful*: - In multithreaded environments, `error` may be modified by another thread - using the same pgobject. Remember these object are shared, not duplicated. - You should provide some locking to be able if you want to check this. - The `oid` attribute is very interesting because it allow you reuse the OID - later, creating the `pglarge` object with a `pgobject` getlo() method call. diff --git a/docs/community/bugtracker.rst b/docs/community/bugtracker.rst new file mode 100644 index 00000000..2708eab4 --- /dev/null +++ b/docs/community/bugtracker.rst @@ -0,0 +1,15 @@ +Bug Tracker +----------- + +We are using `Trac `_ as an issue tracker. + +Track tickets are usually entered after discussion on the mailing list, +but you may also request an account for the issue tracker and add or +process tickets if you want to get more involved into the development +of the project. 
You can use the following links to get an overview: + +* `PyGreSQL Issues Tracker `_ +* `Timeline with all changes `_ +* `Roadmap of the project `_ +* `Lists of active tickets `_ +* `PyGreSQL Trac browser `_ \ No newline at end of file diff --git a/docs/community/homes.rst b/docs/community/homes.rst new file mode 100644 index 00000000..abf3bae5 --- /dev/null +++ b/docs/community/homes.rst @@ -0,0 +1,11 @@ +Project home sites +------------------ + +**Python**: + http://www.python.org + +**PostgreSQL**: + http://www.postgresql.org + +**PyGreSQL**: + http://www.pygresql.org \ No newline at end of file diff --git a/docs/community/index.rst b/docs/community/index.rst new file mode 100644 index 00000000..83160268 --- /dev/null +++ b/docs/community/index.rst @@ -0,0 +1,17 @@ +PyGreSQL Development and Support +================================ + +PyGreSQL is an open-source project created by a group of volunteers. +The project and the development infrastructure are currently maintained +by D'Arcy J.M. Cain. We would be glad to welcome more contributors +so that PyGreSQL can be further developed, modernized and improved. + +.. include:: mailinglist.rst + +.. include:: source.rst + +.. include:: bugtracker.rst + +.. include:: support.rst + +.. include:: homes.rst diff --git a/docs/community/mailinglist.rst b/docs/community/mailinglist.rst new file mode 100644 index 00000000..c0269512 --- /dev/null +++ b/docs/community/mailinglist.rst @@ -0,0 +1,11 @@ +Mailing list +------------ + +You can join +`the mailing list `_ +to discuss future development of the PyGreSQL interface or if you have +questions or problems with PyGreSQL that are not covered in the +:doc:`documentation <../contents/index>`. + +This is usually a low volume list except when there are new features +being added. diff --git a/docs/community/source.rst b/docs/community/source.rst new file mode 100644 index 00000000..e8bfc035 --- /dev/null +++ b/docs/community/source.rst @@ -0,0 +1,12 @@ +Access to the source repository +------------------------------- + +We are using a central `Subversion `_ +source code repository for PyGreSQL. + +The current trunk of the repository can be checked out with the command:: + + svn co svn://svn.pygresql.org/pygresql/trunk + +You can also browse through the repository using the +`PyGreSQL Trac browser `_. diff --git a/docs/community/support.rst b/docs/community/support.rst new file mode 100644 index 00000000..ac4fa6e8 --- /dev/null +++ b/docs/community/support.rst @@ -0,0 +1,18 @@ +Support +------- + +**Python**: + see http://www.python.org/community/ + +**PostgreSQL**: + see http://www.postgresql.org/support/ + +**PyGreSQL**: + Join `the PyGreSQL mailing list `_ + if you need help regarding PyGreSQL. + + Please also send context diffs there, if you would like to proposes changes. + + Please note that messages to individual developers will generally not be + answered directly. All questions, comments and code changes must be + submitted to the mailing list for peer review and archiving purposes. \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index aa204757..babb1b21 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- # -# PyGreSQL documentation build configuration file, created by -# sphinx-quickstart on Thu Nov 1 07:47:06 2012. +# PyGreSQL documentation build configuration file. # -# This file is execfile()d with the current directory set to its containing dir. +# This file is execfile()d with the current directory set to its +# containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. @@ -11,29 +11,45 @@ # All configuration values have a default; values that are commented out # serve to show the default. -import sys, os - -# import Cloud -import cloud_sptheme as csp +import sys +import os +import shlex +import shutil + +# Import Cloud theme (this will also automatically add the theme directory). +# Note: We add a navigation bar to the cloud them using a custom layout. +if os.environ.get('READTHEDOCS', None) == 'True': + # We cannot use our custom layout here, since RTD overrides layout.html. + use_cloud_theme = False +else: + try: + import cloud_sptheme + use_cloud_theme = True + except ImportError: + use_cloud_theme = False + +shutil.copyfile('start.txt' if use_cloud_theme else 'toc.txt', 'index.rst') # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) -# -- General configuration ----------------------------------------------------- +# -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc'] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ['_templates'] if use_cloud_theme else [] -# The suffix of source filenames. +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. @@ -44,20 +60,23 @@ # General information about the project. project = u'PyGreSQL' -copyright = u'2012, The PyGreSQL team' +author = u'The PyGreSQL team' +copyright = u'2016, ' + author # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '4.2' +version = u'4.2' # The full version, including alpha/beta/rc tags. -release = '4.2' +release = u'4.2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -69,7 +88,15 @@ # directories to ignore when looking for source files. exclude_patterns = ['_build'] -# The reST default role (used for this markup: `text`) to use for all documents. +# List of pages which are included in other pages and therefore should +# not appear in the toctree. 
+exclude_patterns += [ + 'download/download.rst', 'download/files.rst', + 'community/mailinglist.rst', 'community/source.rst', + 'community/bugtracker.rst', 'community/support.rst', + 'community/homes.rst'] + +# The reST default role (used for this markup: `text`) for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. @@ -89,42 +116,61 @@ # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] +# If true, keep warnings as "system message" paragraphs in the built documents. +#keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False -# -- Options for HTML output --------------------------------------------------- + +# -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'cloud' +html_theme = 'cloud' if use_cloud_theme else 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -html_theme_options = { "defaultcollapsed": True, } +if use_cloud_theme: + html_theme_options = { + 'roottarget': 'contents/index', + 'defaultcollapsed': True, + 'shaded_decor': True} +else: + html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [csp.get_theme_dir()] +html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +html_title = 'PyGreSQL %s' % version +if use_cloud_theme: + html_title += ' documentation' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. -#html_logo = None +html_logo = '_static/pygresql.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -#html_favicon = None +html_favicon = '_static/favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +#html_extra_path = [] + # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' @@ -166,11 +212,25 @@ # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +#html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +#html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. 
+#html_search_scorer = 'scorer.js' + # Output file base name for HTML help builder. htmlhelp_basename = 'PyGreSQLdoc' -# -- Options for LaTeX output -------------------------------------------------- +# -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). @@ -181,13 +241,17 @@ # Additional stuff for the LaTeX preamble. #'preamble': '', + +# Latex figure (float) alignment +#'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). latex_documents = [ - ('index', 'PyGreSQL.tex', u'PyGreSQL Documentation', - u'The PyGreSQL team', 'manual'), + (master_doc, 'PyGreSQL.tex', u'PyGreSQL Documentation', + author, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -211,28 +275,27 @@ #latex_domain_indices = True -# -- Options for manual page output -------------------------------------------- +# -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - ('index', 'pygresql', u'PyGreSQL Documentation', - [u'The PyGreSQL team'], 1) + (master_doc, 'pygresql', u'PyGreSQL Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False -# -- Options for Texinfo output ------------------------------------------------ +# -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'PyGreSQL', u'PyGreSQL Documentation', - u'The PyGreSQL team', 'PyGreSQL', 'One line description of project.', - 'Miscellaneous'), + (master_doc, 'PyGreSQL', u'PyGreSQL Documentation', + author, 'PyGreSQL', 'One line description of project.', + 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. @@ -243,3 +306,6 @@ # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +#texinfo_no_detailmenu = False diff --git a/docs/changelog.rst b/docs/contents/changelog.rst similarity index 87% rename from docs/changelog.rst rename to docs/contents/changelog.rst index 3733bfb0..c2aa6f3d 100644 --- a/docs/changelog.rst +++ b/docs/contents/changelog.rst @@ -1,12 +1,42 @@ ChangeLog ========= -Version 4.2 ------------ +Version 4.2.2 (2016-03-18) +-------------------------- +- The get_relations() and get_tables() methods now also return system views + and tables if you set the optional "system" parameter to True. +- Fixed a regression when using temporary tables with DB wrapper methods + (thanks to Patrick TJ McPhee for reporting). + +Version 4.2.1 (2016-02-18) +-------------------------- +- Fixed a small bug when setting the notice receiver. +- Some more minor fixes and re-packaging with proper permissions. + +Version 4.2 (2016-01-21) +------------------------ +- The supported Python versions are 2.4 to 2.7. +- PostgreSQL is supported in all versions from 8.3 to 9.5. +- Set a better default for the user option "escaping-funcs". +- Force build to compile with no errors. 
+- New methods get_parameters() and set_parameters() in the classic interface + which can be used to get or set run-time parameters. +- New method truncate() in the classic interface that can be used to quickly + empty a table or a set of tables. +- Fix decimal point handling. +- Add option to return boolean values as bool objects. +- Add option to return money values as string. +- get_tables() does not list information schema tables any more. +- Fix notification handler (Thanks Patrick TJ McPhee). +- Fix a small issue with large objects. +- Minor improvements in the NotificationHandler. +- Converted documentation to Sphinx and added many missing parts. +- The tutorial files have become a chapter in the documentation. +- Greatly improved unit testing, tests run with Python 2.4 to 2.7 again. Version 4.1.1 (2013-01-08) -------------------------- -- Add WhenNotified class and method. Replaces need for third party pgnotify. +- Add NotificationHandler class and method. Replaces need for pgnotify. - Sharpen test for inserting current_timestamp. - Add more quote tests. False and 0 should evaluate to NULL. - More tests - Any number other than 0 is True. @@ -17,7 +47,7 @@ Version 4.1.1 (2013-01-08) Version 4.1 (2013-01-01) ------------------------ - Dropped support for Python below 2.5 and PostgreSQL below 8.3. -- Support the new PostgreSQL versions 9.0, 9.1 and 9.2. +- Added support for Python up to 2.7 and PostgreSQL up to 9.2. - Particularly, support PQescapeLiteral() and PQescapeIdentifier(). - The query method of the classic API now supports positional parameters. This an effective way to pass arbitrary or unknown data without worrying @@ -25,6 +55,10 @@ Version 4.1 (2013-01-01) - The classic API now supports a method namedresult() in addition to getresult() and dictresult(), which returns the rows of the result as named tuples if these are supported (Python 2.6 or higher). +- The classic API has got the new methods begin(), commit(), rollback(), + savepoint() and release() for handling transactions. +- Both classic and DBAPI 2 connections can now be used as context + managers for encapsulating transactions. - The execute() and executemany() methods now return the cursor object, so you can now write statements like "for row in cursor.execute(...)" (as suggested by Adam Frederick). @@ -45,7 +79,6 @@ Version 4.1 (2013-01-01) - New method use_regtypes() that can be called to let getattnames() return regular type names instead of the simplified classic types (#44). - Version 4.0 (2009-01-01) ------------------------ - Dropped support for Python below 2.3 and PostgreSQL below 7.4. @@ -93,7 +126,6 @@ Version 4.0 (2009-01-01) - delete() now also works based on the primary key if no oid available and returns whether the row existed or not. - Version 3.8.1 (2006-06-05) -------------------------- - Use string methods instead of deprecated string functions. diff --git a/docs/examples.rst b/docs/contents/examples.rst similarity index 81% rename from docs/examples.rst rename to docs/contents/examples.rst index a7227578..2b279dcf 100644 --- a/docs/examples.rst +++ b/docs/contents/examples.rst @@ -5,6 +5,9 @@ I am starting to collect examples of applications that use PyGreSQL. So far I only have a few but if you have an example for me, you can either send me the files or the URL for me to point to. +The *tutorial* directory that is part of the PyGreSQL distribution +shows some examples of using PostgreSQL with PyGreSQL. 
+ Here is a `list of motorcycle rides in Ontario `_ that uses a PostgreSQL database to store the rides. diff --git a/docs/contents/general.rst b/docs/contents/general.rst new file mode 100644 index 00000000..aced233d --- /dev/null +++ b/docs/contents/general.rst @@ -0,0 +1,35 @@ +General PyGreSQL Programming Information +---------------------------------------- + +PyGreSQL consists of two parts: the "classic" PyGreSQL interface +provided by the :mod:`pg` module and the newer +DB-API 2.0 compliant interface provided by the :mod:`pgdb` module. + +If you use only the standard features of the DB-API 2.0 interface, +it will be easier to switch from PostgreSQL to another database +for which a DB-API 2.0 compliant interface exists. + +The "classic" interface may be easier to use for beginners, and it +provides some higher-level and PostgreSQL specific convenience methods. + +.. seealso:: + + **DB-API 2.0** (Python Database API Specification v2.0) + is a specification for connecting to databases (not only PostGreSQL) + from Python that has been developed by the Python DB-SIG in 1999. + The authoritative programming information for the DB-API is :pep:`0249`. + +Both Python modules utilize the same lower level C extension module that +serves as a wrapper for the C API to PostgreSQL that is available in form +of the so-called "libpq" library. + +This means you must have the libpq library installed as a shared library +on your client computer, in a version that is supported by PyGreSQL. +Depending on the client platform, you may have to set environment variables +like `PATH` or `LD_LIBRARY_PATH` so that PyGreSQL can find the library. + +.. warning:: + + Note that PyGreSQL is not thread-safe on the connection level. Therefore + we recommend using `DBUtils `_ + for multi-threaded environments, which supports both PyGreSQL interfaces. diff --git a/docs/contents/index.rst b/docs/contents/index.rst new file mode 100644 index 00000000..f3e9caa3 --- /dev/null +++ b/docs/contents/index.rst @@ -0,0 +1,24 @@ +The PyGreSQL documentation +========================== + +Contents +-------- + +.. toctree:: + :maxdepth: 1 + + Installing PyGreSQL + What's New and History of Changes + General PyGreSQL Programming Information + First Steps with PyGreSQL + The Classic PyGreSQL Interface + The DB-API Compliant Interface + A PostgreSQL Primer + Examples for using PyGreSQL + +Indices and tables +------------------ + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/install.rst b/docs/contents/install.rst similarity index 82% rename from docs/install.rst rename to docs/contents/install.rst index cc7fb0fd..48bcbabc 100644 --- a/docs/install.rst +++ b/docs/contents/install.rst @@ -5,14 +5,14 @@ General ------- You must first have installed Python and PostgreSQL on your system. -If you want to access remote database only, you don't need to install +If you want to access remote databases only, you don't need to install the full PostgreSQL server, but only the C interface (libpq). If you are on Windows, make sure that the directory with libpq.dll is in your ``PATH`` environment variable. The current version of PyGreSQL has been tested with Python 2.7 and -PostGreSQL 9.2. Older version should work as well, but you will need -at least Python 2.5 and PostgreSQL 8.3. +PostGreSQL 9.3. Older version should work as well, but you will need +at least Python 2.4 and PostgreSQL 8.3. 
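Once PyGreSQL is installed (see the sections below), you can check from an
interactive session which versions are actually in use (the database name
here is only an example; the attributes are explained with the connection
object in the classic interface documentation)::

    >>> import pg
    >>> con = pg.connect(dbname='template1')
    >>> con.server_version     # e.g. 90305 for PostgreSQL 9.3.5
    90305
    >>> con.protocol_version   # the frontend/backend protocol version
    3
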
PyGreSQL will be installed as three modules, a dynamic module called _pg.pyd, and two pure Python wrapper modules called pg.py and pgdb.py. @@ -20,10 +20,31 @@ All three files will be installed directly into the Python site-packages directory. To uninstall PyGreSQL, simply remove these three files again. +Installing with Pip +------------------- + +This is the most easy way to install PyGreSQL if you have "pip" installed +on your computer. Just run the following command in your terminal:: + + pip install PyGreSQL + +This will automatically try to find and download a distribution on the +`Python Package Index `_ that matches your operating +system and Python version and install it on your computer. + + Installing from a Binary Distribution ------------------------------------- -This is the easiest way to install PyGreSQL. +If you don't want to use "pip", or "pip" doesn't find an appropriate +distribution for your computer, you can also try to manually download +and install a distribution. + +When you download the source distribution, you will need to compile the +C extensions, for which you need a C compiler installed on your computer. +If you don't want to install a C compiler or avoid possible problems +with the compilation, you can search for a pre-compiled binary distribution +of PyGreSQL on the Python Package Index or the PyGreSQL homepage. You can currently download PyGreSQL as Linux RPM, NetBSD package and Windows installer. Make sure the required Python version of the binary package matches @@ -49,7 +70,7 @@ tool. This is usually also part of the "devel" package on Unix, and will be installed as part of the database server feature on Windows systems. Building and installing with Distutils --------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can build and install PyGreSQL using `Distutils `_. @@ -61,27 +82,17 @@ Type the following commands to build and install PyGreSQL:: python setup.py build python setup.py install -If you are using `MinGW `_ to build PyGreSQL under -Microsoft Windows, please note that Python newer version 2.3 is using msvcr71 -instead of msvcrt as its common runtime library. You can allow for that by -editing the file ``%MinGWpath%/lib/gcc/%MinGWversion%/specs`` and changing -the entry that reads ``-lmsvcrt`` to ``-lmsvcr71``. You may also need to copy -``libpq.lib`` to ``libpq.a`` in the PostgreSQL ``lib`` directory. Then use -the following command to build and install PyGreSQL:: - - python setup.py build -c mingw32 install - Now you should be ready to use PyGreSQL. Compiling Manually ------------------- +~~~~~~~~~~~~~~~~~~ The source file for compiling the dynamic module is called pgmodule.c. You have two options. You can compile PyGreSQL as a stand-alone module or you can build it into the Python interpreter. Stand-Alone ------------ +^^^^^^^^^^^ * In the directory containing ``pgmodule.c``, run the following command:: @@ -139,7 +150,7 @@ Stand-Alone if your Python modules are in ``/usr/lib/python``. Built-in to Python interpreter ------------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Find the directory where your ``Setup`` file lives (usually in the ``Modules`` subdirectory) in the Python source hierarchy and copy or symlink the diff --git a/docs/contents/pg/connection.rst b/docs/contents/pg/connection.rst new file mode 100644 index 00000000..612db3d9 --- /dev/null +++ b/docs/contents/pg/connection.rst @@ -0,0 +1,374 @@ +pgobject -- The connection object +================================= + +.. 
py:currentmodule:: pg + +.. class:: pgobject + +This object handles a connection to a PostgreSQL database. It embeds and +hides all the parameters that define this connection, thus just leaving really +significant parameters in function calls. + +.. note:: + + Some methods give direct access to the connection socket. + *Do not use them unless you really know what you are doing.* + If you prefer disabling them, + set the ``-DNO_DIRECT`` option in the Python setup file. + These methods are specified by the tag [DA]. + +.. note:: + + Some other methods give access to large objects + (refer to PostgreSQL user manual for more information about these). + If you want to forbid access to these from the module, + set the ``-DNO_LARGE`` option in the Python setup file. + These methods are specified by the tag [LO]. + +query -- execute a SQL command string +------------------------------------- + +.. method:: pgobject.query(command, [args]) + + Execute a SQL command string + + :param str command: SQL command + :param args: optional positional arguments + :returns: result values + :rtype: :class:`pgqueryobject`, None + :raises TypeError: bad argument type, or too many arguments + :raises TypeError: invalid connection + :raises ValueError: empty SQL query or lost connection + :raises pg.ProgrammingError: error in query + :raises pg.InternalError: error during query processing + +This method simply sends a SQL query to the database. If the query is an +insert statement that inserted exactly one row into a table that has OIDs, the +return value is the OID of the newly inserted row. If the query is an update +or delete statement, or an insert statement that did not insert exactly one +row in a table with OIDs, then the number of rows affected is returned as a +string. If it is a statement that returns rows as a result (usually a select +statement, but maybe also an ``"insert/update ... returning"`` statement), +this method returns a :class:`pgqueryobject` that can be accessed via the +:meth:`pgqueryobject.getresult`, :meth:`pgqueryobject.dictresult` or +:meth:`pgqueryobject.namedresult` methods or simply printed. +Otherwise, it returns ``None``. + +The query may optionally contain positional parameters of the form ``$1``, +``$2``, etc instead of literal data, and the values supplied as a tuple. +The values are substituted by the database in such a way that they don't +need to be escaped, making this an effective way to pass arbitrary or +unknown data without worrying about SQL injection or syntax errors. + +When the database could not process the query, a :exc:`pg.ProgrammingError` or +a :exc:`pg.InternalError` is raised. You can check the ``SQLSTATE`` error code +of this error by reading its :attr:`sqlstate` attribute. + +Example:: + + name = raw_input("Name? ") + phone = con.query("select phone from employees where name=$1", + (name,)).getresult() + +reset -- reset the connection +----------------------------- + +.. method:: pgobject.reset() + + Reset the :mod:`pg` connection + + :rtype: None + :raises TypeError: too many (any) arguments + :raises TypeError: invalid connection + +This method resets the current database connection. + +cancel -- abandon processing of current SQL command +--------------------------------------------------- + +.. method:: pgobject.cancel() + + :rtype: None + :raises TypeError: too many (any) arguments + :raises TypeError: invalid connection + +This method requests that the server abandon processing +of the current SQL command. 
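The different return values of :meth:`pgobject.query` described above can be
summarized in a short sketch (the ``friends`` table is only a placeholder and
error handling is omitted)::

    con = pg.connect(dbname='testdb')

    con.query("CREATE TABLE friends (name varchar, phone varchar) WITH OIDS")

    oid = con.query("INSERT INTO friends VALUES ('Cleo', '555-1234')")
    # insert of exactly one row into a table with OIDs: the new row's OID

    rows = con.query("UPDATE friends SET phone='555-4321' WHERE name='Cleo'")
    # update statement: the number of affected rows, returned as a string

    q = con.query("SELECT name, phone FROM friends WHERE name=$1", ('Cleo',))
    print q.getresult()
    # select statement: a pgqueryobject holding the result rows

    con.query("DROP TABLE friends")
    # statements that return nothing yield None
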
+ +close -- close the database connection +-------------------------------------- + +.. method:: pgobject.close() + + Close the :mod:`pg` connection + + :rtype: None + :raises TypeError: too many (any) arguments + +This method closes the database connection. The connection will +be closed in any case when the connection is deleted but this +allows you to explicitly close it. It is mainly here to allow +the DB-SIG API wrapper to implement a close function. + +fileno -- returns the socket used to connect to the database +------------------------------------------------------------ + +.. method:: pgobject.fileno() + + Return the socket used to connect to the database + + :returns: the socket id of the database connection + :rtype: int + :raises TypeError: too many (any) arguments + :raises TypeError: invalid connection + +This method returns the underlying socket id used to connect +to the database. This is useful for use in select calls, etc. + +getnotify -- get the last notify from the server +------------------------------------------------ + +.. method:: pgobject.getnotify() + + Get the last notify from the server + + :returns: last notify from server + :rtype: tuple, None + :raises TypeError: too many parameters + :raises TypeError: invalid connection + +This method tries to get a notify from the server (from the SQL statement +NOTIFY). If the server returns no notify, the methods returns None. +Otherwise, it returns a tuple (triplet) *(relname, pid, extra)*, where +*relname* is the name of the notify, *pid* is the process id of the +connection that triggered the notify, and *extra* is a payload string +that has been sent with the notification. Remember to do a listen query +first, otherwise :meth:`pgobject.getnotify` will always return ``None``. + +.. versionchanged:: 4.1 + Support for payload strings was added in version 4.1. + +inserttable -- insert a list into a table +----------------------------------------- + +.. method:: pgobject.inserttable(table, values) + + Insert a Python list into a database table + + :param str table: the table name + :param list values: list of rows values + :rtype: None + :raises TypeError: invalid connection, bad argument type, or too many arguments + :raises MemoryError: insert buffer could not be allocated + :raises ValueError: unsupported values + +This method allows to *quickly* insert large blocks of data in a table: +It inserts the whole values list into the given table. Internally, it +uses the COPY command of the PostgreSQL database. The list is a list +of tuples/lists that define the values for each inserted row. The rows +values may contain string, integer, long or double (real) values. + +.. warning:: + + This method doesn't type check the fields according to the table definition; + it just look whether or not it knows how to handle such types. + +get/set_notice_receiver -- custom notice receiver +------------------------------------------------- + +.. method:: pgobject.get_notice_receiver() + + Get the current notice receiver + + :returns: the current notice receiver callable + :rtype: callable, None + :raises TypeError: too many (any) arguments + +This method gets the custom notice receiver callback function that has +been set with :meth:`pgobject.set_notice_receiver`, or ``None`` if no +custom notice receiver has ever been set on the connection. + +.. versionadded:: 4.1 + +.. 
method:: pgobject.set_notice_receiver(proc) + + Set a custom notice receiver + + :param proc: the custom notice receiver callback function + :rtype: None + :raises TypeError: the specified notice receiver is not callable + +This method allows setting a custom notice receiver callback function. +When a notice or warning message is received from the server, +or generated internally by libpq, and the message level is below +the one set with ``client_min_messages``, the specified notice receiver +function will be called. This function must take one parameter, +the :class:`pgnotice` object, which provides the following read-only +attributes: + + .. attribute:: pgnotice.pgcnx + + the connection + + .. attribute:: pgnotice.message + + the full message with a trailing newline + + .. attribute:: pgnotice.severity + + the level of the message, e.g. 'NOTICE' or 'WARNING' + + .. attribute:: pgnotice.primary + + the primary human-readable error message + + .. attribute:: pgnotice.detail + + an optional secondary error message + + .. attribute:: pgnotice.hint + + an optional suggestion what to do about the problem + +.. versionadded:: 4.1 + +putline -- write a line to the server socket [DA] +------------------------------------------------- + +.. method:: pgobject.putline(line) + + Write a line to the server socket + + :param str line: line to be written + :rtype: None + :raises TypeError: invalid connection, bad parameter type, or too many parameters + +This method allows to directly write a string to the server socket. + +getline -- get a line from server socket [DA] +--------------------------------------------- + +.. method:: pgobject.getline() + + Get a line from server socket + + :returns: the line read + :rtype: str + :raises TypeError: invalid connection + :raises TypeError: too many parameters + :raises MemoryError: buffer overflow + +This method allows to directly read a string from the server socket. + +endcopy -- synchronize client and server [DA] +--------------------------------------------- + +.. method:: pgobject.endcopy() + + Synchronize client and server + + :rtype: None + :raises TypeError: invalid connection + :raises TypeError: too many parameters + +The use of direct access methods may desynchronize client and server. +This method ensure that client and server will be synchronized. + +locreate -- create a large object in the database [LO] +------------------------------------------------------ + +.. method:: pgobject.locreate(mode) + + Create a large object in the database + + :param int mode: large object create mode + :returns: object handling the PostGreSQL large object + :rtype: :class:`pglarge` + :raises TypeError: invalid connection, bad parameter type, or too many parameters + :raises pg.OperationalError: creation error + +This method creates a large object in the database. The mode can be defined +by OR-ing the constants defined in the :mod:`pg` module (:const:`INV_READ`, +:const:`INV_WRITE` and :const:`INV_ARCHIVE`). Please refer to PostgreSQL +user manual for a description of the mode values. + +getlo -- build a large object from given oid [LO] +------------------------------------------------- + +.. 
method:: pgobject.getlo(oid) + + Create a large object in the database + + :param int oid: OID of the existing large object + :returns: object handling the PostGreSQL large object + :rtype: :class:`pglarge` + :raises TypeError: invalid connection, bad parameter type, or too many parameters + :raises ValueError: bad OID value (0 is invalid_oid) + +This method allows to reuse a formerly created large object through the +:class:`pglarge` interface, providing the user have its OID. + +loimport -- import a file to a large object [LO] +------------------------------------------------ + +.. method:: pgobject.loimport(name) + + Import a file to a large object + + :param str name: the name of the file to be imported + :returns: object handling the PostGreSQL large object + :rtype: :class:`pglarge` + :raises TypeError: invalid connection, bad argument type, or too many arguments + :raises pg.OperationalError: error during file import + +This methods allows to create large objects in a very simple way. You just +give the name of a file containing the data to be used. + +Object attributes +----------------- +Every :class:`pgobject` defines a set of read-only attributes that describe +the connection and its status. These attributes are: + +.. attribute:: pgobject.host + + the host name of the server (str) + +.. attribute:: pgobject.port + + the port of the server (int) + +.. attribute:: pgobject.db + + the selected database (str) + +.. attribute:: pgobject.options + + the connection options (str) + +.. attribute:: pgobject.tty + + the connection debug terminal (str) + +.. attribute:: pgobject.user + + user name on the database system (str) + +.. attribute:: pgobject.protocol_version + + the frontend/backend protocol being used (int) + +.. versionadded:: 4.0 + +.. attribute:: pgobject.server_version + + the backend version (int, e.g. 80305 for 8.3.5) + +.. versionadded:: 4.0 + +.. attribute:: pgobject.status + + the status of the connection (int: 1 = OK, 0 = bad) + +.. attribute:: pgobject.error + + the last warning/error message from the server (str) diff --git a/docs/contents/pg/db_wrapper.rst b/docs/contents/pg/db_wrapper.rst new file mode 100644 index 00000000..677ca232 --- /dev/null +++ b/docs/contents/pg/db_wrapper.rst @@ -0,0 +1,561 @@ +The DB wrapper class +==================== + +.. py:currentmodule:: pg + +.. class:: DB + +The :class:`pgobject` methods are wrapped in the class :class:`DB`. +The preferred way to use this module is as follows:: + + import pg + + db = pg.DB(...) # see below + + for r in db.query( # just for example + """SELECT foo,bar + FROM foo_bar_table + WHERE foo !~ bar""" + ).dictresult(): + + print '%(foo)s %(bar)s' % r + +This class can be subclassed as in this example:: + + import pg + + class DB_ride(pg.DB): + """Ride database wrapper + + This class encapsulates the database functions and the specific + methods for the ride database.""" + + def __init__(self): + """Open a database connection to the rides database""" + pg.DB.__init__(self, dbname='ride') + self.query("SET DATESTYLE TO 'ISO'") + + [Add or override methods here] + +The following describes the methods and variables of this class. + +Initialization +-------------- +The :class:`DB` class is initialized with the same arguments as the +:func:`connect` function described above. It also initializes a few +internal variables. The statement ``db = DB()`` will open the local +database with the name of the user just like ``connect()`` does. 
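For example (the keyword arguments are the same ones that :func:`connect`
accepts; the values shown are only placeholders)::

    import pg

    db = pg.DB()                 # local database, named after the current user
    db = pg.DB(dbname='testdb', host='pgserver', port=5432,
               user='scott', passwd='tiger')
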
+
+You can also initialize the DB class with an existing :mod:`pg` or :mod:`pgdb`
+connection. Pass this connection as a single unnamed parameter, or as a
+single parameter named ``db``. This allows you to use all of the methods
+of the DB class with a DB-API 2 compliant connection. Note that the
+:meth:`pgobject.close` and :meth:`pgobject.reopen` methods are inoperative
+in this case.
+
+pkey -- return the primary key of a table
+-----------------------------------------
+
+.. method:: DB.pkey(table)
+
+    Return the primary key of a table
+
+    :param str table: name of table
+    :returns: name of the field which is the primary key of the table
+    :rtype: str
+    :raises KeyError: the table does not have a primary key
+
+This method returns the primary key of a table. For composite primary
+keys, the return value will be a frozenset. Note that this raises a
+KeyError if the table does not have a primary key.
+
+get_databases -- get list of databases in the system
+----------------------------------------------------
+
+.. method:: DB.get_databases()
+
+    Get the list of databases in the system
+
+    :returns: all databases in the system
+    :rtype: list
+
+Although you can do this with a simple select, it is added here for
+convenience.
+
+get_relations -- get list of relations in connected database
+------------------------------------------------------------
+
+.. method:: DB.get_relations([kinds], [system])
+
+    Get the list of relations in connected database
+
+    :param str kinds: a string or sequence of type letters
+    :param bool system: whether system relations should be returned
+    :returns: all relations of the given kinds in the database
+    :rtype: list
+
+This method returns the list of relations in the connected database. Although
+you can do this with a simple select, it is added here for convenience. You
+can select which kinds of relations you are interested in by passing type
+letters in the `kinds` parameter. The type letters are ``r`` = ordinary table,
+``i`` = index, ``S`` = sequence, ``v`` = view, ``c`` = composite type,
+``s`` = special, ``t`` = TOAST table. If `kinds` is None or an empty string,
+all relations are returned (this is also the default). If `system` is set to
+`True`, then system tables and views (temporary tables, toast tables, catalog
+views and tables) will be returned as well, otherwise they will be ignored.
+
+get_tables -- get list of tables in connected database
+------------------------------------------------------
+
+.. method:: DB.get_tables([system])
+
+    Get the list of tables in connected database
+
+    :param bool system: whether system tables should be returned
+    :returns: all tables in connected database
+    :rtype: list
+
+This is a shortcut for ``get_relations('r', system)`` that has been added for
+convenience.
+
+get_attnames -- get the attribute names of a table
+--------------------------------------------------
+
+.. method:: DB.get_attnames(table)
+
+    Get the attribute names of a table
+
+    :param str table: name of table
+    :returns: a dictionary mapping attribute names to type names
+
+Given the name of a table, digs out the set of attribute names.
+
+Returns a dictionary of attribute names (the names are the keys,
+the values are the names of the attributes' types).
+
+By default, only a limited number of simple types will be returned.
+You can have the more specific regular type names returned instead
+by enabling this with the :meth:`DB.use_regtypes` method.
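+
+As an illustration, for a hypothetical table ``foo_bar_table`` with an
+integer column ``foo`` and a text column ``bar``, the call might return
+something like this::
+
+    attnames = db.get_attnames('foo_bar_table')
+    # with the default "classic" type names, roughly:
+    # {'foo': 'int', 'bar': 'text'}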
+
+get/set_parameter -- get or set run-time parameters
+----------------------------------------------------
+
+.. method:: DB.get_parameter(parameter)
+
+    Get the value of run-time parameters
+
+    :param parameter: the run-time parameter(s) to get
+    :type parameter: str, tuple, list or dict
+    :returns: the current value(s) of the run-time parameter(s)
+    :rtype: str, list or dict
+    :raises TypeError: Invalid parameter type(s)
+    :raises pg.ProgrammingError: Invalid parameter name(s)
+
+If the parameter is a string, the return value will also be a string
+that is the current setting of the run-time parameter with that name.
+
+You can get several parameters at once by passing a list, set or dict.
+When passing a list of parameter names, the return value will be a
+corresponding list of parameter settings. When passing a set of
+parameter names, a new dict will be returned, mapping these parameter
+names to their settings. Finally, if you pass a dict as parameter,
+its values will be set to the current parameter settings corresponding
+to its keys.
+
+By passing the special name `'all'` as the parameter, you can get a dict
+of all existing configuration parameters.
+
+.. versionadded:: 4.2
+
+.. method:: DB.set_parameter(parameter, [value], [local])
+
+    Set the value of run-time parameters
+
+    :param parameter: the run-time parameter(s) to set
+    :type parameter: str, tuple, list or dict
+    :param value: the value to set
+    :type value: str or None
+    :raises TypeError: Invalid parameter type(s)
+    :raises ValueError: Invalid value argument(s)
+    :raises pg.ProgrammingError: Invalid parameter name(s) or values
+
+If the parameter and the value are strings, the run-time parameter
+will be set to that value. If no value or *None* is passed as a value,
+then the run-time parameter will be restored to its default value.
+
+You can set several parameters at once by passing a list of parameter
+names, together with a single value that all parameters should be
+set to or with a corresponding list of values. You can also pass
+the parameters as a set if you only provide a single value.
+Finally, you can pass a dict with parameter names as keys. In this
+case, you should not pass a value, since the values for the parameters
+will be taken from the dict.
+
+By passing the special name `'all'` as the parameter, you can reset
+all existing settable run-time parameters to their default values.
+
+If you set *local* to `True`, then the command takes effect for only the
+current transaction. After :meth:`DB.commit` or :meth:`DB.rollback`,
+the session-level setting takes effect again. Setting *local* to `True`
+will appear to have no effect if it is executed outside a transaction,
+since the transaction will end immediately.
+
+.. versionadded:: 4.2
+
+has_table_privilege -- check table privilege
+--------------------------------------------
+
+.. method:: DB.has_table_privilege(table, privilege)
+
+    Check whether current user has specified table privilege
+
+    :param str table: the name of the table
+    :param str privilege: privilege to be checked -- default is 'select'
+    :returns: whether current user has specified table privilege
+    :rtype: bool
+
+Returns True if the current user has the specified privilege for the table.
+
+.. versionadded:: 4.0
+
+begin/commit/rollback/savepoint/release -- transaction handling
+---------------------------------------------------------------
+
+..
method:: DB.begin([mode]) + + Begin a transaction + + :param str mode: an optional transaction mode such as 'READ ONLY' + + This initiates a transaction block, that is, all following queries + will be executed in a single transaction until :meth:`DB.commit` + or :meth:`DB.rollback` is called. + +.. versionadded:: 4.1 + +.. method:: DB.start() + + This is the same as the :meth:`DB.begin` method. + +.. method:: DB.commit() + + Commit a transaction + + This commits the current transaction. All changes made by the + transaction become visible to others and are guaranteed to be + durable if a crash occurs. + +.. method:: DB.end() + + This is the same as the :meth:`DB.commit` method. + +.. versionadded:: 4.1 + +.. method:: DB.rollback([name]) + + Roll back a transaction + + :param str name: optionally, roll back to the specified savepoint + + This rolls back the current transaction and causes all the updates + made by the transaction to be discarded. + +.. versionadded:: 4.1 + +.. method:: DB.abort() + + This is the same as the :meth:`DB.rollback` method. + +.. versionadded:: 4.2 + +.. method:: DB.savepoint(name) + + Define a new savepoint + + :param str name: the name to give to the new savepoint + + This establishes a new savepoint within the current transaction. + +.. versionadded:: 4.1 + +.. method:: DB.release(name) + + Destroy a savepoint + + :param str name: the name of the savepoint to destroy + + This destroys a savepoint previously defined in the current transaction. + +.. versionadded:: 4.1 + +get -- get a row from a database table or view +---------------------------------------------- + +.. method:: DB.get(table, arg, [keyname]) + + Get a row from a database table or view + + :param str table: name of table or view + :param arg: either a dictionary or the value to be looked up + :param str keyname: name of field to use as key (optional) + :returns: A dictionary - the keys are the attribute names, + the values are the row values. + :raises pg.ProgrammingError: no primary key or missing privilege + +This method is the basic mechanism to get a single row. It assumes +that the key specifies a unique row. If *keyname* is not specified, +then the primary key for the table is used. If *arg* is a dictionary +then the value for the key is taken from it and it is modified to +include the new values, replacing existing values where necessary. +For a composite key, *keyname* can also be a sequence of key names. +The OID is also put into the dictionary if the table has one, but in +order to allow the caller to work with multiple tables, it is munged +as ``oid(schema.table)``. + +insert -- insert a row into a database table +-------------------------------------------- + +.. method:: DB.insert(table, [d], [key=val, ...]) + + Insert a row into a database table + + :param str table: name of table + :param dict d: optional dictionary of values + :returns: the inserted values in the database + :rtype: dict + :raises pg.ProgrammingError: missing privilege or conflict + +This method inserts a row into a table. If the optional dictionary is +not supplied then the required values must be included as keyword/value +pairs. If a dictionary is supplied then any keywords provided will be +added to or replace the entry in the dictionary. + +The dictionary is then, if possible, reloaded with the values actually +inserted in order to pick up values modified by rules, triggers, etc. + +update -- update a row in a database table +------------------------------------------ + +.. 
method:: DB.update(table, [d], [key=val, ...]) + + Update a row in a database table + + :param str table: name of table + :param dict d: optional dictionary of values + :returns: the new row in the database + :rtype: dict + :raises pg.ProgrammingError: no primary key or missing privilege + +Similar to insert but updates an existing row. The update is based on the +OID value as munged by :meth:`DB.get` or passed as keyword, or on the primary +key of the table. The dictionary is modified, if possible, to reflect any +changes caused by the update due to triggers, rules, default values, etc. + +Like insert, the dictionary is optional and updates will be performed +on the fields in the keywords. There must be an OID or primary key +either in the dictionary where the OID must be munged, or in the keywords +where it can be simply the string ``'oid'``. + +query -- execute a SQL command string +------------------------------------- + +.. method:: DB.query(command, [arg1, [arg2, ...]]) + + Execute a SQL command string + + :param str command: SQL command + :param arg*: optional positional arguments + :returns: result values + :rtype: :class:`pgqueryobject`, None + :raises TypeError: bad argument type, or too many arguments + :raises TypeError: invalid connection + :raises ValueError: empty SQL query or lost connection + :raises pg.ProgrammingError: error in query + :raises pg.InternalError: error during query processing + +Similar to the :class:`pgobject` function with the same name, except that +positional arguments can be passed either as a single list or tuple, or as +individual positional arguments. + +Example:: + + name = raw_input("Name? ") + phone = raw_input("Phone? ") + rows = db.query("update employees set phone=$2 where name=$1", + (name, phone)).getresult()[0][0] + # or + rows = db.query("update employees set phone=$2 where name=$1", + name, phone).getresult()[0][0] + +clear -- clear row values in memory +----------------------------------- + +.. method:: DB.clear(table, [d]) + + Clear row values in memory + + :param str table: name of table + :param dict d: optional dictionary of values + :returns: an empty row + :rtype: dict + +This method clears all the attributes to values determined by the types. +Numeric types are set to 0, Booleans are set to ``'f'``, and everything +else is set to the empty string. If the optional dictionary is present, +it is used as the row and any entries matching attribute names are cleared +with everything else left unchanged. + +If the dictionary is not supplied a new one is created. + +delete -- delete a row from a database table +-------------------------------------------- + +.. method:: DB.delete(table, [d], [key=val, ...]) + + Delete a row from a database table + + :param str table: name of table + :param dict d: optional dictionary of values + :rtype: None + :raises pg.ProgrammingError: table has no primary key, + row is still referenced or missing privilege + +This method deletes the row from a table. It deletes based on the OID value +as munged by :meth:`DB.get` or passed as keyword, or on the primary key of +the table. The return value is the number of deleted rows (i.e. 0 if the +row did not exist and 1 if the row was deleted). + +truncate -- quickly empty database tables +----------------------------------------- + +.. 
method:: DB.truncate(table, [restart], [cascade], [only])
+
+    Empty a table or set of tables
+
+    :param table: the name of the table(s)
+    :type table: str, list or set
+    :param bool restart: whether table sequences should be restarted
+    :param bool cascade: whether referenced tables should also be truncated
+    :param only: whether only parent tables should be truncated
+    :type only: bool or list
+
+This method quickly removes all rows from the given table or set
+of tables. It has the same effect as an unqualified DELETE on each
+table, but since it does not actually scan the tables it is faster.
+Furthermore, it reclaims disk space immediately, rather than requiring
+a subsequent VACUUM operation. This is most useful on large tables.
+
+If *restart* is set to `True`, sequences owned by columns of the truncated
+table(s) are automatically restarted. If *cascade* is set to `True`, it
+also truncates all tables that have foreign-key references to any of
+the named tables. If the parameter *only* is not set to `True`, all the
+descendant tables (if any) will also be truncated. Optionally, a ``*``
+can be specified after the table name to explicitly indicate that
+descendant tables are included. If the parameter *table* is a list,
+the parameter *only* can also be a list of corresponding boolean values.
+
+.. versionadded:: 4.2
+
+escape_literal/identifier/string/bytea -- escape for SQL
+--------------------------------------------------------
+
+The following methods escape text or binary strings so that they can be
+inserted directly into an SQL command. Except for :meth:`DB.escape_bytea`,
+you don't need to call these methods for the strings passed as parameters
+to :meth:`DB.query`. You also don't need to call any of these methods
+when storing data using :meth:`DB.insert` and similar.
+
+.. method:: DB.escape_literal(string)
+
+    Escape a string for use within SQL as a literal constant
+
+    :param str string: the string that is to be escaped
+    :returns: the escaped string
+    :rtype: str
+
+This method escapes a string for use within an SQL command. This is useful
+when inserting data values as literal constants in SQL commands. Certain
+characters (such as quotes and backslashes) must be escaped to prevent them
+from being interpreted specially by the SQL parser.
+
+.. versionadded:: 4.1
+
+.. method:: DB.escape_identifier(string)
+
+    Escape a string for use within SQL as an identifier
+
+    :param str string: the string that is to be escaped
+    :returns: the escaped string
+    :rtype: str
+
+This method escapes a string for use as an SQL identifier, such as a table,
+column, or function name. This is useful when a user-supplied identifier
+might contain special characters that would otherwise not be interpreted
+as part of the identifier by the SQL parser, or when the identifier might
+contain upper case characters whose case should be preserved.
+
+.. versionadded:: 4.1
+
+.. method:: DB.escape_bytea(datastring)
+
+    Escape binary data for use within SQL as type ``bytea``
+
+    :param str datastring: string containing the binary data that is to be escaped
+    :returns: the escaped string
+    :rtype: str
+
+Similar to the module function :func:`pg.escape_bytea` with the same name,
+but the behavior of this method is adjusted depending on the connection
+properties (such as character encoding).
+
+unescape_bytea -- unescape data retrieved from the database
+-----------------------------------------------------------
+
+..
method:: DB.unescape_bytea(string)
+
+    Unescape ``bytea`` data that has been retrieved as text
+
+    :param str string: the ``bytea`` data string that has been retrieved as text
+    :returns: byte string containing the binary data
+    :rtype: str
+
+See the module function :func:`pg.unescape_bytea` with the same name.
+
+use_regtypes -- determine use of regular type names
+---------------------------------------------------
+
+.. method:: DB.use_regtypes([regtypes])
+
+    Determine whether regular type names shall be used
+
+    :param bool regtypes: if passed, set whether regular type names shall be used
+    :returns: whether regular type names are used
+
+The :meth:`DB.get_attnames` method can return either simplified "classic"
+type names (the default) or more specific "regular" type names. Which kind
+of type names is used can be changed by calling :meth:`DB.use_regtypes`.
+If you pass a boolean, it sets whether regular type names shall be used.
+Through its return value, the method can also be used to check whether
+regular type names are currently used.
+
+.. versionadded:: 4.1
+
+notification_handler -- create a notification handler
+------------------------------------------------------
+
+.. class:: DB.notification_handler(event, callback, [arg_dict], [timeout], [stop_event])
+
+    Create a notification handler instance
+
+    :param str event: the name of an event to listen for
+    :param callback: a callback function
+    :param dict arg_dict: an optional dictionary for passing arguments
+    :param timeout: the time-out when waiting for notifications
+    :type timeout: int, float or None
+    :param str stop_event: an optional different name to be used as stop event
+
+This method creates a :class:`pg.NotificationHandler` object using the
+:class:`DB` connection as explained under :doc:`notification`.
+
+.. versionadded:: 4.1.1
diff --git a/docs/contents/pg/index.rst b/docs/contents/pg/index.rst
new file mode 100644
index 00000000..a394ecaa
--- /dev/null
+++ b/docs/contents/pg/index.rst
@@ -0,0 +1,17 @@
+--------------------------------------------
+:mod:`pg` --- The Classic PyGreSQL Interface
+--------------------------------------------
+
+.. module:: pg
+
+Contents
+========
+
+.. toctree::
+    introduction
+    module
+    connection
+    db_wrapper
+    query
+    large_objects
+    notification
diff --git a/docs/contents/pg/introduction.rst b/docs/contents/pg/introduction.rst
new file mode 100644
index 00000000..a8719faa
--- /dev/null
+++ b/docs/contents/pg/introduction.rst
@@ -0,0 +1,24 @@
+Introduction
+============
+
+You may either choose to use the "classic" PyGreSQL interface provided by
+the :mod:`pg` module or else the newer DB-API 2.0 compliant interface
+provided by the :mod:`pgdb` module.
+
+The following part of the documentation covers only the older :mod:`pg` API.
+
+The :mod:`pg` module handles three types of objects,
+
+- the :class:`pgobject`, which handles the connection
+  and all the requests to the database,
+- the :class:`pglarge` object, which handles
+  all the accesses to PostgreSQL large objects,
+- the :class:`pgqueryobject` that handles query results
+
+and it provides a convenient wrapper class :class:`DB`
+for the :class:`pgobject`.
+
+.. seealso::
+
+    If you want to see a simple example of the use of some of these functions,
+    see the :doc:`../examples` page.
diff --git a/docs/contents/pg/large_objects.rst b/docs/contents/pg/large_objects.rst
new file mode 100644
index 00000000..2150606f
--- /dev/null
+++ b/docs/contents/pg/large_objects.rst
@@ -0,0 +1,182 @@
+pglarge -- Large Objects
+========================
+
+.. py:currentmodule:: pg
+
+.. class:: pglarge
+
+Objects that are instances of the class :class:`pglarge` are used to handle
+all the requests concerning a PostgreSQL large object. These objects embed
+and hide all the "recurrent" variables (object OID and connection), exactly
+in the same way :class:`pgobject` instances do, thus only keeping significant
+parameters in function calls. The :class:`pglarge` object keeps a reference
+to the :class:`pgobject` used for its creation, sending requests through it
+with its parameters. Any modification other than dereferencing the
+:class:`pgobject` will thus affect the :class:`pglarge` object. Dereferencing
+the initial :class:`pgobject` is not a problem since Python won't deallocate
+it before the :class:`pglarge` object dereferences it. All functions return
+a generic error message on call error, whatever the exact error was. The
+:attr:`error` attribute of the object allows you to get the exact error message.
+
+See also the PostgreSQL programmer's guide for more information about the
+large object interface.
+
+open -- open a large object
+---------------------------
+
+.. method:: pglarge.open(mode)
+
+    Open a large object
+
+    :param int mode: open mode definition
+    :rtype: None
+    :raises TypeError: invalid connection, bad parameter type, or too many parameters
+    :raises IOError: already opened object, or open error
+
+This method opens a large object for reading/writing, in the same way as the
+Unix open() function. The mode value can be obtained by OR-ing the constants
+defined in the :mod:`pg` module (:const:`INV_READ`, :const:`INV_WRITE`).
+
+close -- close a large object
+-----------------------------
+
+.. method:: pglarge.close()
+
+    Close a large object
+
+    :rtype: None
+    :raises TypeError: invalid connection
+    :raises TypeError: too many parameters
+    :raises IOError: object is not opened, or close error
+
+This method closes a previously opened large object, in the same way as
+the Unix close() function.
+
+read, write, tell, seek, unlink -- file-like large object handling
+------------------------------------------------------------------
+
+.. method:: pglarge.read(size)
+
+    Read data from large object
+
+    :param int size: maximal size of the buffer to be read
+    :returns: the read buffer
+    :rtype: str
+    :raises TypeError: invalid connection, invalid object,
+        bad parameter type, or too many parameters
+    :raises ValueError: if `size` is negative
+    :raises IOError: object is not opened, or read error
+
+This function allows you to read data from a large object, starting at the
+current position.
+
+.. method:: pglarge.write(string)
+
+    Write data to large object
+
+    :param str string: string buffer to be written
+    :rtype: None
+    :raises TypeError: invalid connection, bad parameter type, or too many parameters
+    :raises IOError: object is not opened, or write error
+
+This function allows you to write data to a large object, starting at the
+current position.
+
+..
method:: pglarge.seek(offset, whence)
+
+    Change current position in large object
+
+    :param int offset: position offset
+    :param int whence: positional parameter
+    :returns: new position in object
+    :rtype: int
+    :raises TypeError: invalid connection or invalid object,
+        bad parameter type, or too many parameters
+    :raises IOError: object is not opened, or seek error
+
+This method allows you to move the position cursor in the large object.
+The valid values for the whence parameter are defined as constants in the
+:mod:`pg` module (:const:`SEEK_SET`, :const:`SEEK_CUR`, :const:`SEEK_END`).
+
+.. method:: pglarge.tell()
+
+    Return current position in large object
+
+    :returns: current position in large object
+    :rtype: int
+    :raises TypeError: invalid connection or invalid object
+    :raises TypeError: too many parameters
+    :raises IOError: object is not opened, or seek error
+
+This method allows you to get the current position in the large object.
+
+.. method:: pglarge.unlink()
+
+    Delete large object
+
+    :rtype: None
+    :raises TypeError: invalid connection or invalid object
+    :raises TypeError: too many parameters
+    :raises IOError: object is not closed, or unlink error
+
+This method unlinks (deletes) the PostgreSQL large object.
+
+size -- get the large object size
+---------------------------------
+
+.. method:: pglarge.size()
+
+    Return the large object size
+
+    :returns: the large object size
+    :rtype: int
+    :raises TypeError: invalid connection or invalid object
+    :raises TypeError: too many parameters
+    :raises IOError: object is not opened, or seek/tell error
+
+This (composite) method allows you to get the size of a large object. It was
+implemented because this function is very useful for a web interfaced
+database. Currently, the large object needs to be opened first.
+
+export -- save a large object to a file
+---------------------------------------
+
+.. method:: pglarge.export(name)
+
+    Export a large object to a file
+
+    :param str name: file to be created
+    :rtype: None
+    :raises TypeError: invalid connection or invalid object,
+        bad parameter type, or too many parameters
+    :raises IOError: object is not closed, or export error
+
+This method allows you to dump the content of a large object in a very simple
+way. The exported file is created on the host of the program, not the
+server host.
+
+Object attributes
+-----------------
+:class:`pglarge` objects define a read-only set of attributes that allow
+you to get some information about them. These attributes are:
+
+.. attribute:: pglarge.oid
+
+    the OID associated with the object (int)
+
+.. attribute:: pglarge.pgcnx
+
+    the :class:`pgobject` associated with the object
+
+.. attribute:: pglarge.error
+
+    the last warning/error message of the connection
+
+.. warning::
+
+    In multi-threaded environments, :attr:`pglarge.error` may be modified by
+    another thread using the same :class:`pgobject`. Remember that these
+    objects are shared, not duplicated; you should provide some locking if
+    you want to check this. The :attr:`pglarge.oid` attribute is very
+    interesting, because it allows you to reuse the OID later, creating the
+    :class:`pglarge` object with a :meth:`pgobject.getlo` method call.
diff --git a/docs/contents/pg/module.rst b/docs/contents/pg/module.rst
new file mode 100644
index 00000000..564e9adc
--- /dev/null
+++ b/docs/contents/pg/module.rst
@@ -0,0 +1,484 @@
+Module functions and constants
+==============================
+
+..
py:currentmodule:: pg
+
+The :mod:`pg` module defines a few functions that allow you to connect
+to a database and to define "default variables" that override
+the environment variables used by PostgreSQL.
+
+These "default variables" were designed to allow you to handle general
+connection parameters without heavy code in your programs. You can prompt the
+user for a value, put it in the default variable, and forget it, without
+having to modify your environment. The support for default variables can be
+disabled by setting the ``-DNO_DEF_VAR`` option in the Python setup file.
+Functions relating to the default variables are marked with the tag [DV].
+
+All variables are set to ``None`` at module initialization, specifying that
+standard environment variables should be used.
+
+connect -- Open a PostgreSQL connection
+---------------------------------------
+
+.. function:: connect([dbname], [host], [port], [opt], [tty], [user], [passwd])
+
+    Open a :mod:`pg` connection
+
+    :param dbname: name of connected database (*None* = :data:`defbase`)
+    :type dbname: str or None
+    :param host: name of the server host (*None* = :data:`defhost`)
+    :type host: str or None
+    :param port: port used by the database server (-1 = :data:`defport`)
+    :type port: int
+    :param opt: connection options (*None* = :data:`defopt`)
+    :type opt: str or None
+    :param tty: debug terminal (*None* = :data:`deftty`)
+    :type tty: str or None
+    :param user: PostgreSQL user (*None* = :data:`defuser`)
+    :type user: str or None
+    :param passwd: password for user (*None* = :data:`defpasswd`)
+    :type passwd: str or None
+    :returns: If successful, the :class:`pgobject` handling the connection
+    :rtype: :class:`pgobject`
+    :raises TypeError: bad argument type, or too many arguments
+    :raises SyntaxError: duplicate argument definition
+    :raises pg.InternalError: some error occurred during pg connection definition
+    :raises Exception: (all exceptions relative to object allocation)
+
+This function opens a connection to a specified database on a given
+PostgreSQL server. You can use keywords here, as described in the
+Python tutorial. The names of the keywords are the names of the
+parameters given in the syntax line. For a precise description
+of the parameters, please refer to the PostgreSQL user manual.
+
+Example::
+
+    import pg
+
+    con1 = pg.connect('testdb', 'myhost', 5432, None, None, 'bob', None)
+    con2 = pg.connect(dbname='testdb', host='localhost', user='bob')
+
+get/set_defhost -- default server host [DV]
+-------------------------------------------
+
+.. function:: get_defhost()
+
+    Get the default host
+
+    :returns: the current default host specification
+    :rtype: str or None
+    :raises TypeError: too many arguments
+
+This method returns the current default host specification,
+or ``None`` if the environment variables should be used.
+Environment variables won't be looked up.
+
+.. function:: set_defhost(host)
+
+    Set the default host
+
+    :param host: the new default host specification
+    :type host: str or None
+    :returns: the previous default host specification
+    :rtype: str or None
+    :raises TypeError: bad argument type, or too many arguments
+
+This method sets the default host value for new connections.
+If ``None`` is supplied as parameter, environment variables will
+be used in future connections. It returns the previous setting
+for the default host.
+
+get/set_defport -- default server port [DV]
+-------------------------------------------
+
+..
function:: get_defport() + + Get the default port + + :returns: the current default port specification + :rtype: int + :raises TypeError: too many arguments + +This method returns the current default port specification, +or ``None`` if the environment variables should be used. +Environment variables won't be looked up. + +.. function:: set_defport(port) + + Set the default port + + :param port: the new default port + :type port: int + :returns: previous default port specification + :rtype: int or None + +This methods sets the default port value for new connections. If -1 is +supplied as parameter, environment variables will be used in future +connections. It returns the previous setting for default port. + +get/set_defopt -- default connection options [DV] +-------------------------------------------------- + +.. function:: get_defopt() + + Get the default connection options + + :returns: the current default options specification + :rtype: str or None + :raises TypeError: too many arguments + +This method returns the current default connection options specification, +or ``None`` if the environment variables should be used. Environment variables +won't be looked up. + +.. function:: set_defopt(options) + + Set the default connection options + + :param options: the new default connection options + :type options: str or None + :returns: previous default options specification + :rtype: str or None + :raises TypeError: bad argument type, or too many arguments + +This methods sets the default connection options value for new connections. +If ``None`` is supplied as parameter, environment variables will be used in +future connections. It returns the previous setting for default options. + +get/set_deftty -- default debug tty [DV] +---------------------------------------- + +.. function:: get_deftty() + + Get the default debug terminal + + :returns: the current default debug terminal specification + :rtype: str or None + :raises TypeError: too many arguments + +This method returns the current default debug terminal specification, or +``None`` if the environment variables should be used. Environment variables +won't be looked up. Note that this is ignored in newer PostgreSQL versions. + +.. function:: set_deftty(terminal) + + Set the default debug terminal + + :param terminal: the new default debug terminal + :type terminal: str or None + :returns: the previous default debug terminal specification + :rtype: str or None + :raises TypeError: bad argument type, or too many arguments + +This methods sets the default debug terminal value for new connections. +If ``None`` is supplied as parameter, environment variables will be used +in future connections. It returns the previous setting for default terminal. +Note that this is ignored in newer PostgreSQL versions. + +get/set_defbase -- default database name [DV] +--------------------------------------------- + +.. function:: get_defbase() + + Get the default database name + + :returns: the current default database name specification + :rtype: str or None + :raises TypeError: too many arguments + +This method returns the current default database name specification, or +``None`` if the environment variables should be used. Environment variables +won't be looked up. + +.. 
function:: set_defbase(base)
+
+    Set the default database name
+
+    :param base: the new default base name
+    :type base: str or None
+    :returns: the previous default database name specification
+    :rtype: str or None
+    :raises TypeError: bad argument type, or too many arguments
+
+This method sets the default database name value for new connections. If
+``None`` is supplied as parameter, environment variables will be used in
+future connections. It returns the previous setting for the default
+database name.
+
+get/set_defuser -- default database user [DV]
+---------------------------------------------
+
+.. function:: get_defuser()
+
+    Get the default database user
+
+    :returns: the current default database user specification
+    :rtype: str or None
+    :raises TypeError: too many arguments
+
+This method returns the current default database user specification, or
+``None`` if the environment variables should be used. Environment variables
+won't be looked up.
+
+.. function:: set_defuser(user)
+
+    Set the default database user
+
+    :param user: the new default database user
+    :type user: str or None
+    :returns: the previous default database user specification
+    :rtype: str or None
+    :raises TypeError: bad argument type, or too many arguments
+
+This method sets the default database user name for new connections. If
+``None`` is supplied as parameter, environment variables will be used in
+future connections. It returns the previous setting for the default user.
+
+get/set_defpasswd -- default database password [DV]
+---------------------------------------------------
+
+.. function:: get_defpasswd()
+
+    Get the default database password
+
+    :returns: the current default database password specification
+    :rtype: str or None
+    :raises TypeError: too many arguments
+
+This method returns the current default database password specification, or
+``None`` if the environment variables should be used. Environment variables
+won't be looked up.
+
+.. function:: set_defpasswd(passwd)
+
+    Set the default database password
+
+    :param passwd: the new default database password
+    :type passwd: str or None
+    :returns: the previous default database password specification
+    :rtype: str or None
+    :raises TypeError: bad argument type, or too many arguments
+
+This method sets the default database password for new connections. If
+``None`` is supplied as parameter, environment variables will be used in
+future connections. It returns the previous setting for the default password.
+
+escape_string -- escape a string for use within SQL
+---------------------------------------------------
+
+.. function:: escape_string(string)
+
+    Escape a string for use within SQL
+
+    :param str string: the string that is to be escaped
+    :returns: the escaped string
+    :rtype: str
+    :raises TypeError: bad argument type, or too many arguments
+
+This function escapes a string for use within an SQL command.
+This is useful when inserting data values as literal constants
+in SQL commands. Certain characters (such as quotes and backslashes)
+must be escaped to prevent them from being interpreted specially
+by the SQL parser. :func:`escape_string` performs this operation.
+Note that there is also a :class:`pgobject` method with the same name
+which takes connection properties into account.
+
+.. note::
+
+    It is especially important to do proper escaping when
+    handling strings that were received from an untrustworthy source.
+    Otherwise there is a security risk: you are vulnerable to "SQL injection"
+    attacks wherein unwanted SQL commands are fed to your database.
+
+Example::
+
+    name = raw_input("Name? ")
+    phone = con.query("select phone from employees where name='%s'"
+        % escape_string(name)).getresult()
+
+escape_bytea -- escape binary data for use within SQL
+-----------------------------------------------------
+
+.. function:: escape_bytea(datastring)
+
+    Escape binary data for use within SQL as type ``bytea``
+
+    :param str datastring: string containing the binary data that is to be escaped
+    :returns: the escaped string
+    :rtype: str
+    :raises TypeError: bad argument type, or too many arguments
+
+Escapes binary data for use within an SQL command with the type ``bytea``.
+As with :func:`escape_string`, this is only used when inserting data directly
+into an SQL command string.
+Note that there is also a :class:`pgobject` method with the same name
+which takes connection properties into account.
+
+Example::
+
+    picture = open('garfield.gif', 'rb').read()
+    con.query("update pictures set img='%s' where name='Garfield'"
+        % escape_bytea(picture))
+
+unescape_bytea -- unescape data that has been retrieved as text
+---------------------------------------------------------------
+
+.. function:: unescape_bytea(string)
+
+    Unescape ``bytea`` data that has been retrieved as text
+
+    :param str string: the ``bytea`` data string that has been retrieved as text
+    :returns: byte string containing the binary data
+    :rtype: str
+    :raises TypeError: bad argument type, or too many arguments
+
+Converts an escaped string representation of binary data into binary
+data -- the reverse of :func:`escape_bytea`. This is needed when retrieving
+``bytea`` data with one of the :meth:`pgqueryobject.getresult`,
+:meth:`pgqueryobject.dictresult` or :meth:`pgqueryobject.namedresult` methods.
+
+Example::
+
+    picture = unescape_bytea(con.query(
+        "select img from pictures where name='Garfield'").getresult()[0][0])
+    open('garfield.gif', 'wb').write(picture)
+
+get/set_decimal -- decimal type to be used for numeric values
+-------------------------------------------------------------
+
+.. function:: get_decimal()
+
+    Get the decimal type to be used for numeric values
+
+    :returns: the Python class used for PostgreSQL numeric values
+    :rtype: class
+
+This function returns the Python class that is used by PyGreSQL to hold
+PostgreSQL numeric values. The default class is :class:`decimal.Decimal`
+if available, otherwise the :class:`float` type is used.
+
+.. function:: set_decimal(cls)
+
+    Set a decimal type to be used for numeric values
+
+    :param class cls: the Python class to be used for PostgreSQL numeric values
+
+This function can be used to specify the Python class that shall
+be used by PyGreSQL to hold PostgreSQL numeric values.
+The default class is :class:`decimal.Decimal` if available,
+otherwise the :class:`float` type is used.
+
+get/set_decimal_point -- decimal mark used for monetary values
+--------------------------------------------------------------
+
+.. function:: get_decimal_point()
+
+    Get the decimal mark used for monetary values
+
+    :returns: string with one character representing the decimal mark
+    :rtype: str
+
+This function returns the decimal mark used by PyGreSQL to interpret
+PostgreSQL monetary values when converting them to decimal numbers.
+The default setting is ``'.'`` as a decimal point. This setting is not
+adapted automatically to the locale used by PostgreSQL, but you can
+use ``set_decimal_point()`` to set a different decimal mark manually.
+A return value of ``None`` means monetary values are not interpreted as
+decimal numbers, but returned as strings including the formatting and currency.
+
+.. versionadded:: 4.1.1
+
+.. function:: set_decimal_point(string)
+
+    Specify which decimal mark is used for interpreting monetary values
+
+    :param str string: string with one character representing the decimal mark
+
+This function can be used to specify the decimal mark used by PyGreSQL
+to interpret PostgreSQL monetary values. The default value is '.' as
+a decimal point. This value is not adapted automatically to the locale
+used by PostgreSQL, so if you are dealing with a database set to a
+locale that uses a ``','`` instead of ``'.'`` as the decimal point,
+then you need to call ``set_decimal_point(',')`` to have PyGreSQL interpret
+monetary values correctly. If you don't want money values to be converted
+to decimal numbers, then you can call ``set_decimal_point(None)``, which will
+cause PyGreSQL to return monetary values as strings including their
+formatting and currency.
+
+.. versionadded:: 4.1.1
+
+get/set_bool -- whether boolean values are returned as bool objects
+-------------------------------------------------------------------
+
+.. function:: get_bool()
+
+    Check whether boolean values are returned as bool objects
+
+    :returns: whether or not bool objects will be returned
+    :rtype: bool
+
+This function checks whether PyGreSQL returns PostgreSQL boolean
+values converted to Python bool objects, or as ``'f'`` and ``'t'``
+strings which are the values used internally by PostgreSQL. By default,
+conversion to bool objects is not activated, but you can enable
+this with the ``set_bool()`` function.
+
+.. versionadded:: 4.2
+
+.. function:: set_bool(on)
+
+    Set whether boolean values are returned as bool objects
+
+    :param on: whether or not bool objects shall be returned
+
+This function can be used to specify whether PyGreSQL shall return
+PostgreSQL boolean values converted to Python bool objects, or as
+``'f'`` and ``'t'`` strings which are the values used internally by PostgreSQL.
+By default, conversion to bool objects is not activated, but you can
+enable this by calling ``set_bool(True)``.
+
+.. versionadded:: 4.2
+
+get/set_namedresult -- conversion to named tuples
+-------------------------------------------------
+
+.. function:: get_namedresult()
+
+    Get the function that converts to named tuples
+
+This returns the function used by PyGreSQL to construct the result of the
+:meth:`pgqueryobject.namedresult` method.
+
+.. versionadded:: 4.1
+
+.. function:: set_namedresult(func)
+
+    Set a function that will convert to named tuples
+
+    :param func: the function to be used to convert results to named tuples
+
+You can use this if you want to create different kinds of named tuples returned
+by the :meth:`pgqueryobject.namedresult` method. If you set this function to
+*None*, then :meth:`pgqueryobject.namedresult` will become equivalent to
+:meth:`pgqueryobject.getresult`.
+
+.. versionadded:: 4.1
+
+
+Module constants
+----------------
+Some constants are defined in the module dictionary.
+They are intended to be used as parameters for method calls.
+You should refer to the libpq description in the PostgreSQL user manual
+for more information about them. These constants are:
+
+.. data:: version, __version__
+
+    constants that give the current version
+
+.. data:: INV_READ, INV_WRITE
+
+    large objects access modes,
+    used by :meth:`pgobject.locreate` and :meth:`pglarge.open`
+
+..
data:: SEEK_SET, SEEK_CUR, SEEK_END
+
+    positional flags, used by :meth:`pglarge.seek`
diff --git a/docs/contents/pg/notification.rst b/docs/contents/pg/notification.rst
new file mode 100644
index 00000000..a37df668
--- /dev/null
+++ b/docs/contents/pg/notification.rst
@@ -0,0 +1,119 @@
+The Notification Handler
+========================
+
+.. py:currentmodule:: pg
+
+PyGreSQL comes with a client-side asynchronous notification handler that
+was based on the ``pgnotify`` module written by Ng Pheng Siong.
+
+.. versionadded:: 4.1.1
+
+Instantiating the notification handler
+--------------------------------------
+
+.. class:: NotificationHandler(db, event, callback, [arg_dict], [timeout], [stop_event])
+
+    Create an instance of the notification handler
+
+    :param db: the database connection
+    :type db: :class:`Connection`
+    :param str event: the name of an event to listen for
+    :param callback: a callback function
+    :param dict arg_dict: an optional dictionary for passing arguments
+    :param timeout: the time-out when waiting for notifications
+    :type timeout: int, float or None
+    :param str stop_event: an optional different name to be used as stop event
+
+You can also create an instance of the NotificationHandler using the
+:class:`DB.notification_handler` method. In this case you don't need to
+pass a database connection because the :class:`DB` connection itself
+will be used as the database connection for the notification handler.
+
+You must always pass the name of an *event* (notification channel) to listen
+for and a *callback* function.
+
+You can also specify a dictionary *arg_dict* that will be passed as the
+single argument to the callback function, and a *timeout* value in seconds
+(a floating point number denotes fractions of seconds). If it is absent
+or *None*, the caller will never time out. If the time-out is reached,
+the callback function will be called with a single argument that is *None*.
+If you set the *timeout* to ``0``, the handler will poll notifications
+synchronously and return.
+
+You can specify the name of the event that will be used to signal the handler
+to stop listening as *stop_event*. By default, it will be the event name
+prefixed with ``'stop_'``.
+
+All of the parameters will also be available as attributes of the
+created notification handler object.
+
+Invoking the notification handler
+---------------------------------
+
+To invoke the notification handler, just call the instance without passing
+any parameters.
+
+The handler is a loop that listens for notifications on the event and stop
+event channels. When either of these notifications is received, its
+associated *pid*, *event* and *extra* (the payload passed with the
+notification) are inserted into its *arg_dict* dictionary and the callback
+is invoked with this dictionary as a single argument. When the handler
+receives a stop event, it stops listening to both events and returns.
+
+In the special case that the timeout of the handler has been set to ``0``,
+the handler will poll all events synchronously and return. It will keep
+listening until it receives a stop event.
+
+.. warning::
+
+    If you run this loop in another thread, don't use the same database
+    connection for database operations in the main thread.
+
+Sending notifications
+---------------------
+
+You can send notifications by either running ``NOTIFY`` commands on the
+database directly, or using the following method:
+
+..
method:: NotificationHandler.notify([db], [stop], [payload]) + + Generate a notification + + :param int db: the database connection for sending the notification + :type db: :class:`Connection` + :param bool stop: whether to produce a normal event or a stop event + :param str payload: an optional payload to be sent with the notification + +This method sends a notification event together with an optional *payload*. +If you set the *stop* flag, a stop notification will be sent instead of +a normal notification. This will cause the handler to stop listening. + +.. warning:: + + If the notification handler is running in another thread, you must pass + a different database connection since PyGreSQL database connections are + not thread-safe. + +Auxiliary methods +----------------- + +.. method:: NotificationHandler.listen() + + Start listening for the event and the stop event + +This method is called implicitly when the handler is invoked. + +.. method:: NotificationHandler.unlisten() + + Stop listening for the event and the stop event + +This method is called implicitly when the handler receives a stop event +or when it is closed or deleted. + +.. method:: NotificationHandler.close() + + Stop listening and close the database connection + +You can call this method instead of :meth:`NotificationHandler.unlisten` +if you want to close not only the handler, but also the database connection +it was created with. \ No newline at end of file diff --git a/docs/contents/pg/query.rst b/docs/contents/pg/query.rst new file mode 100644 index 00000000..10423a29 --- /dev/null +++ b/docs/contents/pg/query.rst @@ -0,0 +1,120 @@ +pgqueryobject methods +===================== + +.. py:currentmodule:: pg + +.. class:: pgqueryobject + +The :class:`pgqueryobject` returned by :meth:`pgobject.query` and +:meth:`DB.query` provides the following methods for accessing +the results of the query: + +getresult -- get query values as list of tuples +----------------------------------------------- + +.. method:: pgqueryobject.getresult() + + Get query values as list of tuples + + :returns: result values as a list of tuples + :rtype: list + :raises TypeError: too many (any) parameters + :raises MemoryError: internal memory error + +This method returns the list of the values returned by the query. +More information about this result may be accessed using +:meth:`pgqueryobject.listfields`, :meth:`pgqueryobject.fieldname` +and :meth:`pgqueryobject.fieldnum` methods. + +dictresult -- get query values as list of dictionaries +------------------------------------------------------ + +.. method:: pgqueryobject.dictresult() + + Get query values as list of dictionaries + + :returns: result values as a list of dictionaries + :rtype: list + :raises TypeError: too many (any) parameters + :raises MemoryError: internal memory error + +This method returns the list of the values returned by the query +with each tuple returned as a dictionary with the field names +used as the dictionary index. + +namedresult -- get query values as list of named tuples +------------------------------------------------------- + +.. method:: pgqueryobject.namedresult() + + Get query values as list of named tuples + + :returns: result values as a list of named tuples + :rtype: list + :raises TypeError: too many (any) parameters + :raises TypeError: named tuples not supported + :raises MemoryError: internal memory error + +This method returns the list of the values returned by the query +with each row returned as a named tuple with proper field names. + +.. 
versionadded:: 4.1 + +listfields -- list fields names of previous query result +-------------------------------------------------------- + +.. method:: pgqueryobject.listfields() + + List fields names of previous query result + + :returns: field names + :rtype: list + :raises TypeError: too many parameters + +This method returns the list of names of the fields defined for the +query result. The fields are in the same order as the result values. + +fieldname, fieldnum -- field name/number conversion +--------------------------------------------------- + +.. method:: pgqueryobject.fieldname(num) + + Get field name from its number + + :param int num: field number + :returns: field name + :rtype: str + :raises TypeError: invalid connection, bad parameter type, or too many parameters + :raises ValueError: invalid field number + +This method allows to find a field name from its rank number. It can be +useful for displaying a result. The fields are in the same order as the +result values. + +.. method:: pgqueryobject.fieldnum(name) + + Get field number from its name + + :param str name: field name + :returns: field number + :rtype: int + :raises TypeError: invalid connection, bad parameter type, or too many parameters + :raises ValueError: unknown field name + +This method returns a field number from its name. It can be used to +build a function that converts result list strings to their correct +type, using a hardcoded table definition. The number returned is the +field rank in the result values list. + +ntuples -- return number of tuples in query object +-------------------------------------------------- + +.. method:: pgqueryobject.ntuples() + + Return number of tuples in query object + + :returns: number of tuples in :class:`pgqueryobject` + :rtype: int + :raises TypeError: Too many arguments. + +This method returns the number of tuples found in a query. diff --git a/docs/contents/pgdb/connection.rst b/docs/contents/pgdb/connection.rst new file mode 100644 index 00000000..de0bdafa --- /dev/null +++ b/docs/contents/pgdb/connection.rst @@ -0,0 +1,63 @@ +pgdbCnx -- The connection object +================================ + +.. py:currentmodule:: pgdb + +.. class:: pgdbCnx + +These connection objects respond to the following methods. + +Note that ``pgdb.pgdbCnx`` objects also implement the context manager protocol, +i.e. you can use them in a ``with`` statement. + +close -- close the connection +----------------------------- + +.. method:: pgdbCnx.close() + + Close the connection now (rather than whenever it is deleted) + + :rtype: None + +The connection will be unusable from this point forward; an :exc:`Error` +(or subclass) exception will be raised if any operation is attempted with +the connection. The same applies to all cursor objects trying to use the +connection. Note that closing a connection without committing the changes +first will cause an implicit rollback to be performed. + +commit -- commit the connection +------------------------------- + +.. method:: pgdbCnx.commit() + + Commit any pending transaction to the database + + :rtype: None + +Note that connections always use a transaction, there is no auto-commit. + +rollback -- roll back the connection +------------------------------------ + +.. method:: pgdbCnx.rollback() + + Roll back any pending transaction to the database + + :rtype: None + +This method causes the database to roll back to the start of any pending +transaction. 
Closing a connection without committing the changes first will
+cause an implicit rollback to be performed.
+
+cursor -- return a new cursor object
+------------------------------------
+
+.. method:: pgdbCnx.cursor()
+
+    Return a new cursor object using the connection
+
+    :returns: a cursor object
+    :rtype: :class:`pgdbCursor`
+
+This method returns a new :class:`pgdbCursor` object that can be used to
+operate on the database in the way described in the next section.
diff --git a/docs/contents/pgdb/cursor.rst b/docs/contents/pgdb/cursor.rst
new file mode 100644
index 00000000..e775e493
--- /dev/null
+++ b/docs/contents/pgdb/cursor.rst
@@ -0,0 +1,219 @@
+pgdbCursor -- The cursor object
+===============================
+
+.. py:currentmodule:: pgdb
+
+.. class:: pgdbCursor
+
+These objects represent a database cursor, which is used to manage the context
+of a fetch operation. Cursors created from the same connection are not
+isolated, i.e., any changes done to the database by a cursor are immediately
+visible to the other cursors. Cursors created from different connections may
+or may not be isolated, depending on the level of transaction isolation.
+The default PostgreSQL transaction isolation level is "read committed".
+
+Cursor objects respond to the following methods and attributes.
+
+Note that ``pgdbCursor`` objects also implement both the iterator and the
+context manager protocol, i.e. you can iterate over them and you can use them
+in a ``with`` statement.
+
+description -- details regarding the result columns
+---------------------------------------------------
+
+.. attribute:: pgdbCursor.description
+
+    This read-only attribute is a sequence of 7-item tuples.
+
+    Each of these tuples contains information describing one result column:
+
+    - *name*
+    - *type_code*
+    - *display_size*
+    - *internal_size*
+    - *precision*
+    - *scale*
+    - *null_ok*
+
+    Note that *display_size*, *precision*, *scale* and *null_ok*
+    are not implemented.
+
+    This attribute will be ``None`` for operations that do not return rows
+    or if the cursor has not had an operation invoked via the
+    :meth:`pgdbCursor.execute` or :meth:`pgdbCursor.executemany` method yet.
+
+rowcount -- number of rows of the result
+----------------------------------------
+
+.. attribute:: pgdbCursor.rowcount
+
+    This read-only attribute specifies the number of rows that the last
+    :meth:`pgdbCursor.execute` or :meth:`pgdbCursor.executemany` call produced
+    (for DQL statements like SELECT) or affected (for DML statements like
+    UPDATE or INSERT). The attribute is -1 in case no such method call has
+    been performed on the cursor or the rowcount of the last operation
+    cannot be determined by the interface.
+
+close -- close the cursor
+-------------------------
+
+.. method:: pgdbCursor.close()
+
+    Close the cursor now (rather than whenever it is deleted)
+
+    :rtype: None
+
+The cursor will be unusable from this point forward; an :exc:`Error`
+(or subclass) exception will be raised if any operation is attempted
+with the cursor.
+
+execute -- execute a database operation
+---------------------------------------
+
+.. method:: pgdbCursor.execute(operation, [parameters])
+
+    Prepare and execute a database operation (query or command)
+
+    :param str operation: the database operation
+    :param parameters: a sequence or mapping of parameters
+    :returns: the cursor, so you can chain commands
+
+Parameters may be provided as sequence or mapping and will be bound to
+variables in the operation.
A reference to the operation will be retained by the cursor. If the same +operation object is passed in again, then the cursor can optimize its behavior. +This is most effective for algorithms where the same operation is used, +but different parameters are bound to it (many times). + +The parameters may also be specified as a list of tuples to e.g. insert multiple +rows in a single operation, but this kind of usage is deprecated: +:meth:`pgdbCursor.executemany` should be used instead. + +Note that in case this method raises a :exc:`DatabaseError`, you can get +information about the error condition that has occurred by introspecting +its :attr:`DatabaseError.sqlstate` attribute, which will be the ``SQLSTATE`` +error code associated with the error. Applications that need to know which +error condition has occurred should usually test the error code, rather than +looking at the textual error message. + +executemany -- execute many similar database operations +------------------------------------------------------- + +.. method:: pgdbCursor.executemany(operation, [seq_of_parameters]) + + Prepare and execute many similar database operations (queries or commands) + + :param str operation: the database operation + :param seq_of_parameters: a sequence of parameter tuples or mappings + :returns: the cursor, so you can chain commands + +Prepare a database operation (query or command) and then execute it against +all parameter tuples or mappings found in the sequence *seq_of_parameters*. + +Parameters are bound to the query using Python extended format codes, +e.g. ``" ... WHERE name=%(name)s"``. + +fetchone -- fetch next row of the query result +---------------------------------------------- + +.. method:: pgdbCursor.fetchone() + + Fetch the next row of a query result set + + :returns: the next row of the query result set + :rtype: list or None + +Fetch the next row of a query result set, returning a single list, +or ``None`` when no more data is available. + +An :exc:`Error` (or subclass) exception is raised if the previous call to +:meth:`pgdbCursor.execute` or :meth:`pgdbCursor.executemany` did not produce +any result set or no call was issued yet. + +fetchmany -- fetch next set of rows of the query result +------------------------------------------------------- + +.. method:: pgdbCursor.fetchmany([size=None], [keep=False]) + + Fetch the next set of rows of a query result + + :param size: the number of rows to be fetched + :type size: int or None + :param keep: if set to true, will keep the passed arraysize + :type keep: bool + :returns: the next set of rows of the query result + :rtype: list of lists + +Fetch the next set of rows of a query result, returning a list of lists. +An empty sequence is returned when no more rows are available. + +The number of rows to fetch per call is specified by the *size* parameter. +If it is not given, the cursor's :attr:`arraysize` determines the number of +rows to be fetched. If you set the *keep* parameter to True, this is kept as +the new :attr:`arraysize`. + +The method tries to fetch as many rows as indicated by the *size* parameter. +If this is not possible due to the specified number of rows not being +available, fewer rows may be returned. + +An :exc:`Error` (or subclass) exception is raised if the previous call to +:meth:`pgdbCursor.execute` or :meth:`pgdbCursor.executemany` did not produce +any result set or no call was issued yet.
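As a sketch of how this is typically used, the following loop processes a
larger result set in batches, given a cursor ``cur`` and an example
``fruits`` table::

    cur.execute("select * from fruits")
    while True:
        rows = cur.fetchmany(100)  # at most 100 rows per call
        if not rows:
            break
        for row in rows:
            print row  # each row is returned as a list of column values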
+ +Note there are performance considerations involved with the *size* parameter. +For optimal performance, it is usually best to use the :attr:`arraysize` +attribute. If the *size* parameter is used, then it is best for it to retain +the same value from one :meth:`pgdbCursor.fetchmany` call to the next. + +fetchall -- fetch all rows of the query result +---------------------------------------------- + +.. method:: pgdbCursor.fetchall() + + Fetch all (remaining) rows of a query result + + :returns: the set of all rows of the query result + :rtype: list of list + +Fetch all (remaining) rows of a query result, returning them as list of lists. +Note that the cursor's :attr:`arraysize` attribute can affect the performance +of this operation. + +row_factory -- process a row of the query result +------------------------------------------------ + +.. method:: pgdbCursor.row_factory(row) + + Process rows before they are returned + + :param list row: the currently processed row of the result set + :returns: the transformed row that the fetch methods shall return + +.. note:: + + This method is not part of the DB-API 2 standard. + +You can overwrite this method with a custom row factory, e.g. +if you want to return rows as dicts instead of lists:: + + class DictCursor(pgdb.pgdbCursor): + + def row_factory(self, row): + return dict((d[0], v) for d, v in zip(self.description, row)) + + cur = DictCursor(con) + +.. versionadded:: 4.0 + +arraysize - the number of rows to fetch at a time +------------------------------------------------- + +.. attribute:: pgdbCursor.arraysize + + The number of rows to fetch at a time + +This read/write attribute specifies the number of rows to fetch at a time with +:meth:`pgdbCursor.fetchmany`. It defaults to 1 meaning to fetch a single row +at a time. diff --git a/docs/contents/pgdb/index.rst b/docs/contents/pgdb/index.rst new file mode 100644 index 00000000..5d3f2a90 --- /dev/null +++ b/docs/contents/pgdb/index.rst @@ -0,0 +1,15 @@ +---------------------------------------------- +:mod:`pgdb` --- The DB-API Compliant Interface +---------------------------------------------- + +.. module:: pgdb + +Contents +======== + +.. toctree:: + introduction + module + connection + cursor + types diff --git a/docs/contents/pgdb/introduction.rst b/docs/contents/pgdb/introduction.rst new file mode 100644 index 00000000..7c8bd42d --- /dev/null +++ b/docs/contents/pgdb/introduction.rst @@ -0,0 +1,19 @@ +Introduction +============ + +You may either choose to use the "classic" PyGreSQL interface provided by +the :mod:`pg` module or else the newer DB-API 2.0 compliant interface +provided by the :mod:`pgdb` module. + +The following part of the documentation covers only the newer :mod:`pgdb` API. + +**DB-API 2.0** (Python Database API Specification v2.0) +is a specification for connecting to databases (not only PostGreSQL) +from Python that has been developed by the Python DB-SIG in 1999. +The authoritative programming information for the DB-API is :pep:`0249`. + +.. seealso:: + + A useful tutorial-like `introduction to the DB-API + `_ + has been written by Andrew M. Kuchling for the LINUX Journal in 1998. diff --git a/docs/contents/pgdb/module.rst b/docs/contents/pgdb/module.rst new file mode 100644 index 00000000..cff00e2c --- /dev/null +++ b/docs/contents/pgdb/module.rst @@ -0,0 +1,114 @@ +Module functions and constants +============================== + +.. 
py:currentmodule:: pgdb + +The :mod:`pgdb` module defines a :func:`connect` function that allows to +connect to a database, some global constants describing the capabilities +of the module as well as several exception classes. + +connect -- Open a PostgreSQL connection +--------------------------------------- + +.. function:: connect([dsn], [user], [password], [host], [database]) + + Return a new connection to the database + + :param str dsn: data source name as string + :param str user: the database user name + :param str password: the database password + :param str host: the hostname of the database + :param database: the name of the database + :returns: a connection object + :rtype: :class:`pgdbCnx` + :raises pgdb.OperationalError: error connecting to the database + +This function takes parameters specifying how to connect to a PostgreSQL +database and returns a :class:`pgdbCnx` object using these parameters. +If specified, the *dsn* parameter must be a string with the format +``'host:base:user:passwd:opt:tty'``. All of the parts specified in the *dsn* +are optional. You can also specify the parameters individually using keyword +arguments, which always take precedence. The *host* can also contain a port +if specified in the format ``'host:port'``. In the *opt* part of the *dsn* +you can pass command-line options to the server, the *tty* part is used to +send server debug output. + +Example:: + + con = connect(dsn='myhost:mydb', user='guido', password='234$') + + +Module constants +---------------- + +.. data:: apilevel + + The string constant ``'2.0'``, stating that the module is DB-API 2.0 level + compliant. + +.. data:: threadsafety + + The integer constant 1, stating that the module itself is thread-safe, + but the connections are not thread-safe, and therefore must be protected + with a lock if you want to use them from different threads. + +.. data:: paramstyle + + The string constant ``pyformat``, stating that parameters should be passed + using Python extended format codes, e.g. ``" ... WHERE name=%(name)s"``. + +Errors raised by this module +---------------------------- + +The errors that can be raised by the :mod:`pgdb` module are the following: + +.. exception:: Warning + + Exception raised for important warnings like data truncations while + inserting. + +.. exception:: Error + + Exception that is the base class of all other error exceptions. You can + use this to catch all errors with one single except statement. + Warnings are not considered errors and thus do not use this class as base. + +.. exception:: InterfaceError + + Exception raised for errors that are related to the database interface + rather than the database itself. + +.. exception:: DatabaseError + + Exception raised for errors that are related to the database. + + In PyGreSQL, this also has a :attr:`DatabaseError.sqlstate` attribute + that contains the ``SQLSTATE`` error code of this error. + +.. exception:: DataError + + Exception raised for errors that are due to problems with the processed + data like division by zero or numeric value out of range. + +.. exception:: OperationalError + + Exception raised for errors that are related to the database's operation + and not necessarily under the control of the programmer, e.g. an unexpected + disconnect occurs, the data source name is not found, a transaction could + not be processed, or a memory allocation error occurred during processing. + +.. exception:: IntegrityError + + Exception raised when the relational integrity of the database is affected, + e.g. 
a foreign key check fails. + +.. exception:: ProgrammingError + + Exception raised for programming errors, e.g. table not found or already + exists, syntax error in the SQL statement or wrong number of parameters + specified. + +.. exception:: NotSupportedError + + Exception raised in case a method or database API was used which is not + supported by the database. diff --git a/docs/contents/pgdb/types.rst b/docs/contents/pgdb/types.rst new file mode 100644 index 00000000..4bc379e0 --- /dev/null +++ b/docs/contents/pgdb/types.rst @@ -0,0 +1,137 @@ +pgdbType -- Type objects and constructors +========================================= + +.. py:currentmodule:: pgdb + +Type constructors +----------------- + +For binding to an operation's input parameters, PostgreSQL needs to have +the input in a particular format. However, from the parameters to the +:meth:`pgdbCursor.execute` and :meth:`pgdbCursor.executemany` methods it +is not always obvious as which PostgreSQL data types they shall be bound. +For instance, a Python string could be bound as a simple ``char`` value, +or also as a ``date`` or a ``time``. To make the intention clear in such +cases, you can wrap the parameters in type helper objects. PyGreSQL provides +the constructors defined below to create such objects that can hold special +values. When passed to the cursor methods, PyGreSQL can then detect the +proper type of the input parameter and bind it accordingly. + +The :mod:`pgdb` module exports the following type constructors as part of +the DB-API 2 standard: + +.. function:: Date(year, month, day) + + Construct an object holding a date value + +.. function:: Time(hour, minute=0, second=0, microsecond=0) + + Construct an object holding a time value + +.. function:: Timestamp(year, month, day, hour=0, minute=0, second=0, microsecond=0) + + Construct an object holding a time stamp value + +.. function:: DateFromTicks(ticks) + + Construct an object holding a date value from the given *ticks* value + +.. function:: TimeFromTicks(ticks) + + Construct an object holding a time value from the given *ticks* value + +.. function:: TimestampFromTicks(ticks) + + Construct an object holding a time stamp from the given *ticks* value + +.. function:: Binary(bytes) + + Construct an object capable of holding a (long) binary string value + +.. note:: + + SQL ``NULL`` values are always represented by the Python *None* singleton + on input and output. + +Type objects +------------ + +.. class:: pgdbType + +The :attr:`pgdbCursor.description` attribute returns information about each +of the result columns of a query. The *type_code* must compare equal to one +of the :class:`pgdbType` objects defined below. Type objects can be equal to +more than one type code (e.g. :class:`DATETIME` is equal to the type codes +for date, time and timestamp columns). + +The pgdb module exports the following :class:`Type` objects as part of the +DB-API 2 standard: + +.. object:: STRING + + Used to describe columns that are string-based (e.g. ``char``, ``varchar``, ``text``) + +.. object:: BINARY + + Used to describe (long) binary columns (``bytea``) + +.. object:: NUMBER + + Used to describe numeric columns (e.g. ``int``, ``float``, ``numeric``, ``money``) + +.. object:: DATETIME + + Used to describe date/time columns (e.g. ``date``, ``time``, ``timestamp``, ``interval``) + +.. object:: ROWID + + Used to describe the ``oid`` column of PostgreSQL database tables + +.. note:: + + The following more specific type objects are not part of the DB-API 2 standard. + +.. 
object:: BOOL + + Used to describe ``boolean`` columns + +.. object:: SMALLINT + + Used to describe ``smallint`` columns + +.. object:: INTEGER + + Used to describe ``integer`` columns + +.. object:: LONG + + Used to describe ``bigint`` columns + +.. object:: FLOAT + + Used to describe ``float`` columns + +.. object:: NUMERIC + + Used to describe ``numeric`` columns + +.. object:: MONEY + + Used to describe ``money`` columns + +.. object:: DATE + + Used to describe ``date`` columns + +.. object:: TIME + + Used to describe ``time`` columns + +.. object:: TIMESTAMP + + Used to describe ``timestamp`` columns + +.. object:: INTERVAL + + Used to describe date and time ``interval`` columns + diff --git a/docs/contents/postgres/advanced.rst b/docs/contents/postgres/advanced.rst new file mode 100644 index 00000000..1f55f5ad --- /dev/null +++ b/docs/contents/postgres/advanced.rst @@ -0,0 +1,154 @@ +Examples for advanced features +============================== + +.. py:currentmodule:: pg + +In this section, we show how to use some advanced features of PostgreSQL +using the classic PyGreSQL interface. + +We assume that you have already created a connection to the PostgreSQL +database, as explained in the :doc:`basic`:: + + >>> from pg import DB + >>> db = DB() + >>> query = db.query + +Inheritance +----------- + +A table can inherit from zero or more tables. A query can reference either +all rows of a table or all rows of a table plus all of its descendants. + +For example, the capitals table inherits from the cities table (it inherits +all data fields from cities):: + + >>> query("""CREATE TABLE cities ( + ... name text, + ... population float8, + ... altitude int)""") + >>> query("""CREATE TABLE capitals ( + ... state varchar(2)) INHERITS (cities)""") + +Now, let's populate the tables:: + + >>> data = [('cities', [ + ... "'San Francisco', 7.24E+5, 63", + ... "'Las Vegas', 2.583E+5, 2174", + ... "'Mariposa', 1200, 1953"]), + ... ('capitals', [ + ... "'Sacramento',3.694E+5,30,'CA'", + ... "'Madison', 1.913E+5, 845, 'WI'"])] + >>> for table, rows in data: + ... for row in rows: + ... query("INSERT INTO %s VALUES (%s)" % (table, row)) + >>> print query("SELECT * FROM cities") + name |population|altitude + -------------+----------+-------- + San Francisco| 724000| 63 + Las Vegas | 258300| 2174 + Mariposa | 1200| 1953 + Sacramento | 369400| 30 + Madison | 191300| 845 + (5 rows) + >>> print query("SELECT * FROM capitals") + name |population|altitude|state + ----------+----------+--------+----- + Sacramento| 369400| 30|CA + Madison | 191300| 845|WI + (2 rows) + +You can find all cities, including capitals, that are located at an altitude +of 500 feet or higher by:: + + >>> print query("""SELECT c.name, c.altitude + ... FROM cities c + ... WHERE altitude > 500""") + name |altitude + ---------+-------- + Las Vegas| 2174 + Mariposa | 1953 + Madison | 845 + (3 rows) + +On the other hand, the following query references rows of the base table only, +i.e. it finds all cities that are not state capitals and are situated at an +altitude of 500 feet or higher:: + + >>> print query("""SELECT name, altitude + ... FROM ONLY cities + ... WHERE altitude > 500""") + name |altitude + ---------+-------- + Las Vegas| 2174 + Mariposa | 1953 + (2 rows) + +Arrays +------ + +Attributes can be arrays of base types or user-defined types:: + + >>> query("""CREATE TABLE sal_emp ( + ... name text, + ... pay_by_quarter int4[], + ... pay_by_extra_quarter int8[], + ...
schedule text[][])""") + + +Insert instances with array attributes. Note the use of braces:: + + >>> query("""INSERT INTO sal_emp VALUES ( + ... 'Bill', '{10000,10000,10000,10000}', + ... '{9223372036854775800,9223372036854775800,9223372036854775800}', + ... '{{"meeting", "lunch"}, {"training", "presentation"}}')""") + >>> query("""INSERT INTO sal_emp VALUES ( + ... 'Carol', '{20000,25000,25000,25000}', + ... '{9223372036854775807,9223372036854775807,9223372036854775807}', + ... '{{"breakfast", "consulting"}, {"meeting", "lunch"}}')""") + + +Queries on array attributes:: + + >>> query("""SELECT name FROM sal_emp WHERE + ... sal_emp.pay_by_quarter[1] != sal_emp.pay_by_quarter[2]""") + name + ----- + Carol + (1 row) + +Retrieve third quarter pay of all employees:: + + >>> query("SELECT sal_emp.pay_by_quarter[3] FROM sal_emp") + pay_by_quarter + -------------- + 10000 + 25000 + (2 rows) + +Retrieve third quarter extra pay of all employees:: + + >>> query("SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp") + pay_by_extra_quarter + -------------------- + 9223372036854775800 + 9223372036854775807 + (2 rows) + +Retrieve first two quarters of extra quarter pay of all employees:: + + >>> query("SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp") + pay_by_extra_quarter + ----------------------------------------- + {9223372036854775800,9223372036854775800} + {9223372036854775807,9223372036854775807} + (2 rows) + +Select subarrays:: + + >>> query("""SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp + ... WHERE sal_emp.name = 'Bill'""") + schedule + ---------------------- + {{meeting},{training}} + (1 row) diff --git a/docs/contents/postgres/basic.rst b/docs/contents/postgres/basic.rst new file mode 100644 index 00000000..d2d458d4 --- /dev/null +++ b/docs/contents/postgres/basic.rst @@ -0,0 +1,361 @@ +Basic examples +============== + +.. py:currentmodule:: pg + +In this section, we demonstrate how to use some of the very basic features +of PostgreSQL using the classic PyGreSQL interface. + +Creating a connection to the database +------------------------------------- + +We start by creating a **connection** to the PostgreSQL database:: + + >>> from pg import DB + >>> db = DB() + +If you pass no parameters when creating the :class:`DB` instance, then +PyGreSQL will try to connect to the database on the local host that has +the same name as the current user, and also use that name for login. + +You can also pass the database name, host, port and login information +as parameters when creating the :class:`DB` instance:: + + >>> db = DB(dbname='testdb', host='pgserver', port=5432, + ... user='scott', passwd='tiger') + +The :class:`DB` class of which ``db`` is an object is a wrapper around +the lower level :class:`pgobject` class of the :mod:`pg` module. +The most important method of such connection objects is the ``query`` +method that allows you to send SQL commands to the database. + +Creating tables +--------------- + +The first thing you would want to do in an empty database is creating a +table. To do this, you need to send a **CREATE TABLE** command to the +database. PostgreSQL has its own set of built-in types that can be used +for the table columns. Let us create two tables "weather" and "cities":: + + >>> db.query("""CREATE TABLE weather ( + ... city varchar(80), + ... temp_lo int, temp_hi int, + ... prcp float8, + ... date date)""") + >>> db.query("""CREATE TABLE cities ( + ... name varchar(80), + ... location point)""") + +.. 
note:: + Keywords are case-insensitive but identifiers are case-sensitive. + +You can get a list of all tables in the database with:: + + >>> db.get_tables() + ['public.cities', 'public.weather'] + + +Insert data +----------- + +Now we want to fill our tables with data. An **INSERT** statement is used +to insert a new row into a table. There are several ways you can specify +what columns the data should go to. + +Let us insert a row into each of these tables. The simplest case is when +the list of values corresponds to the order of the columns specified in the +CREATE TABLE command:: + + >>> db.query("""INSERT INTO weather + ... VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')""") + >>> db.query("""INSERT INTO cities + ... VALUES ('San Francisco', '(-194.0, 53.0)')""") + +You can also specify what column the values correspond to. The columns can +be specified in any order. You may also omit any number of columns, +unknown precipitation below:: + + >>> db.query("""INSERT INTO weather (date, city, temp_hi, temp_lo) + ... VALUES ('11/29/1994', 'Hayward', 54, 37)""") + + +If you get errors regarding the format of the date values, your database +is probably set to a different date style. In this case you must change +the date style like this:: + + >>> db.query("set datestyle = MDY") + +Instead of explicitly writing the INSERT statement and sending it to the +database with the :meth:`DB.query` method, you can also use the more +convenient :meth:`DB.insert` method that does the same under the hood:: + + >>> db.insert('weather', + ... date='11/29/1994', city='Hayward', temp_hi=54, temp_lo=37) + +And instead of using keyword parameters, you can also pass the values +to the :meth:`DB.insert` method in a single Python dictionary. + +If you have a Python list with many rows that shall be used to fill +a database table quickly, you can use the :meth:`DB.inserttable` method. + +Retrieving data +--------------- + +After having entered some data into our tables, let's see how we can get +the data out again. A **SELECT** statement is used for retrieving data. +The basic syntax is: + +.. code-block:: psql + + SELECT columns FROM tables WHERE predicates + +A simple one would be the following query:: + + >>> q = db.query("SELECT * FROM weather") + >>> print q + city |temp_lo|temp_hi|prcp| date + -------------+-------+-------+----+---------- + San Francisco| 46| 50|0.25|1994-11-27 + Hayward | 37| 54| |1994-11-29 + (2 rows) + +You may also specify expressions in the target list. +(The 'AS column' specifies the column name of the result. It is optional.) + +:: + + >>> print db.query("""SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date + ... FROM weather""") + city |temp_avg| date + -------------+--------+---------- + San Francisco| 48|1994-11-27 + Hayward | 45|1994-11-29 + (2 rows) + +If you want to retrieve rows that satisfy certain condition (i.e. a +restriction), specify the condition in a WHERE clause. The following +retrieves the weather of San Francisco on rainy days:: + + >>> print db.query("""SELECT * FROM weather + ... WHERE city = 'San Francisco' AND prcp > 0.0""") + city |temp_lo|temp_hi|prcp| date + -------------+-------+-------+----+---------- + San Francisco| 46| 50|0.25|1994-11-27 + (1 row) + +Here is a more complicated one. Duplicates are removed when DISTINCT is +specified. ORDER BY specifies the column to sort on. (Just to make sure the +following won't confuse you, DISTINCT and ORDER BY can be used separately.) 
+ +:: + + >>> print db.query("SELECT DISTINCT city FROM weather ORDER BY city") + city + ------------- + Hayward + San Francisco + (2 rows) + +So far we have only printed the output of a SELECT query. The object that +is returned by the query is an instance of the :class:`pgqueryobject` class +that can print itself in the nicely formatted way we saw above. But you can +also retrieve the results as a list of tuples, by using the +:meth:`pgqueryobject.getresult` method:: + + >>> from pprint import pprint + >>> q = db.query("SELECT * FROM weather") + >>> pprint(q.getresult()) + [('San Francisco', 46, 50, 0.25, '1994-11-27'), + ('Hayward', 37, 54, None, '1994-11-29')] + +Here we used pprint to print out the returned list in a nicely formatted way. + +If you want to retrieve the results as a list of dictionaries instead of +tuples, use the :meth:`pgqueryobject.dictresult` method instead:: + + >>> pprint(q.dictresult()) + [{'city': 'San Francisco', + 'date': '1994-11-27', + 'prcp': 0.25, + 'temp_hi': 50, + 'temp_lo': 46}, + {'city': 'Hayward', + 'date': '1994-11-29', + 'prcp': None, + 'temp_hi': 54, + 'temp_lo': 37}] + +Finally, in Python 2.5 and above you can also retrieve the results as a list +of named tuples, using the :meth:`pgqueryobject.namedresult` method. +This can be a good compromise between simple tuples and the more memory +intensive dictionaries: + + >>> for row in q.namedresult(): + ... print row.city, row.date + ... + San Francisco 1994-11-27 + Hayward 1994-11-29 + +If you only want to retrieve a single row of data, you can use the more +convenient :meth:`DB.get` method that does the same under the hood:: + + >>> d = dict(city='Hayward') + >>> db.get('weather', d, 'city') + >>> pprint(d) + {'city': 'Hayward', + 'date': '1994-11-29', + 'prcp': None, + 'temp_hi': 54, + 'temp_lo': 37} + +As you see, the :meth:`DB.get` method returns a dictionary with the column +names as keys. In the third parameter you can specify which column should +be looked up in the WHERE statement of the SELECT statement that is executed +by the :meth:`DB.get` method. You normally don't need it when the table was +created with a primary key. + +Retrieving data into other tables +--------------------------------- + +A SELECT ... INTO statement can be used to retrieve data into another table:: + + >>> db.query("""SELECT * INTO TEMPORARY TABLE temptab FROM weather + ... WHERE city = 'San Francisco' and prcp > 0.0""") + +This fills a temporary table "temptab" with a subset of the data in the +original "weather" table. It can be listed with:: + + >>> print db.query("SELECT * from temptab") + city |temp_lo|temp_hi|prcp| date + -------------+-------+-------+----+---------- + San Francisco| 46| 50|0.25|1994-11-27 + (1 row) + +Aggregates +---------- + +Let's try the following query:: + + >>> print db.query("SELECT max(temp_lo) FROM weather") + max + --- + 46 + (1 row) + +You can also use aggregates with the GROUP BY clause:: + + >>> print db.query("SELECT city, max(temp_lo) FROM weather GROUP BY city") + city |max + -------------+--- + Hayward | 37 + San Francisco| 46 + (2 rows) + +Joining tables +-------------- + +Queries can access multiple tables at once or access the same table in such a +way that multiple instances of the table are being processed at the same time. + +Suppose we want to find all the records that are in the temperature range of +other records. W1 and W2 are aliases for weather. We can use the following +query to achieve that:: + + >>> print db.query("""SELECT W1.city, W1.temp_lo, W1.temp_hi, + ... 
W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2 + ... WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi""") + city |temp_lo|temp_hi| city |temp_lo|temp_hi + -------+-------+-------+-------------+-------+------- + Hayward| 37| 54|San Francisco| 46| 50 + (1 row) + +Now let's join two tables. The following joins the "weather" table and the +"cities" table:: + + >>> print db.query("""SELECT city, location, prcp, date FROM weather, cities + ... WHERE name = city""") + city |location |prcp| date + -------------+---------+----+---------- + San Francisco|(-194,53)|0.25|1994-11-27 + (1 row) + +Since the column names are all different, we don't have to specify the table +name. If you want to be clear, you can do the following. They give identical +results, of course:: + + >>> print db.query("""SELECT w.city, c.location, w.prcp, w.date + ... FROM weather w, cities c WHERE c.name = w.city""") + city |location |prcp| date + -------------+---------+----+---------- + San Francisco|(-194,53)|0.25|1994-11-27 + (1 row) + +Updating data +------------- + +If you want to change the data that has already been inserted into a database +table, you will need the **UPDATE** statement. + +Suppose you discover the temperature readings are all off by 2 degrees as of +Nov 28; you may update the data as follows:: + + >>> db.query("""UPDATE weather + ... SET temp_hi = temp_hi - 2, temp_lo = temp_lo - 2 + ... WHERE date > '11/28/1994'""") + '1' + >>> print db.query("SELECT * from weather") + city |temp_lo|temp_hi|prcp| date + -------------+-------+-------+----+---------- + San Francisco| 46| 50|0.25|1994-11-27 + Hayward | 35| 52| |1994-11-29 + (2 rows) + +Note that the UPDATE statement returned the string ``'1'``, indicating that +exactly one row of data has been affected by the update. + +If you retrieved one row of data as a dictionary using the :meth:`DB.get` +method, then you can also update that row with the :meth:`DB.update` method. + +Deleting data +------------- + +To delete rows from a table, a **DELETE** statement can be used. + +Suppose you are no longer interested in the weather of Hayward; you can do +the following to delete those rows from the table:: + + >>> db.query("DELETE FROM weather WHERE city = 'Hayward'") + '1' + +Again, you get the string ``'1'`` as return value, indicating that exactly +one row of data has been deleted. + +You can also delete all the rows in a table by doing the following. +This is different from DROP TABLE, which removes the table itself in addition +to removing the rows, as explained in the next section. + +:: + + >>> db.query("DELETE FROM weather") + '1' + >>> print db.query("SELECT * from weather") + city|temp_lo|temp_hi|prcp|date + ----+-------+-------+----+---- + (0 rows) + +Since only one row was left in the table, the DELETE query again returns the +string ``'1'``. The SELECT query now gives an empty result. + +If you retrieved a row of data as a dictionary using the :meth:`DB.get` +method, then you can also delete that row with the :meth:`DB.delete` method. + + +Removing the tables +------------------- +The **DROP TABLE** command is used to remove tables.
After you have done this, +you can no longer use those tables:: + + >>> db.query("DROP TABLE weather, cities") + >>> db.query("select * from weather") + pg.ProgrammingError: Error: Relation "weather" does not exist + diff --git a/docs/contents/postgres/func.rst b/docs/contents/postgres/func.rst new file mode 100644 index 00000000..4331d193 --- /dev/null +++ b/docs/contents/postgres/func.rst @@ -0,0 +1,162 @@ +Examples for using SQL functions +================================ + +.. py:currentmodule:: pg + +We assume that you have already created a connection to the PostgreSQL +database, as explained in the :doc:`basic`:: + + >>> from pg import DB + >>> db = DB() + >>> query = db.query + +Creating SQL Functions on Base Types +------------------------------------ + +A **CREATE FUNCTION** statement lets you create a new function that can be +used in expressions (in SELECT, INSERT, etc.). We will start with functions +that return values of base types. + +Let's create a simple SQL function that takes no arguments and returns 1:: + + >>> query("""CREATE FUNCTION one() RETURNS int4 + ... AS 'SELECT 1 as ONE' LANGUAGE SQL""") + +Functions can be used in any expression (e.g. in the target list or +qualifications):: + + >>> print db.query("SELECT one() AS answer") + answer + ------ + 1 + (1 row) + + +Here's how you create a function that takes arguments. The following function +returns the sum of its two arguments:: + + >>> query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4 + ... AS $$ SELECT $1 + $2 $$ LANGUAGE SQL""") + >>> print query("SELECT add_em(1, 2) AS answer") + answer + ------ + 3 + (1 row) + + +Creating SQL Functions on Composite Types +----------------------------------------- + +It is also possible to create functions that return values of composite types. + +Before we create more sophisticated functions, let's populate an EMP table:: + + >>> query("""CREATE TABLE EMP ( + ... name text, + ... salary int4, + ... age int4, + ... dept varchar(16))""") + >>> emps = ["'Sam', 1200, 16, 'toy'", + ... "'Claire', 5000, 32, 'shoe'", + ... "'Andy', -1000, 2, 'candy'", + ... "'Bill', 4200, 36, 'shoe'", + ... "'Ginger', 4800, 30, 'candy'"] + >>> for emp in emps: + ... query("INSERT INTO EMP VALUES (%s)" % emp) + +Every INSERT statement will return a '1' indicating that it has inserted +one row into the EMP table. + +The argument of a function can also be a tuple. For instance, *double_salary* +takes a tuple of the EMP table:: + + >>> query("""CREATE FUNCTION double_salary(EMP) RETURNS int4 + ... AS $$ SELECT $1.salary * 2 AS salary $$ LANGUAGE SQL""") + >>> print query("""SELECT name, double_salary(EMP) AS dream + ... FROM EMP WHERE EMP.dept = 'toy'""") + name|dream + ----+----- + Sam | 2400 + (1 row) + +The return value of a function can also be a tuple. However, make sure that the +expressions in the target list are in the same order as the columns of EMP:: + + >>> query("""CREATE FUNCTION new_emp() RETURNS EMP AS $$ + ... SELECT 'None'::text AS name, + ... 1000 AS salary, + ... 25 AS age, + ... 'None'::varchar(16) AS dept + ... $$ LANGUAGE SQL""") + +You can then project a column out of the resulting tuple by using the +"function notation" for projection columns (i.e. ``bar(foo)`` is equivalent +to ``foo.bar``). Note that ``new_emp().name`` isn't supported:: + + >>> print query("SELECT name(new_emp()) AS nobody") + nobody + ------ + None + (1 row) + +Let's try one more function that returns tuples:: + + >>> query("""CREATE FUNCTION high_pay() RETURNS setof EMP + ...
AS 'SELECT * FROM EMP where salary > 1500' + ... LANGUAGE SQL""") + >>> query("SELECT name(high_pay()) AS overpaid") + overpaid + -------- + Claire + Bill + Ginger + (3 rows) + + +Creating SQL Functions with multiple SQL statements +--------------------------------------------------- + +You can also create functions that do more than just a SELECT. + +You may have noticed that Andy has a negative salary. We'll create a function +that removes employees with negative salaries:: + + >>> query("SELECT * FROM EMP") + name |salary|age|dept + ------+------+---+----- + Sam | 1200| 16|toy + Claire| 5000| 32|shoe + Andy | -1000| 2|candy + Bill | 4200| 36|shoe + Ginger| 4800| 30|candy + (5 rows) + >>> query("""CREATE FUNCTION clean_EMP () RETURNS int4 AS + ... 'DELETE FROM EMP WHERE EMP.salary <= 0; + ... SELECT 1 AS ignore_this' + ... LANGUAGE SQL""") + >>> query("SELECT clean_EMP()") + clean_emp + --------- + 1 + (1 row) + >>> query("SELECT * FROM EMP") + name |salary|age|dept + ------+------+---+----- + Sam | 1200| 16|toy + Claire| 5000| 32|shoe + Bill | 4200| 36|shoe + Ginger| 4800| 30|candy + (4 rows) + +Remove functions that were created in this example +-------------------------------------------------- + +We can remove the functions that we have created in this example and the +table EMP, by using the DROP command:: + + query("DROP FUNCTION clean_EMP()") + query("DROP FUNCTION high_pay()") + query("DROP FUNCTION new_emp()") + query("DROP FUNCTION add_em(int4, int4)") + query("DROP FUNCTION one()") + query("DROP TABLE EMP CASCADE") diff --git a/docs/contents/postgres/index.rst b/docs/contents/postgres/index.rst new file mode 100644 index 00000000..409a7042 --- /dev/null +++ b/docs/contents/postgres/index.rst @@ -0,0 +1,17 @@ +------------------- +A PostgreSQL Primer +------------------- + +The examples in this chapter of the documentation have been taken +from the PostgreSQL manual. They demonstrate some PostgreSQL features +using the classic PyGreSQL interface. They can serve as an introduction +to PostgreSQL, but not so much as examples for the use of PyGreSQL. + +Contents +======== + +.. toctree:: + basic + advanced + func + syscat diff --git a/docs/contents/postgres/syscat.rst b/docs/contents/postgres/syscat.rst new file mode 100644 index 00000000..338f1ebc --- /dev/null +++ b/docs/contents/postgres/syscat.rst @@ -0,0 +1,134 @@ +Examples for using the system catalogs +====================================== + +.. py:currentmodule:: pg + +The system catalogs are regular tables where PostgreSQL stores schema metadata, +such as information about tables and columns, and internal bookkeeping +information. You can drop and recreate the tables, add columns, insert and +update values, and severely mess up your system that way. Normally, one +should not change the system catalogs by hand, there are always SQL commands +to do that. For example, CREATE DATABASE inserts a row into the *pg_database* +catalog — and actually creates the database on disk. + +It this section we want to show examples for how to parse some of the system +catalogs, making queries with the classic PyGreSQL interface. 
+ +We assume that you have already created a connection to the PostgreSQL +database, as explained in the :doc:`basic`:: + + >>> from pg import DB + >>> db = DB() + >>> query = db.query + +List indices +------------- + +This query lists all simple indices in the database:: + + print query("""SELECT bc.relname AS class_name, + ic.relname AS index_name, a.attname + FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a + WHERE i.indrelid = bc.oid AND i.indexrelid = ic.oid + AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid + AND NOT a.attisdropped + ORDER BY class_name, index_name, attname""") + + +List user defined attributes +---------------------------- + +This query lists all user defined attributes and their type +in user-defined classes:: + + print query("""SELECT c.relname, a.attname, t.typname + FROM pg_class c, pg_attribute a, pg_type t + WHERE c.relkind = 'r' and c.relname !~ '^pg_' + AND c.relname !~ '^Inv' and a.attnum > 0 + AND a.attrelid = c.oid and a.atttypid = t.oid + AND NOT a.attisdropped + ORDER BY relname, attname""") + +List user defined base types +---------------------------- + +This query lists all user defined base types:: + + print query("""SELECT r.rolname, t.typname + FROM pg_type t, pg_authid r + WHERE r.oid = t.typowner + AND t.typrelid = '0'::oid and t.typelem = '0'::oid + AND r.rolname != 'postgres' + ORDER BY rolname, typname""") + + +List operators +--------------- + +This query lists all right-unary operators:: + + print query("""SELECT o.oprname AS right_unary, + lt.typname AS operand, result.typname AS return_type + FROM pg_operator o, pg_type lt, pg_type result + WHERE o.oprkind='r' and o.oprleft = lt.oid + AND o.oprresult = result.oid + ORDER BY operand""") + + +This query lists all left-unary operators:: + + print query("""SELECT o.oprname AS left_unary, + rt.typname AS operand, result.typname AS return_type + FROM pg_operator o, pg_type rt, pg_type result + WHERE o.oprkind='l' AND o.oprright = rt.oid + AND o.oprresult = result.oid + ORDER BY operand""") + + +And this one lists all of the binary operators:: + + print query("""SELECT o.oprname AS binary_op, + rt.typname AS right_opr, lt.typname AS left_opr, + result.typname AS return_type + FROM pg_operator o, pg_type rt, pg_type lt, pg_type result + WHERE o.oprkind = 'b' AND o.oprright = rt.oid + AND o.oprleft = lt.oid AND o.oprresult = result.oid""") + + +List functions of a language +---------------------------- + +Given a programming language, this query returns the name, number of arguments +and return type of all functions of that language:: + + language = 'sql' + print query("""SELECT p.proname, p.pronargs, t.typname + FROM pg_proc p, pg_language l, pg_type t + WHERE p.prolang = l.oid AND p.prorettype = t.oid + AND l.lanname = $1 + ORDER BY proname""", (language,)) + + +List aggregate functions +------------------------ + +This query lists all of the aggregate functions and the type to which +they can be applied:: + + print query("""SELECT p.proname, t.typname + FROM pg_aggregate a, pg_proc p, pg_type t + WHERE a.aggfnoid = p.oid + and p.proargtypes[0] = t.oid + ORDER BY proname, typname""") + +List operator families +---------------------- + +The following query lists all defined operator families and all the operators +included in each family:: + + print query("""SELECT am.amname, opf.opfname, amop.amopopr::regoperator + FROM pg_am am, pg_opfamily opf, pg_amop amop + WHERE opf.opfmethod = am.oid + AND amop.amopfamily = opf.oid + ORDER BY amname, opfname, amopopr""") diff --git a/docs/contents/tutorial.rst
b/docs/contents/tutorial.rst new file mode 100644 index 00000000..9a265dcd --- /dev/null +++ b/docs/contents/tutorial.rst @@ -0,0 +1,257 @@ +First Steps with PyGreSQL +========================= + +In this small tutorial we show you the basic operations you can perform +with both flavors of the PyGreSQL interface. Please choose your flavor: + +.. contents:: + :local: + + +First Steps with the classic PyGreSQL Interface +----------------------------------------------- + +.. py:currentmodule:: pg + +The first thing you need to do anything with your PostgreSQL database is +to create a database connection. + +To do this, simply import the :class:`DB` wrapper class and create an +instance of it, passing the necessary connection parameters, like this:: + + >>> from pg import DB + >>> db = DB(dbname='testdb', host='pgserver', port=5432, + ... user='scott', passwd='tiger') + +You can omit one or even all parameters if you want to use their default +values. PostgreSQL will use the name of the current operating system user +as the login and the database name, and will try to connect to the local +host on port 5432 if nothing else is specified. + +The `db` object has all methods of the lower-level :class:`pgobject` class +plus some more convenience methods provided by the :class:`DB` wrapper. + +You can now execute database queries using the :meth:`DB.query` method:: + + >>> db.query("create table fruits(id serial primary key, name varchar)") + +You can list all database tables with the :meth:`DB.get_tables` method:: + + >>> db.get_tables() + ['public.fruits'] + +To get the attributes of the *fruits* table, use :meth:`DB.get_attnames`:: + + >>> db.get_attnames('fruits') + {'id': 'int', 'name': 'text'} + +Verify that you can insert into the newly created *fruits* table: + + >>> db.has_table_privilege('fruits', 'insert') + True + +You can insert a new row into the table using the :meth:`DB.insert` method, +for example:: + + >>> db.insert('fruits', name='apple') + {'name': 'apple', 'id': 1} + +Note how this method returns the full row as a dictionary including its *id* +column that has been generated automatically by a database sequence. You can +also pass a dictionary to the :meth:`DB.insert` method instead of or in +addition to using keyword arguments. + +Let's add another row to the table: + + >>> banana = db.insert('fruits', name='banana') + +Or, you can add a whole bunch of fruits at the same time using the +:meth:`DB.inserttable` method. Note that this method uses the COPY command +of PostgreSQL to insert all data in one operation, which is faster than +sending many INSERT commands:: + + >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() + >>> data = list(enumerate(more_fruits, start=3)) + >>> db.inserttable('fruits', data) + +We can now query the database for all rows that have been inserted into +the *fruits* table:: + + >>> print db.query('select * from fruits') + id| name + --+---------- + 1|apple + 2|banana + 3|cherimaya + 4|durian + 5|eggfruit + 6|fig + 7|grapefruit + (7 rows) + +Instead of simply printing the :class:`pgqueryobject` instance that has been +returned by this query, we can also request the data as list of tuples:: + + >>> q = db.query('select * from fruits') + >>> q.getresult() + ... 
[(1, 'apple'), ..., (7, 'grapefruit')] + +Instead of a list of tuples, we can also request a list of dicts:: + + >>> q.dictresult() + [{'id': 1, 'name': 'apple'}, ..., {'id': 7, 'name': 'grapefruit'}] + +And with Python 2.5 or higher, you can also return the rows as named tuples:: + + >>> rows = q.namedresult() + >>> rows[3].name + 'durian' + +To change a single row in the database, you can use the :meth:`DB.update` +method. For instance, if you want to capitalize the name 'banana':: + + >>> db.update('fruits', banana, name=banana['name'].capitalize()) + {'id': 2, 'name': 'Banana'} + >>> print db.query('select * from fruits where id between 1 and 3') + id| name + --+--------- + 1|apple + 2|Banana + 3|cherimaya + (3 rows) + +Let's also capitalize the other names in the database:: + + >>> db.query('update fruits set name=initcap(name)') + '7' + +The returned string `'7'` tells us the number of updated rows. It is returned +as a string to discern it from an OID which will be returned as an integer, +if a new row has been inserted into a table with an OID column. + +To delete a single row from the database, use the :meth:`DB.delete` method:: + + >>> db.delete('fruits', banana) + 1 + +The returned integer value `1` tells us that one row has been deleted. If we +try it again, the method returns the integer value `0`. Naturally, this method +can only return 0 or 1:: + + >>> db.delete('fruits', banana) + 0 + +Of course, we can insert the row back again:: + + >>> db.insert('fruits', banana) + {'id': 2, 'name': 'Banana'} + +If we want to change a different row, we can get its current state with:: + + >>> apple = db.get('fruits', 1) + >>> apple + {'name': 'Apple', 'id': 1} + +We can duplicate the row like this:: + + >>> db.insert('fruits', apple, id=8) + {'id': 8, 'name': 'Apple'} + + To remove the duplicated row, we can do:: + + >>> db.delete('fruits', id=8) + 1 + +Finally, to remove the table from the database and close the connection:: + + >>> db.query("drop table fruits") + >>> db.close() + +For more advanced features and details, see the reference: :doc:`pg/index` + +First Steps with the DB-API 2.0 Interface +----------------------------------------- + +.. py:currentmodule:: pgdb + +As with the classic interface, the first thing you need to do is to create +a database connection. To do this, use the function :func:`pgdb.connect` +in the :mod:`pgdb` module, passing the connection parameters:: + + >>> from pgdb import connect + >>> con = connect(database='testdb', host='pgserver:5432', + ... user='scott', password='tiger') + +Note that like in the classic interface, you can omit parameters if they +are the default values used by PostgreSQL. + +To do anything with the connection, you need to request a cursor object +from it, which is thought of as the Python representation of a database +cursor. The connection has a method that lets you get a cursor:: + + >>> cursor = con.cursor() + +The cursor now has a method that lets you execute database queries:: + + >>> cursor.execute("create table fruits(" + ... 
"id serial primary key, name varchar)") + + +To insert data into the table, also can also use this method:: + + >>> cursor.execute("insert into fruits (name) values ('apple')") + +You can pass parameters in a safe way:: + + >>> cursor.execute("insert into fruits (name) values (%s)", ('banana',)) + +For inserting multiple rows at once, you can use the following method:: + + >>> more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() + >>> parameters = [(name,) for name in more_fruits] + >>> cursor.executemany("insert into fruits (name) values (%s)", parameters) + +Note that the DB API 2.0 interface does not have an autocommit as you may +be used from PostgreSQL. So in order to make these inserts permanent, you +need to commit them to the database first:: + + >>> con.commit() + +If you end the program without calling the commit method of the connection, +or if you call the rollback method of the connection, then all the changes +will be discarded. + +In a similar way, you can also update or delete rows in the database, +executing UPDATE or DELETE statements instead of INSERT statements. + +To fetch rows from the database, execute a SELECT statement first. Then +you can use one of several fetch methods to retrieve the results. For +instance, to request a single row:: + + >>> cursor.execute('select * from fruits where id=1') + >>> cursor.fetchone() + [1, 'apple'] + +The output is a single list that represents the row. + +To fetch all rows of the query, use this method instead:: + + >>> cursor.execute('select * from fruits') + >>> cursor.fetchall() + [[1, 'apple'], ..., [7, 'grapefruit']] + +The output is a list with 7 items which are lists of 2 items. + +If you want to fetch only a limited number of rows from the query:: + + >>> cursor.execute('select * from fruits') + >>> cursor.fetchmany(2) + [[1, 'apple'], [2, 'banana']] + +Finally, to remove the table from the database and close the connection:: + + >>> db.execute("drop table fruits") + >>> cur.close() + >>> db.close() + +For more advanced features and details, see the reference: :doc:`pgdb/index` \ No newline at end of file diff --git a/docs/copyright.rst b/docs/copyright.rst index eff0f5a9..42f72525 100644 --- a/docs/copyright.rst +++ b/docs/copyright.rst @@ -10,7 +10,7 @@ Copyright (c) 1995, Pascal Andre Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain (darcy@PyGreSQL.org) -Further modifications copyright (c) 2009-2012 by the PyGreSQL team. +Further modifications copyright (c) 2009-2016 by the PyGreSQL team. Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement @@ -29,5 +29,3 @@ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - - diff --git a/docs/db_api.rst b/docs/db_api.rst deleted file mode 100644 index 8ea115d5..00000000 --- a/docs/db_api.rst +++ /dev/null @@ -1,21 +0,0 @@ -The DB-API compliant interface (pgdb module) -============================================ - -`DB-API 2.0 `_ -(Python Database API Specification v2.0) -is a specification for connecting to databases (not only PostGreSQL) -from Python that has been developed by the Python DB-SIG in 1999. - -The following documentation covers only the newer `pgdb` API. 
- -The authoritative programming information for the DB-API is availabe at - http://www.python.org/dev/peps/pep-0249/ - -A tutorial-like introduction to the DB-API can be found at - http://www2.linuxjournal.com/lj-issues/issue49/2605.html - - -The pgdb module ---------------- -.. note:: This section of the documentation still needs to be written. - diff --git a/docs/default.css b/docs/default.css deleted file mode 100644 index e94df154..00000000 --- a/docs/default.css +++ /dev/null @@ -1,279 +0,0 @@ -/* -:Author: David Goodger -:Contact: goodger@users.sourceforge.net -:Date: $Date$ -:Revision: $Revision$ -:Copyright: This stylesheet has been placed in the public domain. - -Default cascading style sheet for the HTML output of Docutils. - -See http://docutils.sf.net/docs/howto/html-stylesheets.html for how to -customize this style sheet. -*/ - -/* used to remove borders from tables and images */ -.borderless, table.borderless td, table.borderless th { - border: 0 } - -table.borderless td, table.borderless th { - /* Override padding for "table.docutils td" with "! important". - The right padding separates the table cells. */ - padding: 0 0.5em 0 0 ! important } - -.first { - /* Override more specific margin styles with "! important". */ - margin-top: 0 ! important } - -.last, .with-subtitle { - margin-bottom: 0 ! important } - -.hidden { - display: none } - -a.toc-backref { - text-decoration: none ; - color: black } - -blockquote.epigraph { - margin: 2em 5em ; } - -dl.docutils dd { - margin-bottom: 0.5em } - -/* Uncomment (and remove this text!) to get bold-faced definition list terms -dl.docutils dt { - font-weight: bold } -*/ - -div.abstract { - margin: 2em 5em } - -div.abstract p.topic-title { - font-weight: bold ; - text-align: center } - -div.admonition, div.attention, div.caution, div.danger, div.error, -div.hint, div.important, div.note, div.tip, div.warning { - margin: 2em ; - border: medium outset ; - padding: 1em } - -div.admonition p.admonition-title, div.hint p.admonition-title, -div.important p.admonition-title, div.note p.admonition-title, -div.tip p.admonition-title { - font-weight: bold ; - font-family: sans-serif } - -div.attention p.admonition-title, div.caution p.admonition-title, -div.danger p.admonition-title, div.error p.admonition-title, -div.warning p.admonition-title { - color: red ; - font-weight: bold ; - font-family: sans-serif } - -/* Uncomment (and remove this text!) to get reduced vertical space in - compound paragraphs. 
-div.compound .compound-first, div.compound .compound-middle { - margin-bottom: 0.5em } - -div.compound .compound-last, div.compound .compound-middle { - margin-top: 0.5em } -*/ - -div.dedication { - margin: 2em 5em ; - text-align: center ; - font-style: italic } - -div.dedication p.topic-title { - font-weight: bold ; - font-style: normal } - -div.figure { - margin-left: 2em ; - margin-right: 2em } - -div.footer, div.header { - clear: both; - font-size: smaller } - -div.line-block { - display: block ; - margin-top: 1em ; - margin-bottom: 1em } - -div.line-block div.line-block { - margin-top: 0 ; - margin-bottom: 0 ; - margin-left: 1.5em } - -div.sidebar { - margin-left: 1em ; - border: medium outset ; - padding: 1em ; - background-color: #ffffee ; - width: 40% ; - float: right ; - clear: right } - -div.sidebar p.rubric { - font-family: sans-serif ; - font-size: medium } - -div.system-messages { - margin: 5em } - -div.system-messages h1 { - color: red } - -div.system-message { - border: medium outset ; - padding: 1em } - -div.system-message p.system-message-title { - color: red ; - font-weight: bold } - -div.topic { - margin: 2em } - -h1.section-subtitle, h2.section-subtitle, h3.section-subtitle, -h4.section-subtitle, h5.section-subtitle, h6.section-subtitle { - margin-top: 0.4em } - -h1.title { - text-align: center } - -h2.subtitle { - text-align: center } - -hr.docutils { - width: 75% } - -img.align-left { - clear: left } - -img.align-right { - clear: right } - -ol.simple, ul.simple { - margin-bottom: 1em } - -ol.arabic { - list-style: decimal } - -ol.loweralpha { - list-style: lower-alpha } - -ol.upperalpha { - list-style: upper-alpha } - -ol.lowerroman { - list-style: lower-roman } - -ol.upperroman { - list-style: upper-roman } - -p.attribution { - text-align: right ; - margin-left: 50% } - -p.caption { - font-style: italic } - -p.credits { - font-style: italic ; - font-size: smaller } - -p.label { - white-space: nowrap } - -p.rubric { - font-weight: bold ; - font-size: larger ; - color: maroon ; - text-align: center } - -p.sidebar-title { - font-family: sans-serif ; - font-weight: bold ; - font-size: larger } - -p.sidebar-subtitle { - font-family: sans-serif ; - font-weight: bold } - -p.topic-title { - font-weight: bold } - -pre.address { - margin-bottom: 0 ; - margin-top: 0 ; - font-family: serif ; - font-size: 100% } - -pre.literal-block, pre.doctest-block { - margin-left: 2em ; - margin-right: 2em ; - background-color: #eeeeee } - -span.classifier { - font-family: sans-serif ; - font-style: oblique } - -span.classifier-delimiter { - font-family: sans-serif ; - font-weight: bold } - -span.interpreted { - font-family: sans-serif } - -span.option { - white-space: nowrap } - -span.pre { - white-space: pre } - -span.problematic { - color: red } - -span.section-subtitle { - /* font-size relative to parent (h1..h6 element) */ - font-size: 80% } - -table.citation { - border-left: solid 1px gray; - margin-left: 1px } - -table.docinfo { - margin: 2em 4em } - -table.docutils { - margin-top: 0.5em ; - margin-bottom: 0.5em } - -table.footnote { - border-left: solid 1px black; - margin-left: 1px } - -table.docutils td, table.docutils th, -table.docinfo td, table.docinfo th { - padding-left: 0.5em ; - padding-right: 0.5em ; - vertical-align: top } - -table.docutils th.field-name, table.docinfo th.docinfo-name { - font-weight: bold ; - text-align: left ; - white-space: nowrap ; - padding-left: 0 } - -h1 tt.docutils, h2 tt.docutils, h3 tt.docutils, -h4 tt.docutils, h5 tt.docutils, h6 tt.docutils { - 
font-size: 100% } - -tt.docutils { - background-color: #eeeeee } - -ul.auto-toc { - list-style-type: none } diff --git a/docs/docs.css b/docs/docs.css deleted file mode 100644 index 3d99c950..00000000 --- a/docs/docs.css +++ /dev/null @@ -1,109 +0,0 @@ -/* -Stylesheet for use with Docutils. - -Customized for PyGreSQL docs. -*/ - -@import url(default.css); - -body { - margin: 8pt; - padding: 8pt; - background-color: #f8f8ff; - color: #000008; - text-align: justify; - font-family: Arial, Verdana, Helvetica, sans-serif; - font-size: 11pt; } - -a { - text-decoration: none; } - -a:hover { - text-decoration: underline; } - -.title, .subtitle { - color: #003; } - -.topic-title { - color: #006; - font-size: 14pt; } - -h1, h2, h3, h4 { - color: #006; } - -h1 { - padding-top: 20pt; - font-size: 17pt; } - -div#pygresql-changelog div.section h1 { - font-size: 12pt; -} - -h1.title { - font-size: 20pt; -} - -h2 { - font-size: 14pt; } - -h2.subtitle { - font-size: 16pt; -} - -h3 { - font-size: 13pt; } - -h4 { - font-size: 12pt; } - -a.toc-backref { - color: #006; } - -ul.simple li { - margin-top: 4pt; } - -ul.simple ul li { - margin-top: 2pt; } - -div.contents ul { - list-style-type: none; } - -div.contents ul li { - margin-top: 4pt; - font-size: 12pt; } - -div.contents ul ul li { - margin-top: 2pt; - font-size: 11pt; } - -cite { - font-style: normal; - font-family: monospace; - font-weight: bold; } - -table.field-list th.field-name { - font-style: normal; - font-family: monospace; - font-weight: bold; } - -tt.literal, pre.literal-block { - font-style: normal; - font-family: monospace; - font-weight: bold; - background-color: #fff; } - -tt.literal { - padding-left: 2pt; - padding-right: 2pt; } - -pre.literal-block { - padding: 4pt; - border: 1px dotted #ccc; } - -table.docutils { - border-spacing: 0px; - border-collapse: collapse; } - -table.docutils td { - margin: 0px; - padding: 2pt; } diff --git a/docs/download/download.rst b/docs/download/download.rst new file mode 100644 index 00000000..0219d13c --- /dev/null +++ b/docs/download/download.rst @@ -0,0 +1,27 @@ +Current PyGreSQL versions +------------------------- + +You can find PyGreSQL on the **Python Package Index** at + * http://pypi.python.org/pypi/PyGreSQL/ + +The **released version of the source code** is available at + * http://pygresql.org/files/PyGreSQL.tar.gz +You can also check the latest **pre-release version** at + * http://pygresql.org/files/PyGreSQL-beta.tar.gz +A **Linux RPM** can be picked up from + * http://pygresql.org/files/pygresql.i386.rpm +A **NetBSD package** is available in their pkgsrc collection + * ftp://ftp.netbsd.org/pub/NetBSD/packages/pkgsrc/databases/py-postgresql/README.html +A **FreeBSD package** is available in their ports collection + * http://www.freebsd.org/cgi/cvsweb.cgi/ports/databases/py-PyGreSQL/ +An **openSUSE package** is available through their build service at + * https://software.opensuse.org/package/PyGreSQL?search_term=pygresql +A **Win32 installer** for Python 2.6 and 2.7 is available at + * http://pygresql.org/files/PyGreSQL-4.2.2.win-amd64-py2.6.exe + * http://pygresql.org/files/PyGreSQL-4.2.2.win-amd64-py2.7.exe + +Older PyGreSQL versions +----------------------- + +You can look for older PyGreSQL versions at + * http://pygresql.org/files/ diff --git a/docs/download/files.rst b/docs/download/files.rst new file mode 100644 index 00000000..2ebcff38 --- /dev/null +++ b/docs/download/files.rst @@ -0,0 +1,27 @@ +Distribution files +------------------ + +========== = + +pgmodule.c the C Python module 
(_pg) +pgfs.h PostgreSQL definitions for large objects +pgtypes.h PostgreSQL type definitions +pg.py the "classic" PyGreSQL module +pgdb.py a DB-SIG DB-API 2.0 compliant API wrapper for PygreSQL + +setup.py the Python setup script + + To install PyGreSQL, you can run "python setup.py install". + +setup.cfg the Python setup configuration + +docs/ documentation directory + + The documentation has been created with Sphinx. + All text files are in ReST format; a HTML version of + the documentation can be created with the command + "make html" or "gmake html". + +tests/ a suite of unit tests for PyGreSQL + +========== = diff --git a/docs/download/index.rst b/docs/download/index.rst new file mode 100644 index 00000000..c4735826 --- /dev/null +++ b/docs/download/index.rst @@ -0,0 +1,24 @@ +Download information +==================== + +.. include:: download.rst + +News, Changes and Future Development +------------------------------------ + +See the :doc:`../announce` for current news. + +For a list of all changes in the current version |version| +and in past versions, have a look at the :doc:`../contents/changelog`. + +The section on :doc:`../community/index` lists ideas for +future developments and ways to participate. + +Installation +------------ + +Please read the chapter on :doc:`../contents/install` in our documentation. + +.. include:: files.rst + +.. include:: ../community/homes.rst \ No newline at end of file diff --git a/docs/favicon.ico b/docs/favicon.ico deleted file mode 100644 index 89522b71..00000000 Binary files a/docs/favicon.ico and /dev/null differ diff --git a/docs/future.rst b/docs/future.rst deleted file mode 100644 index 26a3310c..00000000 --- a/docs/future.rst +++ /dev/null @@ -1,57 +0,0 @@ -PyGreSQL future directions -========================== - -This list has been closed since tasks are now managed with the PyGreSQL -tracker that can be found at http://trac.vex.net:8000/pgtracker. -(ticket numbers have been added below): - -To Do ------ - -- Add docs for the pgdb module (everything specific to PyGreSQL) (#5). -- The large object and direct access functions need much more attention (#6). -- The fetch method should use real cursors (#7). -- The C module needs to be cleaned up and redundant code merged, - and should get its own unit test module (#8). -- Clean up test_pg.py and merge it with TEST_PyGreSQL_classic.py (#9). -- The test suite for the classic module should also check that quoted - mixed-case identifiers can be used everywhere - currently they can't. - Improve pg.py accordingly, adding quotes etc. as needed (#10). -- What shall we do with the "tutorial" directory - it's rather a tutorial - for Postgres/SQL than for PyGreSQL, it's using only the query method from - the classic pg module and no other PyGreSQL functionality, it's rather - a demo than a tutorial (#11)? - -Proposed Patches ----------------- - -- Notice handling with PQsetNoticeReceiver and PQsetNoticeProcessor - (one possible implementation was already suggested by Dmitry Dvoinikov - https://mail.vex.net/mailman/private.cgi/pygresql/2005-November/001530.html). - Maybe also make notifications accessible via the optional cursor and - connection attribute "messages" proposed in the DB-API specs (#12). - -Wish List ---------- - -- Make SQLSTATE error codes available (#13). -- Support the new listen/notify infrastructure of PostgreSQL 9.0 (#15). -- Make use of PQexecParams() and PQprepare(). 
This could speed up - executemany() and allow retrieving binary data directly by setting - the resultFormat parameter to one (#16). -- Enhance cursor.description attribute, delivering more information - available with PQfmod() or PQftable() for instance (#17). -- Support optional "errorhandler" extension (#18). -- Support optional cursor and connection attribute "messages" (#19). -- Connection as context manager (see http://tinyurl.com/32bx6xo) (#20). -- Users should be able to register their own types with _pg (#21). -- Let pg and pgdb support namedtuples (as available in Py 2.6). - pg could get a new method namedresult(), and pgdb could provide - a row factory for namedtuples (similar to sqlite3) (#22). -- New methods in the classic module, similar to getresult() and - dictresult(), but returning dictionaries of rows instead of lists - of rows (with primary key or oids as keys) (#23). -- Make PyGreSQL thread-safe on the connection level (#24). -- The API documentation could be created with Epydoc or Sphinx (#4). -- Write a tutorial for beginners and advanced use (#11). -- More and better documented examples (#4, #5, #11). diff --git a/docs/future.txt b/docs/future.txt deleted file mode 100644 index 7cf5a917..00000000 --- a/docs/future.txt +++ /dev/null @@ -1,46 +0,0 @@ -========================== -PyGreSQL future directions -========================== - -This list has been closed since tasks are now managed with the PyGreSQL -tracker that can be found at http://trac.vex.net:8000/pgtracker. -(ticket numbers have been added below): - -To Do ------ - -- Add docs for the pgdb module (everything specific to PyGreSQL) (#5). -- The large object and direct access functions need much more attention (#6). -- The fetch method should use real cursors (#7). -- The C module needs to be cleaned up and redundant code merged, - and should get its own unit test module (#8). -- Clean up test_pg.py and merge it with TEST_PyGreSQL_classic.py (#9). -- The test suite for the classic module should also check that quoted - mixed-case identifiers can be used everywhere - currently they can't. - Improve pg.py accordingly, adding quotes etc. as needed (#10). -- What shall we do with the "tutorial" directory - it's rather a tutorial - for Postgres/SQL than for PyGreSQL, it's using only the query method from - the classic pg module and no other PyGreSQL functionality, it's rather - a demo than a tutorial (#11)? - -Wish List ---------- - -- Make use of PQexecParams() and PQprepare(). This could speed up - executemany() and allow retrieving binary data directly by setting - the resultFormat parameter to one (#16). -- Enhance cursor.description attribute, delivering more information - available with PQfmod() or PQftable() for instance (#17). -- Support optional "errorhandler" extension (#18). -- Support optional cursor and connection attribute "messages" (#19). -- Users should be able to register their own types with _pg (#21). -- pg has now got a new method namedresult() that returns named tuples. - pgdb should also support named tuples, maybe via a row_factory attribute - on the connection, similar to sqlite3 (see #22). -- New methods in the classic module, similar to getresult() and - dictresult(), but returning dictionaries of rows instead of lists - of rows (with primary key or oids as keys) (#23). -- Make PyGreSQL thread-safe on the connection level (#24). -- The API documentation could be created with Epydoc or Sphinx (#4). -- Write a tutorial for beginners and advanced use (#11). 
-- More and better documented examples (#4, #5, #11). diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index 1b0724dd..00000000 --- a/docs/index.html +++ /dev/null @@ -1,183 +0,0 @@ - - - - - PyGreSQL - PostgreSQL module for Python - - - - - - - - -
-PyGreSQL
-Version 4.2
-:: PostgreSQL module for Python ::
-PyGreSQL – PostgreSQL module for Python
-PyGreSQL is an open-source Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow easy use of the powerful PostgreSQL features from a Python script.
-This software is copyright © 1995, Pascal Andre. Further modifications are copyright © 1997-2008 by D'Arcy J.M. Cain. Further modifications are copyright © 2009-2012 by the PyGreSQL team.
-See the copyright notice for detailed information.
-Documentation
-The following information is also available in the docs folder of the distribution:
-SVN Access
-The SVN repository can be checked out from svn://svn.PyGreSQL.org/pygresql. It is also available through the online SVN repository.
-Mailing list
-You can join the mailing list to discuss future development of the PyGreSQL interface. This is usually a low volume list except when there are new features being added.
-Examples
-I am starting to collect examples of applications that use PyGreSQL. So far I only have a few but if you have an example for me, you can either send me the files or the URL for me to point to.
-Here is a list of motorcycle rides in Ontario that uses a PostgreSQL database to store the rides. There is a link at the bottom of the page to view the source code.
-Oleg Broytmann has written a simple example RGB database demo.
- - - - diff --git a/docs/index.rst b/docs/index.rst index a29bb76f..5166896a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,31 +1,15 @@ -.. PyGreSQL documentation master file, created by - sphinx-quickstart on Thu Nov 1 07:47:06 2012. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. +.. PyGreSQL index page without toc (for use with cloud theme) -PyGreSQL - PostgreSQL module for Python -======================================= - -Contents: +Welcome to PyGreSQL +=================== .. toctree:: - :maxdepth: 1 - - introduction - copyright - changelog - install - interface - classic - db_api - svn - mailinglist - future - examples + :hidden: -Indices and tables -================== + copyright + announce + download/index + contents/index + community/index -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` +.. include:: about.txt \ No newline at end of file diff --git a/docs/install.txt b/docs/install.txt deleted file mode 100644 index eb85bf0a..00000000 --- a/docs/install.txt +++ /dev/null @@ -1,184 +0,0 @@ -===================== -PyGreSQL Installation -===================== - -.. sectnum:: -.. contents:: Contents - - -General -======= - -You must first have installed Python and PostgreSQL on your system. -If you want to access remote database only, you don't need to install -the full PostgreSQL server, but only the C interface (libpq). If you -are on Windows, make sure that the directory with libpq.dll is in your -``PATH`` environment variable. - -The current version of PyGreSQL has been tested with Python 2.7 and -PostGreSQL 9.2. Older version should work as well, but you will need -at least Python 2.5 and PostgreSQL 8.3. - -PyGreSQL will be installed as three modules, a dynamic module called -_pg.pyd, and two pure Python wrapper modules called pg.py and pgdb.py. -All three files will be installed directly into the Python site-packages -directory. To uninstall PyGreSQL, simply remove these three files again. - - -Installing from a Binary Distribution -===================================== - -This is the easiest way to install PyGreSQL. - -You can currently download PyGreSQL as Linux RPM, NetBSD package and Windows -installer. Make sure the required Python version of the binary package matches -the Python version you have installed. - -Install the package as usual on your system. - -Note that the documentation is currently only included in the source package. - - -Installing from Source -====================== - -If you want to install PyGreSQL from Source, or there is no binary -package available for your platform, follow these instructions. - -Make sure the Python header files and PostgreSQL client and server header -files are installed. These come usually with the "devel" packages on Unix -systems and the installer executables on Windows systems. - -If you are using a precompiled PostgreSQL, you will also need the pg_config -tool. This is usually also part of the "devel" package on Unix, and will be -installed as part of the database server feature on Windows systems. - -Building and installing with Distutils --------------------------------------- - -You can build and install PyGreSQL using -`Distutils `_. - -Download and unpack the PyGreSQL source tarball if you haven't already done so. 
- -Type the following commands to build and install PyGreSQL:: - - python setup.py build - python setup.py install - -If you are using `MinGW `_ to build PyGreSQL under -Microsoft Windows, please note that Python newer version 2.3 is using msvcr71 -instead of msvcrt as its common runtime library. You can allow for that by -editing the file ``%MinGWpath%/lib/gcc/%MinGWversion%/specs`` and changing -the entry that reads ``-lmsvcrt`` to ``-lmsvcr71``. You may also need to copy -``libpq.lib`` to ``libpq.a`` in the PostgreSQL ``lib`` directory. Then use -the following command to build and install PyGreSQL:: - - python setup.py build -c mingw32 install - -Now you should be ready to use PyGreSQL. - -Compiling Manually ------------------- - -The source file for compiling the dynamic module is called pgmodule.c. -You have two options. You can compile PyGreSQL as a stand-alone module -or you can build it into the Python interpreter. - -Stand-Alone ------------ - -* In the directory containing ``pgmodule.c``, run the following command:: - - cc -fpic -shared -o _pg.so -I$PYINC -I$PGINC -I$PSINC -L$PGLIB -lpq pgmodule.c - - where you have to set:: - - PYINC = path to the Python include files - (usually something like /usr/include/python) - PGINC = path to the PostgreSQL client include files - (something like /usr/include/pgsql or /usr/include/postgresql) - PSINC = path to the PostgreSQL server include files - (like /usr/include/pgsql/server or /usr/include/postgresql/server) - PGLIB = path to the PostgreSQL object code libraries (usually /usr/lib) - - If you are not sure about the above paths, try something like:: - - PYINC=`find /usr -name Python.h` - PGINC=`find /usr -name libpq-fe.h` - PSINC=`find /usr -name postgres.h` - PGLIB=`find /usr -name libpq.so` - - If you have the ``pg_config`` tool installed, you can set:: - - PGINC=`pg_config --includedir` - PSINC=`pg_config --includedir-server` - PGLIB=`pg_config --libdir` - - Some options may be added to this line:: - - -DNO_DEF_VAR no default variables support - -DNO_DIRECT no direct access methods - -DNO_LARGE no large object support - -DNO_PQSOCKET if running an older PostgreSQL - - On some systems you may need to include ``-lcrypt`` in the list of libraries - to make it compile. - -* Test the new module. Something like the following should work:: - - $ python - - >>> import _pg - >>> db = _pg.connect('thilo','localhost') - >>> db.query("INSERT INTO test VALUES ('ping','pong')") - 18304 - >>> db.query("SELECT * FROM test") - eins|zwei - ----+---- - ping|pong - (1 row) - -* Finally, move the ``_pg.so``, ``pg.py``, and ``pgdb.py`` to a directory in - your ``PYTHONPATH``. A good place would be ``/usr/lib/python/site-packages`` - if your Python modules are in ``/usr/lib/python``. - -Built-in to Python interpreter ------------------------------- - -* Find the directory where your ``Setup`` file lives (usually in the ``Modules`` - subdirectory) in the Python source hierarchy and copy or symlink the - ``pgmodule.c`` file there. 
- -* Add the following line to your 'Setup' file:: - - _pg pgmodule.c -I$PGINC -I$PSINC -L$PGLIB -lpq - - where:: - - PGINC = path to the PostgreSQL client include files (see above) - PSINC = path to the PostgreSQL server include files (see above) - PGLIB = path to the PostgreSQL object code libraries (see above) - - Some options may be added to this line:: - - -DNO_DEF_VAR no default variables support - -DNO_DIRECT no direct access methods - -DNO_LARGE no large object support - -DNO_PQSOCKET if running an older PostgreSQL (see above) - - On some systems you may need to include ``-lcrypt`` in the list of libraries - to make it compile. - -* If you want a shared module, make sure that the ``shared`` keyword is - uncommented and add the above line below it. You used to need to install - your shared modules with ``make sharedinstall`` but this no longer seems - to be true. - -* Copy ``pg.py`` to the lib directory where the rest of your modules are. - For example, that's ``/usr/local/lib/Python`` on my system. - -* Rebuild Python from the root directory of the Python source hierarchy by - running ``make -f Makefile.pre.in boot`` and ``make && make install``. - -* For more details read the documentation at the top of ``Makefile.pre.in``. diff --git a/docs/interface.rst b/docs/interface.rst deleted file mode 100644 index f72a9259..00000000 --- a/docs/interface.rst +++ /dev/null @@ -1,17 +0,0 @@ -Programming Interface -===================== - -You may either choose to use the `"classic" PyGreSQL interface `_ -provided by the `pg` module or else the -`DB-API 2.0 compliant interface `_ provided by the `pgdb` module. - -`DB-API 2.0 `_ -(Python Database API Specification v2.0) -is a specification for connecting to databases (not only PostGreSQL) -from Python that has been developed by the Python DB-SIG in 1999. - -The authoritative programming information for the DB-API is availabe at - http://www.python.org/dev/peps/pep-0249/ - -A tutorial-like introduction to the DB-API can be found at - http://www2.linuxjournal.com/lj-issues/issue49/2605.html diff --git a/docs/mailinglist.rst b/docs/mailinglist.rst deleted file mode 100644 index 61cf875b..00000000 --- a/docs/mailinglist.rst +++ /dev/null @@ -1,8 +0,0 @@ -Mailing list -============ - -You can join -`the mailing list `_ -to discuss future development of the PyGreSQL interface. -This is usually a low volume list except when there are new features -being added. diff --git a/docs/make.bat b/docs/make.bat index d6fa8189..b8571b60 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -33,8 +33,11 @@ if "%1" == "help" ( echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items + echo. xml to make Docutils-native XML files + echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled + echo. coverage to run coverage check of the documentation if enabled goto end ) @@ -44,6 +47,31 @@ if "%1" == "clean" ( goto end ) + +REM Check if sphinx-build is available and fallback to Python version if any +%SPHINXBUILD% 1>NUL 2>NUL +if errorlevel 9009 goto sphinx_python +goto sphinx_ok + +:sphinx_python + +set SPHINXBUILD=python -m sphinx.__init__ +%SPHINXBUILD% 2> nul +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +:sphinx_ok + + if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 @@ -129,6 +157,26 @@ if "%1" == "latex" ( goto end ) +if "%1" == "latexpdf" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf + cd %~dp0 + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + +if "%1" == "latexpdfja" ( + %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex + cd %BUILDDIR%/latex + make all-pdf-ja + cd %~dp0 + echo. + echo.Build finished; the PDF files are in %BUILDDIR%/latex. + goto end +) + if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 @@ -187,4 +235,29 @@ results in %BUILDDIR%/doctest/output.txt. goto end ) +if "%1" == "coverage" ( + %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage + if errorlevel 1 exit /b 1 + echo. + echo.Testing of coverage in the sources finished, look at the ^ +results in %BUILDDIR%/coverage/python.txt. + goto end +) + +if "%1" == "xml" ( + %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The XML files are in %BUILDDIR%/xml. + goto end +) + +if "%1" == "pseudoxml" ( + %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml + if errorlevel 1 exit /b 1 + echo. + echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. + goto end +) + :end diff --git a/docs/pg.txt b/docs/pg.txt deleted file mode 100644 index cde55f33..00000000 --- a/docs/pg.txt +++ /dev/null @@ -1,1507 +0,0 @@ -================================ -PyGreSQL Programming Information -================================ - ------------------------------------------- -The classic PyGreSQL interface (pg module) ------------------------------------------- - -.. meta:: - :description: The classic PyGreSQL interface (pg module) - :keywords: PyGreSQL, pg, PostGreSQL, Python - -.. sectnum:: -.. contents:: Contents - - -Introduction -============ -You may either choose to use the -`"classic" PyGreSQL interface `_ -provided by the `pg` module or else the -`DB-API 2.0 compliant interface `_ -provided by the `pgdb` module. - -The following documentation covers only the older `pg` API. - -The `pg` module handles three types of objects, - -- the `pgobject`, which handles the connection - and all the requests to the database, -- the `pglarge` object, which handles - all the accesses to PostgreSQL large objects, -- the `pgqueryobject` that handles query results - -and it provides a convenient wrapper class `DB` for the `pgobject`. - -If you want to see a simple example of the use of some of these functions, -see http://ontario.bikerides.ca where you can find a link at the bottom to the -actual Python code for the page. - - -Module functions and constants -============================== -The `pg` module defines a few functions that allow to connect -to a database and to define "default variables" that override -the environment variables used by PostgreSQL. - -These "default variables" were designed to allow you to handle general -connection parameters without heavy code in your programs. 
You can prompt the -user for a value, put it in the default variable, and forget it, without -having to modify your environment. The support for default variables can be -disabled by setting the -DNO_DEF_VAR option in the Python setup file. Methods -relative to this are specified by the tag [DV]. - -All variables are set to `None` at module initialization, specifying that -standard environment variables should be used. - -connect - opens a pg connection -------------------------------- -Syntax:: - - connect([dbname], [host], [port], [opt], [tty], [user], [passwd]) - -Parameters: - :dbname: name of connected database (string/None) - :host: name of the server host (string/None) - :port: port used by the database server (integer/-1) - :opt: connection options (string/None) - :tty: debug terminal (string/None) - :user: PostgreSQL user (string/None) - :passwd: password for user (string/None) - -Return type: - :pgobject: If successful, the `pgobject` handling the connection - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - :SyntaxError: duplicate argument definition - :pg.InternalError: some error occurred during pg connection definition - - (plus all exceptions relative to object allocation) - -Description: - This function opens a connection to a specified database on a given - PostgreSQL server. You can use keywords here, as described in the - Python tutorial. The names of the keywords are the name of the - parameters given in the syntax line. For a precise description - of the parameters, please refer to the PostgreSQL user manual. - -Examples:: - - import pg - - con1 = pg.connect('testdb', 'myhost', 5432, None, None, 'bob', None) - con2 = pg.connect(dbname='testdb', host='localhost', user='bob') - -get_defhost, set_defhost - default server host [DV] ---------------------------------------------------- -Syntax:: - - get_defhost() - -Parameters: - None - -Return type: - :string, None: default host specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default host specification, - or `None` if the environment variables should be used. - Environment variables won't be looked up. - -Syntax:: - - set_defhost(host) - -Parameters: - :host: new default host (string/None) - -Return type: - :string, None: previous default host specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This methods sets the default host value for new connections. - If `None` is supplied as parameter, environment variables will - be used in future connections. It returns the previous setting - for default host. - -get_defport, set_defport - default server port [DV] ---------------------------------------------------- -Syntax:: - - get_defport() - -Parameters: - None - -Return type: - :integer, None: default port specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default port specification, - or `None` if the environment variables should be used. - Environment variables won't be looked up. - -Syntax:: - - set_defport(port) - -Parameters: - :port: new default port (integer/-1) - -Return type: - :integer, None: previous default port specification - -Description: - This methods sets the default port value for new connections. If -1 is - supplied as parameter, environment variables will be used in future - connections. It returns the previous setting for default port. 
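To illustrate how these default variables are meant to be combined with `connect()`, here is a minimal sketch (the host and database names are invented)::

    import pg

    pg.set_defhost('dbserver.example.com')  # default host for new connections
    pg.set_defbase('testdb')                # default database name
    con = pg.connect()                      # no arguments: the defaults set above are used
    con.close()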
- -get_defopt, set_defopt - default connection options [DV] --------------------------------------------------------- -Syntax:: - - get_defopt() - -Parameters: - None - -Return type: - :string, None: default options specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default connection options specification, - or `None` if the environment variables should be used. Environment variables - won't be looked up. - -Syntax:: - - set_defopt(options) - -Parameters: - :options: new default connection options (string/None) - -Return type: - :string, None: previous default options specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This methods sets the default connection options value for new connections. - If `None` is supplied as parameter, environment variables will be used in - future connections. It returns the previous setting for default options. - -get_deftty, set_deftty - default debug tty [DV] ------------------------------------------------ -Syntax:: - - get_deftty() - -Parameters: - None - -Return type: - :string, None: default debug terminal specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default debug terminal specification, or - `None` if the environment variables should be used. Environment variables - won't be looked up. - -Syntax:: - - set_deftty(terminal) - -Parameters: - :terminal: new default debug terminal (string/None) - -Return type: - :string, None: previous default debug terminal specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This methods sets the default debug terminal value for new connections. If - `None` is supplied as parameter, environment variables will be used in future - connections. It returns the previous setting for default terminal. - -get_defbase, set_defbase - default database name [DV] ------------------------------------------------------ -Syntax:: - - get_defbase() - -Parameters: - None - -Return type: - :string, None: default database name specification - -Exceptions raised: - :TypeError: too many arguments - -Description: - This method returns the current default database name specification, or - `None` if the environment variables should be used. Environment variables - won't be looked up. - -Syntax:: - - set_defbase(base) - -Parameters: - :base: new default base name (string/None) - -Return type: - :string, None: previous default database name specification - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This method sets the default database name value for new connections. If - `None` is supplied as parameter, environment variables will be used in - future connections. It returns the previous setting for default host. - -escape_string - escape a string for use within SQL --------------------------------------------------- -Syntax:: - - escape_string(string) - -Parameters: - :string: the string that is to be escaped - -Return type: - :str: the escaped string - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - This function escapes a string for use within an SQL command. - This is useful when inserting data values as literal constants - in SQL commands. Certain characters (such as quotes and backslashes) - must be escaped to prevent them from being interpreted specially - by the SQL parser. 
`escape_string` performs this operation. - Note that there is also a `pgobject` method with the same name - which takes connection properties into account. - -.. caution:: It is especially important to do proper escaping when - handling strings that were received from an untrustworthy source. - Otherwise there is a security risk: you are vulnerable to "SQL injection" - attacks wherein unwanted SQL commands are fed to your database. - -Example:: - - name = raw_input("Name? ") - phone = con.query("select phone from employees" - " where name='%s'" % escape_string(name)).getresult() - -escape_bytea - escape binary data for use within SQL as type `bytea` --------------------------------------------------------------------- -Syntax:: - - escape_bytea(datastring) - -Parameters: - :datastring: string containing the binary data that is to be escaped - -Return type: - :str: the escaped string - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - Escapes binary data for use within an SQL command with the type `bytea`. - As with `escape_string`, this is only used when inserting data directly - into an SQL command string. - Note that there is also a `pgobject` method with the same name - which takes connection properties into account. - -Example:: - - picture = file('garfield.gif', 'rb').read() - con.query("update pictures set img='%s' where name='Garfield'" - % escape_bytea(picture)) - -unescape_bytea -- unescape `bytea` data that has been retrieved as text ------------------------------------------------------------------------ -Syntax:: - - unescape_bytea(string) - -Parameters: - :datastring: the `bytea` data string that has been retrieved as text - -Return type: - :str: string containing the binary data - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - -Description: - Converts an escaped string representation of binary data into binary - data - the reverse of `escape_bytea`. This is needed when retrieving - `bytea` data with the `getresult()` or `dictresult()` method. - -Example:: - - picture = unescape_bytea(con.query( - "select img from pictures where name='Garfield'").getresult[0][0]) - file('garfield.gif', 'wb').write(picture) - -set_decimal -- set a decimal type to be used for numeric values ---------------------------------------------------------------- -Syntax:: - - set_decimal(cls) - -Parameters: - :cls: the Python class to be used for PostgreSQL numeric values - -Description: - This function can be used to specify the Python class that shall be - used by PyGreSQL to hold PostgreSQL numeric values. The default class - is decimal.Decimal if available, otherwise the float type is used. - -set_namedresult -- set a function that will convert to named tuples -------------------------------------------------------------------- -Syntax:: - - set_namedresult(func) - -Parameters: - :func: the function to be used to convert results to named tuples - -Description: - You can use this if you want to create different kinds of named tuples. - - -Module constants ----------------- -Some constants are defined in the module dictionary. -They are intended to be used as parameters for methods calls. -You should refer to the libpq description in the PostgreSQL user manual -for more information about them. These constants are: - -:version, __version__: constants that give the current version. 
-:INV_READ, INV_WRITE: large objects access modes, - used by `(pgobject.)locreate` and `(pglarge.)open` -:SEEK_SET, SEEK_CUR, SEEK_END: positional flags, - used by `(pglarge.)seek` - - -Connection objects: pgobject -============================ -This object handles a connection to a PostgreSQL database. It embeds and -hides all the parameters that define this connection, thus just leaving really -significant parameters in function calls. - -.. caution:: Some methods give direct access to the connection socket. - *Do not use them unless you really know what you are doing.* - If you prefer disabling them, - set the -DNO_DIRECT option in the Python setup file. - - **These methods are specified by the tag [DA].** - -.. note:: Some other methods give access to large objects - (refer to PostgreSQL user manual for more information about these). - If you want to forbid access to these from the module, - set the -DNO_LARGE option in the Python setup file. - - **These methods are specified by the tag [LO].** - -query - executes a SQL command string -------------------------------------- -Syntax:: - - query(command, [args]) - -Parameters: - :command: SQL command (string) - :args: optional positional arguments - -Return type: - :pgqueryobject, None: result values - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - :TypeError: invalid connection - :ValueError: empty SQL query or lost connection - :pg.ProgrammingError: error in query - :pg.InternalError: error during query processing - -Description: - This method simply sends a SQL query to the database. If the query is an - insert statement that inserted exactly one row into a table that has OIDs, the - return value is the OID of the newly inserted row. If the query is an update - or delete statement, or an insert statement that did not insert exactly one - row in a table with OIDs, then the numer of rows affected is returned as a - string. If it is a statement that returns rows as a result (usually a select - statement, but maybe also an "insert/update ... returning" statement), this - method returns a `pgqueryobject` that can be accessed via the `getresult()`, - `dictresult()` or `namedresult()` methods or simply printed. Otherwise, it - returns `None`. - - The query may optionally contain positional parameters of the form `$1`, - `$2`, etc instead of literal data, and the values supplied as a tuple. - The values are substituted by the database in such a way that they don't - need to be escaped, making this an effective way to pass arbitrary or - unknown data without worrying about SQL injection or syntax errors. - - When the database could not process the query, a `pg.ProgrammingError` or - a `pg.InternalError` is raised. You can check the "SQLSTATE" code of this - error by reading its `sqlstate` attribute. - -Example:: - - name = raw_input("Name? ") - phone = con.query("select phone from employees" - " where name=$1", (name, )).getresult() - -reset - resets the connection ------------------------------ -Syntax:: - - reset() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: too many (any) arguments - :TypeError: invalid connection - -Description: - This method resets the current database connection. 
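To make the error handling described for `query()` above more concrete, here is a small sketch (the table and value are the invented ones used in the examples above)::

    import pg

    con = pg.connect(dbname='testdb')
    try:
        con.query("select phone from employees where name=$1", ('Alice',))
    except pg.ProgrammingError, error:
        # the server's SQLSTATE code, as mentioned in the query description
        print error.sqlstate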
- -cancel - abandon processing of current SQL command --------------------------------------------------- -Syntax:: - - cancel() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: too many (any) arguments - :TypeError: invalid connection - -Description: - This method requests that the server abandon processing - of the current SQL command. - -close - close the database connection -------------------------------------- -Syntax:: - - close() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: too many (any) arguments - -Description: - This method closes the database connection. The connection will - be closed in any case when the connection is deleted but this - allows you to explicitly close it. It is mainly here to allow - the DB-SIG API wrapper to implement a close function. - -fileno - returns the socket used to connect to the database ------------------------------------------------------------ -Syntax:: - - fileno() - -Parameters: - None - -Exceptions raised: - :TypeError: too many (any) arguments - :TypeError: invalid connection - -Description: - This method returns the underlying socket id used to connect - to the database. This is useful for use in select calls, etc. - -getnotify - gets the last notify from the server ------------------------------------------------- -Syntax:: - - getnotify() - -Parameters: - None - -Return type: - :tuple, None: last notify from server - -Exceptions raised: - :TypeError: too many parameters - :TypeError: invalid connection - -Description: - This methods try to get a notify from the server (from the SQL statement - NOTIFY). If the server returns no notify, the methods returns None. - Otherwise, it returns a tuple (triplet) `(relname, pid, extra)`, where - `relname` is the name of the notify, `pid` is the process id of the - connection that triggered the notify, and `extra` is a payload string - that has been sent with the notification. Remember to do a listen query - first, otherwise getnotify() will always return `None`. - -inserttable - insert a list into a table ----------------------------------------- -Syntax:: - - inserttable(table, values) - -Parameters: - :table: the table name (string) - :values: list of rows values (list) - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad argument type, or too many arguments - :MemoryError: insert buffer could not be allocated - :ValueError: unsupported values - -Description: - This method allow to *quickly* insert large blocks of data in a table: - It inserts the whole values list into the given table. Internally, it - uses the COPY command of the PostgreSQL database. The list is a list - of tuples/lists that define the values for each inserted row. The rows - values may contain string, integer, long or double (real) values. - -.. caution:: *Be very careful*: - This method doesn't typecheck the fields according to the table definition; - it just look whether or not it knows how to handle such types. - -set_notice_receiver - set a custom notice receiver --------------------------------------------------- -Syntax:: - - set_notice_receiver(proc) - -Parameters: - :proc: the custom notice receiver callback function - -Return type: - None - -Exceptions raised: - :TypeError: the specified notice receiver is not callable - -Description: - This method allows setting a custom notice receiver callback function. 
- When a notice or warning message is received from the server, - or generated internally by libpq, and the message level is below - the one set with `client_min_messages`, the specified notice receiver - function will be called. This function must take one parameter, - the `pgnotice` object, which provides the following read-only attributes: - - :pgcnx: the connection - :message: the full message with a trailing newline - :severity: the level of the message, e.g. 'NOTICE' or 'WARNING' - :primary: the primary human-readable error message - :detail: an optional secondary error message - :hint: an optional suggestion what to do about the problem - -get_notice_receiver - get the current notice receiver ------------------------------------------------------ -Syntax:: - - get_notice_receiver() - -Parameters: - None - -Return type: - :callable, None: the current notice receiver callable - -Exceptions raised: - :TypeError: too many (any) arguments - -Description: - This method gets the custom notice receiver callback function that has - been set with `set_notice_receiver()`, or `None` if no custom notice - receiver has ever been set on the connection. - -putline - writes a line to the server socket [DA] -------------------------------------------------- -Syntax:: - - putline(line) - -Parameters: - :line: line to be written (string) - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - -Description: - This method allows to directly write a string to the server socket. - -getline - gets a line from server socket [DA] ---------------------------------------------- -Syntax:: - - getline() - -Parameters: - None - -Return type: - :string: the line read - -Exceptions raised: - :TypeError: invalid connection - :TypeError: too many parameters - :MemoryError: buffer overflow - -Description: - This method allows to directly read a string from the server socket. - -endcopy - synchronizes client and server [DA] ---------------------------------------------- -Syntax:: - - endcopy() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection - :TypeError: too many parameters - -Description: - The use of direct access methods may desynchonize client and server. - This method ensure that client and server will be synchronized. - -locreate - create a large object in the database [LO] ------------------------------------------------------ -Syntax:: - - locreate(mode) - -Parameters: - :mode: large object create mode - -Return type: - :pglarge: object handling the PostGreSQL large object - -Exceptions raised: - - :TypeError: invalid connection, bad parameter type, or too many parameters - :pg.OperationalError: creation error - -Description: - This method creates a large object in the database. The mode can be defined - by OR-ing the constants defined in the pg module (INV_READ, INV_WRITE and - INV_ARCHIVE). Please refer to PostgreSQL user manual for a description of - the mode values. 
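As a hedged sketch of how `locreate()` might be combined with the large object methods described further below (the data written here is invented)::

    import pg

    con = pg.connect(dbname='testdb')
    large = con.locreate(pg.INV_READ | pg.INV_WRITE)  # create a new large object
    large.open(pg.INV_WRITE)                          # open it for writing
    large.write('some binary data')
    large.close()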
- -getlo - build a large object from given oid [LO] ------------------------------------------------- -Syntax:: - - getlo(oid) - -Parameters: - :oid: OID of the existing large object (integer) - -Return type: - :pglarge: object handling the PostGreSQL large object - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :ValueError: bad OID value (0 is invalid_oid) - -Description: - This method allows to reuse a formerly created large object through the - `pglarge` interface, providing the user have its OID. - -loimport - import a file to a large object [LO] ------------------------------------------------ -Syntax:: - - loimport(name) - -Parameters: - :name: the name of the file to be imported (string) - -Return type: - :pglarge: object handling the PostGreSQL large object - -Exceptions raised: - :TypeError: invalid connection, bad argument type, or too many arguments - :pg.OperationalError: error during file import - -Description: - This methods allows to create large objects in a very simple way. You just - give the name of a file containing the data to be use. - -Object attributes ------------------ -Every `pgobject` defines a set of read-only attributes that describe the -connection and its status. These attributes are: - - :host: the host name of the server (string) - :port: the port of the server (integer) - :db: the selected database (string) - :options: the connection options (string) - :tty: the connection debug terminal (string) - :user: user name on the database system (string) - :protocol_version: the frontend/backend protocol being used (integer) - :server_version: the backend version (integer, e.g. 80305 for 8.3.5) - :status: the status of the connection (integer: 1 - OK, 0 - bad) - :error: the last warning/error message from the server (string) - - -The DB wrapper class -==================== -The `pgobject` methods are wrapped in the class `DB`. -The preferred way to use this module is as follows:: - - import pg - - db = pg.DB(...) # see below - - for r in db.query( # just for example - """SELECT foo,bar - FROM foo_bar_table - WHERE foo !~ bar""" - ).dictresult(): - - print '%(foo)s %(bar)s' % r - -This class can be subclassed as in this example:: - - import pg - - class DB_ride(pg.DB): - """This class encapsulates the database functions and the specific - methods for the ride database.""" - - def __init__(self): - """Opens a database connection to the rides database""" - - pg.DB.__init__(self, dbname = 'ride') - self.query("""SET DATESTYLE TO 'ISO'""") - - [Add or override methods here] - -The following describes the methods and variables of this class. - -Initialization --------------- -The DB class is initialized with the same arguments as the connect -function described in section 2. It also initializes a few -internal variables. The statement `db = DB()` will open the -local database with the name of the user just like connect() does. - -You can also initialize the DB class with an existing `_pg` or `pgdb` -connection. Pass this connection as a single unnamed parameter, or as a -single parameter named `db`. This allows you to use all of the methods -of the DB class with a DB-API 2 compliant connection. Note that the -`close()` and `reopen()` methods are inoperative in this case. 
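A minimal sketch of the different ways to initialize the wrapper class (the connection parameters are invented)::

    import pg

    db = pg.DB(dbname='testdb', host='localhost', user='bob')

    # or wrap an existing classic connection, passed as a single parameter
    con = pg.connect(dbname='testdb')
    db = pg.DB(con)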
- - - -pkey - return the primary key of a table ----------------------------------------- -Syntax:: - - pkey(table) - -Parameters: - :table: name of table - -Return type: - :string: Name of the field which is the primary key of the table - -Description: - This method returns the primary key of a table. For composite primary - keys, the return value will be a frozenset. Note that this raises an - exception if the table does not have a primary key. - -get_databases - get list of databases in the system ---------------------------------------------------- -Syntax:: - - get_databases() - -Parameters: - None - -Return type: - :list: all databases in the system - -Description: - Although you can do this with a simple select, it is added here for - convenience. - -get_relations - get list of relations in connected database ------------------------------------------------------------ -Syntax:: - - get_relations(kinds) - -Parameters: - :kinds: a string or sequence of type letters - -Description: - The type letters are `r` = ordinary table, `i` = index, `S` = sequence, - `v` = view, `c` = composite type, `s` = special, `t` = TOAST table. - If `kinds` is None or an empty string, all relations are returned (this is - also the default). Although you can do this with a simple select, it is - added here for convenience. - -get_tables - get list of tables in connected database ------------------------------------------------------ -Syntax:: - - get_tables() - -Parameters: - None - -Returns: - :list: all tables in connected database - -Description: - Although you can do this with a simple select, it is added here for - convenience. - -get_attnames - get the attribute names of a table -------------------------------------------------- -Syntax:: - - get_attnames(table) - -Parameters: - :table: name of table - -Returns: - :dictionary: The keys are the attribute names, - the values are the type names of the attributes. - -Description: - Given the name of a table, digs out the set of attribute names. - -has_table_privilege - check whether current user has specified table privilege ------------------------------------------------------------------------------- -Syntax:: - - has_table_privilege(table, privilege) - -Parameters: - :table: name of table - :privilege: privilege to be checked - default is 'select' - -Description: - Returns True if the current user has the specified privilege for the table. - -get - get a row from a database table or view ---------------------------------------------- -Syntax:: - - get(table, arg, [keyname]) - -Parameters: - :table: name of table or view - :arg: either a dictionary or the value to be looked up - :keyname: name of field to use as key (optional) - -Return type: - :dictionary: The keys are the attribute names, - the values are the row values. - -Description: - This method is the basic mechanism to get a single row. It assumes - that the key specifies a unique row. If `keyname` is not specified - then the primary key for the table is used. If `arg` is a dictionary - then the value for the key is taken from it and it is modified to - include the new values, replacing existing values where necessary. - For a composite key, `keyname` can also be a sequence of key names. - The OID is also put into the dictionary if the table has one, but in - order to allow the caller to work with multiple tables, it is munged - as `oid(schema.table)`. 
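A short sketch of `get()` in action, reusing the invented "employees" table from the earlier examples::

    emp = db.get('employees', 'Alice', 'name')  # look up by an explicit key field
    print emp['phone']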
- -insert - insert a row into a database table -------------------------------------------- -Syntax:: - - insert(table, [d,] [key = val, ...]) - -Parameters: - :table: name of table - :d: optional dictionary of values - -Return type: - :dictionary: The dictionary of values inserted - -Description: - This method inserts a row into a table. If the optional dictionary is - not supplied then the required values must be included as keyword/value - pairs. If a dictionary is supplied then any keywords provided will be - added to or replace the entry in the dictionary. - - The dictionary is then, if possible, reloaded with the values actually - inserted in order to pick up values modified by rules, triggers, etc. - - Note: The method currently doesn't support insert into views - although PostgreSQL does. - -update - update a row in a database table ------------------------------------------ -Syntax:: - - update(table, [d,] [key = val, ...]) - -Parameters: - :table: name of table - :d: optional dictionary of values - -Return type: - :dictionary: the new row - -Description: - Similar to insert but updates an existing row. The update is based on the - OID value as munged by get or passed as keyword, or on the primary key of - the table. The dictionary is modified, if possible, to reflect any changes - caused by the update due to triggers, rules, default values, etc. - - Like insert, the dictionary is optional and updates will be performed - on the fields in the keywords. There must be an OID or primary key - either in the dictionary where the OID must be munged, or in the keywords - where it can be simply the string "oid". - -query - executes a SQL command string -------------------------------------- -Syntax:: - - query(command, [arg1, [arg2, ...]]) - -Parameters: - :command: SQL command (string) - :arg*: optional positional arguments - -Return type: - :pgqueryobject, None: result values - -Exceptions raised: - :TypeError: bad argument type, or too many arguments - :TypeError: invalid connection - :ValueError: empty SQL query or lost connection - :pg.ProgrammingError: error in query - :pg.InternalError: error during query processing - -Description: - Similar to the pgobject function with the same name, except that positional - arguments can be passed either as a single list or tuple, or as individual - positional arguments - -Example:: - - name = raw_input("Name? ") - phone = raw_input("Phone? " - rows = db.query("update employees set phone=$2" - " where name=$1", (name, phone)).getresult()[0][0] - # or - rows = db.query("update employees set phone=$2" - " where name=$1", name, phone).getresult()[0][0] - -clear - clears row values in memory ------------------------------------ -Syntax:: - - clear(table, [a]) - -Parameters: - :table: name of table - :a: optional dictionary of values - -Return type: - :dictionary: an empty row - -Description: - This method clears all the attributes to values determined by the types. - Numeric types are set to 0, Booleans are set to 'f', dates are set - to 'now()' and everything else is set to the empty string. - If the array argument is present, it is used as the array and any entries - matching attribute names are cleared with everything else left unchanged. - - If the dictionary is not supplied a new one is created. 
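To show how `insert()`, `update()` and `clear()` are typically combined, here is a hedged sketch using the same invented "employees" table::

    row = db.insert('employees', name='John Doe', phone='555-1234')
    # the dictionary now reflects what was actually inserted (defaults, oid, ...)
    row['phone'] = '555-4321'
    row = db.update('employees', row)   # update the same row again
    row = db.clear('employees')         # fresh, empty row for the next insert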
- -delete - delete a row from a database table -------------------------------------------- -Syntax:: - - delete(table, [d,] [key = val, ...]) - -Parameters: - :table: name of table - :d: optional dictionary of values - -Returns: - None - -Description: - This method deletes the row from a table. It deletes based on the OID value - as munged by get or passed as keyword, or on the primary key of the table. - The return value is the number of deleted rows (i.e. 0 if the row did not - exist and 1 if the row was deleted). - -escape_string - escape a string for use within SQL --------------------------------------------------- -Syntax:: - - escape_string(string) - -Parameters: - :string: the string that is to be escaped - -Return type: - :str: the escaped string - -Description: - Similar to the module function with the same name, but the - behavior of this method is adjusted depending on the connection properties - (such as character encoding). - -escape_bytea - escape binary data for use within SQL as type `bytea` --------------------------------------------------------------------- -Syntax:: - - escape_bytea(datastring) - -Parameters: - :datastring: string containing the binary data that is to be escaped - -Return type: - :str: the escaped string - -Description: - Similar to the module function with the same name, but the - behavior of this method is adjusted depending on the connection properties - (in particular, whether standard-conforming strings are enabled). - -unescape_bytea -- unescape `bytea` data that has been retrieved as text ------------------------------------------------------------------------ -Syntax:: - - unescape_bytea(string) - -Parameters: - :datastring: the `bytea` data string that has been retrieved as text - -Return type: - :str: string containing the binary data - -Description: - See the module function with the same name. - - -pgqueryobject methods -===================== - -getresult - get query values as list of tuples ------------------------------------------------ -Syntax:: - - getresult() - -Parameters: - None - -Return type: - :list: result values as a list of tuples - -Exceptions raised: - :TypeError: too many (any) parameters - :MemoryError: internal memory error - -Description: - This method returns the list of the values returned by the query. - More information about this result may be accessed using listfields(), - fieldname() and fieldnum() methods. - -dictresult - get query values as list of dictionaries ------------------------------------------------------ -Syntax:: - - dictresult() - -Parameters: - None - -Return type: - :list: result values as a list of dictionaries - -Exceptions raised: - :TypeError: too many (any) parameters - :MemoryError: internal memory error - -Description: - This method returns the list of the values returned by the query - with each tuple returned as a dictionary with the field names - used as the dictionary index. - -namedresult - get query values as list of named tuples ------------------------------------------------------- -Syntax:: - - namedresult() - -Parameters: - None - -Return type: - :list: result values as a list of named tuples - -Exceptions raised: - :TypeError: too many (any) parameters - :TypeError: named tuples not supported - :MemoryError: internal memory error - -Description: - This method returns the list of the values returned by the query - with each row returned as a named tuple with proper field names. 
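The three result methods can be compared on the same query object; a sketch with invented data::

    q = con.query("select name, phone from employees")
    q.getresult()             # e.g. [('Alice', '555-1234'), ...] as a list of tuples
    q.dictresult()            # e.g. [{'name': 'Alice', 'phone': '555-1234'}, ...]
    q.namedresult()[0].name   # fields of a named tuple are accessed by name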
- -listfields - lists fields names of previous query result --------------------------------------------------------- -Syntax:: - - listfields() - -Parameters: - None - -Return type: - :list: field names - -Exceptions raised: - :TypeError: too many parameters - -Description: - This method returns the list of names of the fields defined for the - query result. The fields are in the same order as the result values. - -fieldname, fieldnum - field name/number conversion --------------------------------------------------- -Syntax:: - - fieldname(i) - -Parameters: - :i: field number (integer) - -Return type: - :string: field name - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :ValueError: invalid field number - -Description: - This method allows to find a field name from its rank number. It can be - useful for displaying a result. The fields are in the same order as the - result values. - -Syntax:: - - fieldnum(name) - -Parameters: - :name: field name (string) - -Return type: - :integer: field number - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :ValueError: unknown field name - -Description: - This method returns a field number from its name. It can be used to - build a function that converts result list strings to their correct - type, using a hardcoded table definition. The number returned is the - field rank in the result values list. - -ntuples - return number of tuples in query object -------------------------------------------------- -Syntax:: - - ntuples() - -Parameters: - None - -Return type: - :integer: number of tuples in `pgqueryobject` - -Exceptions raised: - :TypeError: Too many arguments. - -Description: - This method returns the number of tuples found in a query. - - -Large objects: pglarge -====================== -This object handles all the request concerning a PostgreSQL large object. It -embeds and hides all the "recurrent" variables (object OID and connection), -exactly in the same way `pgobjects` do, thus only keeping significant -parameters in function calls. It keeps a reference to the `pgobject` used for -its creation, sending requests though with its parameters. Any modification but -dereferencing the `pgobject` will thus affect the `pglarge` object. -Dereferencing the initial `pgobject` is not a problem since Python won't -deallocate it before the `pglarge` object dereference it. -All functions return a generic error message on call error, whatever the -exact error was. The `error` attribute of the object allows to get the exact -error message. - -See also the PostgreSQL programmer's guide for more information about the -large object interface. - -open - opens a large object ---------------------------- -Syntax:: - - open(mode) - -Parameters: - :mode: open mode definition (integer) - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :IOError: already opened object, or open error - -Description: - This method opens a large object for reading/writing, in the same way than - the Unix open() function. The mode value can be obtained by OR-ing the - constants defined in the pgmodule (INV_READ, INV_WRITE). 
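A minimal sketch of reading an existing large object (it uses the read,
size and close methods described in the following sections; the connection
parameters and the OID value 12345 are assumptions for illustration only)::

  import pg

  cnx = pg.connect(dbname='test')
  cnx.query('BEGIN')         # large objects must be used inside a transaction
  lo = cnx.getlo(12345)      # wrap an existing large object by its OID
  lo.open(pg.INV_READ)       # open it read-only
  data = lo.read(lo.size())  # read the whole object
  lo.close()
  cnx.query('END')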
- -close - closes a large object ------------------------------ -Syntax:: - - close() - -Parameters: - None - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection - :TypeError: too many parameters - :IOError: object is not opened, or close error - -Description: - This method closes a previously opened large object, in the same way than - the Unix close() function. - -read, write, tell, seek, unlink - file like large object handling ------------------------------------------------------------------ -Syntax:: - - read(size) - -Parameters: - :size: maximal size of the buffer to be read - -Return type: - :sized string: the read buffer - -Exceptions raised: - :TypeError: invalid connection, invalid object, - bad parameter type, or too many parameters - :ValueError: if `size` is negative - :IOError: object is not opened, or read error - -Description: - This function allows to read data from a large object, starting at current - position. - -Syntax:: - - write(string) - -Parameters: - (sized) string - buffer to be written - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection, bad parameter type, or too many parameters - :IOError: object is not opened, or write error - -Description: - This function allows to write data to a large object, starting at current - position. - -Syntax:: - - seek(offset, whence) - -Parameters: - :offset: position offset - :whence: positional parameter - -Return type: - :integer: new position in object - -Exceptions raised: - :TypeError: binvalid connection or invalid object, - bad parameter type, or too many parameters - :IOError: object is not opened, or seek error - -Description: - This method allows to move the position cursor in the large object. The - whence parameter can be obtained by OR-ing the constants defined in the - `pg` module (`SEEK_SET`, `SEEK_CUR`, `SEEK_END`). - -Syntax:: - - tell() - -Parameters: - None - -Return type: - :integer: current position in large object - -Exceptions raised: - :TypeError: invalid connection or invalid object - :TypeError: too many parameters - :IOError: object is not opened, or seek error - -Description: - This method allows to get the current position in the large object. - -Syntax:: - - unlink() - -Parameter: - None - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection or invalid object - :TypeError: too many parameters - :IOError: object is not closed, or unlink error - -Description: - This methods unlinks (deletes) the PostgreSQL large object. - -size - gives the large object size ----------------------------------- - -Syntax:: - - size() - -Parameters: - None - -Return type: - :integer: the large object size - -Exceptions raised: - :TypeError: invalid connection or invalid object - :TypeError: too many parameters - :IOError: object is not opened, or seek/tell error - -Description: - This (composite) method allows to get the size of a large object. It was - implemented because this function is very useful for a web interfaced - database. Currently, the large object needs to be opened first. - -export - saves a large object to a file ---------------------------------------- -Syntax:: - - export(name) - -Parameters: - :name: file to be created - -Return type: - None - -Exceptions raised: - :TypeError: invalid connection or invalid object, - bad parameter type, or too many parameters - :IOError: object is not closed, or export error - -Description: - This methods allows to dump the content of a large object in a very simple - way. 
The exported file is created on the host of the program, not the - server host. - -Object attributes ------------------ -`pglarge` objects define a read-only set of attributes that allow to get -some information about it. These attributes are: - - :oid: the OID associated with the object - :pgcnx: the `pgobject` associated with the object - :error: the last warning/error message of the connection - -.. caution:: *Be careful*: - In multithreaded environments, `error` may be modified by another thread - using the same pgobject. Remember these object are shared, not duplicated. - You should provide some locking to be able if you want to check this. - The `oid` attribute is very interesting because it allow you reuse the OID - later, creating the `pglarge` object with a `pgobject` getlo() method call. diff --git a/docs/pgdb.txt b/docs/pgdb.txt deleted file mode 100644 index b333c016..00000000 --- a/docs/pgdb.txt +++ /dev/null @@ -1,42 +0,0 @@ -================================ -PyGreSQL Programming Information -================================ - --------------------------------------------- -The DB-API compliant interface (pgdb module) --------------------------------------------- - -.. meta:: - :description: The DB-API compliant interface (pgdb module) - :keywords: PyGreSQL, pgdb, DB-API, PostGreSQL, Python - -.. sectnum:: -.. contents:: Contents - - -Introduction -============ -You may either choose to use the -`"classic" PyGreSQL interface `_ -provided by the `pg` module or else the -`DB-API 2.0 compliant interface `_ -provided by the `pgdb` module. - -`DB-API 2.0 `_ -(Python Database API Specification v2.0) -is a specification for connecting to databases (not only PostGreSQL) -from Python that has been developed by the Python DB-SIG in 1999. - -The following documentation covers only the newer `pgdb` API. - -The authoritative programming information for the DB-API is availabe at - http://www.python.org/dev/peps/pep-0249/ - -A tutorial-like introduction to the DB-API can be found at - http://www2.linuxjournal.com/lj-issues/issue49/2605.html - - -The pgdb module -=============== -.. note:: This section of the documentation still needs to be written. - diff --git a/docs/readme.txt b/docs/readme.txt deleted file mode 100644 index 2c3387d7..00000000 --- a/docs/readme.txt +++ /dev/null @@ -1,208 +0,0 @@ -========================================== -PyGreSQL - Python interface for PostgreSQL -========================================== - --------------------- -PyGreSQL version 4.2 --------------------- - -.. meta:: - :description: PyGreSQL - Python interface for PostgreSQL - :keywords: PyGreSQL, PostGreSQL, Python - -.. contents:: Contents - - -Copyright notice -================ - -Written by D'Arcy J.M. Cain (darcy@druid.net) - -Based heavily on code written by Pascal Andre (andre@chimay.via.ecp.fr) - -Copyright (c) 1995, Pascal Andre - -Further modifications copyright (c) 1997-2008 by D'Arcy J.M. Cain -(darcy@PyGreSQL.org) - -Further modifications copyright (c) 2009-2012 by the PyGreSQL team. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose, without fee, and without a written agreement -is hereby granted, provided that the above copyright notice and this -paragraph and the following two paragraphs appear in all copies. In -this license the term "AUTHORS" refers to anyone who has contributed code -to PyGreSQL. 
- -IN NO EVENT SHALL THE AUTHORS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, -SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, -ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF -AUTHORS HAVE BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -THE AUTHORS SPECIFICALLY DISCLAIM ANY WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE -AUTHORS HAVE NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, -ENHANCEMENTS, OR MODIFICATIONS. - - -Introduction -============ - -**PostgreSQL** is a highly scalable, SQL compliant, open source -object-relational database management system. With more than 15 years -of development history, it is quickly becoming the de facto database -for enterprise level open source solutions. -Best of all, PostgreSQL's source code is available under the most liberal -open source license: the BSD license. - -**Python** Python is an interpreted, interactive, object-oriented -programming language. It is often compared to Tcl, Perl, Scheme or Java. -Python combines remarkable power with very clear syntax. It has modules, -classes, exceptions, very high level dynamic data types, and dynamic typing. -There are interfaces to many system calls and libraries, as well as to -various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules -are easily written in C or C++. Python is also usable as an extension -language for applications that need a programmable interface. -The Python implementation is copyrighted but freely usable and distributable, -even for commercial use. - -**PyGreSQL** is a Python module that interfaces to a PostgreSQL database. -It embeds the PostgreSQL query library to allow easy use of the powerful -PostgreSQL features from a Python script. - -PyGreSQL is developed and tested on a NetBSD system, but it should also -run on most other platforms where PostgreSQL and Python is running. -It is based on the PyGres95 code written by Pascal Andre (andre@chimay.via.ecp.fr). -D'Arcy (darcy@druid.net) renamed it to PyGreSQL starting with -version 2.0 and serves as the "BDFL" of PyGreSQL. - -The current version PyGreSQL 4.2 needs PostgreSQL 8.3 and Python 2.5 or above. - - -Where to get ... ? 
-================== - -Home sites of the different packages ------------------------------------- -**Python**: - http://www.python.org - -**PostgreSQL**: - http://www.postgresql.org - -**PyGreSQL**: - http://www.pygresql.org - -Download PyGreSQL here ----------------------- -The **released version of the source code** is available at - * http://pygresql.org/files/PyGreSQL.tgz -You can also check the latest **pre-release version** at - * http://pygresql.org/files/PyGreSQL-beta.tgz -A **Linux RPM** can be picked up from - * http://pygresql.org/files/pygresql.i386.rpm -A **NetBSD package** is available in their pkgsrc collection - * ftp://ftp.netbsd.org/pub/NetBSD/packages/pkgsrc/databases/py-postgresql/README.html -A **FreeBSD package** is available in their ports collection - * http://www.freebsd.org/cgi/cvsweb.cgi/ports/databases/py-PyGreSQL/ -A **Win32 package** for various Python versions is available at - * http://pygresql.org/files/PyGreSQL-4.2.win32-py2.5.exe - * http://pygresql.org/files/PyGreSQL-4.2.win32-py2.6.exe - * http://pygresql.org/files/PyGreSQL-4.2.win32-py2.7.exe -You can also find PyGreSQL on the **Python Package Index** at - * http://pypi.python.org/pypi/PyGreSQL/ - - -Distribution files -================== - -========== = -pgmodule.c the C Python module (_pg) -pg.py the "classic" PyGreSQL module -pgdb.py DB-SIG DB-API 2.0 compliant API wrapper for PygreSQL -docs/ documentation directory - - Contains: readme.txt, announce.txt, install.txt, - changelog.txt, future.txt, pg.txt and pgdb.txt. - - All text files are in ReST format, so HTML versions - can be easily created with buildhtml.py from docutils. -tutorial/ demos directory - - Contains: basics.py, syscat.py, advanced.py and func.py. - - The samples here have been taken from the - PostgreSQL manual and were used for module testing. - They demonstrate some PostgreSQL features. -========== = - - -Installation -============ -You will find the installing instructions in -`install.txt `_. - - -Information and support -======================= - -For general information ------------------------ -**Python**: - http://www.python.org - -**PostgreSQL**: - http://www.postgresql.org - -**PyGreSQL**: - http://www.pygresql.org - -For support ------------ -**Python**: - see http://www.python.org/community/ - -**PostgreSQL**: - see http://www.postgresql.org/support/ - -**PyGreSQL**: - Contact the PyGreSQL mailing list - concerning PyGreSQL 2.0 and up. - - If you would like to proposes changes, please join the - PyGreSQL mailing list and send context diffs there. - - See https://mail.vex.net/mailman/listinfo.cgi/pygresql - to join the mailing list. - -Please note that messages to individual developers will generally not be -answered directly. All questions, comments and code changes must be -submitted to the mailing list for peer review and archiving purposes. - -PyGreSQL programming information --------------------------------- -You may either choose to use the "classic" PyGreSQL interface -provided by the `pg` module or else the newer DB-API 2.0 -compliant interface provided by the `pgdb` module. - -`DB-API 2.0 `_ -(Python Database API Specification v2.0) -is a specification for connecting to databases (not only PostGreSQL) -from Python that has been developed by the Python DB-SIG in 1999. - -The programming information is available in the files -`pg.txt `_ and `pgdb.txt `_. - -Note that PyGreSQL is not thread-safe on the connection level. 
Therefore -we recommend using `DBUtils ` -for multi-threaded environments, which supports both PyGreSQL interfaces. - - -ChangeLog and Future -==================== -The ChangeLog with past changes is in the file -`changelog.txt `_. - -A to do list and wish list is in the file -`future.txt `_. diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 00000000..c354e8d9 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1 @@ +cloud_sptheme>=1.7.1 \ No newline at end of file diff --git a/docs/start.txt b/docs/start.txt new file mode 100644 index 00000000..5166896a --- /dev/null +++ b/docs/start.txt @@ -0,0 +1,15 @@ +.. PyGreSQL index page without toc (for use with cloud theme) + +Welcome to PyGreSQL +=================== + +.. toctree:: + :hidden: + + copyright + announce + download/index + contents/index + community/index + +.. include:: about.txt \ No newline at end of file diff --git a/docs/svn.rst b/docs/svn.rst deleted file mode 100644 index 99dca0a7..00000000 --- a/docs/svn.rst +++ /dev/null @@ -1,6 +0,0 @@ -SVN Access -========== - -The SVN repository can be checked out from svn://svn.PyGreSQL.org/pygresql. -It is also available through the -`online SVN repository `_. diff --git a/docs/toc.txt b/docs/toc.txt new file mode 100644 index 00000000..441021b4 --- /dev/null +++ b/docs/toc.txt @@ -0,0 +1,14 @@ +.. PyGreSQL index page with toc (for use without cloud theme) + +Welcome to PyGreSQL +=================== + +.. toctree:: + :maxdepth: 2 + + about + copyright + announce + download/index + contents/index + community/index \ No newline at end of file diff --git a/mkdocs b/mkdocs index 4dc59cbf..d7cf5146 100755 --- a/mkdocs +++ b/mkdocs @@ -1,6 +1,17 @@ #! /bin/sh -echo "Making HTML docs..." +MAKE=make +which gmake && MAKE=gmake -python ./buildhtml.py --stylesheet docs.css --link-stylesheet --prune=docs/CVS docs +# small safety test +if [ ! -f pgmodule.c ] +then + echo "Hmmm. Are you sure you are in the right directory?" + exit 1 +fi +echo "Making Sphinx docs..." + +cd docs +${MAKE} clean +${MAKE} html diff --git a/mktar b/mktar index 25211c87..6698fe71 100755 --- a/mktar +++ b/mktar @@ -1,48 +1,92 @@ #! /bin/sh -VERSION=4.2 +VERSION=4.2.2 +DISTDIR=/u/WEB/pyg/files -# small safety test -if [ ! -f module/pgmodule.c ] +# some safety tests +if [ ! -d $DISTDIR ] +then + echo "Hmmm. Are you sure you are on the right server?" + exit 1 +fi +if [ ! -f setup.py -o ! -f pgmodule.c -o ! -d tests -o ! -d docs ] then echo "Hmmm. Are you sure you are in the right directory?" exit 1 fi +FILES="*.c *.h *.py *.cfg *.rst *.txt" +NUMFILES=`ls $FILES | wc -l` +if [ $NUMFILES != 9 ] +then + echo "Hmmm. The number of top-level files seems to be wrong:" + ls $FILES + echo "Maybe you should do a clean checkout first." + echo "If something has changed, edit MANIFEST.in and mktar." + exit 1 +fi +FILES="mktar mkdocs docs tests pg.py pgdb.py pgmodule.c setup.cfg" +PERMS=`stat --printf="%a" $FILES` +if [ $? -eq 0 -a "$PERMS" != '755755755755644644644644' ] +then + echo "Hmmm. File permissions are not set properly." + echo "Use a filesystem with permissions and do a clean checkout first." 
+ exit 1 +fi if [ -f BETA ] then VERSION=$VERSION-pre`date +"%y%m%d"` PACKAGE=pygresql.pkg-beta - SYMLINK=PyGreSQL-beta.tgz + SYMLINK=PyGreSQL-beta.tar.gz else PACKAGE=pygresql.pkg - SYMLINK=PyGreSQL.tgz + SYMLINK=PyGreSQL.tar.gz fi -DISTDIR=/u/pyg/files -TD=PyGreSQL-$VERSION -TF=$DISTDIR/$TD.tgz - -MODFILES="module/pg.py module/pgdb.py module/pgmodule.c module/pgfs.h module/pgtypes.h module/setup.py" -DOCFILES="docs/*.txt docs/*.html docs/*.css" -TUTFILES="tutorial/*.py" +# Package up as a source tarball in the distribution directory. echo "Making source tarball..." +echo + +umask 0022 + +# Make sure that the documentation has been built. + +if ! ./mkdocs +then + echo "Hmmm. The documentation could not be built." + exit 1 +fi + +# Package as source distribution. -./mkdocs +rm -rf build dist + +if ! python2 setup.py sdist +then + echo "Hmmm. The source distribution could not be created." + exit 1 +fi + +DF=`ls dist` +if [ $? -ne 0 -o -z "$DF" ] +then + echo "Hmmm. The source distribution could not be found." + exit 1 +fi + +TF=$DISTDIR/$DF + +if ! cp dist/$DF $TF +then + echo "Hmmm. The source distribution could not be copied." + exit 1 +fi -rm -rf $TD -mkdir $TD -mkdir $TD/docs -mkdir $TD/tutorial -cp $MODFILES $TD -cp $DOCFILES $TD/docs -cp $TUTFILES $TD/tutorial -tar -cvzf $TF $TD chmod 644 $TF -rm -rf $TD -rm -f $DISTDIR/$SYMLINK -ln -s $TD.tgz $DISTDIR/$SYMLINK -echo "$TF has been built" +rm -f $DISTDIR/$SYMLINK +ln -s $DF $DISTDIR/$SYMLINK +echo +echo "$TF has been built." diff --git a/module/GNUmakefile b/module/GNUmakefile deleted file mode 100644 index f3a93fea..00000000 --- a/module/GNUmakefile +++ /dev/null @@ -1,58 +0,0 @@ -# $Header: /usr/cvs/Public/pygresql/module/GNUmakefile,v 1.19 2005-01-11 12:13:38 darcy Exp $ -# $Id$ - -subdir = src/interfaces/python -top_builddir = ../../.. -include $(top_builddir)/src/Makefile.global - -NAME = _pgmodule -OBJS = pgmodule.o -SHLIB_LINK = $(libpq) -ifeq ($(PORTNAME), cygwin) -override CPPFLAGS += -DUSE_DL_IMPORT -SHLIB_LINK += $(python_libspec) -endif - - -include $(top_srcdir)/src/Makefile.shlib - -override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) $(python_includespec) - -all: all-lib - -all-lib: libpq-all - -.PHONY: libpq-all -libpq-all: - $(MAKE) -C $(libpq_builddir) all - -install-warning-msg := { \ -echo "*** Skipping the installation of the Python interface module for lack"; \ -echo "*** of permissions. 
To install it, change to the directory"; \ -echo "*** `pwd`,"; \ -echo "*** become the appropriate user, and do '$(MAKE) install'."; } - -install: all installdirs - @if test -w $(DESTDIR)$(python_moduleexecdir) && test -w $(DESTDIR)$(python_moduledir); then \ - echo "$(INSTALL_SHLIB) $(shlib) $(DESTDIR)$(python_moduleexecdir)/_pgmodule$(DLSUFFIX)"; \ - $(INSTALL_SHLIB) $(shlib) $(DESTDIR)$(python_moduleexecdir)/_pgmodule$(DLSUFFIX); \ - \ - echo "$(INSTALL_DATA) $(srcdir)/pg.py $(DESTDIR)$(python_moduledir)/pg.py"; \ - $(INSTALL_DATA) $(srcdir)/pg.py $(DESTDIR)$(python_moduledir)/pg.py; \ - \ - echo "$(INSTALL_DATA) $(srcdir)/pgdb.py $(DESTDIR)$(python_moduledir)/pgdb.py"; \ - $(INSTALL_DATA) $(srcdir)/pgdb.py $(DESTDIR)$(python_moduledir)/pgdb.py; \ - else \ - $(install-warning-msg); \ - fi - -installdirs: - $(mkinstalldirs) $(DESTDIR)$(python_moduleexecdir) $(DESTDIR)$(python_moduledir) - -uninstall: - rm -f $(DESTDIR)$(python_moduleexecdir)/_pgmodule$(DLSUFFIX) \ - $(DESTDIR)$(python_moduledir)/pg.py \ - $(DESTDIR)$(python_moduledir)/pgdb.py - -clean distclean maintainer-clean: clean-lib - rm -f $(OBJS) diff --git a/module/PyGreSQL.spec b/module/PyGreSQL.spec deleted file mode 100644 index 380461a7..00000000 --- a/module/PyGreSQL.spec +++ /dev/null @@ -1,58 +0,0 @@ -# $Id$ -%define version 3.0 -%define release pre20000310 -%define name PyGreSQL -%define pythonversion 1.5 -Source: %{name}-%{version}-%{release}.tgz -Summary: A Python interface for PostgreSQL database. -Name: %{name} -Version: %{version} -Release: %{release} -#Patch: -Group: Applications/Databases -BuildRoot: /tmp/rpmbuild_%{name} -Copyright: GPL-like -Requires: python >= %{pythonversion}, postgresql -Packager: Hartmut Goebel -Vendor: D'Arcy J.M. Cain -URL: http://www.druid.net/pygresql/ - -%changelog -#* Tue Oct 06 1998 Fabio Coatti -#- fixed installation directory files list - -%description -PyGreSQL is a python module that interfaces to a PostgreSQL database. It -embeds the PostgreSQL query library to allow easy use of the powerful -PostgreSQL features from a Python script. - -Version 3.0 includes DB-API 2.0 support. - -%prep -rm -rf $RPM_BUILD_ROOT - -%setup -n %{name}-%{version}-%{release} -#%patch - -%build -mkdir -p $RPM_BUILD_ROOT/usr/lib/python%{pythonversion}/lib-dynload -cc -fpic -shared -o $RPM_BUILD_ROOT/usr/lib/python%{pythonversion}/lib-dynload/_pg.so -I/usr/include/pgsql/ -I/usr/include/python1.5 pgmodule.c -lpq -## import fails, since _pg is not yet installed -python -c 'import pg' || true -python -c 'import pgdb' || true - -%install -cp *.py *.pyc $RPM_BUILD_ROOT/usr/lib/python%{pythonversion}/ - -cd $RPM_BUILD_ROOT -find . -type f | sed 's,^\.,\%attr(-\,root\,root) ,' > $RPM_BUILD_DIR/file.list.%{name} -find . -type l | sed 's,^\.,\%attr(-\,root\,root) ,' >> $RPM_BUILD_DIR/file.list.%{name} - -%files -f ../file.list.%{name} -%doc %attr(-,root,root) Announce ChangeLog README tutorial - - -%clean -rm -rf $RPM_BUILD_ROOT -cd $RPM_BUILD_DIR -rm -rf %{name}-%{version}-%{release} file.list.%{name} diff --git a/module/pg.py b/pg.py similarity index 61% rename from module/pg.py rename to pg.py index 1c579c48..fa968fb8 100644 --- a/module/pg.py +++ b/pg.py @@ -15,7 +15,7 @@ """ -# Copyright (c) 1997-2013 by D'Arcy J.M. Cain. +# Copyright (c) 1997-2016 by D'Arcy J.M. Cain. # # Contributions made by Ch. Zwerschke and others. 
# @@ -34,12 +34,12 @@ import warnings try: frozenset -except NameError: # Python < 2.4 +except NameError: # Python < 2.4, unsupported from sets import ImmutableSet as frozenset try: from decimal import Decimal set_decimal(Decimal) -except ImportError: # Python < 2.4 +except ImportError: # Python < 2.4, unsupported Decimal = float try: from collections import namedtuple @@ -47,7 +47,7 @@ namedtuple = None -# Auxiliary functions which are independent from a DB connection: +# Auxiliary functions that are independent from a DB connection: def _is_quoted(s): """Check whether this string is a quoted identifier.""" @@ -140,26 +140,33 @@ def _prg_error(msg): return _db_error(msg, ProgrammingError) +# The notification handler + class NotificationHandler(object): """A PostgreSQL client-side asynchronous notification handler.""" - def __init__(self, db, event, callback, arg_dict=None, timeout=None): + def __init__(self, db, event, callback=None, + arg_dict=None, timeout=None, stop_event=None): """Initialize the notification handler. - db - PostgreSQL connection object. - event - Event (notification channel) to LISTEN for. - callback - Event callback function. - arg_dict - A dictionary passed as the argument to the callback. - timeout - Timeout in seconds; a floating point number denotes - fractions of seconds. If it is absent or None, the - callers will never time out. + You must pass a PyGreSQL database connection, the name of an + event (notification channel) to listen for and a callback function. + + You can also specify a dictionary arg_dict that will be passed as + the single argument to the callback function, and a timeout value + in seconds (a floating point number denotes fractions of seconds). + If it is absent or None, the callers will never time out. If the + timeout is reached, the callback function will be called with a + single argument that is None. If you set the timeout to zero, + the handler will poll notifications synchronously and return. + You can specify the name of the event that will be used to signal + the handler to stop listening as stop_event. By default, it will + be the event name prefixed with 'stop_'. """ - if isinstance(db, DB): - db = db.db self.db = db self.event = event - self.stop_event = 'stop_%s' % event + self.stop_event = stop_event or 'stop_%s' % event self.listening = False self.callback = callback if arg_dict is None: @@ -168,7 +175,7 @@ def __init__(self, db, event, callback, arg_dict=None, timeout=None): self.timeout = timeout def __del__(self): - self.close() + self.unlisten() def close(self): """Stop listening and close the connection.""" @@ -194,61 +201,66 @@ def unlisten(self): def notify(self, db=None, stop=False, payload=None): """Generate a notification. - Note: If the main loop is running in another thread, you must pass - a different database connection to avoid a collision. + Optionally, you can pass a payload with the notification. + If you set the stop flag, a stop notification will be sent that + will cause the handler to stop listening. + + Note: If the notification handler is running in another thread, you + must pass a different database connection since PyGreSQL database + connections are not thread-safe. """ - if not db: - db = self.db if self.listening: + if not db: + db = self.db q = 'notify "%s"' % (stop and self.stop_event or self.event) if payload: q += ", '%s'" % payload return db.query(q) - def __call__(self, close=False): + def __call__(self): """Invoke the notification handler. 
- The handler is a loop that actually LISTENs for two NOTIFY messages: - - and stop_. + The handler is a loop that listens for notifications on the event + and stop event channels. When either of these notifications are + received, its associated 'pid', 'event' and 'extra' (the payload + passed with the notification) are inserted into its arg_dict + dictionary and the callback is invoked with this dictionary as + a single argument. When the handler receives a stop event, it + stops listening to both events and return. - When either of these NOTIFY messages are received, its associated - 'pid' and 'event' are inserted into , and the callback is - invoked with . If the NOTIFY message is stop_, the - handler UNLISTENs both and stop_ and exits. + In the special case that the timeout of the handler has been set + to zero, the handler will poll all events synchronously and return. + If will keep listening until it receives a stop event. Note: If you run this loop in another thread, don't use the same database connection for database operations in the main thread. - """ self.listen() - _ilist = [self.db.fileno()] - - while True: - ilist, _olist, _elist = select.select(_ilist, [], [], self.timeout) - if ilist == []: # we timed out - self.unlisten() - self.callback(None) - break - else: - notice = self.db.getnotify() - if notice is None: - continue - event, pid, extra = notice - if event in (self.event, self.stop_event): - self.arg_dict['pid'] = pid - self.arg_dict['event'] = event - self.arg_dict['extra'] = extra - self.callback(self.arg_dict) + poll = self.timeout == 0 + if not poll: + rlist = [self.db.fileno()] + while self.listening: + if poll or select.select(rlist, [], [], self.timeout)[0]: + while self.listening: + notice = self.db.getnotify() + if not notice: # no more messages + break + event, pid, extra = notice + if event not in (self.event, self.stop_event): + self.unlisten() + raise _db_error( + 'Listening for "%s" and "%s", but notified of "%s"' + % (self.event, self.stop_event, event)) if event == self.stop_event: self.unlisten() - break - else: - self.unlisten() - raise _db_error( - 'listening for "%s" and "%s", but notified of "%s"' - % (self.event, self.stop_event, event)) + self.arg_dict.update(pid=pid, event=event, extra=extra) + self.callback(self.arg_dict) + if poll: + break + else: # we timed out + self.unlisten() + self.callback(None) def pgnotify(*args, **kw): @@ -304,7 +316,7 @@ def __init__(self, *args, **kw): # * to any other true value to just print debug statements def __getattr__(self, name): - # All undefined members are same as in underlying pg connection: + # All undefined members are same as in underlying connection: if self.db: return getattr(self.db, name) else: @@ -332,12 +344,19 @@ def _do_debug(self, s): if isinstance(self.debug, basestring): print(self.debug % s) elif isinstance(self.debug, file): - file.write(s + '\n') + self.debug.write(s + '\n') elif callable(self.debug): self.debug(s) else: print(s) + def _make_bool(d): + """Get boolean value corresponding to d.""" + if get_bool(): + return bool(d) + return d and 't' or 'f' + _make_bool = staticmethod(_make_bool) + def _quote_text(self, d): """Quote text value.""" if not isinstance(d, basestring): @@ -352,9 +371,7 @@ def _quote_bool(self, d): if not d: return 'NULL' d = d.lower() in self._bool_true - else: - d = bool(d) - return ("'f'", "'t'")[d] + return d and "'t'" or "'f'" _date_literals = frozenset('current_date current_time' ' current_timestamp localtime localtimestamp'.split()) @@ -406,7 +423,7 @@ 
def _split_schema(self, cl): """ s = _split_parts(cl) - if len(s) > 1: # name already qualfied? + if len(s) > 1: # name already qualified? # should be database.schema.table or schema.table if len(s) > 3: raise _prg_error('Too many dots in class name %s' % cl) @@ -418,16 +435,16 @@ def _split_schema(self, cl): schemas = self.db.query(q).getresult()[0][0][1:-1].split(',') if schemas: # non-empty path # search schema for this object in the current search path + # (we could also use unnest with ordinality here to spare + # one query, but this is only possible since PostgreSQL 9.4) q = ' UNION '.join( ["SELECT %d::integer AS n, '%s'::name AS nspname" % s for s in enumerate(schemas)]) - q = ("SELECT nspname FROM pg_class" - " JOIN pg_namespace" - " ON pg_class.relnamespace = pg_namespace.oid" + q = ("SELECT nspname FROM pg_class r" + " JOIN pg_namespace s ON r.relnamespace = s.oid" " JOIN (%s) AS p USING (nspname)" - " WHERE pg_class.relname = '%s'" - " ORDER BY n LIMIT 1" % (q, cl)) - schema = self.db.query(q).getresult() + " WHERE r.relname = $1 ORDER BY n LIMIT 1" % q) + schema = self.db.query(q, (cl,)).getresult() if schema: # schema found schema = schema[0][0] else: # object not found in current search path @@ -498,40 +515,175 @@ def commit(self): end = commit def rollback(self, name=None): - """Rollback the current transaction.""" + """Roll back the current transaction.""" qstr = 'ROLLBACK' if name: qstr += ' TO ' + name return self.query(qstr) - def savepoint(self, name=None): + abort = rollback + + def savepoint(self, name): """Define a new savepoint within the current transaction.""" - qstr = 'SAVEPOINT' - if name: - qstr += ' ' + name - return self.query(qstr) + return self.query('SAVEPOINT ' + name) def release(self, name): """Destroy a previously defined savepoint.""" return self.query('RELEASE ' + name) + def get_parameter(self, parameter): + """Get the value of a run-time parameter. + + If the parameter is a string, the return value will also be a string + that is the current setting of the run-time parameter with that name. + + You can get several parameters at once by passing a list, set or dict. + When passing a list of parameter names, the return value will be a + corresponding list of parameter settings. When passing a set of + parameter names, a new dict will be returned, mapping these parameter + names to their settings. Finally, if you pass a dict as parameter, + its values will be set to the current parameter settings corresponding + to its keys. + + By passing the special name 'all' as the parameter, you can get a dict + of all existing configuration parameters. 
+ """ + if isinstance(parameter, basestring): + parameter = [parameter] + values = None + elif isinstance(parameter, (list, tuple)): + values = [] + elif isinstance(parameter, (set, frozenset)): + values = {} + elif isinstance(parameter, dict): + values = parameter + else: + raise TypeError( + 'The parameter must be a string, list, set or dict') + if not parameter: + raise TypeError('No parameter has been specified') + if isinstance(values, dict): + params = {} + else: + params = [] + for key in parameter: + if isinstance(key, basestring): + param = key.strip().lower() + else: + param = None + if not param: + raise TypeError('Invalid parameter') + if param == 'all': + q = 'SHOW ALL' + values = self.db.query(q).getresult() + values = dict(value[:2] for value in values) + break + if isinstance(values, dict): + params[param] = key + else: + params.append(param) + else: + for param in params: + q = 'SHOW %s' % (param,) + value = self.db.query(q).getresult()[0][0] + if values is None: + values = value + elif isinstance(values, list): + values.append(value) + else: + values[params[param]] = value + return values + + def set_parameter(self, parameter, value=None, local=False): + """Set the value of a run-time parameter. + + If the parameter and the value are strings, the run-time parameter + will be set to that value. If no value or None is passed as a value, + then the run-time parameter will be restored to its default value. + + You can set several parameters at once by passing a list of parameter + names, together with a single value that all parameters should be + set to or with a corresponding list of values. You can also pass + the parameters as a set if you only provide a single value. + Finally, you can pass a dict with parameter names as keys. In this + case, you should not pass a value, since the values for the parameters + will be taken from the dict. + + By passing the special name 'all' as the parameter, you can reset + all existing settable run-time parameters to their default values. + + If you set local to True, then the command takes effect for only the + current transaction. After commit() or rollback(), the session-level + setting takes effect again. Setting local to True will appear to + have no effect if it is executed outside a transaction, since the + transaction will end immediately. 
+ """ + if isinstance(parameter, basestring): + parameter = {parameter: value} + elif isinstance(parameter, (list, tuple)): + if isinstance(value, (list, tuple)): + parameter = dict(zip(parameter, value)) + else: + parameter = dict.fromkeys(parameter, value) + elif isinstance(parameter, (set, frozenset)): + if isinstance(value, (list, tuple, set, frozenset)): + value = set(value) + if len(value) == 1: + value = value.pop() + if not(value is None or isinstance(value, basestring)): + raise ValueError('A single value must be specified' + ' when parameter is a set') + parameter = dict.fromkeys(parameter, value) + elif isinstance(parameter, dict): + if value is not None: + raise ValueError('A value must not be specified' + ' when parameter is a dictionary') + else: + raise TypeError( + 'The parameter must be a string, list, set or dict') + if not parameter: + raise TypeError('No parameter has been specified') + params = {} + for key, value in parameter.items(): + if isinstance(key, basestring): + param = key.strip().lower() + else: + param = None + if not param: + raise TypeError('Invalid parameter') + if param == 'all': + if value is not None: + raise ValueError('A value must ot be specified' + " when parameter is 'all'") + params = {'all': None} + break + params[param] = value + local = local and ' LOCAL' or '' + for param, value in params.items(): + if value is None: + q = 'RESET%s %s' % (local, param) + else: + q = 'SET%s %s TO %s' % (local, param, value) + self._do_debug(q) + self.db.query(q) + def query(self, qstr, *args): """Executes a SQL command string. - This method simply sends a SQL query to the database. If the query is + This method simply sends a SQL query to the database. If the query is an insert statement that inserted exactly one row into a table that has OIDs, the return value is the OID of the newly inserted row. If the query is an update or delete statement, or an insert statement that did not insert exactly one row in a table with OIDs, then the - numer of rows affected is returned as a string. If it is a statement + number of rows affected is returned as a string. If it is a statement that returns rows as a result (usually a select statement, but maybe also an "insert/update ... returning" statement), this method returns a pgqueryobject that can be accessed via getresult() or dictresult() - or simply printed. Otherwise, it returns `None`. + or simply printed. Otherwise, it returns `None`. The query can contain numbered parameters of the form $1 in place - of any data constant. Arguments given after the query string will - be substituted for the corresponding numbered parameter. Parameter + of any data constant. Arguments given after the query string will + be substituted for the corresponding numbered parameter. Parameter values can also be given as a single list or tuple argument. Note that the query string must not be passed as a unicode value, @@ -549,11 +701,11 @@ def pkey(self, cl, newpkey=None): """This method gets or sets the primary key of a class. Composite primary keys are represented as frozensets. Note that - this raises an exception if the table does not have a primary key. + this raises a KeyError if the table does not have a primary key. If newpkey is set and is not a dictionary then set that value as the primary key of the class. If it is a dictionary - then replace the _pkeys dictionary with a copy of it. + then replace the internal cache of primary keys with a copy of it. 
""" # First see if the caller is supplying a dictionary @@ -576,21 +728,19 @@ def pkey(self, cl, newpkey=None): self._pkeys = {} if self.server_version >= 80200: # the ANY syntax works correctly only with PostgreSQL >= 8.2 - any_indkey = "= ANY (pg_index.indkey)" + any_indkey = "= ANY (i.indkey)" else: any_indkey = "IN (%s)" % ', '.join( - ['pg_index.indkey[%d]' % i for i in range(16)]) - for r in self.db.query( - "SELECT pg_namespace.nspname, pg_class.relname," - " pg_attribute.attname FROM pg_class" - " JOIN pg_namespace" - " ON pg_namespace.oid = pg_class.relnamespace" - " AND pg_namespace.nspname NOT LIKE 'pg_%'" - " JOIN pg_attribute ON pg_attribute.attrelid = pg_class.oid" - " AND pg_attribute.attisdropped = 'f'" - " JOIN pg_index ON pg_index.indrelid = pg_class.oid" - " AND pg_index.indisprimary = 't'" - " AND pg_attribute.attnum " + any_indkey).getresult(): + ['i.indkey[%d]' % i for i in range(16)]) + q = ("SELECT s.nspname, r.relname, a.attname" + " FROM pg_class r" + " JOIN pg_namespace s ON s.oid = r.relnamespace" + " JOIN pg_attribute a ON a.attrelid = r.oid" + " AND NOT a.attisdropped" + " JOIN pg_index i ON i.indrelid = r.oid" + " AND i.indisprimary AND a.attnum %s" + " AND r.relkind IN ('r', 'v')" % any_indkey) + for r in self.db.query(q).getresult(): cl, pkey = _join_parts(r[:2]), r[2] self._pkeys.setdefault(cl, []).append(pkey) # (only) for composite primary keys, the values will be frozensets @@ -606,27 +756,35 @@ def get_databases(self): return [s[0] for s in self.db.query('SELECT datname FROM pg_database').getresult()] - def get_relations(self, kinds=None): + def get_relations(self, kinds=None, system=False): """Get list of relations in connected database of specified kinds. - If kinds is None or empty, all kinds of relations are returned. - Otherwise kinds can be a string or sequence of type letters - specifying which kind of relations you want to list. + If kinds is None or empty, all kinds of relations are returned. + Otherwise kinds can be a string or sequence of type letters + specifying which kind of relations you want to list. + Set the system flag if you want to get the system relations as well. + """ + where = [] + if kinds: + where.append("r.relkind IN (%s)" % + ','.join(["'%s'" % k for k in kinds])) + if not system: + where.append("s.nspname NOT SIMILAR" + " TO 'pg/_%|information/_schema' ESCAPE '/'") + where = where and " WHERE %s" % ' AND '.join(where) or '' + q = ("SELECT s.nspname, r.relname" + " FROM pg_class r" + " JOIN pg_namespace s ON s.oid = r.relnamespace%s" + " ORDER BY 1, 2") % where + return [_join_parts(r) for r in self.db.query(q).getresult()] + + def get_tables(self, system=False): + """Return list of tables in connected database. + + Set the system flag if you want to get the system tables as well. """ - where = kinds and "pg_class.relkind IN (%s) AND" % ','.join( - ["'%s'" % x for x in kinds]) or '' - return [_join_parts(x) for x in self.db.query( - "SELECT pg_namespace.nspname, pg_class.relname " - "FROM pg_class " - "JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace " - "WHERE %s pg_class.relname !~ '^Inv' AND " - "pg_class.relname !~ '^pg_' " - "ORDER BY 1, 2" % where).getresult()] - - def get_tables(self): - """Return list of tables in connected database.""" - return self.get_relations('r') + return self.get_relations('r', system) def get_attnames(self, cl, newattnames=None): """Given the name of a table, digs out the set of attribute names. 
@@ -650,20 +808,27 @@ def get_attnames(self, cl, newattnames=None): # May as well cache them: if qcl in self._attnames: return self._attnames[qcl] - if qcl not in self.get_relations('rv'): - raise _prg_error('Class %s does not exist' % qcl) - q = "SELECT pg_attribute.attname, pg_type.typname" - if self._regtypes: - q += "::regtype" - q += (" FROM pg_class" - " JOIN pg_namespace ON pg_class.relnamespace = pg_namespace.oid" - " JOIN pg_attribute ON pg_attribute.attrelid = pg_class.oid" - " JOIN pg_type ON pg_type.oid = pg_attribute.atttypid" - " WHERE pg_namespace.nspname = '%s' AND pg_class.relname = '%s'" - " AND (pg_attribute.attnum > 0 OR pg_attribute.attname = 'oid')" - " AND pg_attribute.attisdropped = 'f'") % cl - q = self.db.query(q).getresult() + q = ("SELECT a.attname, t.typname%s" + " FROM pg_class r" + " JOIN pg_namespace s ON r.relnamespace = s.oid" + " JOIN pg_attribute a ON a.attrelid = r.oid" + " JOIN pg_type t ON t.oid = a.atttypid" + " WHERE s.nspname = $1 AND r.relname = $2" + " AND r.relkind IN ('r', 'v')" + " AND (a.attnum > 0 OR a.attname = 'oid')" + " AND NOT a.attisdropped") % ( + self._regtypes and '::regtype' or '',) + q = self.db.query(q, cl).getresult() + if not q: + r = ("SELECT r.relnamespace" + " FROM pg_class r" + " JOIN pg_namespace s ON s.oid = r.relnamespace" + " WHERE s.nspname =$1 AND r.relname = $2" + " AND r.relkind IN ('r', 'v') LIMIT 1") + r = self.db.query(r, cl).getresult() + if not r: + raise _prg_error('Class %s does not exist' % qcl) if self._regtypes: t = dict(q) @@ -715,15 +880,16 @@ def has_table_privilege(self, cl, privilege='select'): try: return self._privileges[(qcl, privilege)] except KeyError: - q = "SELECT has_table_privilege('%s', '%s')" % (qcl, privilege) - ret = self.db.query(q).getresult()[0][0] == 't' + q = "SELECT has_table_privilege($1, $2)" + q = self.db.query(q, (qcl, privilege)) + ret = q.getresult()[0][0] == self._make_bool(True) self._privileges[(qcl, privilege)] = ret return ret def get(self, cl, arg, keyname=None): - """Get a tuple from a database table or view. + """Get a row from a database table or view. - This method is the basic mechanism to get a single row. The keyname + This method is the basic mechanism to get a single row. It assumes that the key specifies a unique row. If keyname is not specified then the primary key for the table is used. If arg is a dictionary then the value for the key is taken from it and it is modified to @@ -739,7 +905,7 @@ def get(self, cl, arg, keyname=None): # build qualified class name qcl = self._add_schema(cl) # To allow users to work with multiple tables, - # we munge the name of the "oid" the key + # we munge the name of the "oid" key qoid = _oid_key(qcl) if not keyname: # use the primary key by default @@ -751,7 +917,7 @@ def get(self, cl, arg, keyname=None): if keyname == 'oid': if isinstance(arg, dict): if qoid not in arg: - raise _db_error('%s not in arg' % qoid) + raise _prg_error('%s not in arg' % qoid) else: arg = {qoid: arg} where = 'oid = %s' % arg[qoid] @@ -777,18 +943,18 @@ def get(self, cl, arg, keyname=None): return arg def insert(self, cl, d=None, **kw): - """Insert a tuple into a database table. + """Insert a row into a database table. - This method inserts a row into a table. If a dictionary is - supplied it starts with that. Otherwise it uses a blank dictionary. - Either way the dictionary is updated from the keywords. + This method inserts a row into a table. The name of the table must + be passed as the first parameter. 
The other parameters are used for + providing the data of the row that shall be inserted into the table. + If a dictionary is supplied as the second parameter, it starts with + that. Otherwise it uses a blank dictionary. Either way the dictionary + is updated from the keywords. The dictionary is then, if possible, reloaded with the values actually inserted in order to pick up values modified by rules, triggers, etc. - Note: The method currently doesn't support insert into views - although PostgreSQL does. - """ qcl = self._add_schema(cl) qoid = _oid_key(qcl) @@ -832,15 +998,15 @@ def update(self, cl, d=None, **kw): """Update an existing row in a database table. Similar to insert but updates an existing row. The update is based - on the OID value as munged by get or passed as keyword, or on the + on the OID value as munged by get() or passed as keyword, or on the primary key of the table. The dictionary is modified, if possible, to reflect any changes caused by the update due to triggers, rules, default values, etc. """ - # Update always works on the oid which get returns if available, + # Update always works on the oid which get() returns if available, # otherwise use the primary key. Fail if neither. - # Note that we only accept oid key from named args for safety + # Note that we only accept oid key from named args for safety. qcl = self._add_schema(cl) qoid = _oid_key(qcl) if 'oid' in kw: @@ -873,7 +1039,7 @@ def update(self, cl, d=None, **kw): return d values = ', '.join(values) selectable = self.has_table_privilege(qcl) - if selectable and self.server_version >= 880200: + if selectable and self.server_version >= 80200: ret = ' RETURNING %s*' % ('oid' in attnames and 'oid, ' or '') else: ret = '' @@ -892,19 +1058,19 @@ def update(self, cl, d=None, **kw): self.get(qcl, d) return d - def clear(self, cl, a=None): + def clear(self, cl, d=None): """Clear all the attributes to values determined by the types. - Numeric types are set to 0, Booleans are set to 'f', and everything - else is set to the empty string. If the array argument is present, - it is used as the array and any entries matching attribute names are - cleared with everything else left unchanged. + Numeric types are set to 0, Booleans are set to false, and everything + else is set to the empty string. If the second argument is present, + it is used as the row dictionary and any entries matching attribute + names are cleared with everything else left unchanged. """ # At some point we will need a way to get defaults from a table. qcl = self._add_schema(cl) - if a is None: - a = {} # empty if argument is not present + if d is None: + d = {} # empty if argument is not present attnames = self.get_attnames(qcl) for n, t in attnames.items(): if n == 'oid': @@ -912,18 +1078,18 @@ def clear(self, cl, a=None): if t in ('int', 'integer', 'smallint', 'bigint', 'float', 'real', 'double precision', 'num', 'numeric', 'money'): - a[n] = 0 + d[n] = 0 elif t in ('bool', 'boolean'): - a[n] = 'f' + d[n] = self._make_bool(False) else: - a[n] = '' - return a + d[n] = '' + return d def delete(self, cl, d=None, **kw): """Delete an existing row in a database table. This method deletes the row from a table. It deletes based on the - OID value as munged by get or passed as keyword, or on the primary + OID value as munged by get() or passed as keyword, or on the primary key of the table. The return value is the number of deleted rows (i.e. 0 if the row did not exist and 1 if the row was deleted). 
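A rough usage sketch of the get/update/delete cycle these docstrings describe,
given a DB wrapper instance db (the "employees" table and its "name" primary
key are assumptions for illustration only, not part of the patch):

    row = db.get('employees', 'John Doe')  # 'name' assumed as primary key
    row['phone'] = '555-0000'
    db.update('employees', row)      # uses the munged OID or the primary key
    n = db.delete('employees', row)  # returns 1 if the row existed, else 0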
@@ -959,9 +1125,67 @@ def delete(self, cl, d=None, **kw): self._do_debug(q) return int(self.db.query(q)) - def notification_handler(self, event, callback, arg_dict={}, timeout=None): + def truncate(self, table, restart=False, cascade=False, only=False): + """Empty a table or set of tables. + + This method quickly removes all rows from the given table or set + of tables. It has the same effect as an unqualified DELETE on each + table, but since it does not actually scan the tables it is faster. + Furthermore, it reclaims disk space immediately, rather than requiring + a subsequent VACUUM operation. This is most useful on large tables. + + If restart is set to True, sequences owned by columns of the truncated + table(s) are automatically restarted. If cascade is set to True, it + also truncates all tables that have foreign-key references to any of + the named tables. If the parameter only is not set to True, all the + descendant tables (if any) will also be truncated. Optionally, a '*' + can be specified after the table name to explicitly indicate that + descendant tables are included. + """ + if isinstance(table, basestring): + only = {table: only} + table = [table] + elif isinstance(table, (list, tuple)): + if isinstance(only, (list, tuple)): + only = dict(zip(table, only)) + else: + only = dict.fromkeys(table, only) + elif isinstance(table, (set, frozenset)): + only = dict.fromkeys(table, only) + else: + raise TypeError('The table must be a string, list or set') + if not (restart is None or isinstance(restart, (bool, int))): + raise TypeError('Invalid type for the restart option') + if not (cascade is None or isinstance(cascade, (bool, int))): + raise TypeError('Invalid type for the cascade option') + tables = [] + for t in table: + u = only.get(t) + if not (u is None or isinstance(u, (bool, int))): + raise TypeError('Invalid type for the only option') + if t.endswith('*'): + if u: + raise ValueError( + 'Contradictory table name and only options') + t = t[:-1].rstrip() + t = self._add_schema(t) + if u: + t = 'ONLY %s' % t + tables.append(t) + q = ['TRUNCATE', ', '.join(tables)] + if restart: + q.append('RESTART IDENTITY') + if cascade: + q.append('CASCADE') + q = ' '.join(q) + self._do_debug(q) + return self.db.query(q) + + def notification_handler(self, + event, callback, arg_dict=None, timeout=None, stop_event=None): """Get notification handler that will run the given callback.""" - return NotificationHandler(self.db, event, callback, arg_dict, timeout) + return NotificationHandler(self, + event, callback, arg_dict, timeout, stop_event) # if run as script, print some information diff --git a/module/pgdb.py b/pgdb.py similarity index 96% rename from module/pgdb.py rename to pgdb.py index 65fec3ae..00505b50 100644 --- a/module/pgdb.py +++ b/pgdb.py @@ -66,14 +66,13 @@ from _pg import * try: frozenset -except NameError: # Python < 2.4 +except NameError: # Python < 2.4, unsupported from sets import ImmutableSet as frozenset from datetime import date, time, datetime, timedelta from time import localtime -try: # use Decimal if available +try: from decimal import Decimal - set_decimal(Decimal) -except ImportError: # otherwise (Python < 2.4) +except ImportError: # Python < 2.4, unsupported Decimal = float # use float instead of Decimal try: from math import isnan, isinf @@ -101,7 +100,7 @@ # shortcut methods are not supported by default # since they have been excluded from DB API 2 -# and are not recommended by the DB SIG; +# and are not recommended by the DB SIG. 
shortcutmethods = 0 @@ -113,7 +112,6 @@ def decimal_type(decimal_type=None): global Decimal if decimal_type is not None: _cast['numeric'] = Decimal = decimal_type - set_decimal(Decimal) return Decimal @@ -290,12 +288,14 @@ def row_factory(row): You can overwrite this with a custom row factory, e.g. a dict factory: - class myCursor(pgdb.pgdbCursor): - def cursor.row_factory(self, row): - d = {} - for idx, col in enumerate(self.description): - d[col[0]] = row[idx] - return d + class myCursor(pgdb.pgdbCursor): + + def row_factory(self, row): + d = {} + for idx, col in enumerate(self.description): + d[col[0]] = row[idx] + return d + cursor = myCursor(cnx) """ @@ -671,7 +671,7 @@ def Time(hour, minute=0, second=0, microsecond=0): def Timestamp(year, month, day, hour=0, minute=0, second=0, microsecond=0): - """construct an object holding a time stamp value.""" + """Construct an object holding a time stamp value.""" return datetime(year, month, day, hour, minute, second, microsecond) @@ -681,17 +681,17 @@ def DateFromTicks(ticks): def TimeFromTicks(ticks): - """construct an object holding a time value from the given ticks value.""" + """Construct an object holding a time value from the given ticks value.""" return Time(*localtime(ticks)[3:6]) def TimestampFromTicks(ticks): - """construct an object holding a time stamp from the given ticks value.""" + """Construct an object holding a time stamp from the given ticks value.""" return Timestamp(*localtime(ticks)[:6]) class Binary(str): - """construct an object capable of holding a binary (long) string value.""" + """Construct an object capable of holding a binary (long) string value.""" # If run as script, print some information: diff --git a/module/pgfs.h b/pgfs.h similarity index 100% rename from module/pgfs.h rename to pgfs.h diff --git a/module/pgmodule.c b/pgmodule.c similarity index 92% rename from module/pgmodule.c rename to pgmodule.c index 0fc4e30d..4f72500d 100644 --- a/module/pgmodule.c +++ b/pgmodule.c @@ -22,7 +22,7 @@ * AUTHOR HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, * ENHANCEMENTS, OR MODIFICATIONS. * - * Further modifications copyright 1997, 1998, 1999 by D'Arcy J.M. Cain + * Further modifications copyright 1997 to 2016 by D'Arcy J.M. Cain * (darcy@druid.net) subject to the same terms and conditions as above. 
* */ @@ -119,8 +119,8 @@ int *get_type_array(PGresult *result, int nfields); static PyObject *decimal = NULL, /* decimal type */ *namedresult = NULL; /* function for getting named results */ -static char *decimal_point = "."; /* decimal point used in money values */ - +static char decimal_point = '.'; /* decimal point used in money values */ +static int use_bool = 0; /* whether or not bool objects shall be returned */ /* --------------------------------------------------------------------- */ /* OBJECTS DECLARATION */ @@ -337,6 +337,16 @@ check_source_obj(pgsourceobject *self, int level) return 1; } +/* define internal types */ + +#define PYGRES_INT 1 +#define PYGRES_LONG 2 +#define PYGRES_FLOAT 3 +#define PYGRES_DECIMAL 4 +#define PYGRES_MONEY 5 +#define PYGRES_BOOL 6 +#define PYGRES_DEFAULT 7 + /* shared functions for converting PG types to Python types */ int * get_type_array(PGresult *result, int nfields) @@ -344,7 +354,7 @@ get_type_array(PGresult *result, int nfields) int *typ; int j; - if (!(typ = malloc(sizeof(int) * nfields))) + if (!(typ = PyMem_Malloc(sizeof(int) * nfields))) { PyErr_SetString(PyExc_MemoryError, "memory error in getresult()."); return NULL; @@ -357,28 +367,32 @@ get_type_array(PGresult *result, int nfields) case INT2OID: case INT4OID: case OIDOID: - typ[j] = 1; + typ[j] = PYGRES_INT; break; case INT8OID: - typ[j] = 2; + typ[j] = PYGRES_LONG; break; case FLOAT4OID: case FLOAT8OID: - typ[j] = 3; + typ[j] = PYGRES_FLOAT; break; case NUMERICOID: - typ[j] = 4; + typ[j] = PYGRES_DECIMAL; break; case CASHOID: - typ[j] = 5; + typ[j] = PYGRES_MONEY; + break; + + case BOOLOID: + typ[j] = PYGRES_BOOL; break; default: - typ[j] = 6; + typ[j] = PYGRES_DEFAULT; break; } } @@ -400,8 +414,8 @@ format_result(const PGresult *res) if (n > 0) { - char * const aligns = (char *) malloc(n * sizeof(char)); - int * const sizes = (int *) malloc(n * sizeof(int)); + char * const aligns = (char *) PyMem_Malloc(n * sizeof(char)); + int * const sizes = (int *) PyMem_Malloc(n * sizeof(int)); if (aligns && sizes) { @@ -470,7 +484,7 @@ format_result(const PGresult *res) /* plus size of footer */ size += 40; /* is the buffer size that needs to be allocated */ - buffer = (char *) malloc(size); + buffer = (char *) PyMem_Malloc(size); if (buffer) { char *p = buffer; @@ -527,13 +541,13 @@ format_result(const PGresult *res) *p++ = '\n'; } /* free memory */ - free(aligns); - free(sizes); + PyMem_Free(aligns); + PyMem_Free(sizes); /* create the footer */ sprintf(p, "(%d row%s)", m, m == 1 ? 
"" : "s"); /* return the result */ result = PyString_FromString(buffer); - free(buffer); + PyMem_Free(buffer); return result; } else @@ -543,10 +557,8 @@ format_result(const PGresult *res) return NULL; } } else { - if (aligns) - free(aligns); - if (sizes) - free(aligns); + PyMem_Free(aligns); + PyMem_Free(sizes); PyErr_SetString(PyExc_MemoryError, "Not enough memory for formatting the query result."); return NULL; @@ -1231,7 +1243,7 @@ pglarge_new(pgobject *pgcnx, Oid oid) static void pglarge_dealloc(pglargeobject *self) { - if (self->lo_fd >= 0 && check_cnx_obj(self->pgcnx)) + if (self->lo_fd >= 0 && self->pgcnx->valid) lo_close(self->pgcnx->cnx, self->lo_fd); Py_XDECREF(self->pgcnx); @@ -1598,7 +1610,7 @@ pglarge_getattr(pglargeobject *self, char *name) Py_INCREF(self->pgcnx); return (PyObject *) (self->pgcnx); } - + PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } @@ -1608,7 +1620,7 @@ pglarge_getattr(pglargeobject *self, char *name) { if (check_lo_obj(self, 0)) return PyInt_FromLong(self->lo_oid); - + PyErr_Clear(); Py_INCREF(Py_None); return Py_None; } @@ -1775,10 +1787,11 @@ void notice_receiver(void *arg, const PGresult *res) PyGILState_STATE gstate = PyGILState_Ensure(); pgobject *self = (pgobject*) arg; PyObject *proc = self->notice_receiver; + if (proc && PyCallable_Check(proc)) { pgnoticeobject *notice = PyObject_NEW(pgnoticeobject, &PgNoticeType); - PyObject *args, *ret; + PyObject *ret; if (notice) { notice->pgcnx = arg; @@ -1789,10 +1802,8 @@ void notice_receiver(void *arg, const PGresult *res) Py_INCREF(Py_None); notice = (pgnoticeobject *)(void *)Py_None; } - args = Py_BuildValue("(O)", notice); - ret = PyObject_CallObject(proc, args); + ret = PyObject_CallFunction(proc, "(O)", notice); Py_XDECREF(ret); - Py_DECREF(args); } PyGILState_Release(gstate); } @@ -1951,9 +1962,15 @@ pg_set_notice_receiver(pgobject * self, PyObject * args) if (PyArg_ParseTuple(args, "O", &proc)) { - if (PyCallable_Check(proc)) + if (proc == Py_None) { - Py_XINCREF(proc); + Py_XDECREF(self->notice_receiver); + self->notice_receiver = NULL; + Py_INCREF(Py_None); ret = Py_None; + } + else if (PyCallable_Check(proc)) + { + Py_XINCREF(proc); Py_XDECREF(self->notice_receiver); self->notice_receiver = proc; PQsetNoticeReceiver(self->cnx, notice_receiver, self); Py_INCREF(Py_None); ret = Py_None; @@ -2155,51 +2172,63 @@ pgquery_getresult(pgqueryobject *self, PyObject *args) else switch (typ[j]) { - case 1: /* int2/4 */ + case PYGRES_INT: val = PyInt_FromString(s, NULL, 10); break; - case 2: /* int8 */ + case PYGRES_LONG: val = PyLong_FromString(s, NULL, 10); break; - case 3: /* float/double */ + case PYGRES_FLOAT: tmp_obj = PyString_FromString(s); val = PyFloat_FromString(tmp_obj, NULL); Py_DECREF(tmp_obj); break; - case 5: /* money */ + case PYGRES_MONEY: + /* convert to decimal only if decimal point is set */ + if (!decimal_point) goto default_case; for (k = 0; *s && k < sizeof(cashbuf) / sizeof(cashbuf[0]) - 1; s++) { - if (isdigit(*s)) + if (*s >= '0' && *s <= '9') cashbuf[k++] = *s; - else if (*s == *decimal_point) + else if (*s == decimal_point) cashbuf[k++] = '.'; else if (*s == '(' || *s == '-') cashbuf[k++] = '-'; } cashbuf[k] = 0; s = cashbuf; + /* FALLTHROUGH */ /* no break */ - /* FALLTHROUGH */ /* no break */ - case 4: /* numeric */ + case PYGRES_DECIMAL: if (decimal) { - tmp_obj = Py_BuildValue("(s)", s); - val = PyEval_CallObject(decimal, tmp_obj); + val = PyObject_CallFunction(decimal, "(s)", s); } else { tmp_obj = PyString_FromString(s); val = PyFloat_FromString(tmp_obj, NULL); + 
Py_DECREF(tmp_obj); } - Py_DECREF(tmp_obj); break; + case PYGRES_BOOL: + /* convert to bool only if bool_type is set */ + if (use_bool) + { + val = *s == 't' ? Py_True : Py_False; + Py_INCREF(val); + break; + } + /* FALLTHROUGH */ /* no break */ + default: + default_case: val = PyString_FromString(s); break; } @@ -2219,7 +2248,7 @@ pgquery_getresult(pgqueryobject *self, PyObject *args) } exit: - free(typ); + PyMem_Free(typ); /* returns list */ return reslist; @@ -2282,51 +2311,64 @@ pgquery_dictresult(pgqueryobject *self, PyObject *args) else switch (typ[j]) { - case 1: /* int2/4 */ + case PYGRES_INT: val = PyInt_FromString(s, NULL, 10); break; - case 2: /* int8 */ + case PYGRES_LONG: val = PyLong_FromString(s, NULL, 10); break; - case 3: /* float/double */ + case PYGRES_FLOAT: tmp_obj = PyString_FromString(s); val = PyFloat_FromString(tmp_obj, NULL); Py_DECREF(tmp_obj); break; - case 5: /* money */ + case PYGRES_MONEY: + /* convert to decimal only if decimal point is set */ + if (!decimal_point) goto default_case; + for (k = 0; *s && k < sizeof(cashbuf) / sizeof(cashbuf[0]) - 1; s++) { - if (isdigit(*s)) + if (*s >= '0' && *s <= '9') cashbuf[k++] = *s; - else if (*s == *decimal_point) + else if (*s == decimal_point) cashbuf[k++] = '.'; else if (*s == '(' || *s == '-') cashbuf[k++] = '-'; } cashbuf[k] = 0; s = cashbuf; + /* FALLTHROUGH */ /* no break */ - /* FALLTHROUGH */ /* no break */ - case 4: /* numeric */ + case PYGRES_DECIMAL: if (decimal) { - tmp_obj = Py_BuildValue("(s)", s); - val = PyEval_CallObject(decimal, tmp_obj); + val = PyObject_CallFunction(decimal, "(s)", s); } else { tmp_obj = PyString_FromString(s); val = PyFloat_FromString(tmp_obj, NULL); + Py_DECREF(tmp_obj); } - Py_DECREF(tmp_obj); break; + case PYGRES_BOOL: + /* convert to bool only if bool_type is set */ + if (use_bool) + { + val = *s == 't' ? Py_True : Py_False; + Py_INCREF(val); + break; + } + /* FALLTHROUGH */ /* no break */ + default: + default_case: val = PyString_FromString(s); break; } @@ -2347,7 +2389,7 @@ pgquery_dictresult(pgqueryobject *self, PyObject *args) } exit: - free(typ); + PyMem_Free(typ); /* returns list */ return reslist; @@ -2362,8 +2404,7 @@ static char pgquery_namedresult__doc__[] = static PyObject * pgquery_namedresult(pgqueryobject *self, PyObject *args) { - PyObject *arglist, - *ret; + PyObject *ret; /* checks args (args == NULL for an internal call) */ if (args && !PyArg_ParseTuple(args, "")) @@ -2380,9 +2421,7 @@ pgquery_namedresult(pgqueryobject *self, PyObject *args) return NULL; } - arglist = Py_BuildValue("(O)", self); - ret = PyObject_CallObject(namedresult, arglist); - Py_DECREF(arglist); + ret = PyObject_CallFunction(namedresult, "(O)", self); if (ret == NULL) return NULL; @@ -2488,7 +2527,7 @@ static PyObject * pg_query(pgobject *self, PyObject *args) { char *query; - PyObject *oargs = NULL; + PyObject *param_obj = NULL; PGresult *result; pgqueryobject *npgobj; int status, @@ -2501,126 +2540,120 @@ pg_query(pgobject *self, PyObject *args) } /* get query args */ - if (!PyArg_ParseTuple(args, "s|O", &query, &oargs)) + if (!PyArg_ParseTuple(args, "s|O", &query, ¶m_obj)) { PyErr_SetString(PyExc_TypeError, "query(sql, [args]), with sql (string)."); return NULL; } - /* If oargs is passed, ensure it's a non-empty tuple. We want to treat + /* If param_obj is passed, ensure it's a non-empty tuple. 
We want to treat * an empty tuple the same as no argument since we'll get that when the * caller passes no arguments to db.query(), and historic behaviour was * to call PQexec() in that case, which can execute multiple commands. */ - if (oargs) + if (param_obj) { - if (!PyTuple_Check(oargs) && !PyList_Check(oargs)) - { - PyErr_SetString(PyExc_TypeError, "query parameters must be a tuple or list."); + param_obj = PySequence_Fast(param_obj, + "query parameters must be a sequence."); + if (!param_obj) return NULL; - } + nparms = (int)PySequence_Fast_GET_SIZE(param_obj); - nparms = (int)PySequence_Size(oargs); + /* if there's a single argument and it's a list or tuple, it + * contains the positional arguments. */ + if (nparms == 1) + { + PyObject *first_obj = PySequence_Fast_GET_ITEM(param_obj, 0); + if (PyList_Check(first_obj) || PyTuple_Check(first_obj)) + { + Py_DECREF(param_obj); + param_obj = PySequence_Fast(first_obj, NULL); + nparms = (int)PySequence_Fast_GET_SIZE(param_obj); + } + } } /* gets result */ if (nparms) { /* prepare arguments */ - PyObject **str, **s, *obj = PySequence_GetItem(oargs, 0); + PyObject **str, **s; char **parms, **p, *enc=NULL; - int *lparms, *l; register int i; - /* if there's a single argument and it's a list or tuple, it - * contains the positional aguments. */ - if (nparms == 1 && (PyList_Check(obj) || PyTuple_Check(obj))) - { - oargs = obj; - nparms = (int)PySequence_Size(oargs); + str = (PyObject **)PyMem_Malloc(nparms * sizeof(*str)); + parms = (char **)PyMem_Malloc(nparms * sizeof(*parms)); + if (!str || !parms) { + PyMem_Free(parms); + PyMem_Free(str); + Py_XDECREF(param_obj); + PyErr_SetString(PyExc_MemoryError, "memory error in query()."); + return NULL; } - str = (PyObject **)malloc(nparms * sizeof(*str)); - parms = (char **)malloc(nparms * sizeof(*parms)); - lparms = (int *)malloc(nparms * sizeof(*lparms)); /* convert optional args to a list of strings -- this allows * the caller to pass whatever they like, and prevents us * from having to map types to OIDs */ - for (i = 0, s=str, p=parms, l=lparms; i < nparms; i++, s++, p++, l++) + for (i = 0, s=str, p=parms; i < nparms; i++, p++) { - obj = PySequence_GetItem(oargs, i); + PyObject *obj = PySequence_Fast_GET_ITEM(param_obj, i); if (obj == Py_None) { - *s = NULL; *p = NULL; - *l = 0; } else if (PyUnicode_Check(obj)) { + PyObject *str_obj; if (!enc) enc = (char *)pg_encoding_to_char( PQclientEncoding(self->cnx)); if (!strcmp(enc, "UTF8")) - *s = PyUnicode_AsUTF8String(obj); + str_obj = PyUnicode_AsUTF8String(obj); else if (!strcmp(enc, "LATIN1")) - *s = PyUnicode_AsLatin1String(obj); + str_obj = PyUnicode_AsLatin1String(obj); else if (!strcmp(enc, "SQL_ASCII")) - *s = PyUnicode_AsASCIIString(obj); + str_obj = PyUnicode_AsASCIIString(obj); else - *s = PyUnicode_AsEncodedString(obj, enc, "strict"); - if (*s == NULL) + str_obj = PyUnicode_AsEncodedString(obj, enc, "strict"); + if (!str_obj) { - free(lparms); free(parms); free(str); + PyMem_Free(parms); + while (s != str) { s--; Py_DECREF(*s); } + PyMem_Free(str); + Py_XDECREF(param_obj); PyErr_SetString(PyExc_UnicodeError, "query parameter" " could not be decoded (bad client encoding)"); - while (i--) - { - if (*--s) - { - Py_DECREF(*s); - } - } return NULL; } - *p = PyString_AsString(*s); - *l = (int)PyString_Size(*s); + *s++ = str_obj; + *p = PyString_AsString(str_obj); } else { - *s = PyObject_Str(obj); - if (*s == NULL) + PyObject *str_obj = PyObject_Str(obj); + if (!str_obj) { - free(lparms); free(parms); free(str); + PyMem_Free(parms); + while (s != 
str) { s--; Py_DECREF(*s); } + PyMem_Free(str); + Py_XDECREF(param_obj); PyErr_SetString(PyExc_TypeError, "query parameter has no string representation"); - while (i--) - { - if (*--s) - { - Py_DECREF(*s); - } - } return NULL; } - *p = PyString_AsString(*s); - *l = (int)PyString_Size(*s); + *s++ = str_obj; + *p = PyString_AsString(str_obj); } } Py_BEGIN_ALLOW_THREADS result = PQexecParams(self->cnx, query, nparms, - NULL, (const char * const *)parms, lparms, NULL, 0); + NULL, (const char * const *)parms, NULL, NULL, 0); Py_END_ALLOW_THREADS - free(lparms); free(parms); - for (i = 0, s=str; i < nparms; i++, s++) - { - if (*s) - { - Py_DECREF(*s); - } - } - free(str); + PyMem_Free(parms); + while (s != str) { s--; Py_DECREF(*s); } + PyMem_Free(str); } else { @@ -2629,6 +2662,9 @@ pg_query(pgobject *self, PyObject *args) Py_END_ALLOW_THREADS } + /* we don't need the params any more */ + Py_XDECREF(param_obj); + /* checks result validity */ if (!result) { @@ -2869,7 +2905,7 @@ pg_inserttable(pgobject *self, PyObject *args) } /* allocate buffer */ - if (!(buffer = malloc(MAX_BUFFER_SIZE))) + if (!(buffer = PyMem_Malloc(MAX_BUFFER_SIZE))) { PyErr_SetString(PyExc_MemoryError, "can't allocate insert buffer."); @@ -2885,7 +2921,7 @@ pg_inserttable(pgobject *self, PyObject *args) if (!result) { - free(buffer); + PyMem_Free(buffer); PyErr_SetString(PyExc_ValueError, PQerrorMessage(self->cnx)); return NULL; } @@ -2918,7 +2954,7 @@ pg_inserttable(pgobject *self, PyObject *args) { if (j != n) { - free(buffer); + PyMem_Free(buffer); PyErr_SetString(PyExc_TypeError, "arrays contained in second arg must have same size."); return NULL; @@ -2994,7 +3030,7 @@ pg_inserttable(pgobject *self, PyObject *args) if (bufsiz <= 0) { - free(buffer); + PyMem_Free(buffer); PyErr_SetString(PyExc_MemoryError, "insert buffer overflow."); return NULL; @@ -3009,7 +3045,7 @@ pg_inserttable(pgobject *self, PyObject *args) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PQendcopy(self->cnx); - free(buffer); + PyMem_Free(buffer); return NULL; } } @@ -3019,18 +3055,18 @@ pg_inserttable(pgobject *self, PyObject *args) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); PQendcopy(self->cnx); - free(buffer); + PyMem_Free(buffer); return NULL; } if (PQendcopy(self->cnx)) { PyErr_SetString(PyExc_IOError, PQerrorMessage(self->cnx)); - free(buffer); + PyMem_Free(buffer); return NULL; } - free(buffer); + PyMem_Free(buffer); /* no error : returns nothing */ Py_INCREF(Py_None); @@ -3164,12 +3200,11 @@ pg_escape_string(pgobject *self, PyObject *args) to_length = from_length; from_length = (from_length - 1)/2; } - to = (char *)malloc(to_length); + to = (char *)PyMem_Malloc(to_length); to_length = (int)PQescapeStringConn(self->cnx, to, from, (size_t)from_length, NULL); ret = Py_BuildValue("s#", to, to_length); - if (to) - free(to); + PyMem_Free(to); if (!ret) /* pass on exception */ return NULL; return ret; @@ -3636,11 +3671,10 @@ escape_string(PyObject *self, PyObject *args) to_length = from_length; from_length = (from_length - 1)/2; } - to = (char *)malloc(to_length); + to = (char *)PyMem_Malloc(to_length); to_length = (int)PQescapeString(to, from, (size_t)from_length); ret = Py_BuildValue("s#", to, to_length); - if (to) - free(to); + PyMem_Free(to); if (!ret) /* pass on exception */ return NULL; return ret; @@ -3693,48 +3727,83 @@ static PyObject return ret; } +/* get decimal point */ +static char get_decimal_point__doc__[] = +"get_decimal_point() -- get decimal point to be used for money values."; + +static PyObject * 
+get_decimal_point(PyObject *self, PyObject * args) +{ + PyObject *ret = NULL; + char s[2]; + + if (PyArg_ParseTuple(args, "")) + { + if (decimal_point) + { + s[0] = decimal_point; s[1] = '\0'; + ret = PyString_FromString(s); + } else { + Py_INCREF(Py_None); ret = Py_None; + } + } + else + { + PyErr_SetString(PyExc_TypeError, + "get_decimal_point() takes no parameter"); + } + + return ret; +} + /* set decimal point */ static char set_decimal_point__doc__[] = -"set_decimal_point() -- set decimal point to be used for money values."; +"set_decimal_point(char) -- set decimal point to be used for money values."; static PyObject * set_decimal_point(PyObject *self, PyObject * args) { PyObject *ret = NULL; - char *s; + char *s = NULL; - if (PyArg_ParseTuple(args, "s", &s)) - { - decimal_point = s; + /* gets arguments */ + if (PyArg_ParseTuple(args, "z", &s)) { + if (!s) + s = "\0"; + else if (*s && (*(s+1) || !strchr(".,;: '*/_`|", *s))) + s = NULL; + } + + if (s) { + decimal_point = *s; Py_INCREF(Py_None); ret = Py_None; + } else { + PyErr_SetString(PyExc_TypeError, + "set_decimal_point() expects a decimal mark character"); } + return ret; } -/* get decimal point */ -static char get_decimal_point__doc__[] = -"get_decimal_point() -- get decimal point to be used for money values."; +/* get decimal type */ +static char get_decimal__doc__[] = +"get_decimal() -- get the decimal type to be used for numeric values."; static PyObject * -get_decimal_point(PyObject *self, PyObject * args) +get_decimal(PyObject *self, PyObject *args) { PyObject *ret = NULL; if (PyArg_ParseTuple(args, "")) { - ret = PyString_FromString(decimal_point); - } - else - { - PyErr_SetString(PyExc_TypeError, - " get_decimal_point() takes no parameter"); + ret = decimal ? decimal : Py_None; + Py_INCREF(ret); } - return ret; } -/* set decimal */ +/* set decimal type */ static char set_decimal__doc__[] = "set_decimal(cls) -- set a decimal type to be used for numeric values."; @@ -3757,12 +3826,70 @@ set_decimal(PyObject *self, PyObject *args) Py_INCREF(Py_None); ret = Py_None; } else - PyErr_SetString(PyExc_TypeError, "decimal type must be None or callable"); + PyErr_SetString(PyExc_TypeError, + "decimal type must be None or callable"); } + return ret; } -/* set named result */ +/* get usage of bool values */ +static char get_bool__doc__[] = +"get_bool() -- check whether boolean values are converted to bool."; + +static PyObject * +get_bool(PyObject *self, PyObject * args) +{ + PyObject *ret = NULL; + + if (PyArg_ParseTuple(args, "")) + { + ret = use_bool ? Py_True : Py_False; + Py_INCREF(ret); + } + + return ret; +} + +/* set usage of bool values */ +static char set_bool__doc__[] = +"set_bool(bool) -- set whether boolean values should be converted to bool."; + +static PyObject * +set_bool(PyObject *self, PyObject * args) +{ + PyObject *ret = NULL; + int i; + + /* gets arguments */ + if (PyArg_ParseTuple(args, "i", &i)) + { + use_bool = i ? 1 : 0; + Py_INCREF(Py_None); ret = Py_None; + } + + return ret; +} + +/* get named result factory */ +static char get_namedresult__doc__[] = +"get_namedresult() -- get the function used for getting named results."; + +static PyObject * +get_namedresult(PyObject *self, PyObject *args) +{ + PyObject *ret = NULL; + + if (PyArg_ParseTuple(args, "")) + { + ret = namedresult ?
namedresult : Py_None; + Py_INCREF(ret); + } + + return ret; +} + +/* set named result factory */ static char set_namedresult__doc__[] = "set_namedresult(cls) -- set a function to be used for getting named results."; @@ -3782,6 +3909,7 @@ set_namedresult(PyObject *self, PyObject *args) else PyErr_SetString(PyExc_TypeError, "parameter must be callable"); } + return ret; } @@ -4136,12 +4264,18 @@ static struct PyMethodDef pg_methods[] = { escape_bytea__doc__}, {"unescape_bytea", (PyCFunction) unescape_bytea, METH_VARARGS, unescape_bytea__doc__}, - {"set_decimal_point", (PyCFunction) set_decimal_point, METH_VARARGS, - set_decimal_point__doc__}, {"get_decimal_point", (PyCFunction) get_decimal_point, METH_VARARGS, get_decimal_point__doc__}, + {"set_decimal_point", (PyCFunction) set_decimal_point, METH_VARARGS, + set_decimal_point__doc__}, + {"get_decimal", (PyCFunction) get_decimal, METH_VARARGS, + get_decimal__doc__}, {"set_decimal", (PyCFunction) set_decimal, METH_VARARGS, set_decimal__doc__}, + {"get_bool", (PyCFunction) get_bool, METH_VARARGS, get_bool__doc__}, + {"set_bool", (PyCFunction) set_bool, METH_VARARGS, set_bool__doc__}, + {"get_namedresult", (PyCFunction) get_namedresult, METH_VARARGS, + get_namedresult__doc__}, {"set_namedresult", (PyCFunction) set_namedresult, METH_VARARGS, set_namedresult__doc__}, diff --git a/module/pgtypes.h b/pgtypes.h similarity index 94% rename from module/pgtypes.h rename to pgtypes.h index dc2d909b..354a76a1 100644 --- a/module/pgtypes.h +++ b/pgtypes.h @@ -2,8 +2,8 @@ pgtypes - PostgreSQL type definitions These are the standard PostgreSQL built-in types, - extracted from catalog/pg_type.h Revision 1.212, - because that header file is sometimes not availale + extracted from server/catalog/pg_type.h Revision 1.212, + because that header file is sometimes not available or needs other header files to get properly included. You can also query pg_type to get this information. */ diff --git a/module/setup.cfg b/setup.cfg similarity index 79% rename from module/setup.cfg rename to setup.cfg index c12b8440..ecaeed8f 100644 --- a/module/setup.cfg +++ b/setup.cfg @@ -7,4 +7,7 @@ large_objects = 1 default_vars = 1 # enable string escaping functions # (PostgreSQL version 9.0 and higher) -escaping_funcs = 1 \ No newline at end of file +escaping_funcs = 1 + +[metadata] +description-file = README.rst diff --git a/module/setup.py b/setup.py similarity index 74% rename from module/setup.py rename to setup.py index e65a21d7..6b182fcc 100755 --- a/module/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ #! /usr/bin/python # $Id$ -"""Setup script for PyGreSQL version 4.1 +"""Setup script for PyGreSQL version 4.2.2 PyGreSQL is an open-source Python module that interfaces to a PostgreSQL database. It embeds the PostgreSQL query library to allow @@ -10,10 +10,10 @@ Authors and history: * PyGreSQL written 1997 by D'Arcy J.M. 
Cain * based on code written 1995 by Pascal Andre -* setup script created 2000/04 Mark Alexander -* tweaked 2000/05 Jeremy Hylton -* win32 support 2001/01 by Gerhard Haering -* tweaked 2006/02-2010/02 by Christoph Zwerschke +* setup script created 2000 by Mark Alexander +* improved 2000 by Jeremy Hylton +* improved 2001 by Gerhard Haering +* improved 2006 and 2016 by Christoph Zwerschke Prerequisites to be installed: * Python including devel package (header files and distutils) @@ -21,32 +21,31 @@ * PostgreSQL pg_config tool (usually included in the devel package) (the Windows installer has it as part of the database server feature) -The supported versions are Python 2.5-2.7 and PostgreSQL 8.3-9.2. +The supported versions are Python 2.4-2.7 and PostgreSQL 8.3-9.5. Use as follows: python setup.py build # to build the module python setup.py install # to install it -You can use MinGW or MinGW-w64 for building on Windows: -python setup.py build -c mingw32 install - See docs.python.org/doc/install/ for more information on using distutils to install Python programs. """ -version = '4.1.1' +version = '4.2.2' import sys -if not (2, 3) <= sys.version_info[:2] < (3, 0): +if not (2, 4) <= sys.version_info[:2] <= (2, 7): raise Exception("Sorry, PyGreSQL %s" " does not support this Python version" % version) import os import platform +import re +import warnings try: from setuptools import setup except ImportError: @@ -57,6 +56,15 @@ from distutils.sysconfig import get_python_inc, get_python_lib +# For historical reasons, PyGreSQL does not install itself as a single +# "pygresql" package, but as two top-level modules "pg", providing the +# classic interface, and "pgdb" for the modern DB-API 2.0 interface. +# These two top-level Python modules share the same C extension "_pg". 
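To make the comment above concrete, here is a brief sketch of the two interfaces that share the _pg extension (the connection parameters are assumptions for illustration only):

    # classic interface, top-level module "pg"
    import pg
    con = pg.connect(dbname='unittest')
    print con.query("select 1 + 1").getresult()

    # DB-API 2.0 interface, top-level module "pgdb"
    import pgdb
    con = pgdb.connect(database='unittest')
    cur = con.cursor()
    cur.execute("select 1 + 1")
    print cur.fetchone()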
+ +py_modules = ['pg', 'pgdb'] +c_sources = ['pgmodule.c'] + + def pg_config(s): """Retrieve information about installed version of PostgreSQL.""" f = os.popen('pg_config --%s' % s) @@ -70,16 +78,13 @@ def pg_config(s): def pg_version(): """Return the PostgreSQL version as a tuple of integers.""" - parts = [] - for part in pg_config('version').split()[-1].split('.'): - if part.isdigit(): - part = int(part) - parts.append(part) - return tuple(parts or [8]) + match = re.search(r'(\d+)\.(\d+)', pg_config('version')) + if match: + return tuple(map(int, match.groups())) + return (8, 3) pg_version = pg_version() -py_modules = ['pg', 'pgdb'] libraries = ['pq'] # Make sure that the Python header files are searched before # those of PostgreSQL, because PostgreSQL can have its own Python.h @@ -87,7 +92,7 @@ def pg_version(): library_dirs = [get_python_lib(), pg_config('libdir')] define_macros = [('PYGRESQL_VERSION', version)] undef_macros = [] -extra_compile_args = ['-O2'] +extra_compile_args = ['-O2', '-funsigned-char', '-Wall', '-Werror'] class build_pg_ext(build_ext): @@ -118,6 +123,8 @@ def initialize_options(self): self.large_objects = True self.default_vars = True self.escaping_funcs = pg_version[0] >= 9 + if pg_version < (8, 3): + warnings.warn("PygreSQL does not support this PostgreSQL version.") def finalize_options(self): """Set final values for all build_pg options.""" @@ -128,11 +135,11 @@ def finalize_options(self): define_macros.append(('LARGE_OBJECTS', None)) if self.default_vars: define_macros.append(('DEFAULT_VARS', None)) - if self.escaping_funcs: + if self.escaping_funcs and pg_version[0] >= 9: define_macros.append(('ESCAPING_FUNCS', None)) if sys.platform == 'win32': bits = platform.architecture()[0] - if bits == '64bit': # we need to find libpq64 + if bits == '64bit': # we need to find libpq64 for path in os.environ['PATH'].split(os.pathsep) + [ r'C:\Program Files\PostgreSQL\libpq64']: library_dir = os.path.join(path, 'lib') @@ -149,42 +156,49 @@ def finalize_options(self): library_dirs.insert(1, library_dir) if include_dir not in include_dirs: include_dirs.insert(1, include_dir) - libraries[0] += 'dll' # libpqdll instead of libpq + libraries[0] += 'dll' # libpqdll instead of libpq break compiler = self.get_compiler() - if compiler == 'mingw32': # MinGW - if bits == '64bit': # needs MinGW-w64 + if compiler == 'mingw32': # MinGW + if bits == '64bit': # needs MinGW-w64 define_macros.append(('MS_WIN64', None)) - elif compiler == 'msvc': # Microsoft Visual C++ + elif compiler == 'msvc': # Microsoft Visual C++ libraries[0] = 'lib' + libraries[0] + extra_compile_args[1:] = ['-J', '-W3', '-WX'] setup( name="PyGreSQL", version=version, description="Python PostgreSQL Interfaces", - long_description=__doc__.split('\n\n', 2)[1], # first passage + long_description=__doc__.split('\n\n', 2)[1], # first passage keywords="pygresql postgresql database api dbapi", author="D'Arcy J. M. 
Cain", author_email="darcy@PyGreSQL.org", url="http://www.pygresql.org", download_url="ftp://ftp.pygresql.org/pub/distrib/", platforms=["any"], - license="Python", + license="PostgreSQL", py_modules=py_modules, - ext_modules=[Extension('_pg', ['pgmodule.c'], + ext_modules=[Extension('_pg', c_sources, include_dirs=include_dirs, library_dirs=library_dirs, define_macros=define_macros, undef_macros=undef_macros, libraries=libraries, extra_compile_args=extra_compile_args)], zip_safe=False, cmdclass=dict(build_ext=build_pg_ext), + test_suite='tests.discover', classifiers=[ "Development Status :: 6 - Mature", "Intended Audience :: Developers", - "License :: OSI Approved :: Python Software Foundation License", + "License :: OSI Approved :: The PostgreSQL License", "Operating System :: OS Independent", "Programming Language :: C", - "Programming Language :: Python", + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.4', + 'Programming Language :: Python :: 2.5', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', "Programming Language :: SQL", "Topic :: Database", "Topic :: Database :: Front-Ends", diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..d9db87db --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,16 @@ +"""PyGreSQL test suite. + +You can specify your local database settings in LOCAL_PyGreSQL.py. + +""" + +try: + import unittest2 as unittest # for Python < 2.7 +except ImportError: + import unittest + + +def discover(): + loader = unittest.TestLoader() + suite = loader.discover('.') + return suite \ No newline at end of file diff --git a/module/dbapi20.py b/tests/dbapi20.py similarity index 99% rename from module/dbapi20.py rename to tests/dbapi20.py index f7247807..69f41ce8 100644 --- a/module/dbapi20.py +++ b/tests/dbapi20.py @@ -15,7 +15,11 @@ __version__ = '$Revision: 1.5 $'[11:-2] __author__ = 'Stuart Bishop ' -import unittest +try: + import unittest2 as unittest # for Python < 2.7 +except ImportError: + import unittest + import time # $Log: not supported by cvs2svn $ diff --git a/module/TEST_PyGreSQL_classic.py b/tests/test_classic.py similarity index 65% rename from module/TEST_PyGreSQL_classic.py rename to tests/test_classic.py index ba9a058e..7977c20c 100755 --- a/module/TEST_PyGreSQL_classic.py +++ b/tests/test_classic.py @@ -1,14 +1,19 @@ #! /usr/bin/python -from __future__ import with_statement +try: + import unittest2 as unittest # for Python < 2.7 +except ImportError: + import unittest import sys -from functools import partial from time import sleep from threading import Thread -import unittest + from pg import * +# check whether the "with" statement is supported +no_with = sys.version_info[:2] < (2, 5) + # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. 
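Since several test modules repeat this pattern, a LOCAL_PyGreSQL.py override could look like the following sketch (only the dbname variable is confirmed by the default shown just below; dbhost and dbport are assumed to follow the same naming scheme):

    # LOCAL_PyGreSQL.py -- hypothetical local settings for the test suite
    dbname = 'my_test_db'   # overrides the default 'unittest'
    dbhost = 'localhost'    # assumed variable name
    dbport = 5432           # assumed variable name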
dbname = 'unittest' @@ -116,10 +121,13 @@ def test_insert(self): db.insert('_test_schema', _test=1235) self.assertEqual(d['dvar'], 999) + @unittest.skipIf(no_with, 'context managers not supported') def test_context_manager(self): db = opendb() t = '_test_schema' d = dict(_test=1235) + # wrap "with" statements to avoid SyntaxError in Python < 2.5 + exec """from __future__ import with_statement\nif True: with db: db.insert(t, d) d['_test'] += 1 @@ -135,7 +143,7 @@ def test_context_manager(self): d['_test'] += 1 db.insert(t, d) d['_test'] += 1 - db.insert(t, d) + db.insert(t, d)\n""" self.assertTrue(db.get(t, 1235)) self.assertTrue(db.get(t, 1236)) self.assertRaises(DatabaseError, db.get, t, 1237) @@ -173,6 +181,7 @@ def test_update(self): self.assertEqual(r['dvar'], 123) r = db.get('_test_schema', 1234) + self.assertIn('dvar', r) db.update('_test_schema', _test=1234, dvar=456) r = db.get('_test_schema', 1234) self.assertEqual(r['dvar'], 456) @@ -234,74 +243,109 @@ def notify_callback(self, arg_dict): else: self.notify_timeout = True - def test_notify(self): + def test_notify(self, options=None): + if not options: + options = {} + run_as_method = options.get('run_as_method') + call_notify = options.get('call_notify') + two_payloads = options.get('two_payloads') + db = opendb() + if db.server_version < 90000: # PostgreSQL < 9.0 + self.skipTest('Notify with payload not supported') + # Get function under test, can be standalone or DB method. + if run_as_method: + fut = db.notification_handler + else: + # functools.partial is not available in Python < 2.5 + fut = lambda *args: NotificationHandler(db, *args) + arg_dict = dict(event=None, called=False) + self.notify_timeout = False + # Listen for 'event_1'. + target = fut('event_1', self.notify_callback, arg_dict, 5) + thread = Thread(None, target) + thread.start() + try: + # Wait until the thread has started. + for n in range(500): + if target.listening: + break + sleep(0.01) + self.assertTrue(target.listening) + self.assertTrue(thread.isAlive()) + # Open another connection for sending notifications. + db2 = opendb() + # Generate notification from the other connection. + if two_payloads: + db2.begin() + if call_notify: + if two_payloads: + target.notify(db2, payload='payload 0') + target.notify(db2, payload='payload 1') + else: + if two_payloads: + db2.query("notify event_1, 'payload 0'") + db2.query("notify event_1, 'payload 1'") + if two_payloads: + db2.commit() + # Wait until the notification has been caught. + for n in range(500): + if arg_dict['called'] or self.notify_timeout: + break + sleep(0.01) + # Check that callback has been invoked. + self.assertTrue(arg_dict['called']) + self.assertEqual(arg_dict['event'], 'event_1') + self.assertEqual(arg_dict['extra'], 'payload 1') + self.assertTrue(isinstance(arg_dict['pid'], int)) + self.assertFalse(self.notify_timeout) + arg_dict['called'] = False + self.assertTrue(thread.isAlive()) + # Generate stop notification. + if call_notify: + target.notify(db2, stop=True, payload='payload 2') + else: + db2.query("notify stop_event_1, 'payload 2'") + db2.close() + # Wait until the notification has been caught. + for n in range(500): + if arg_dict['called'] or self.notify_timeout: + break + sleep(0.01) + # Check that callback has been invoked. 
+ self.assertTrue(arg_dict['called']) + self.assertEqual(arg_dict['event'], 'stop_event_1') + self.assertEqual(arg_dict['extra'], 'payload 2') + self.assertTrue(isinstance(arg_dict['pid'], int)) + self.assertFalse(self.notify_timeout) + thread.join(5) + self.assertFalse(thread.isAlive()) + self.assertFalse(target.listening) + finally: + target.close() + if thread.isAlive(): + thread.join(5) + + def test_notify_other_options(self): for run_as_method in False, True: for call_notify in False, True: - db = opendb() - # Get function under test, can be standalone or DB method. - fut = db.notification_handler if run_as_method else partial( - NotificationHandler, db) - arg_dict = dict(event=None, called=False) - self.notify_timeout = False - # Listen for 'event_1'. - target = fut('event_1', self.notify_callback, arg_dict) - thread = Thread(None, target) - thread.start() - # Wait until the thread has started. - for n in xrange(500): - if target.listening: - break - sleep(0.01) - self.assertTrue(target.listening) - self.assertTrue(thread.isAlive()) - # Open another connection for sending notifications. - db2 = opendb() - # Generate notification from the other connection. - if call_notify: - target.notify(db2, payload='payload 1') - else: - db2.query("notify event_1, 'payload 1'") - # Wait until the notification has been caught. - for n in xrange(500): - if arg_dict['called'] or self.notify_timeout: - break - sleep(0.01) - # Check that callback has been invoked. - self.assertTrue(arg_dict['called']) - self.assertEqual(arg_dict['event'], 'event_1') - self.assertEqual(arg_dict['extra'], 'payload 1') - self.assertTrue(isinstance(arg_dict['pid'], int)) - self.assertFalse(self.notify_timeout) - arg_dict['called'] = False - self.assertTrue(thread.isAlive()) - # Generate stop notification. - if call_notify: - target.notify(db2, stop=True, payload='payload 2') - else: - db2.query("notify stop_event_1, 'payload 2'") - db2.close() - # Wait until the notification has been caught. - for n in xrange(500): - if arg_dict['called'] or self.notify_timeout: - break - sleep(0.01) - # Check that callback has been invoked. - self.assertTrue(arg_dict['called']) - self.assertEqual(arg_dict['event'], 'stop_event_1') - self.assertEqual(arg_dict['extra'], 'payload 2') - self.assertTrue(isinstance(arg_dict['pid'], int)) - self.assertFalse(self.notify_timeout) - thread.join(5) - self.assertFalse(thread.isAlive()) - self.assertFalse(target.listening) - target.close() + for two_payloads in False, True: + options = dict( + run_as_method=run_as_method, + call_notify=call_notify, + two_payloads=two_payloads) + if True in options.values(): + self.test_notify(options) def test_notify_timeout(self): for run_as_method in False, True: db = opendb() + if db.server_version < 90000: # PostgreSQL < 9.0 + self.skipTest('Notify with payload not supported') # Get function under test, can be standalone or DB method. - fut = db.notification_handler if run_as_method else partial( - NotificationHandler, db) + if run_as_method: + fut = db.notification_handler + else: + fut = lambda *args: NotificationHandler(db, *args) arg_dict = dict(event=None, called=False) self.notify_timeout = False # Listen for 'event_1' with timeout of 10ms. 
@@ -319,22 +363,24 @@ def test_notify_timeout(self): if __name__ == '__main__': - suite = unittest.TestSuite() - - if len(sys.argv) > 1: test_list = sys.argv[1:] - else: test_list = unittest.getTestCaseNames(UtilityTest, 'test_') - if len(sys.argv) == 2 and sys.argv[1] == '-l': print '\n'.join(unittest.getTestCaseNames(UtilityTest, 'test_')) - sys.exit(1) + sys.exit(0) + + test_list = [name for name in sys.argv[1:] if not name.startswith('-')] + if not test_list: + test_list = unittest.getTestCaseNames(UtilityTest, 'test_') + suite = unittest.TestSuite() for test_name in test_list: try: suite.addTest(UtilityTest(test_name)) - except: + except Exception: print "\n ERROR: %s.\n" % sys.exc_value sys.exit(1) - rc = unittest.TextTestRunner(verbosity=1).run(suite) - sys.exit(len(rc.errors+rc.failures) != 0) - + verbosity = '-v' in sys.argv[1:] and 2 or 1 + failfast = '-l' in sys.argv[1:] + runner = unittest.TextTestRunner(verbosity=verbosity, failfast=failfast) + rc = runner.run(suite) + sys.exit((rc.errors or rc.failures) and 1 or 0) diff --git a/module/TEST_PyGreSQL_classic_connection.py b/tests/test_classic_connection.py similarity index 69% rename from module/TEST_PyGreSQL_classic_connection.py rename to tests/test_classic_connection.py index 0e0d8566..744bb615 100755 --- a/module/TEST_PyGreSQL_classic_connection.py +++ b/tests/test_classic_connection.py @@ -12,12 +12,14 @@ """ try: - import unittest2 as unittest # for Python < 2.6 + import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import sys +import tempfile import threading import time +import os import pg # the module under test @@ -27,8 +29,6 @@ except ImportError: # Python < 2.6 namedtuple = None -from StringIO import StringIO - # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. dbname = 'unittest' @@ -40,6 +40,13 @@ except ImportError: pass +windows = os.name == 'nt' + +# There is a known a bug in libpq under Windows which can cause +# the interface to crash when calling PQhost(): +do_not_ask_for_host = windows +do_not_ask_for_host_reason = 'libpq issue on Windows' + def connect(): """Create a basic pg connection to the test database.""" @@ -63,7 +70,7 @@ def testCanConnect(self): class TestConnectObject(unittest.TestCase): - """"Test existence of basic pg connection methods.""" + """Test existence of basic pg connection methods.""" def setUp(self): self.connection = connect() @@ -74,11 +81,17 @@ def tearDown(self): except pg.InternalError: pass + def is_method(self, attribute): + """Check if given attribute on the connection is a method.""" + if do_not_ask_for_host and attribute == 'host': + return False + return callable(getattr(self.connection, attribute)) + def testAllConnectAttributes(self): attributes = '''db error host options port protocol_version server_version status tty user'''.split() connection_attributes = [a for a in dir(self.connection) - if not callable(eval("self.connection." 
+ a))] + if not a.startswith('__') and not self.is_method(a)] self.assertEqual(attributes, connection_attributes) def testAllConnectMethods(self): @@ -87,8 +100,11 @@ def testAllConnectMethods(self): fileno get_notice_receiver getline getlo getnotify inserttable locreate loimport parameter putline query reset set_notice_receiver source transaction'''.split() + if self.connection.server_version < 90000: # PostgreSQL < 9.0 + methods.remove('escape_identifier') + methods.remove('escape_literal') connection_methods = [a for a in dir(self.connection) - if callable(eval("self.connection." + a))] + if not a.startswith('__') and self.is_method(a)] self.assertEqual(methods, connection_methods) def testAttributeDb(self): @@ -98,6 +114,7 @@ def testAttributeError(self): error = self.connection.error self.assertTrue(not error or 'krb5_' in error) + @unittest.skipIf(do_not_ask_for_host, do_not_ask_for_host_reason) def testAttributeHost(self): def_host = 'localhost' self.assertIsInstance(self.connection.host, str) @@ -176,7 +193,7 @@ def testMethodReset(self): query = self.connection.query # check that client encoding gets reset encoding = query('show client_encoding').getresult()[0][0].upper() - changed_encoding = 'LATIN1' if encoding == 'UTF8' else 'UTF8' + changed_encoding = encoding == 'UTF8' and 'LATIN1' or 'UTF8' self.assertNotEqual(encoding, changed_encoding) self.connection.query("set client_encoding=%s" % changed_encoding) new_encoding = query('show client_encoding').getresult()[0][0].upper() @@ -205,7 +222,7 @@ def sleep(): thread.start() # run the query while 1: # make sure the query is really running time.sleep(0.1) - if thread.is_alive() or time.time() - t1 > 5: + if thread.isAlive() or time.time() - t1 > 5: break r = self.connection.cancel() # cancel the running query thread.join() # wait for the thread to end @@ -223,7 +240,7 @@ def testMethodFileNo(self): class TestSimpleQueries(unittest.TestCase): - """"Test simple queries via a basic pg connection.""" + """Test simple queries via a basic pg connection.""" def setUp(self): self.c = connect() @@ -254,12 +271,19 @@ def testGetresult(self): self.assertEqual(r, result) def testGetresultLong(self): - q = "select 1234567890123456790" - result = 1234567890123456790L + q = "select 9876543210" + result = 9876543210L v = self.c.query(q).getresult()[0][0] self.assertIsInstance(v, long) self.assertEqual(v, result) + def testGetresultDecimal(self): + q = "select 98765432109876543210" + result = Decimal(98765432109876543210L) + v = self.c.query(q).getresult()[0][0] + self.assertIsInstance(v, Decimal) + self.assertEqual(v, result) + def testGetresultString(self): result = 'Hello, world!' q = "select '%s'" % result @@ -278,12 +302,19 @@ def testDictresult(self): self.assertEqual(r, result) def testDictresultLong(self): - q = "select 1234567890123456790 as longjohnsilver" - result = 1234567890123456790L + q = "select 9876543210 as longjohnsilver" + result = 9876543210L v = self.c.query(q).dictresult()[0]['longjohnsilver'] self.assertIsInstance(v, long) self.assertEqual(v, result) + def testDictresultDecimal(self): + q = "select 98765432109876543210 as longjohnsilver" + result = Decimal(98765432109876543210L) + v = self.c.query(q).dictresult()[0]['longjohnsilver'] + self.assertIsInstance(v, Decimal) + self.assertEqual(v, result) + def testDictresultString(self): result = 'Hello, world!' 
q = "select '%s' as greeting" % result @@ -472,16 +503,16 @@ def testPrint(self): q = ("select 1 as a, 'hello' as h, 'w' as world" " union select 2, 'xyz', 'uvw'") r = self.c.query(q) - s = StringIO() - stdout, sys.stdout = sys.stdout, s + f = tempfile.TemporaryFile() + stdout, sys.stdout = sys.stdout, f try: print r except Exception: pass - finally: - sys.stdout = stdout - r = s.getvalue() - s.close() + sys.stdout = stdout + f.seek(0) + r = f.read() + f.close() self.assertEqual(r, 'a| h |world\n' '-+-----+-----\n' @@ -491,7 +522,7 @@ def testPrint(self): class TestParamQueries(unittest.TestCase): - """"Test queries with parameters via a basic pg connection.""" + """Test queries with parameters via a basic pg connection.""" def setUp(self): self.c = connect() @@ -505,26 +536,34 @@ def testQueryWithNoneParam(self): self.assertEqual(self.c.query("select $1::text", [None] ).getresult(), [(None,)]) - def testQueryWithBoolParams(self): + def testQueryWithBoolParams(self, use_bool=None): query = self.c.query - self.assertEqual(query("select false").getresult(), [('f',)]) - self.assertEqual(query("select true").getresult(), [('t',)]) - self.assertEqual(query("select $1::bool", (None,)).getresult(), - [(None,)]) - self.assertEqual(query("select $1::bool", ('f',)).getresult(), [('f',)]) - self.assertEqual(query("select $1::bool", ('t',)).getresult(), [('t',)]) - self.assertEqual(query("select $1::bool", ('false',)).getresult(), - [('f',)]) - self.assertEqual(query("select $1::bool", ('true',)).getresult(), - [('t',)]) - self.assertEqual(query("select $1::bool", ('n',)).getresult(), [('f',)]) - self.assertEqual(query("select $1::bool", ('y',)).getresult(), [('t',)]) - self.assertEqual(query("select $1::bool", (0,)).getresult(), [('f',)]) - self.assertEqual(query("select $1::bool", (1,)).getresult(), [('t',)]) - self.assertEqual(query("select $1::bool", (False,)).getresult(), - [('f',)]) - self.assertEqual(query("select $1::bool", (True,)).getresult(), - [('t',)]) + if use_bool is not None: + use_bool_default = pg.get_bool() + pg.set_bool(use_bool) + try: + v_false, v_true = use_bool and (False, True) or 'ft' + r_false, r_true = [(v_false,)], [(v_true,)] + self.assertEqual(query("select false").getresult(), r_false) + self.assertEqual(query("select true").getresult(), r_true) + q = "select $1::bool" + self.assertEqual(query(q, (None,)).getresult(), [(None,)]) + self.assertEqual(query(q, ('f',)).getresult(), r_false) + self.assertEqual(query(q, ('t',)).getresult(), r_true) + self.assertEqual(query(q, ('false',)).getresult(), r_false) + self.assertEqual(query(q, ('true',)).getresult(), r_true) + self.assertEqual(query(q, ('n',)).getresult(), r_false) + self.assertEqual(query(q, ('y',)).getresult(), r_true) + self.assertEqual(query(q, (0,)).getresult(), r_false) + self.assertEqual(query(q, (1,)).getresult(), r_true) + self.assertEqual(query(q, (False,)).getresult(), r_false) + self.assertEqual(query(q, (True,)).getresult(), r_true) + finally: + if use_bool is not None: + pg.set_bool(use_bool_default) + + def testQueryWithBoolParamsAndUseBool(self): + self.testQueryWithBoolParams(use_bool=True) def testQueryWithIntParams(self): query = self.c.query @@ -537,7 +576,7 @@ def testQueryWithIntParams(self): [(Decimal('2'),)]) self.assertEqual(query("select 1, $1::integer", (2,) ).getresult(), [(1, 2)]) - self.assertEqual(query("select 1 union select $1", (2,) + self.assertEqual(query("select 1 union select $1::integer", (2,) ).getresult(), [(1,), (2,)]) self.assertEqual(query("select $1::integer+$2", (1, 2) 
).getresult(), [(3,)]) @@ -621,7 +660,9 @@ def testUnicodeQuery(self): class TestInserttable(unittest.TestCase): - """"Test inserttable method.""" + """Test inserttable method.""" + + cls_set_up = False @classmethod def setUpClass(cls): @@ -632,6 +673,7 @@ def setUpClass(cls): "d numeric, f4 real, f8 double precision, m money," "c char(1), v4 varchar(4), c4 char(4), t text)") c.close() + cls.cls_set_up = True @classmethod def tearDownClass(cls): @@ -640,7 +682,9 @@ def tearDownClass(cls): c.close() def setUp(self): + self.assertTrue(self.cls_set_up) self.c = connect() + self.c.query("set lc_monetary='C'") self.c.query("set datestyle='ISO,YMD'") def tearDown(self): @@ -745,7 +789,9 @@ def testInserttableMaxValues(self): class TestDirectSocketAccess(unittest.TestCase): - """"Test copy command with direct socket access.""" + """Test copy command with direct socket access.""" + + cls_set_up = False @classmethod def setUpClass(cls): @@ -753,6 +799,7 @@ def setUpClass(cls): c.query("drop table if exists test cascade") c.query("create table test (i int, v varchar(16))") c.close() + cls.cls_set_up = True @classmethod def tearDownClass(cls): @@ -761,6 +808,7 @@ def tearDownClass(cls): c.close() def setUp(self): + self.assertTrue(self.cls_set_up) self.c = connect() self.c.query("set datestyle='ISO,YMD'") @@ -782,7 +830,7 @@ def testPutline(self): r = query("select * from test").getresult() self.assertEqual(r, data) - def testPutline(self): + def testGetline(self): getline = self.c.getline query = self.c.query data = list(enumerate("apple banana pear plum strawberry".split())) @@ -794,7 +842,7 @@ def testPutline(self): v = getline() if i < n: self.assertEqual(v, '%d\t%s' % data[i]) - elif i == n: + elif i == n or self.c.server_version < 90000: self.assertEqual(v, '\\.') else: self.assertIsNone(v) @@ -811,7 +859,7 @@ def testParameterChecks(self): class TestNotificatons(unittest.TestCase): - """"Test notification support.""" + """Test notification support.""" def setUp(self): self.c = connect() @@ -820,6 +868,8 @@ def tearDown(self): self.c.close() def testGetNotify(self): + if self.c.server_version < 90000: # PostgreSQL < 9.0 + self.skipTest('Notify with payload not supported') getnotify = self.c.getnotify query = self.c.query self.assertIsNone(getnotify()) @@ -836,20 +886,16 @@ def testGetNotify(self): self.assertEqual(r[0], 'test_notify') self.assertEqual(r[2], '') self.assertIsNone(self.c.getnotify()) - try: - query("notify test_notify, 'test_payload'") - except pg.ProgrammingError: # PostgreSQL < 9.0 - pass - else: - r = getnotify() - self.assertTrue(isinstance(r, tuple)) - self.assertEqual(len(r), 3) - self.assertIsInstance(r[0], str) - self.assertIsInstance(r[1], int) - self.assertIsInstance(r[2], str) - self.assertEqual(r[0], 'test_notify') - self.assertEqual(r[2], 'test_payload') - self.assertIsNone(getnotify()) + query("notify test_notify, 'test_payload'") + r = getnotify() + self.assertTrue(isinstance(r, tuple)) + self.assertEqual(len(r), 3) + self.assertIsInstance(r[0], str) + self.assertIsInstance(r[1], int) + self.assertIsInstance(r[2], str) + self.assertEqual(r[0], 'test_notify') + self.assertEqual(r[2], 'test_payload') + self.assertIsNone(getnotify()) finally: query('unlisten test_notify') @@ -857,14 +903,17 @@ def testGetNoticeReceiver(self): self.assertIsNone(self.c.get_notice_receiver()) def testSetNoticeReceiver(self): - self.assertRaises(TypeError, self.c.set_notice_receiver, None) self.assertRaises(TypeError, self.c.set_notice_receiver, 42) + self.assertRaises(TypeError, 
self.c.set_notice_receiver, 'invalid') self.assertIsNone(self.c.set_notice_receiver(lambda notice: None)) + self.assertIsNone(self.c.set_notice_receiver(None)) def testSetAndGetNoticeReceiver(self): r = lambda notice: None self.assertIsNone(self.c.set_notice_receiver(r)) self.assertIs(self.c.get_notice_receiver(), r) + self.assertIsNone(self.c.set_notice_receiver(None)) + self.assertIsNone(self.c.get_notice_receiver()) def testNoticeReceiver(self): self.c.query('''create function bilbo_notice() returns void AS $$ @@ -908,62 +957,281 @@ def tearDown(self): def testGetDecimalPoint(self): point = pg.get_decimal_point() + # error if a parameter is passed + self.assertRaises(TypeError, pg.get_decimal_point, point) self.assertIsInstance(point, str) - self.assertEqual(point, '.') + self.assertEqual(point, '.') # the default setting + pg.set_decimal_point(',') + try: + r = pg.get_decimal_point() + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, str) + self.assertEqual(r, ',') + pg.set_decimal_point("'") + try: + r = pg.get_decimal_point() + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, str) + self.assertEqual(r, "'") + pg.set_decimal_point('') + try: + r = pg.get_decimal_point() + finally: + pg.set_decimal_point(point) + self.assertIsNone(r) + pg.set_decimal_point(None) + try: + r = pg.get_decimal_point() + finally: + pg.set_decimal_point(point) + self.assertIsNone(r) def testSetDecimalPoint(self): d = pg.Decimal point = pg.get_decimal_point() + self.assertRaises(TypeError, pg.set_decimal_point) + # error if decimal point is not a string + self.assertRaises(TypeError, pg.set_decimal_point, 0) + # error if more than one decimal point passed + self.assertRaises(TypeError, pg.set_decimal_point, '.', ',') + self.assertRaises(TypeError, pg.set_decimal_point, '.,') + # error if decimal point is not a punctuation character + self.assertRaises(TypeError, pg.set_decimal_point, '0') query = self.c.query - # check that money values can be interpreted correctly - # if and only if the decimal point is set appropriately - # for the current lc_monetary setting - query("set lc_monetary='en_US'") + # check that money values are interpreted as decimal values + # only if decimal_point is set, and that the result is correct + # only if it is set suitable for the current lc_monetary setting + select_money = "select '34.25'::money" + proper_money = d('34.25') + bad_money = d('3425') + en_locales = 'en', 'en_US', 'en_US.utf8', 'en_US.UTF-8' + en_money = '$34.25', '$ 34.25', '34.25$', '34.25 $', '34.25 Dollar' + de_locales = 'de', 'de_DE', 'de_DE.utf8', 'de_DE.UTF-8' + de_money = ('34,25€', '34,25 €', '€34,25', '€ 34,25', + 'EUR34,25', 'EUR 34,25', '34,25 EUR', '34,25 Euro', '34,25 DM') + # first try with English localization (using the point) + for lc in en_locales: + try: + query("set lc_monetary='%s'" % lc) + except pg.ProgrammingError: + pass + else: + break + else: + self.skipTest("cannot set English money locale") + try: + r = query(select_money) + except pg.ProgrammingError: + # this can happen if the currency signs cannot be + # converted using the encoding of the test database + self.skipTest("database does not support English money") + pg.set_decimal_point(None) + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, str) + self.assertIn(r, en_money) + r = query(select_money) + pg.set_decimal_point('') + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, str) + self.assertIn(r, 
en_money) + r = query(select_money) pg.set_decimal_point('.') - r = query("select '34.25'::money").getresult()[0][0] + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) self.assertIsInstance(r, d) - self.assertEqual(r, d('34.25')) + self.assertEqual(r, proper_money) + r = query(select_money) pg.set_decimal_point(',') - r = query("select '34.25'::money").getresult()[0][0] - self.assertNotEqual(r, d('34.25')) - query("set lc_monetary='de_DE'") + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, d) + self.assertEqual(r, bad_money) + r = query(select_money) + pg.set_decimal_point("'") + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, d) + self.assertEqual(r, bad_money) + # then try with German localization (using the comma) + for lc in de_locales: + try: + query("set lc_monetary='%s'" % lc) + except pg.ProgrammingError: + pass + else: + break + else: + self.skipTest("cannot set German money locale") + select_money = select_money.replace('.', ',') + try: + r = query(select_money) + except pg.ProgrammingError: + self.skipTest("database does not support English money") + pg.set_decimal_point(None) + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, str) + self.assertIn(r, de_money) + r = query(select_money) + pg.set_decimal_point('') + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertIsInstance(r, str) + self.assertIn(r, de_money) + r = query(select_money) pg.set_decimal_point(',') - r = query("select '34,25'::money").getresult()[0][0] + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) self.assertIsInstance(r, d) - self.assertEqual(r, d('34.25')) + self.assertEqual(r, proper_money) + r = query(select_money) pg.set_decimal_point('.') - r = query("select '34,25'::money").getresult()[0][0] - self.assertNotEqual(r, d('34.25')) - pg.set_decimal_point(point) + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertEqual(r, bad_money) + r = query(select_money) + pg.set_decimal_point("'") + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal_point(point) + self.assertEqual(r, bad_money) + + def testGetDecimal(self): + decimal_class = pg.get_decimal() + # error if a parameter is passed + self.assertRaises(TypeError, pg.get_decimal, decimal_class) + self.assertIs(decimal_class, pg.Decimal) # the default setting + pg.set_decimal(int) + try: + r = pg.get_decimal() + finally: + pg.set_decimal(decimal_class) + self.assertIs(r, int) + r = pg.get_decimal() + self.assertIs(r, decimal_class) def testSetDecimal(self): - d = pg.Decimal + decimal_class = pg.get_decimal() + # error if no parameter is passed + self.assertRaises(TypeError, pg.set_decimal) query = self.c.query - r = query("select 3425::numeric").getresult()[0][0] - self.assertIsInstance(r, d) - self.assertEqual(r, d('3425')) - pg.set_decimal(long) - r = query("select 3425::numeric").getresult()[0][0] - self.assertNotIsInstance(r, d) - self.assertIsInstance(r, long) - self.assertEqual(r, 3425L) - pg.set_decimal(d) + try: + r = query("select 3425::numeric") + except pg.ProgrammingError: + self.skipTest('database does not support numeric') + r = r.getresult()[0][0] + self.assertIsInstance(r, decimal_class) + self.assertEqual(r, decimal_class('3425')) + r = query("select 3425::numeric") + pg.set_decimal(int) + try: + r = r.getresult()[0][0] + finally: + pg.set_decimal(decimal_class) + 
self.assertNotIsInstance(r, decimal_class) + self.assertIsInstance(r, int) + self.assertEqual(r, int(3425)) + + def testGetBool(self): + use_bool = pg.get_bool() + # error if a parameter is passed + self.assertRaises(TypeError, pg.get_bool, use_bool) + self.assertIsInstance(use_bool, bool) + self.assertIs(use_bool, False) # the default setting + pg.set_bool(True) + try: + r = pg.get_bool() + finally: + pg.set_bool(use_bool) + self.assertIsInstance(r, bool) + self.assertIs(r, True) + pg.set_bool(False) + try: + r = pg.get_bool() + finally: + pg.set_bool(use_bool) + self.assertIsInstance(r, bool) + self.assertIs(r, False) + pg.set_bool(1) + try: + r = pg.get_bool() + finally: + pg.set_bool(use_bool) + self.assertIsInstance(r, bool) + self.assertIs(r, True) + pg.set_bool(0) + try: + r = pg.get_bool() + finally: + pg.set_bool(use_bool) + self.assertIsInstance(r, bool) + self.assertIs(r, False) + + def testSetBool(self): + use_bool = pg.get_bool() + # error if no parameter is passed + self.assertRaises(TypeError, pg.set_bool) + query = self.c.query + try: + r = query("select true::bool") + except pg.ProgrammingError: + self.skipTest('database does not support bool') + r = r.getresult()[0][0] + self.assertIsInstance(r, str) + self.assertEqual(r, 't') + r = query("select true::bool") + pg.set_bool(True) + try: + r = r.getresult()[0][0] + finally: + pg.set_bool(use_bool) + self.assertIsInstance(r, bool) + self.assertIs(r, True) + r = query("select true::bool") + pg.set_bool(False) + try: + r = r.getresult()[0][0] + finally: + pg.set_bool(use_bool) + self.assertIsInstance(r, str) + self.assertIs(r, 't') @unittest.skipUnless(namedtuple, 'Named tuples not available') - def testSetNamedresult(self): - query = self.c.query + def testGetNamedresult(self): + namedresult = pg.get_namedresult() + # error if a parameter is passed + self.assertRaises(TypeError, pg.get_namedresult, namedresult) + self.assertIs(namedresult, pg._namedresult) # the default setting - r = query("select 1 as x, 2 as y").namedresult()[0] - self.assertIsInstance(r, tuple) - self.assertEqual(r, (1, 2)) - self.assertIsNot(type(r), tuple) - self.assertEqual(r._fields, ('x', 'y')) - self.assertEqual(r._asdict(), {'x': 1, 'y': 2}) - self.assertEqual(r.__class__.__name__, 'Row') + @unittest.skipUnless(namedtuple, 'Named tuples not available') + def testSetNamedresult(self): + namedresult = pg.get_namedresult() + self.assertTrue(callable(namedresult)) - _namedresult = pg._namedresult - self.assertTrue(callable(_namedresult)) - pg.set_namedresult(_namedresult) + query = self.c.query r = query("select 1 as x, 2 as y").namedresult()[0] self.assertIsInstance(r, tuple) @@ -973,12 +1241,13 @@ def testSetNamedresult(self): self.assertEqual(r._asdict(), {'x': 1, 'y': 2}) self.assertEqual(r.__class__.__name__, 'Row') - def _listresult(q): - return map(list, q.getresult()) - - pg.set_namedresult(_listresult) + def listresult(q): + return [list(row) for row in q.getresult()] + pg.set_namedresult(listresult) try: + r = pg.get_namedresult() + self.assertIs(r, listresult) r = query("select 1 as x, 2 as y").namedresult()[0] self.assertIsInstance(r, list) self.assertEqual(r, [1, 2]) @@ -986,7 +1255,10 @@ def _listresult(q): self.assertFalse(hasattr(r, '_fields')) self.assertNotEqual(r.__class__.__name__, 'Row') finally: - pg.set_namedresult(_namedresult) + pg.set_namedresult(namedresult) + + r = pg.get_namedresult() + self.assertIs(r, namedresult) if __name__ == '__main__': diff --git a/module/TEST_PyGreSQL_classic_dbwrapper.py 
b/tests/test_classic_dbwrapper.py similarity index 58% rename from module/TEST_PyGreSQL_classic_dbwrapper.py rename to tests/test_classic_dbwrapper.py index be0aac64..47d0f4c5 100755 --- a/module/TEST_PyGreSQL_classic_dbwrapper.py +++ b/tests/test_classic_dbwrapper.py @@ -11,17 +11,21 @@ """ -from __future__ import with_statement - try: - import unittest2 as unittest # for Python < 2.6 + import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest +import os + +import sys import pg # the module under test from decimal import Decimal +# check whether the "with" statement is supported +no_with = sys.version_info[:2] < (2, 5) + # We need a database to test against. If LOCAL_PyGreSQL.py exists we will # get our information from that. Otherwise we use the defaults. # The current user must have create schema privilege on the database. @@ -36,6 +40,13 @@ except ImportError: pass +windows = os.name == 'nt' + +# There is a known bug in libpq under Windows which can cause +# the interface to crash when calling PQhost(): +do_not_ask_for_host = windows +do_not_ask_for_host_reason = 'libpq issue on Windows' + def DB(): """Create a DB wrapper object connecting to the test database.""" @@ -47,7 +58,7 @@ def DB(): class TestDBClassBasic(unittest.TestCase): - """"Test existence of the DB class wrapped pg connection methods.""" + """Test existence of the DB class wrapped pg connection methods.""" def setUp(self): self.db = DB() @@ -60,63 +71,37 @@ def tearDown(self): def testAllDBAttributes(self): attributes = [ + 'abort', 'begin', - 'cancel', - 'clear', - 'close', - 'commit', - 'db', - 'dbname', - 'debug', - 'delete', - 'end', - 'endcopy', - 'error', - 'escape_bytea', - 'escape_identifier', - 'escape_literal', - 'escape_string', + 'cancel', 'clear', 'close', 'commit', + 'db', 'dbname', 'debug', 'delete', + 'end', 'endcopy', 'error', + 'escape_bytea', 'escape_identifier', + 'escape_literal', 'escape_string', 'fileno', - 'get', - 'get_attnames', - 'get_databases', - 'get_notice_receiver', - 'get_relations', - 'get_tables', - 'getline', - 'getlo', - 'getnotify', - 'has_table_privilege', - 'host', - 'insert', - 'inserttable', - 'locreate', - 'loimport', + 'get', 'get_attnames', 'get_databases', + 'get_notice_receiver', 'get_parameter', + 'get_relations', 'get_tables', + 'getline', 'getlo', 'getnotify', + 'has_table_privilege', 'host', + 'insert', 'inserttable', + 'locreate', 'loimport', 'notification_handler', 'options', - 'parameter', - 'pkey', - 'port', - 'protocol_version', - 'putline', + 'parameter', 'pkey', 'port', + 'protocol_version', 'putline', 'query', - 'release', - 'reopen', - 'reset', - 'rollback', - 'savepoint', - 'server_version', - 'set_notice_receiver', - 'source', - 'start', - 'status', - 'transaction', - 'tty', - 'unescape_bytea', - 'update', - 'use_regtypes', - 'user', + 'release', 'reopen', 'reset', 'rollback', + 'savepoint', 'server_version', + 'set_notice_receiver', 'set_parameter', + 'source', 'start', 'status', + 'transaction', 'truncate', 'tty', + 'unescape_bytea', 'update', + 'use_regtypes', 'user', ] + if self.db.server_version < 90000: # PostgreSQL < 9.0 + attributes.remove('escape_identifier') + attributes.remove('escape_literal') db_attributes = [a for a in dir(self.db) if not a.startswith('_')] self.assertEqual(attributes, db_attributes) @@ -132,6 +117,7 @@ def testAttributeError(self): self.assertTrue(not error or 'krb5_' in error) self.assertEqual(self.db.error, self.db.db.error) + @unittest.skipIf(do_not_ask_for_host, do_not_ask_for_host_reason) + def
testAttributeHost(self): def_host = 'localhost' host = self.db.host @@ -187,9 +173,13 @@ def testAttributeUser(self): self.assertEqual(user, self.db.db.user) def testMethodEscapeLiteral(self): + if self.db.server_version < 90000: # PostgreSQL < 9.0 + self.skipTest('Escaping functions not supported') self.assertEqual(self.db.escape_literal(''), "''") def testMethodEscapeIdentifier(self): + if self.db.server_version < 90000: # PostgreSQL < 9.0 + self.skipTest('Escaping functions not supported') self.assertEqual(self.db.escape_identifier(''), '""') def testMethodEscapeString(self): @@ -233,8 +223,32 @@ def testMethodClose(self): pass else: self.fail('Reset should give an error for a closed connection') + self.assertIsNone(self.db.db) self.assertRaises(pg.InternalError, self.db.close) self.assertRaises(pg.InternalError, self.db.query, 'select 1') + self.assertRaises(pg.InternalError, getattr, self.db, 'status') + self.assertRaises(pg.InternalError, getattr, self.db, 'error') + self.assertRaises(pg.InternalError, getattr, self.db, 'absent') + + def testMethodReset(self): + con = self.db.db + self.db.reset() + self.assertIs(self.db.db, con) + self.db.query("select 1+1") + self.db.close() + self.assertRaises(pg.InternalError, self.db.reset) + + def testMethodReopen(self): + con = self.db.db + self.db.reopen() + self.assertIsNot(self.db.db, con) + con = self.db.db + self.db.query("select 1+1") + self.db.close() + self.db.reopen() + self.assertIsNot(self.db.db, con) + self.db.query("select 1+1") + self.db.close() def testExistingConnection(self): db = pg.DB(self.db.db) @@ -261,7 +275,9 @@ class DB2: class TestDBClass(unittest.TestCase): - """"Test the methods of the DB class wrapped pg connection.""" + """Test the methods of the DB class wrapped pg connection.""" + + cls_set_up = False @classmethod def setUpClass(cls): @@ -269,11 +285,12 @@ def setUpClass(cls): db.query("drop table if exists test cascade") db.query("create table test (" "i2 smallint, i4 integer, i8 bigint," - "d numeric, f4 real, f8 double precision, m money, " - "v4 varchar(4), c4 char(4), t text)") + " d numeric, f4 real, f8 double precision, m money," + " v4 varchar(4), c4 char(4), t text)") db.query("create or replace view test_view as" " select i4, v4 from test") db.close() + cls.cls_set_up = True @classmethod def tearDownClass(cls): @@ -282,15 +299,24 @@ def tearDownClass(cls): db.close() def setUp(self): + self.assertTrue(self.cls_set_up) self.db = DB() - self.db.query("set lc_monetary='C'") - self.db.query('set bytea_output=hex') - self.db.query('set standard_conforming_strings=on') + query = self.db.query + query('set client_encoding=utf8') + query('set standard_conforming_strings=on') + query("set lc_monetary='C'") + query("set datestyle='ISO,YMD'") + try: + query('set bytea_output=hex') + except pg.ProgrammingError: # PostgreSQL < 9.0 + pass def tearDown(self): self.db.close() def testEscapeLiteral(self): + if self.db.server_version < 90000: # PostgreSQL < 9.0 + self.skipTest('Escaping functions not supported') f = self.db.escape_literal self.assertEqual(f("plain"), "'plain'") self.assertEqual(f("that's k\xe4se"), "'that''s k\xe4se'") @@ -300,6 +326,8 @@ def testEscapeLiteral(self): "'No \"quotes\" must be escaped.'") def testEscapeIdentifier(self): + if self.db.server_version < 90000: # PostgreSQL < 9.0 + self.skipTest('Escaping functions not supported') f = self.db.escape_identifier self.assertEqual(f("plain"), '"plain"') self.assertEqual(f("that's k\xe4se"), '"that\'s k\xe4se"') @@ -317,11 +345,16 @@ def 
testEscapeString(self): def testEscapeBytea(self): f = self.db.escape_bytea - # note that escape_byte always returns hex output since Pg 9.0, + # note that escape_bytea always returns hex output since PostgreSQL 9.0, # regardless of the bytea_output setting - self.assertEqual(f("plain"), r"\x706c61696e") - self.assertEqual(f("that's k\xe4se"), r"\x746861742773206be47365") - self.assertEqual(f('O\x00ps\xff!'), r"\x4f007073ff21") + if self.db.server_version < 90000: + self.assertEqual(f("plain"), r"plain") + self.assertEqual(f("that's k\xe4se"), r"that''s k\344se") + self.assertEqual(f('O\x00ps\xff!'), r"O\000ps\377!") + else: + self.assertEqual(f("plain"), r"\x706c61696e") + self.assertEqual(f("that's k\xe4se"), r"\x746861742773206be47365") + self.assertEqual(f('O\x00ps\xff!'), r"\x4f007073ff21") def testUnescapeBytea(self): f = self.db.unescape_bytea @@ -407,6 +440,181 @@ def testQuote(self): self.assertEqual(f('ab\\c', 'text'), "'ab\\\\c'") self.assertEqual(f("a\\b'c", 'text'), "'a\\\\b''c'") + def testGetParameter(self): + f = self.db.get_parameter + self.assertRaises(TypeError, f) + self.assertRaises(TypeError, f, None) + self.assertRaises(TypeError, f, 42) + self.assertRaises(TypeError, f, '') + self.assertRaises(TypeError, f, []) + self.assertRaises(TypeError, f, ['']) + self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') + r = f('standard_conforming_strings') + self.assertEqual(r, 'on') + r = f('lc_monetary') + self.assertEqual(r, 'C') + r = f('datestyle') + self.assertEqual(r, 'ISO, YMD') + r = f('bytea_output') + self.assertEqual(r, 'hex') + r = f(['bytea_output', 'lc_monetary']) + self.assertIsInstance(r, list) + self.assertEqual(r, ['hex', 'C']) + r = f(('standard_conforming_strings', 'datestyle', 'bytea_output')) + self.assertEqual(r, ['on', 'ISO, YMD', 'hex']) + r = f(set(['bytea_output', 'lc_monetary'])) + self.assertIsInstance(r, dict) + self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) + r = f(set(['Bytea_Output', ' LC_Monetary '])) + self.assertIsInstance(r, dict) + self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'}) + s = dict.fromkeys(('bytea_output', 'lc_monetary')) + r = f(s) + self.assertIs(r, s) + self.assertEqual(r, {'bytea_output': 'hex', 'lc_monetary': 'C'}) + s = dict.fromkeys(('Bytea_Output', ' LC_Monetary ')) + r = f(s) + self.assertIs(r, s) + self.assertEqual(r, {'Bytea_Output': 'hex', ' LC_Monetary ': 'C'}) + + def testGetParameterServerVersion(self): + r = self.db.get_parameter('server_version_num') + self.assertIsInstance(r, str) + s = self.db.server_version + self.assertIsInstance(s, int) + self.assertEqual(r, str(s)) + + def testGetParameterAll(self): + f = self.db.get_parameter + r = f('all') + self.assertIsInstance(r, dict) + self.assertEqual(r['standard_conforming_strings'], 'on') + self.assertEqual(r['lc_monetary'], 'C') + self.assertEqual(r['DateStyle'], 'ISO, YMD') + self.assertEqual(r['bytea_output'], 'hex') + + def testSetParameter(self): + f = self.db.set_parameter + g = self.db.get_parameter + self.assertRaises(TypeError, f) + self.assertRaises(TypeError, f, None) + self.assertRaises(TypeError, f, 42) + self.assertRaises(TypeError, f, '') + self.assertRaises(TypeError, f, []) + self.assertRaises(TypeError, f, ['']) + self.assertRaises(ValueError, f, 'all', 'invalid') + self.assertRaises(ValueError, f, { + 'invalid1': 'value1', 'invalid2': 'value2'}, 'value') + self.assertRaises(pg.ProgrammingError, f, 'this_does_not_exist') + f('standard_conforming_strings', 'off') +
self.assertEqual(g('standard_conforming_strings'), 'off') + f('datestyle', 'ISO, DMY') + self.assertEqual(g('datestyle'), 'ISO, DMY') + f(['standard_conforming_strings', 'datestyle'], ['on', 'ISO, DMY']) + self.assertEqual(g('standard_conforming_strings'), 'on') + self.assertEqual(g('datestyle'), 'ISO, DMY') + f(['default_with_oids', 'standard_conforming_strings'], 'off') + self.assertEqual(g('default_with_oids'), 'off') + self.assertEqual(g('standard_conforming_strings'), 'off') + f(('standard_conforming_strings', 'datestyle'), ('on', 'ISO, YMD')) + self.assertEqual(g('standard_conforming_strings'), 'on') + self.assertEqual(g('datestyle'), 'ISO, YMD') + f(('default_with_oids', 'standard_conforming_strings'), 'off') + self.assertEqual(g('default_with_oids'), 'off') + self.assertEqual(g('standard_conforming_strings'), 'off') + f(set(['default_with_oids', 'standard_conforming_strings']), 'on') + self.assertEqual(g('default_with_oids'), 'on') + self.assertEqual(g('standard_conforming_strings'), 'on') + self.assertRaises(ValueError, f, set([ 'default_with_oids', + 'standard_conforming_strings']), ['off', 'on']) + f(set(['default_with_oids', 'standard_conforming_strings']), + ['off', 'off']) + self.assertEqual(g('default_with_oids'), 'off') + self.assertEqual(g('standard_conforming_strings'), 'off') + f({'standard_conforming_strings': 'on', 'datestyle': 'ISO, YMD'}) + self.assertEqual(g('standard_conforming_strings'), 'on') + self.assertEqual(g('datestyle'), 'ISO, YMD') + + def testResetParameter(self): + db = DB() + f = db.set_parameter + g = db.get_parameter + r = g('default_with_oids') + self.assertIn(r, ('on', 'off')) + dwi, not_dwi = r, r == 'on' and 'off' or 'on' + r = g('standard_conforming_strings') + self.assertIn(r, ('on', 'off')) + scs, not_scs = r, r == 'on' and 'off' or 'on' + f('default_with_oids', not_dwi) + f('standard_conforming_strings', not_scs) + self.assertEqual(g('default_with_oids'), not_dwi) + self.assertEqual(g('standard_conforming_strings'), not_scs) + f('default_with_oids') + f('standard_conforming_strings', None) + self.assertEqual(g('default_with_oids'), dwi) + self.assertEqual(g('standard_conforming_strings'), scs) + f('default_with_oids', not_dwi) + f('standard_conforming_strings', not_scs) + self.assertEqual(g('default_with_oids'), not_dwi) + self.assertEqual(g('standard_conforming_strings'), not_scs) + f(['default_with_oids', 'standard_conforming_strings'], None) + self.assertEqual(g('default_with_oids'), dwi) + self.assertEqual(g('standard_conforming_strings'), scs) + f('default_with_oids', not_dwi) + f('standard_conforming_strings', not_scs) + self.assertEqual(g('default_with_oids'), not_dwi) + self.assertEqual(g('standard_conforming_strings'), not_scs) + f(('default_with_oids', 'standard_conforming_strings')) + self.assertEqual(g('default_with_oids'), dwi) + self.assertEqual(g('standard_conforming_strings'), scs) + f('default_with_oids', not_dwi) + f('standard_conforming_strings', not_scs) + self.assertEqual(g('default_with_oids'), not_dwi) + self.assertEqual(g('standard_conforming_strings'), not_scs) + f(set(['default_with_oids', 'standard_conforming_strings']), None) + self.assertEqual(g('default_with_oids'), dwi) + self.assertEqual(g('standard_conforming_strings'), scs) + + def testResetParameterAll(self): + db = DB() + f = db.set_parameter + self.assertRaises(ValueError, f, 'all', 0) + self.assertRaises(ValueError, f, 'all', 'off') + g = db.get_parameter + r = g('default_with_oids') + self.assertIn(r, ('on', 'off')) + dwi, not_dwi = r, r == 'on' and 'off' 
or 'on' + r = g('standard_conforming_strings') + self.assertIn(r, ('on', 'off')) + scs, not_scs = r, r == 'on' and 'off' or 'on' + f('default_with_oids', not_dwi) + f('standard_conforming_strings', not_scs) + self.assertEqual(g('default_with_oids'), not_dwi) + self.assertEqual(g('standard_conforming_strings'), not_scs) + f('all') + self.assertEqual(g('default_with_oids'), dwi) + self.assertEqual(g('standard_conforming_strings'), scs) + + def testSetParameterLocal(self): + f = self.db.set_parameter + g = self.db.get_parameter + self.assertEqual(g('standard_conforming_strings'), 'on') + self.db.begin() + f('standard_conforming_strings', 'off', local=True) + self.assertEqual(g('standard_conforming_strings'), 'off') + self.db.end() + self.assertEqual(g('standard_conforming_strings'), 'on') + + def testSetParameterSession(self): + f = self.db.set_parameter + g = self.db.get_parameter + self.assertEqual(g('standard_conforming_strings'), 'on') + self.db.begin() + f('standard_conforming_strings', 'off', local=False) + self.assertEqual(g('standard_conforming_strings'), 'off') + self.db.end() + self.assertEqual(g('standard_conforming_strings'), 'off') + def testQuery(self): query = self.db.query query("drop table if exists test_table") @@ -495,9 +703,9 @@ def testPkey(self): query("create table pkeytest2 (" "c smallint, d smallint primary key)") query("create table pkeytest3 (" - "e smallint, f smallint, g smallint, " - "h smallint, i smallint, " - "primary key (f,h))") + "e smallint, f smallint, g smallint," + " h smallint, i smallint," + " primary key (f,h))") pkey = self.db.pkey self.assertRaises(KeyError, pkey, 'pkeytest0') self.assertEqual(pkey('pkeytest1'), 'b') @@ -523,6 +731,15 @@ def testGetDatabases(self): def testGetTables(self): get_tables = self.db.get_tables result1 = get_tables() + self.assertIsInstance(result1, list) + for t in result1: + t = t.split('.', 1) + self.assertGreaterEqual(len(t), 2) + if len(t) > 2: + self.assertTrue(t[1].startswith('"')) + t = t[0] + self.assertNotEqual(t, 'information_schema') + self.assertFalse(t.startswith('pg_')) tables = ('"A very Special Name"', '"A_MiXeD_quoted_NaMe"', 'a1', 'a2', 'A_MiXeD_NaMe', '"another special name"', @@ -548,6 +765,18 @@ def testGetTables(self): result2 = get_tables() self.assertEqual(result2, result1) + def testGetSystemTables(self): + get_tables = self.db.get_tables + result = get_tables() + self.assertNotIn('pg_catalog.pg_class', result) + self.assertNotIn('information_schema.tables', result) + result = get_tables(system=False) + self.assertNotIn('pg_catalog.pg_class', result) + self.assertNotIn('information_schema.tables', result) + result = get_tables(system=True) + self.assertIn('pg_catalog.pg_class', result) + self.assertNotIn('information_schema.tables', result) + def testGetRelations(self): get_relations = self.db.get_relations result = get_relations() @@ -566,20 +795,26 @@ def testGetRelations(self): self.assertNotIn('public.test', result) self.assertNotIn('public.test_view', result) - def testAttnames(self): + def testGetAttnames(self): self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'does_not_exist') self.assertRaises(pg.ProgrammingError, self.db.get_attnames, 'has.too.many.dots') + attributes = self.db.get_attnames('test') + self.assertIsInstance(attributes, dict) + self.assertEqual(attributes, dict( + i2='int', i4='int', i8='int', d='num', + f4='float', f8='float', m='money', + v4='text', c4='text', t='text')) for table in ('attnames_test_table', 'test table for attnames'): self.db.query('drop table 
if exists "%s"' % table) self.db.query('create table "%s" (' - 'a smallint, b integer, c bigint, ' - 'e numeric, f float, f2 double precision, m money, ' - 'x smallint, y smallint, z smallint, ' - 'Normal_NaMe smallint, "Special Name" smallint, ' - 't text, u char(2), v varchar(2), ' - 'primary key (y, u)) with oids' % table) + ' a smallint, b integer, c bigint,' + ' e numeric, f float, f2 double precision, m money,' + ' x smallint, y smallint, z smallint,' + ' Normal_NaMe smallint, "Special Name" smallint,' + ' t text, u char(2), v varchar(2),' + ' primary key (y, u)) with oids' % table) attributes = self.db.get_attnames(table) result = {'a': 'int', 'c': 'int', 'b': 'int', 'e': 'num', 'f': 'float', 'f2': 'float', 'm': 'money', @@ -589,6 +824,18 @@ def testAttnames(self): self.assertEqual(attributes, result) self.db.query('drop table "%s"' % table) + def testGetSystemRelations(self): + get_relations = self.db.get_relations + result = get_relations() + self.assertNotIn('pg_catalog.pg_class', result) + self.assertNotIn('information_schema.tables', result) + result = get_relations(system=False) + self.assertNotIn('pg_catalog.pg_class', result) + self.assertNotIn('information_schema.tables', result) + result = get_relations(system=True) + self.assertIn('pg_catalog.pg_class', result) + self.assertIn('information_schema.tables', result) + def testHasTablePrivilege(self): can = self.db.has_table_privilege self.assertEqual(can('test'), True) @@ -681,9 +928,49 @@ def testGetFromView(self): self.assertIn('v4', r) self.assertEqual(r['v4'], 'abc4') + def testGetLittleBobbyTables(self): + get = self.db.get + query = self.db.query + query("drop table if exists test_students") + query("create table test_students (firstname varchar primary key," + " nickname varchar, grade char(2))") + query("insert into test_students values (" + "'D''Arcy', 'Darcey', 'A+')") + query("insert into test_students values (" + "'Sheldon', 'Moonpie', 'A+')") + query("insert into test_students values (" + "'Robert', 'Little Bobby Tables', 'D-')") + r = get('test_students', 'Sheldon') + self.assertEqual(r, dict( + firstname="Sheldon", nickname='Moonpie', grade='A+')) + r = get('test_students', 'Robert') + self.assertEqual(r, dict( + firstname="Robert", nickname='Little Bobby Tables', grade='D-')) + r = get('test_students', "D'Arcy") + self.assertEqual(r, dict( + firstname="D'Arcy", nickname='Darcey', grade='A+')) + try: + get('test_students', "D' Arcy") + except pg.DatabaseError, error: + self.assertEqual(str(error), + 'No such record in public.test_students where firstname = ' + "'D'' Arcy'") + try: + get('test_students', "Robert'); TRUNCATE TABLE test_students;--") + except pg.DatabaseError, error: + self.assertEqual(str(error), + 'No such record in public.test_students where firstname = ' + "'Robert''); TRUNCATE TABLE test_students;--'") + q = "select * from test_students order by 1 limit 4" + r = query(q).getresult() + self.assertEqual(len(r), 3) + self.assertEqual(r[1][2], 'D-') + query('drop table test_students') + def testInsert(self): insert = self.db.insert query = self.db.query + server_version = self.db.server_version for table in ('insert_test_table', 'test table for insert'): query('drop table if exists "%s"' % table) query('create table "%s" (' @@ -705,12 +992,13 @@ def testInsert(self): dict(d=Decimal(0)), (dict(d=0), dict(d=Decimal(0))), dict(f4=None, f8=None), dict(f4=0, f8=0), (dict(f4='', f8=''), dict(f4=None, f8=None)), - dict(d=1234.5, f4=1234.5, f8=1234.5), + (dict(d=1234.5, f4=1234.5, f8=1234.5), + 
dict(d=Decimal('1234.5'))), dict(d=Decimal('123.456789'), f4=12.375, f8=123.4921875), dict(d=Decimal('123456789.9876543212345678987654321')), dict(m=None), (dict(m=''), dict(m=None)), dict(m=Decimal('-1234.56')), - (dict(m=('-1234.56')), dict(m=Decimal('-1234.56'))), + (dict(m='-1234.56'), dict(m=Decimal('-1234.56'))), dict(m=Decimal('1234.56')), dict(m=Decimal('123456')), (dict(m='1234.56'), dict(m=Decimal('1234.56'))), (dict(m=1234.5), dict(m=Decimal('1234.5'))), @@ -745,6 +1033,9 @@ def testInsert(self): data, change = test expect = data.copy() expect.update(change) + if data.get('m') and server_version < 90100: + # PostgreSQL < 9.1 cannot directly convert numbers to money + data['m'] = "'%s'::money" % data['m'] self.assertEqual(insert(table, data), data) self.assertIn(oid_table, data) oid = data[oid_table] @@ -925,6 +1216,227 @@ def testDeleteWithCompositeKey(self): self.assertEqual(r, ['f']) query("drop table %s" % table) + def testTruncate(self): + truncate = self.db.truncate + self.assertRaises(TypeError, truncate, None) + self.assertRaises(TypeError, truncate, 42) + self.assertRaises(TypeError, truncate, dict(test_table=None)) + query = self.db.query + query("drop table if exists test_table") + query("create table test_table (n smallint)") + for i in range(3): + query("insert into test_table values (1)") + q = "select count(*) from test_table" + r = query(q).getresult()[0][0] + self.assertEqual(r, 3) + truncate('test_table') + r = query(q).getresult()[0][0] + self.assertEqual(r, 0) + for i in range(3): + query("insert into test_table values (1)") + r = query(q).getresult()[0][0] + self.assertEqual(r, 3) + truncate('public.test_table') + r = query(q).getresult()[0][0] + self.assertEqual(r, 0) + query("drop table if exists test_table_2") + query('create table test_table_2 (n smallint)') + for t in (list, tuple, set): + for i in range(3): + query("insert into test_table values (1)") + query("insert into test_table_2 values (2)") + q = ("select (select count(*) from test_table)," + " (select count(*) from test_table_2)") + r = query(q).getresult()[0] + self.assertEqual(r, (3, 3)) + truncate(t(['test_table', 'test_table_2'])) + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0)) + query("drop table test_table_2") + query("drop table test_table") + + def testTempCrud(self): + query = self.db.query + table = 'test_temp_table' + query("drop table if exists %s" % table) + query("create temporary table %s" + " (n int primary key, t varchar)" % table) + self.db.insert(table, dict(n=1, t='one')) + self.db.insert(table, dict(n=2, t='too')) + self.db.insert(table, dict(n=3, t='three')) + r = self.db.get(table, 2) + self.assertEqual(r['t'], 'too') + self.db.update(table, dict(n=2, t='two')) + r = self.db.get(table, 2) + self.assertEqual(r['t'], 'two') + self.db.delete(table, r) + r = query('select n, t from %s order by 1' % table).getresult() + self.assertEqual(r, [(1, 'one'), (3, 'three')]) + query("drop table %s" % table) + + def testTruncateRestart(self): + truncate = self.db.truncate + self.assertRaises(TypeError, truncate, 'test_table', restart='invalid') + query = self.db.query + query("drop table if exists test_table") + query("create table test_table (n serial, t text)") + for n in range(3): + query("insert into test_table (t) values ('test')") + q = "select count(n), min(n), max(n) from test_table" + r = query(q).getresult()[0] + self.assertEqual(r, (3, 1, 3)) + truncate('test_table') + r = query(q).getresult()[0] + self.assertEqual(r, (0, None, None)) + for n in range(3): + 
query("insert into test_table (t) values ('test')") + r = query(q).getresult()[0] + self.assertEqual(r, (3, 4, 6)) + truncate('test_table', restart=True) + r = query(q).getresult()[0] + self.assertEqual(r, (0, None, None)) + for n in range(3): + query("insert into test_table (t) values ('test')") + r = query(q).getresult()[0] + self.assertEqual(r, (3, 1, 3)) + query("drop table test_table") + + def testTruncateCascade(self): + truncate = self.db.truncate + self.assertRaises(TypeError, truncate, 'test_table', cascade='invalid') + query = self.db.query + query("drop table if exists test_child") + query("drop table if exists test_parent") + query("create table test_parent (n smallint primary key)") + query("create table test_child (" + " n smallint primary key references test_parent (n))") + for n in range(3): + query("insert into test_parent (n) values (%d)" % n) + query("insert into test_child (n) values (%d)" % n) + q = ("select (select count(*) from test_parent)," + " (select count(*) from test_child)") + r = query(q).getresult()[0] + self.assertEqual(r, (3, 3)) + self.assertRaises(pg.ProgrammingError, truncate, 'test_parent') + truncate(['test_parent', 'test_child']) + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0)) + for n in range(3): + query("insert into test_parent (n) values (%d)" % n) + query("insert into test_child (n) values (%d)" % n) + r = query(q).getresult()[0] + self.assertEqual(r, (3, 3)) + truncate('test_parent', cascade=True) + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0)) + for n in range(3): + query("insert into test_parent (n) values (%d)" % n) + query("insert into test_child (n) values (%d)" % n) + r = query(q).getresult()[0] + self.assertEqual(r, (3, 3)) + truncate('test_child') + r = query(q).getresult()[0] + self.assertEqual(r, (3, 0)) + self.assertRaises(pg.ProgrammingError, truncate, 'test_parent') + truncate('test_parent', cascade=True) + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0)) + query("drop table test_child") + query("drop table test_parent") + + def testTruncateOnly(self): + truncate = self.db.truncate + self.assertRaises(TypeError, truncate, 'test_table', only='invalid') + query = self.db.query + query("drop table if exists test_child") + query("drop table if exists test_parent") + query("create table test_parent (n smallint)") + query("create table test_child (" + " m smallint) inherits (test_parent)") + for n in range(3): + query("insert into test_parent (n) values (1)") + query("insert into test_child (n, m) values (2, 3)") + q = ("select (select count(*) from test_parent)," + " (select count(*) from test_child)") + r = query(q).getresult()[0] + self.assertEqual(r, (6, 3)) + truncate('test_parent') + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0)) + for n in range(3): + query("insert into test_parent (n) values (1)") + query("insert into test_child (n, m) values (2, 3)") + r = query(q).getresult()[0] + self.assertEqual(r, (6, 3)) + truncate('test_parent*') + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0)) + for n in range(3): + query("insert into test_parent (n) values (1)") + query("insert into test_child (n, m) values (2, 3)") + r = query(q).getresult()[0] + self.assertEqual(r, (6, 3)) + truncate('test_parent', only=True) + r = query(q).getresult()[0] + self.assertEqual(r, (3, 3)) + truncate('test_parent', only=False) + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0)) + self.assertRaises(ValueError, truncate, 'test_parent*', only=True) + truncate('test_parent*', only=False) + 
query("drop table if exists test_parent_2") + query("create table test_parent_2 (n smallint)") + query("drop table if exists test_child_2") + query("create table test_child_2 (" + " m smallint) inherits (test_parent_2)") + for n in range(3): + query("insert into test_parent (n) values (1)") + query("insert into test_child (n, m) values (2, 3)") + query("insert into test_parent_2 (n) values (1)") + query("insert into test_child_2 (n, m) values (2, 3)") + q = ("select (select count(*) from test_parent)," + " (select count(*) from test_child)," + " (select count(*) from test_parent_2)," + " (select count(*) from test_child_2)") + r = query(q).getresult()[0] + self.assertEqual(r, (6, 3, 6, 3)) + truncate(['test_parent', 'test_parent_2'], only=[False, True]) + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0, 3, 3)) + truncate(['test_parent', 'test_parent_2'], only=False) + r = query(q).getresult()[0] + self.assertEqual(r, (0, 0, 0, 0)) + self.assertRaises(ValueError, truncate, + ['test_parent*', 'test_child'], only=[True, False]) + truncate(['test_parent*', 'test_child'], only=[False, True]) + query("drop table test_child_2") + query("drop table test_parent_2") + query("drop table test_child") + query("drop table test_parent") + + def testTruncateQuoted(self): + truncate = self.db.truncate + query = self.db.query + table = "test table for truncate()" + query('drop table if exists "%s"' % table) + query('create table "%s" (n smallint)' % table) + for i in range(3): + query('insert into "%s" values (1)' % table) + q = 'select count(*) from "%s"' % table + r = query(q).getresult()[0][0] + self.assertEqual(r, 3) + truncate(table) + r = query(q).getresult()[0][0] + self.assertEqual(r, 0) + for i in range(3): + query('insert into "%s" values (1)' % table) + r = query(q).getresult()[0][0] + self.assertEqual(r, 3) + truncate('public."%s"' % table) + r = query(q).getresult()[0][0] + self.assertEqual(r, 0) + query('drop table "%s"' % table) + def testTransaction(self): query = self.db.query query("drop table if exists test_table") @@ -958,10 +1470,13 @@ def testTransaction(self): self.assertEqual(r, [1, 2, 5, 7, 9]) query("drop table test_table") + @unittest.skipIf(no_with, 'context managers not supported') def testContextManager(self): query = self.db.query query("drop table if exists test_table") query("create table test_table (n integer check(n>0))") + # wrap "with" statements to avoid SyntaxError in Python < 2.5 + exec """from __future__ import with_statement\nif True: with self.db: query("insert into test_table values (1)") query("insert into test_table values (2)") @@ -981,7 +1496,7 @@ def testContextManager(self): except pg.ProgrammingError, error: self.assertTrue('check' in str(error)) with self.db: - query("insert into test_table values (7)") + query("insert into test_table values (7)")\n""" r = [r[0] for r in query( "select * from test_table order by 1").getresult()] self.assertEqual(r, [1, 2, 5, 7]) @@ -1005,6 +1520,83 @@ def testBytea(self): self.assertEqual(r, s) query('drop table bytea_test') + def testNotificationHandler(self): + # the notification handler itself is tested separately + f = self.db.notification_handler + callback = lambda arg_dict: None + handler = f('test', callback) + self.assertIsInstance(handler, pg.NotificationHandler) + self.assertIs(handler.db, self.db) + self.assertEqual(handler.event, 'test') + self.assertEqual(handler.stop_event, 'stop_test') + self.assertIs(handler.callback, callback) + self.assertIsInstance(handler.arg_dict, dict) + 
self.assertEqual(handler.arg_dict, {}) + self.assertIsNone(handler.timeout) + self.assertFalse(handler.listening) + handler.close() + self.assertIsNone(handler.db) + self.db.reopen() + self.assertIsNone(handler.db) + handler = f('test2', callback, timeout=2) + self.assertIsInstance(handler, pg.NotificationHandler) + self.assertIs(handler.db, self.db) + self.assertEqual(handler.event, 'test2') + self.assertEqual(handler.stop_event, 'stop_test2') + self.assertIs(handler.callback, callback) + self.assertIsInstance(handler.arg_dict, dict) + self.assertEqual(handler.arg_dict, {}) + self.assertEqual(handler.timeout, 2) + self.assertFalse(handler.listening) + handler.close() + self.assertIsNone(handler.db) + self.db.reopen() + self.assertIsNone(handler.db) + arg_dict = {'testing': 3} + handler = f('test3', callback, arg_dict=arg_dict) + self.assertIsInstance(handler, pg.NotificationHandler) + self.assertIs(handler.db, self.db) + self.assertEqual(handler.event, 'test3') + self.assertEqual(handler.stop_event, 'stop_test3') + self.assertIs(handler.callback, callback) + self.assertIs(handler.arg_dict, arg_dict) + self.assertEqual(arg_dict['testing'], 3) + self.assertIsNone(handler.timeout) + self.assertFalse(handler.listening) + handler.close() + self.assertIsNone(handler.db) + self.db.reopen() + self.assertIsNone(handler.db) + handler = f('test4', callback, stop_event='stop4') + self.assertIsInstance(handler, pg.NotificationHandler) + self.assertIs(handler.db, self.db) + self.assertEqual(handler.event, 'test4') + self.assertEqual(handler.stop_event, 'stop4') + self.assertIs(handler.callback, callback) + self.assertIsInstance(handler.arg_dict, dict) + self.assertEqual(handler.arg_dict, {}) + self.assertIsNone(handler.timeout) + self.assertFalse(handler.listening) + handler.close() + self.assertIsNone(handler.db) + self.db.reopen() + self.assertIsNone(handler.db) + arg_dict = {'testing': 5} + handler = f('test5', callback, arg_dict, 1.5, 'stop5') + self.assertIsInstance(handler, pg.NotificationHandler) + self.assertIs(handler.db, self.db) + self.assertEqual(handler.event, 'test5') + self.assertEqual(handler.stop_event, 'stop5') + self.assertIs(handler.callback, callback) + self.assertIs(handler.arg_dict, arg_dict) + self.assertEqual(arg_dict['testing'], 5) + self.assertEqual(handler.timeout, 1.5) + self.assertFalse(handler.listening) + handler.close() + self.assertIsNone(handler.db) + self.db.reopen() + self.assertIsNone(handler.db) + def testDebugWithCallable(self): if debug: self.assertEqual(self.db.debug, debug) @@ -1021,13 +1613,14 @@ def testDebugWithCallable(self): class TestSchemas(unittest.TestCase): - """"Test correct handling of schemas (namespaces).""" + """Test correct handling of schemas (namespaces).""" + + cls_set_up = False @classmethod def setUpClass(cls): db = DB() query = db.query - query("set client_min_messages=warning") for num_schema in range(5): if num_schema: schema = "s%d" % num_schema @@ -1047,12 +1640,12 @@ def setUpClass(cls): query("create table %s.t%d with oids as select 1 as n, %d as d" % (schema, num_schema, num_schema)) db.close() + cls.cls_set_up = True @classmethod def tearDownClass(cls): db = DB() query = db.query - query("set client_min_messages=warning") for num_schema in range(5): if num_schema: schema = "s%d" % num_schema @@ -1064,8 +1657,8 @@ def tearDownClass(cls): db.close() def setUp(self): + self.assertTrue(self.cls_set_up) self.db = DB() - self.db.query("set client_min_messages=warning") def tearDown(self): self.db.close() diff --git 
a/module/TEST_PyGreSQL_classic_functions.py b/tests/test_classic_functions.py similarity index 78% rename from module/TEST_PyGreSQL_classic_functions.py rename to tests/test_classic_functions.py index 530e6d14..978d15d2 100755 --- a/module/TEST_PyGreSQL_classic_functions.py +++ b/tests/test_classic_functions.py @@ -13,12 +13,23 @@ try: - import unittest2 as unittest # for Python < 2.6 + import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest +import re + import pg # the module under test +try: + from decimal import Decimal +except ImportError: # Python < 2.4, unsupported + Decimal = None +try: + from collections import namedtuple +except ImportError: # Python < 2.6 + namedtuple = None + class TestAuxiliaryFunctions(unittest.TestCase): """Test the auxiliary functions external to the connection class.""" @@ -253,25 +264,29 @@ def testDefBase(self): class TestEscapeFunctions(unittest.TestCase): - """"Test pg escape and unescape functions.""" + """Test pg escape and unescape functions. + + The libpq interface memorizes some parameters of the last opened + connection that influence the result of these functions. + Therefore we cannot do rigid tests of these functions here. + We leave this for the test module that runs with a database. + + """ def testEscapeString(self): f = pg.escape_string self.assertEqual(f('plain'), 'plain') - self.assertEqual(f("that's k\xe4se"), "that''s k\xe4se") - self.assertEqual(f(r"It's fine to have a \ inside."), - r"It''s fine to have a \\ inside.") + self.assertEqual(f("that's cheese"), "that''s cheese") def testEscapeBytea(self): f = pg.escape_bytea self.assertEqual(f('plain'), 'plain') - self.assertEqual(f("that's k\xe4se"), "that''s k\\\\344se") - self.assertEqual(f('O\x00ps\xff!'), r'O\\000ps\\377!') + self.assertEqual(f("that's cheese"), "that''s cheese") def testUnescapeBytea(self): f = pg.unescape_bytea self.assertEqual(f('plain'), 'plain') - self.assertEqual(f("that's k\\344se"), "that's k\xe4se") + self.assertEqual(f("das is' k\\303\\244se"), "das is' käse") self.assertEqual(f(r'O\000ps\377!'), 'O\x00ps\xff!') @@ -292,17 +307,69 @@ def testSetDecimalPoint(self): point = pg.get_decimal_point() pg.set_decimal_point('*') r = pg.get_decimal_point() + pg.set_decimal_point(point) self.assertIsInstance(r, str) self.assertEqual(r, '*') - pg.set_decimal_point(point) + r = pg.get_decimal_point() + self.assertIsInstance(r, str) + self.assertEqual(r, point) + + def testGetDecimal(self): + r = pg.get_decimal() + self.assertIs(r, Decimal) def testSetDecimal(self): decimal_class = pg.Decimal - pg.set_decimal(long) + pg.set_decimal(int) + r = pg.get_decimal() pg.set_decimal(decimal_class) + self.assertIs(r, int) + r = pg.get_decimal() + self.assertIs(r, decimal_class) + + def testGetBool(self): + r = pg.get_bool() + self.assertIsInstance(r, bool) + self.assertIs(r, False) + + def testSetBool(self): + use_bool = pg.get_bool() + pg.set_bool(True) + r = pg.get_bool() + pg.set_bool(use_bool) + self.assertIsInstance(r, bool) + self.assertIs(r, True) + pg.set_bool(False) + r = pg.get_bool() + pg.set_bool(use_bool) + self.assertIsInstance(r, bool) + self.assertIs(r, False) + r = pg.get_bool() + self.assertIsInstance(r, bool) + self.assertIs(r, use_bool) + + def testGetNamedresult(self): + r = pg.get_namedresult() + if namedtuple: + self.assertTrue(callable(r)) + self.assertIs(r, pg._namedresult) + else: + self.assertIsNone(r) def testSetNamedresult(self): - pg.set_namedresult(tuple) + namedresult = pg.get_namedresult() + self.assertRaises(TypeError, 
pg.set_namedresult) + self.assertRaises(TypeError, pg.set_namedresult, None) + f = lambda q: q.getresult() + pg.set_namedresult(f) + r = pg.get_namedresult() + if namedtuple or namedresult is not None: + pg.set_namedresult(namedresult) + else: + namedresult = f + self.assertIs(r, f) + r = pg.get_namedresult() + self.assertIs(r, namedresult) class TestModuleConstants(unittest.TestCase): @@ -311,11 +378,16 @@ class TestModuleConstants(unittest.TestCase): def testVersion(self): v = pg.version self.assertIsInstance(v, str) - v = v.split('.') - self.assertTrue(2 <= len(v) <= 3) - for w in v: - self.assertTrue(1 <= len(w) <= 2) - self.assertTrue(w.isdigit()) + # make sure the version conforms to PEP440 + re_version = r"""^ + (\d[\.\d]*(?<= \d)) + ((?:[abc]|rc)\d+)? + (?:(\.post\d+))? + (?:(\.dev\d+))? + (?:(\+(?![.])[a-zA-Z0-9\.]*[a-zA-Z0-9]))? + $""" + match = re.match(re_version, v, re.X) + self.assertIsNotNone(match) if __name__ == '__main__': diff --git a/module/TEST_PyGreSQL_classic_largeobj.py b/tests/test_classic_largeobj.py similarity index 81% rename from module/TEST_PyGreSQL_classic_largeobj.py rename to tests/test_classic_largeobj.py index 6d536a9e..58ec6316 100755 --- a/module/TEST_PyGreSQL_classic_largeobj.py +++ b/tests/test_classic_largeobj.py @@ -12,10 +12,12 @@ """ try: - import unittest2 as unittest # for Python < 2.6 + import unittest2 as unittest # for Python < 2.7 except ImportError: import unittest import tempfile +import os +import sys import pg # the module under test @@ -30,6 +32,8 @@ except ImportError: pass +windows = os.name == 'nt' + def connect(): """Create a basic pg connection to the test database.""" @@ -59,7 +63,7 @@ def setUp(self): self.c.query('begin') def tearDown(self): - self.c.query('end') + self.c.query('rollback') self.c.close() def assertIsLargeObject(self, obj): @@ -109,17 +113,30 @@ def testGetLo(self): large_object.unlink() finally: del large_object + self.assertIsInstance(r, str) self.assertEqual(r, data) def testLoImport(self): - f = tempfile.NamedTemporaryFile() + if windows: + # NamedTemporaryFiles don't work well here + fname = 'temp_test_pg_largeobj_import.txt' + f = open(fname, 'wb') + else: + f = tempfile.NamedTemporaryFile() + fname = f.name data = 'some data to be imported' f.write(data) - f.flush() - f.seek(0) + if windows: + f.close() + f = open(fname, 'rb') + else: + f.flush() + f.seek(0) large_object = self.c.loimport(f.name) try: f.close() + if windows: + os.remove(fname) self.assertIsLargeObject(large_object) large_object.open(pg.INV_READ) large_object.seek(0, pg.SEEK_SET) @@ -147,14 +164,17 @@ def tearDown(self): if self.obj.oid: try: self.obj.close() - except IOError: + except (SystemError, IOError): pass try: self.obj.unlink() - except IOError: + except (SystemError, IOError): pass del self.obj - self.pgcnx.query('end') + try: + self.pgcnx.query('rollback') + except SystemError: + pass self.pgcnx.close() def testOid(self): @@ -244,18 +264,23 @@ def testSeek(self): self.obj.open(pg.INV_READ) seek(0, pg.SEEK_SET) r = self.obj.read(9) + self.assertIsInstance(r, str) self.assertEqual(r, 'some data') seek(4, pg.SEEK_CUR) r = self.obj.read(2) + self.assertIsInstance(r, str) self.assertEqual(r, 'be') seek(-10, pg.SEEK_CUR) r = self.obj.read(4) + self.assertIsInstance(r, str) self.assertEqual(r, 'data') seek(0, pg.SEEK_SET) r = self.obj.read(4) + self.assertIsInstance(r, str) self.assertEqual(r, 'some') seek(-6, pg.SEEK_END) r = self.obj.read(4) + self.assertIsInstance(r, str) self.assertEqual(r, 'seek') def testTell(self): @@ -286,23 
+311,13 @@ def testUnlink(self): self.assertRaises(TypeError, unlink, 0) # unlinking when object is still open self.obj.open(pg.INV_WRITE) + self.assertIsNotNone(self.obj.oid) + self.assertNotEqual(0, self.obj.oid) self.assertRaises(IOError, unlink) data = 'some data to be sold' self.obj.write(data) self.obj.close() - oid = self.obj.oid - self.assertIsInstance(oid, int) - self.assertNotEqual(oid, 0) - obj = self.pgcnx.getlo(oid) - try: - self.assertIsNot(obj, self.obj) - self.assertEqual(obj.oid, oid) - obj.open(pg.INV_READ) - r = obj.read(80) - obj.close() - self.assertEqual(r, data) - finally: - del obj + # unlinking after object has been closed unlink() self.assertIsNone(self.obj.oid) @@ -353,18 +368,61 @@ def testExport(self): self.assertRaises(TypeError, export) self.assertRaises(TypeError, export, 0) self.assertRaises(TypeError, export, 'invalid', 0) - f = tempfile.NamedTemporaryFile() + if windows: + # NamedTemporaryFiles don't work well here + fname = 'temp_test_pg_largeobj_export.txt' + f = open(fname, 'wb') + else: + f = tempfile.NamedTemporaryFile() + fname = f.name data = 'some data to be exported' self.obj.open(pg.INV_WRITE) self.obj.write(data) # exporting when object is not yet closed self.assertRaises(IOError, export, f.name) self.obj.close() - export(f.name) + export(fname) + if windows: + f.close() + f = open(fname, 'rb') r = f.read() f.close() + if windows: + os.remove(fname) self.assertEqual(r, data) + def testPrint(self): + self.obj.open(pg.INV_WRITE) + data = 'some object to be printed' + self.obj.write(data) + if windows: + # TemporaryFiles don't work well here + fname = 'temp_test_pg_largeobj_export.txt' + f = open(fname, 'wb') + else: + f = tempfile.TemporaryFile() + stdout, sys.stdout = sys.stdout, f + try: + print self.obj + self.obj.close() + print self.obj + except Exception: + pass + sys.stdout = stdout + if windows: + f.close() + f = open(fname, 'rb') + else: + f.seek(0) + r = f.read() + f.close() + if windows: + os.remove(fname) + oid = self.obj.oid + self.assertEqual(r, + 'Opened large object, oid %d\n' + 'Closed large object, oid %d\n' % (oid, oid)) + if __name__ == '__main__': unittest.main() diff --git a/tests/test_classic_notification.py b/tests/test_classic_notification.py new file mode 100755 index 00000000..06d1c4f5 --- /dev/null +++ b/tests/test_classic_notification.py @@ -0,0 +1,415 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +"""Test the classic PyGreSQL interface. + +Sub-tests for the notification handler object. + +Contributed by Christoph Zwerschke. + +These tests need a database to test against. + +""" + +try: + import unittest2 as unittest # for Python < 2.7 +except ImportError: + import unittest + +from time import sleep +from threading import Thread + +import pg # the module under test + +# We need a database to test against. If LOCAL_PyGreSQL.py exists we will +# get our information from that. Otherwise we use the defaults. +# The current user must have create schema privilege on the database. 
+dbname = 'unittest' +dbhost = None +dbport = 5432 + +debug = False # let DB wrapper print debugging output + +try: + from LOCAL_PyGreSQL import * +except ImportError: + pass + + +def DB(): + """Create a DB wrapper object connecting to the test database.""" + db = pg.DB(dbname, dbhost, dbport) + if debug: + db.debug = debug + return db + + +class TestSyncNotification(unittest.TestCase): + """Test notification handler running in the same thread.""" + + def setUp(self): + self.db = DB() + self.timeout = None + self.called = True + self.payloads = [] + + def tearDown(self): + if self.db: + self.db.close() + + def callback(self, arg_dict): + if arg_dict is None: + self.timeout = True + else: + self.timeout = False + self.payloads.append(arg_dict.get('extra')) + + def get_handler(self, event=None, arg_dict=None, stop_event=None): + if not event: + event = 'test_async_notification' + if not stop_event: + stop_event = 'stop_async_notification' + callback = self.callback + handler = self.db.notification_handler( + event, callback, arg_dict, 0, stop_event) + self.assertEqual(handler.event, event) + self.assertEqual(handler.stop_event, stop_event or 'stop_%s' % event) + self.assertIs(handler.callback, callback) + if arg_dict is None: + self.assertEqual(handler.arg_dict, {}) + else: + self.assertIs(handler.arg_dict, arg_dict) + self.assertEqual(handler.timeout, 0) + self.assertFalse(handler.listening) + return handler + + def testCloseHandler(self): + handler = self.get_handler() + self.assertIs(handler.db, self.db) + handler.close() + self.assertRaises(pg.InternalError, self.db.close) + self.db = None + self.assertIs(handler.db, None) + + def testDeleteHandler(self): + handler = self.get_handler('test_del') + self.assertIs(handler.db, self.db) + handler.listen() + self.db.query('notify test_del') + self.db.query('notify test_del') + del handler + self.db.query('notify test_del') + n = 0 + while self.db.getnotify() and n < 4: + n += 1 + self.assertEqual(n, 2) + + def testNotify(self): + handler = self.get_handler() + handler.listen() + self.assertRaises(TypeError, handler.notify, invalid=True) + handler.notify(payload='baz') + handler.notify(stop=True, payload='buz') + handler.unlisten() + self.db.close() + self.db = None + + def testNotifyWithArgsAndPayload(self): + arg_dict = {'foo': 'bar'} + handler = self.get_handler(arg_dict=arg_dict) + self.assertEqual(handler.timeout, 0) + handler.listen() + handler.notify(payload='baz') + handler.notify(payload='biz') + handler() + self.assertIsNotNone(self.timeout) + self.assertFalse(self.timeout) + self.assertEqual(self.payloads, ['baz', 'biz']) + self.assertEqual(arg_dict['foo'], 'bar') + self.assertEqual(arg_dict['event'], handler.event) + self.assertIsInstance(arg_dict['pid'], int) + self.assertEqual(arg_dict['extra'], 'biz') + self.assertTrue(handler.listening) + del self.payloads[:] + handler.notify(stop=True, payload='buz') + handler() + self.assertIsNotNone(self.timeout) + self.assertFalse(self.timeout) + self.assertEqual(self.payloads, ['buz']) + self.assertEqual(arg_dict['foo'], 'bar') + self.assertEqual(arg_dict['event'], handler.stop_event) + self.assertIsInstance(arg_dict['pid'], int) + self.assertEqual(arg_dict['extra'], 'buz') + self.assertFalse(handler.listening) + handler.unlisten() + + def testNotifyWrongEvent(self): + handler = self.get_handler('good_event') + self.assertEqual(handler.timeout, 0) + handler.listen() + handler.notify(payload="note 1") + self.db.query("notify bad_event, 'note 2'") + handler.notify(payload="note 3") + 
handler() + self.assertIsNotNone(self.timeout) + self.assertFalse(self.timeout) + self.assertEqual(self.payloads, ['note 1', 'note 3']) + self.assertTrue(handler.listening) + del self.payloads[:] + self.db.query('listen bad_event') + handler.notify(payload="note 4") + self.db.query("notify bad_event, 'note 5'") + handler.notify(payload="note 6") + try: + handler() + except pg.DatabaseError, error: + self.assertEqual(str(error), + 'Listening for "good_event" and "stop_good_event",' + ' but notified of "bad_event"') + self.assertIsNotNone(self.timeout) + self.assertFalse(self.timeout) + self.assertEqual(self.payloads, ['note 4']) + self.assertFalse(handler.listening) + + +class TestAsyncNotification(unittest.TestCase): + """Test notification handler running in a separate thread.""" + + def setUp(self): + self.db = DB() + + def tearDown(self): + self.doCleanups() + if self.db: + self.db.close() + + def callback(self, arg_dict): + if arg_dict is None: + self.timeout = True + elif arg_dict is self.arg_dict: + arg_dict = arg_dict.copy() + pid = arg_dict.get('pid') + if isinstance(pid, int): + arg_dict['pid'] = 1 + self.received.append(arg_dict) + else: + self.received.append(dict(error=arg_dict)) + + def start_handler(self, + event=None, arg_dict=None, timeout=5, stop_event=None): + db = DB() + if not event: + event = 'test_async_notification' + if not stop_event: + stop_event = 'stop_async_notification' + callback = self.callback + handler = db.notification_handler( + event, callback, arg_dict, timeout, stop_event) + self.handler = handler + self.assertIsInstance(handler, pg.NotificationHandler) + self.assertEqual(handler.event, event) + self.assertEqual(handler.stop_event, stop_event or 'stop_%s' % event) + self.event = handler.event + self.assertIs(handler.callback, callback) + if arg_dict is None: + self.assertEqual(handler.arg_dict, {}) + else: + self.assertIsInstance(handler.arg_dict, dict) + self.arg_dict = handler.arg_dict + self.assertEqual(handler.timeout, timeout) + self.assertFalse(handler.listening) + thread = Thread(target=handler, name='test_notification_thread') + self.thread = thread + thread.start() + self.stopped = timeout == 0 + self.addCleanup(self.stop_handler) + for n in range(500): + if handler.listening: + break + sleep(0.01) + self.assertTrue(handler.listening) + if not self.stopped: + self.assertTrue(thread.isAlive()) + self.timeout = False + self.received = [] + self.sent = [] + + def stop_handler(self): + handler = self.handler + thread = self.thread + if not self.stopped and self.handler.listening: + self.notify_handler(stop=True) + handler.close() + self.db = None + if thread.isAlive(): + thread.join(5) + self.assertFalse(handler.listening) + self.assertFalse(thread.isAlive()) + + def notify_handler(self, stop=False, payload=None): + event = self.event + if stop: + event = self.handler.stop_event + self.stopped = True + arg_dict = self.arg_dict.copy() + arg_dict.update(event=event, pid=1, extra=payload or '') + self.handler.notify(db=self.db, stop=stop, payload=payload) + self.sent.append(arg_dict) + + def notify_query(self, stop=False, payload=None): + event = self.event + if stop: + event = self.handler.stop_event + self.stopped = True + q = 'notify "%s"' % event + if payload: + q += ", '%s'" % payload + arg_dict = self.arg_dict.copy() + arg_dict.update(event=event, pid=1, extra=payload or '') + self.db.query(q) + self.sent.append(arg_dict) + + def wait(self): + for n in range(500): + if self.timeout: + return False + if len(self.received) >= len(self.sent): + 
return True + sleep(0.01) + + def receive(self, stop=False): + if not self.sent: + stop = True + if stop: + self.notify_handler(stop=True, payload='stop') + self.assertTrue(self.wait()) + self.assertFalse(self.timeout) + self.assertEqual(self.received, self.sent) + self.received = [] + self.sent = [] + self.assertEqual(self.handler.listening, not self.stopped) + + def testNotifyHandlerEmpty(self): + self.start_handler() + self.notify_handler(stop=True) + self.assertEqual(len(self.sent), 1) + self.receive() + + def testNotifyQueryEmpty(self): + self.start_handler() + self.notify_query(stop=True) + self.assertEqual(len(self.sent), 1) + self.receive() + + def testNotifyHandlerOnce(self): + self.start_handler() + self.notify_handler() + self.assertEqual(len(self.sent), 1) + self.receive() + self.receive(stop=True) + + def testNotifyQueryOnce(self): + self.start_handler() + self.notify_query() + self.receive() + self.notify_query(stop=True) + self.receive() + + def testNotifyWithArgs(self): + arg_dict = {'test': 42, 'more': 43, 'less': 41} + self.start_handler('test_args', arg_dict) + self.notify_query() + self.receive(stop=True) + + def testNotifySeveralTimes(self): + arg_dict = {'test': 1} + self.start_handler(arg_dict=arg_dict) + for count in range(3): + self.notify_query() + self.receive() + arg_dict['test'] += 1 + for count in range(2): + self.notify_handler() + self.receive() + arg_dict['test'] += 1 + for count in range(3): + self.notify_query() + self.receive(stop=True) + + def testNotifyOnceWithPayload(self): + self.start_handler() + self.notify_query(payload='test_payload') + self.receive(stop=True) + + def testNotifyWithArgsAndPayload(self): + self.start_handler(arg_dict={'foo': 'bar'}) + self.notify_query(payload='baz') + self.receive(stop=True) + + def testNotifyQuotedNames(self): + self.start_handler('Hello, World!') + self.notify_query(payload='How do you do?') + self.receive(stop=True) + + def testNotifyWithFivePayloads(self): + self.start_handler('gimme_5', {'test': 'Gimme 5'}) + for count in range(5): + self.notify_query(payload="Round %d" % count) + self.assertEqual(len(self.sent), 5) + self.receive(stop=True) + + def testReceiveImmediately(self): + self.start_handler('immediate', {'test': 'immediate'}) + for count in range(3): + self.notify_query(payload="Round %d" % count) + self.receive() + self.receive(stop=True) + + def testNotifyDistinctInTransaction(self): + self.start_handler('test_transaction', {'transaction': True}) + self.db.begin() + for count in range(3): + self.notify_query(payload='Round %d' % count) + self.db.commit() + self.receive(stop=True) + + def testNotifySameInTransaction(self): + self.start_handler('test_transaction', {'transaction': True}) + self.db.begin() + for count in range(3): + self.notify_query() + self.db.commit() + # these same notifications may be delivered as one, + # so we must not wait for all three to appear + self.sent = self.sent[:1] + self.receive(stop=True) + + def testNotifyNoTimeout(self): + self.start_handler(timeout=None) + self.assertIsNone(self.handler.timeout) + self.assertTrue(self.handler.listening) + sleep(0.02) + self.assertFalse(self.timeout) + self.receive(stop=True) + + def testNotifyZeroTimeout(self): + self.start_handler(timeout=0) + self.assertEqual(self.handler.timeout, 0) + self.assertTrue(self.handler.listening) + self.assertFalse(self.timeout) + + def testNotifyWithoutTimeout(self): + self.start_handler(timeout=1) + self.assertEqual(self.handler.timeout, 1) + sleep(0.02) + self.assertFalse(self.timeout) + 
self.receive(stop=True) + + def testNotifyWithTimeout(self): + self.start_handler(timeout=0.01) + sleep(0.02) + self.assertTrue(self.timeout) + + +if __name__ == '__main__': + unittest.main() diff --git a/module/TEST_PyGreSQL_dbapi20.py b/tests/test_dbapi20.py similarity index 83% rename from module/TEST_PyGreSQL_dbapi20.py rename to tests/test_dbapi20.py index 2d1ecc53..e48da81e 100755 --- a/module/TEST_PyGreSQL_dbapi20.py +++ b/tests/test_dbapi20.py @@ -1,12 +1,20 @@ #! /usr/bin/python # $Id$ -from __future__ import with_statement +try: + import unittest2 as unittest # for Python < 2.7 +except ImportError: + import unittest + +import sys -import unittest -import dbapi20 import pgdb +import dbapi20 + +# check whether the "with" statement is supported +no_with = sys.version_info[:2] < (2, 5) + # We need a database to test against. # If LOCAL_PyGreSQL.py exists we will get our information from that. # Otherwise we use the defaults. @@ -23,7 +31,8 @@ class test_PyGreSQL(dbapi20.DatabaseAPI20Test): driver = pgdb connect_args = () - connect_kw_args = {'dsn': dbhost + ':' + dbname} + connect_kw_args = {'database': dbname, + 'host': '%s:%d' % (dbhost or '', dbport or -1)} lower_func = 'lower' # For stored procedure test @@ -33,9 +42,14 @@ def setUp(self): try: con = self._connect() con.close() - except pgdb.Error: + except pgdb.Error: # try to create a missing database import pg - pg.DB().query('create database ' + dbname) + try: # first try to log in as superuser + db = pg.DB('postgres', dbhost or None, dbport or -1, + user='postgres') + except Exception: # then try to log in as current user + db = pg.DB('postgres', dbhost or None, dbport or -1) + db.query('create database ' + dbname) def tearDown(self): dbapi20.DatabaseAPI20Test.tearDown(self) @@ -70,6 +84,7 @@ def test_fetch_2_rows(self): con = self._connect() try: cur = con.cursor() + cur.execute("set datestyle to 'iso'") cur.execute("create table %s (" "stringtest varchar," "binarytest bytea," @@ -84,6 +99,7 @@ def test_fetch_2_rows(self): "datetimetest timestamp," "intervaltest interval," "rowidtest oid)" % table) + cur.execute("set standard_conforming_strings to on") for s in ('numeric', 'monetary', 'time'): cur.execute("set lc_%s to 'C'" % s) for _i in range(2): @@ -156,6 +172,25 @@ def test_float(self): else: self.assertEqual(inval, outval) + def test_bool(self): + values = [False, True, None, 't', 'f', 'true', 'false'] + table = self.table_prefix + 'booze' + con = self._connect() + try: + cur = con.cursor() + cur.execute( + "create table %s (n smallint, booltest bool)" % table) + params = enumerate(values) + cur.executemany("insert into %s values (%%s,%%s)" % table, params) + cur.execute("select * from %s order by 1" % table) + rows = cur.fetchall() + finally: + con.close() + rows = [row[1] for row in rows] + values[3] = values[5] = True + values[4] = values[6] = False + self.assertEqual(rows, values) + def test_set_decimal_type(self): decimal_type = pgdb.decimal_type() self.assert_(decimal_type is not None and callable(decimal_type)) @@ -198,9 +233,12 @@ def test_connection_errors(self): self.assertEqual(con.DataError, pgdb.DataError) self.assertEqual(con.NotSupportedError, pgdb.NotSupportedError) + @unittest.skipIf(no_with, 'context managers not supported') def test_connection_as_contextmanager(self): table = self.table_prefix + 'booze' con = self._connect() + # wrap "with" statements to avoid SyntaxError in Python < 2.5 + exec """from __future__ import with_statement\nif True: try: cur = con.cursor() cur.execute("create table %s (n 
smallint check(n!=4))" % table) @@ -229,7 +267,7 @@ def test_connection_as_contextmanager(self): rows = cur.fetchall() rows = [row[0] for row in rows] finally: - con.close() + con.close()\n""" self.assertEqual(rows, [1, 2, 5, 6, 9]) def test_cursor_connection(self): @@ -238,10 +276,13 @@ def test_cursor_connection(self): self.assertEqual(cur.connection, con) cur.close() + @unittest.skipIf(no_with, 'context managers not supported') def test_cursor_as_contextmanager(self): con = self._connect() + # wrap "with" statements to avoid SyntaxError in Python < 2.5 + exec """from __future__ import with_statement\nif True: with con.cursor() as cur: - self.assertEqual(cur.connection, con) + self.assertEqual(cur.connection, con)\n""" def test_pgdb_type(self): self.assertEqual(pgdb.STRING, pgdb.STRING) diff --git a/tests/test_tutorial.py b/tests/test_tutorial.py new file mode 100644 index 00000000..055a173d --- /dev/null +++ b/tests/test_tutorial.py @@ -0,0 +1,182 @@ +#! /usr/bin/python +# -*- coding: utf-8 -*- + +try: + import unittest2 as unittest # for Python < 2.7 +except ImportError: + import unittest + +try: + from collections import namedtuple +except ImportError: # Python < 2.6 + namedtuple = None + +from pg import DB +from pgdb import connect + +# We need a database to test against. If LOCAL_PyGreSQL.py exists we will +# get our information from that. Otherwise we use the defaults. +dbname = 'unittest' +dbhost = None +dbport = 5432 + +try: + from LOCAL_PyGreSQL import * +except ImportError: + pass + + +class TestClassicTutorial(unittest.TestCase): + """Test the First Steps Tutorial for the classic interface.""" + + def setUp(self): + """Setup test tables or empty them if they already exist.""" + db = DB(dbname=dbname, host=dbhost, port=dbport) + db.query("set datestyle to 'iso'") + db.query("set default_with_oids=false") + db.query("set standard_conforming_strings=false") + db.query("set client_min_messages=warning") + db.query("drop table if exists fruits cascade") + db.query("create table fruits(id serial primary key, name varchar)") + self.db = db + + def tearDown(self): + db = self.db + db.query("drop table fruits") + db.close() + + def test_all_steps(self): + db = self.db + r = db.get_tables() + self.assertIsInstance(r, list) + self.assertIn('public.fruits', r) + r = db.get_attnames('fruits') + self.assertIsInstance(r, dict) + self.assertEqual(r, {'id': 'int', 'name': 'text'}) + r = db.has_table_privilege('fruits', 'insert') + self.assertTrue(r) + r = db.insert('fruits', name='apple') + self.assertIsInstance(r, dict) + self.assertEqual(r, {'name': 'apple', 'id': 1}) + banana = r = db.insert('fruits', name='banana') + self.assertIsInstance(r, dict) + self.assertEqual(r, {'name': 'banana', 'id': 2}) + more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() + if namedtuple: + data = list(enumerate(more_fruits, start=3)) + else: # Python < 2.6 + data = [(n + 3, name) for n, name in enumerate(more_fruits)] + db.inserttable('fruits', data) + q = db.query('select * from fruits') + r = str(q).splitlines() + self.assertEqual(r[0], 'id| name ') + self.assertEqual(r[1], '--+----------') + self.assertEqual(r[2], ' 1|apple ') + self.assertEqual(r[8], ' 7|grapefruit') + self.assertEqual(r[9], '(7 rows)') + q = db.query('select * from fruits') + r = q.getresult() + self.assertIsInstance(r, list) + self.assertIsInstance(r[0], tuple) + self.assertEqual(r[0], (1, 'apple')) + self.assertEqual(r[6], (7, 'grapefruit')) + r = q.dictresult() + self.assertIsInstance(r, list) + self.assertIsInstance(r[0], 
dict) + self.assertEqual(r[0], {'id': 1, 'name': 'apple'}) + self.assertEqual(r[6], {'id': 7, 'name': 'grapefruit'}) + try: + rows = r = q.namedresult() + self.assertIsInstance(r, list) + self.assertIsInstance(r[0], tuple) + self.assertEqual(rows[3].name, 'durian') + except (AttributeError, TypeError): # Python < 2.6 + self.assertIsNone(namedtuple) + r = db.update('fruits', banana, name=banana['name'].capitalize()) + self.assertIsInstance(r, dict) + self.assertEqual(r, {'id': 2, 'name': 'Banana'}) + q = db.query('select * from fruits where id between 1 and 3') + r = str(q).splitlines() + self.assertEqual(r[0], 'id| name ') + self.assertEqual(r[1], '--+---------') + self.assertEqual(r[2], ' 1|apple ') + self.assertEqual(r[3], ' 2|Banana ') + self.assertEqual(r[4], ' 3|cherimaya') + self.assertEqual(r[5], '(3 rows)') + r = db.query('update fruits set name=initcap(name)') + self.assertIsInstance(r, str) + self.assertEqual(r, '7') + r = db.delete('fruits', banana) + self.assertIsInstance(r, int) + self.assertEqual(r, 1) + r = db.delete('fruits', banana) + self.assertIsInstance(r, int) + self.assertEqual(r, 0) + r = db.insert('fruits', banana) + self.assertIsInstance(r, dict) + self.assertEqual(r, {'id': 2, 'name': 'Banana'}) + apple = r = db.get('fruits', 1) + self.assertIsInstance(r, dict) + self.assertEqual(r, {'name': 'Apple', 'id': 1}) + r = db.insert('fruits', apple, id=8) + self.assertIsInstance(r, dict) + self.assertEqual(r, {'id': 8, 'name': 'Apple'}) + r = db.delete('fruits', id=8) + self.assertIsInstance(r, int) + self.assertEqual(r, 1) + + +class TestDbApi20Tutorial(unittest.TestCase): + """Test the First Steps Tutorial for the DB-API 2.0 interface.""" + + def setUp(self): + """Setup test tables or empty them if they already exist.""" + database = dbname + host = '%s:%d' % (dbhost or '', dbport or -1) + con = connect(database=database, host=host) + cur = con.cursor() + cur.execute("set datestyle to 'iso'") + cur.execute("set default_with_oids=false") + cur.execute("set standard_conforming_strings=false") + cur.execute("set client_min_messages=warning") + cur.execute("drop table if exists fruits cascade") + cur.execute("create table fruits(id serial primary key, name varchar)") + cur.close() + self.con = con + + def tearDown(self): + con = self.con + cur = con.cursor() + cur.execute("drop table fruits") + cur.close() + con.close() + + def test_all_steps(self): + con = self.con + cursor = con.cursor() + cursor.execute("insert into fruits (name) values ('apple')") + cursor.execute("insert into fruits (name) values (%s)", ('banana',)) + more_fruits = 'cherimaya durian eggfruit fig grapefruit'.split() + parameters = [(name,) for name in more_fruits] + cursor.executemany("insert into fruits (name) values (%s)", parameters) + con.commit() + cursor.execute('select * from fruits where id=1') + r = cursor.fetchone() + self.assertIsInstance(r, list) + self.assertEqual(r, [1, 'apple']) + cursor.execute('select * from fruits') + r = cursor.fetchall() + self.assertIsInstance(r, list) + self.assertEqual(len(r), 7) + self.assertEqual(r[0], [1, 'apple']) + self.assertEqual(r[6], [7, 'grapefruit']) + cursor.execute('select * from fruits') + r = cursor.fetchmany(2) + self.assertIsInstance(r, list) + self.assertEqual(len(r), 2) + self.assertEqual(r[0], [1, 'apple']) + self.assertEqual(r[1], [2, 'banana']) + + +if __name__ == '__main__': + unittest.main() diff --git a/tox.ini b/tox.ini new file mode 100644 index 00000000..d3857307 --- /dev/null +++ b/tox.ini @@ -0,0 +1,12 @@ +# config file for tox 2.0 
+# unfortunately py24,py25 are not supported by tox any more + +[tox] +envlist = py26,py27 + +[testenv] +deps = + py26: unittest2 +commands = + py26: unit2 discover [] + py27: python -m unittest discover [] diff --git a/tutorial/advanced.py b/tutorial/advanced.py deleted file mode 100755 index 9bb7ffe1..00000000 --- a/tutorial/advanced.py +++ /dev/null @@ -1,198 +0,0 @@ -#! /usr/bin/python -# advanced.py - demo of advanced features of PostGres. Some may not be ANSI. -# inspired from the Postgres tutorial -# adapted to Python 1995 by Pascal Andre - -print """ -__________________________________________________________________ -MODULE ADVANCED.PY : ADVANCED POSTGRES SQL COMMANDS TUTORIAL - -This module is designed for being imported from python prompt - -In order to run the samples included here, first create a connection -using : cnx = advanced.DB(...) - -The "..." should be replaced with whatever arguments you need to open an -existing database. Usually all you need is the name of the database and, -in fact, if it is the same as your login name, you can leave it empty. - -then start the demo with: advanced.demo(cnx) -__________________________________________________________________ -""" - -from pg import DB -import sys - -# waits for a key -def wait_key(): - print "Press " - sys.stdin.read(1) - -# inheritance features -def inherit_demo(pgcnx): - print "-----------------------------" - print "-- Inheritance:" - print "-- a table can inherit from zero or more tables. A query" - print "-- can reference either all rows of a table or all rows " - print "-- of a table plus all of its descendants." - print "-----------------------------" - print - print "-- For example, the capitals table inherits from cities table." - print "-- (It inherits all data fields from cities.)" - print - print "CREATE TABLE cities (" - print " name text," - print " population float8," - print " altitude int" - print ")" - print - print "CREATE TABLE capitals (" - print " state varchar(2)" - print ") INHERITS (cities)" - pgcnx.query("""CREATE TABLE cities ( - name text, - population float8, - altitude int)""") - pgcnx.query("""CREATE TABLE capitals ( - state varchar(2)) INHERITS (cities)""") - wait_key() - print - print "-- now, let's populate the tables" - print - print "INSERT INTO cities VALUES ('San Francisco', 7.24E+5, 63)" - print "INSERT INTO cities VALUES ('Las Vegas', 2.583E+5, 2174)" - print "INSERT INTO cities VALUES ('Mariposa', 1200, 1953)" - print - print "INSERT INTO capitals VALUES ('Sacramento', 3.694E+5, 30, 'CA')" - print "INSERT INTO capitals VALUES ('Madison', 1.913E+5, 845, 'WI')" - print - pgcnx.query("INSERT INTO cities VALUES ('San Francisco', 7.24E+5, 63)") - pgcnx.query("INSERT INTO cities VALUES ('Las Vegas', 2.583E+5, 2174)") - pgcnx.query("INSERT INTO cities VALUES ('Mariposa', 1200, 1953)") - pgcnx.query("INSERT INTO capitals VALUES ('Sacramento',3.694E+5,30,'CA')") - pgcnx.query("INSERT INTO capitals VALUES ('Madison', 1.913E+5, 845, 'WI')") - print - print "SELECT * FROM cities" - print pgcnx.query("SELECT * FROM cities") - print "SELECT * FROM capitals" - print pgcnx.query("SELECT * FROM capitals") - print - print "-- like before, a regular query references rows of the base" - print "-- table only" - print - print "SELECT name, altitude" - print "FROM cities" - print "WHERE altitude > 500;" - print pgcnx.query("""SELECT name, altitude - FROM cities - WHERE altitude > 500""") - print - print "-- on the other hand, you can find all cities, including " - print "-- capitals, that are 
located at an altitude of 500 'ft " - print "-- or higher by:" - print - print "SELECT c.name, c.altitude" - print "FROM cities* c" - print "WHERE c.altitude > 500" - print pgcnx.query("""SELECT c.name, c.altitude - FROM cities* c - WHERE c.altitude > 500""") - -# arrays attributes -def array_demo(pgcnx): - print "----------------------" - print "-- Arrays:" - print "-- attributes can be arrays of base types or user-defined " - print "-- types" - print "----------------------" - print - print "CREATE TABLE sal_emp (" - print " name text," - print " pay_by_quarter int4[]," - print " pay_by_extra_quarter int8[]," - print " schedule text[][]" - print ")" - pgcnx.query("""CREATE TABLE sal_emp ( - name text, - pay_by_quarter int4[], - pay_by_extra_quarter int8[], - schedule text[][])""") - wait_key() - print - print "-- insert instances with array attributes. " - print " Note the use of braces" - print - print "INSERT INTO sal_emp VALUES (" - print " 'Bill'," - print " '{10000,10000,10000,10000}'," - print " '{9223372036854775800,9223372036854775800,9223372036854775800}'," - print " '{{\"meeting\", \"lunch\"}, {}}')" - print - print "INSERT INTO sal_emp VALUES (" - print " 'Carol'," - print " '{20000,25000,25000,25000}'," - print " '{9223372036854775807,9223372036854775807,9223372036854775807}'," - print " '{{\"talk\", \"consult\"}, {\"meeting\"}}')" - print - pgcnx.query("""INSERT INTO sal_emp VALUES ( - 'Bill', '{10000,10000,10000,10000}', - '{9223372036854775800,9223372036854775800,9223372036854775800}', - '{{\"meeting\", \"lunch\"}, {}}')""") - pgcnx.query("""INSERT INTO sal_emp VALUES ( - 'Carol', '{20000,25000,25000,25000}', - '{9223372036854775807,9223372036854775807,9223372036854775807}', - '{{\"talk\", \"consult\"}, {\"meeting\"}}')""") - wait_key() - print - print "----------------------" - print "-- queries on array attributes" - print "----------------------" - print - print "SELECT name FROM sal_emp WHERE" - print " sal_emp.pay_by_quarter[1] <> sal_emp.pay_by_quarter[2]" - print - print pgcnx.query("""SELECT name FROM sal_emp WHERE - sal_emp.pay_by_quarter[1] <> sal_emp.pay_by_quarter[2]""") - print - print pgcnx.query("""SELECT name FROM sal_emp WHERE - sal_emp.pay_by_extra_quarter[1] <> sal_emp.pay_by_extra_quarter[2]""") - print - print "-- retrieve third quarter pay of all employees" - print - print "SELECT sal_emp.pay_by_quarter[3] FROM sal_emp" - print - print pgcnx.query("SELECT sal_emp.pay_by_quarter[3] FROM sal_emp") - print - print "-- retrieve third quarter extra pay of all employees" - print - print "SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp" - print pgcnx.query("SELECT sal_emp.pay_by_extra_quarter[3] FROM sal_emp") - print - print "-- retrieve first two quarters of extra quarter pay of all employees" - print - print "SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp" - print - print pgcnx.query("SELECT sal_emp.pay_by_extra_quarter[1:2] FROM sal_emp") - print - print "-- select subarrays" - print - print "SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp WHERE" - print " sal_emp.name = 'Bill'" - print pgcnx.query("SELECT sal_emp.schedule[1:2][1:1] FROM sal_emp WHERE " \ - "sal_emp.name = 'Bill'") - -# base cleanup -def demo_cleanup(pgcnx): - print "-- clean up (you must remove the children first)" - print "DROP TABLE sal_emp" - print "DROP TABLE capitals" - print "DROP TABLE cities;" - pgcnx.query("DROP TABLE sal_emp") - pgcnx.query("DROP TABLE capitals") - pgcnx.query("DROP TABLE cities") - -# main demo function -def demo(pgcnx): - inherit_demo(pgcnx) - 
array_demo(pgcnx) - demo_cleanup(pgcnx) diff --git a/tutorial/basics.py b/tutorial/basics.py deleted file mode 100755 index 60012cc6..00000000 --- a/tutorial/basics.py +++ /dev/null @@ -1,296 +0,0 @@ -#! /usr/bin/python -# basics.py - basic SQL commands tutorial -# inspired from the Postgres95 tutorial -# adapted to Python 1995 by Pascal ANDRE - -print """ -__________________________________________________________________ -MODULE BASICS.PY : BASIC POSTGRES SQL COMMANDS TUTORIAL - -This module is designed for being imported from python prompt - -In order to run the samples included here, first create a connection -using : cnx = basics.DB(...) - -The "..." should be replaced with whatever arguments you need to open an -existing database. Usually all you need is the name of the database and, -in fact, if it is the same as your login name, you can leave it empty. - -then start the demo with: basics.demo(cnx) -__________________________________________________________________ -""" - -from pg import DB -import sys - -# waits for a key -def wait_key(): - print "Press " - sys.stdin.read(1) - -# table creation commands -def create_table(pgcnx): - print "-----------------------------" - print "-- Creating a table:" - print "-- a CREATE TABLE is used to create base tables. POSTGRES" - print "-- SQL has its own set of built-in types. (Note that" - print "-- keywords are case-insensitive but identifiers are " - print "-- case-sensitive.)" - print "-----------------------------" - print - print "Sending query :" - print "CREATE TABLE weather (" - print " city varchar(80)," - print " temp_lo int," - print " temp_hi int," - print " prcp float8," - print " date date" - print ")" - pgcnx.query("""CREATE TABLE weather (city varchar(80), temp_lo int, - temp_hi int, prcp float8, date date)""") - print - print "Sending query :" - print "CREATE TABLE cities (" - print " name varchar(80)," - print " location point" - print ")" - pgcnx.query("""CREATE TABLE cities ( - name varchar(80), - location point)""") - -# data insertion commands -def insert_data(pgcnx): - print "-----------------------------" - print "-- Inserting data:" - print "-- an INSERT statement is used to insert a new row into" - print "-- a table. There are several ways you can specify what" - print "-- columns the data should go to." - print "-----------------------------" - print - print "-- 1. the simplest case is when the list of value correspond to" - print "-- the order of the columns specified in CREATE TABLE." - print - print "Sending query :" - print "INSERT INTO weather " - print " VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')" - pgcnx.query("""INSERT INTO weather - VALUES ('San Francisco', 46, 50, 0.25, '11/27/1994')""") - print - print "Sending query :" - print "INSERT INTO cities " - print " VALUES ('San Francisco', '(-194.0, 53.0)')" - pgcnx.query("""INSERT INTO cities - VALUES ('San Francisco', '(-194.0, 53.0)')""") - print - wait_key() - print "-- 2. you can also specify what column the values correspond " - print " to. (The columns can be specified in any order. You may " - print " also omit any number of columns. eg. 
unknown precipitation" - print " below)" - print "Sending query :" - print "INSERT INTO weather (city, temp_lo, temp_hi, prcp, date)" - print " VALUES ('San Francisco', 43, 57, 0.0, '11/29/1994')" - pgcnx.query("INSERT INTO weather (date, city, temp_hi, temp_lo)" \ - "VALUES ('11/29/1994', 'Hayward', 54, 37)") - -# direct selection commands -def select_data1(pgcnx): - print "-----------------------------" - print "-- Retrieving data:" - print "-- a SELECT statement is used for retrieving data. The " - print "-- basic syntax is:" - print "-- SELECT columns FROM tables WHERE predicates" - print "-----------------------------" - print - print "-- a simple one would be the query:" - print "SELECT * FROM weather" - print - print "The result is :" - q = pgcnx.query("SELECT * FROM weather") - print q - print - print "-- you may also specify expressions in the target list (the " - print "-- 'AS column' specifies the column name of the result. It is " - print "-- optional.)" - print "The query :" - print " SELECT city, (temp_hi+temp_lo)/2 AS temp_avg, date " - print " FROM weather" - print "Gives :" - print pgcnx.query("""SELECT city, (temp_hi+temp_lo)/2 - AS temp_avg, date FROM weather""") - print - print "-- if you want to retrieve rows that satisfy certain condition" - print "-- (ie. a restriction), specify the condition in WHERE. The " - print "-- following retrieves the weather of San Francisco on rainy " - print "-- days." - print "SELECT *" - print "FROM weather" - print "WHERE city = 'San Francisco' " - print " and prcp > 0.0" - print pgcnx.query("""SELECT * FROM weather WHERE city = 'San Francisco' - AND prcp > 0.0""") - print - print "-- here is a more complicated one. Duplicates are removed when " - print "-- DISTINCT is specified. ORDER BY specifies the column to sort" - print "-- on. (Just to make sure the following won't confuse you, " - print "-- DISTINCT and ORDER BY can be used separately.)" - print "SELECT DISTINCT city" - print "FROM weather" - print "ORDER BY city;" - print pgcnx.query("SELECT DISTINCT city FROM weather ORDER BY city") - -# selection to a temporary table -def select_data2(pgcnx): - print "-----------------------------" - print "-- Retrieving data into other classes:" - print "-- a SELECT ... INTO statement can be used to retrieve " - print "-- data into another class." 
- print "-----------------------------" - print - print "The query :" - print "SELECT * INTO TABLE temptab " - print "FROM weather" - print "WHERE city = 'San Francisco' " - print " and prcp > 0.0" - pgcnx.query("""SELECT * INTO TABLE temptab FROM weather - WHERE city = 'San Francisco' and prcp > 0.0""") - print "Fills the table temptab, that can be listed with :" - print "SELECT * from temptab" - print pgcnx.query("SELECT * from temptab") - -# aggregate creation commands -def create_aggregate(pgcnx): - print "-----------------------------" - print "-- Aggregates" - print "-----------------------------" - print - print "Let's consider the query :" - print "SELECT max(temp_lo)" - print "FROM weather;" - print pgcnx.query("SELECT max(temp_lo) FROM weather") - print - print "-- Aggregate with GROUP BY" - print "SELECT city, max(temp_lo)" - print "FROM weather " - print "GROUP BY city;" - print pgcnx.query( """SELECT city, max(temp_lo) - FROM weather GROUP BY city""") - -# table join commands -def join_table(pgcnx): - print "-----------------------------" - print "-- Joining tables:" - print "-- queries can access multiple tables at once or access" - print "-- the same table in such a way that multiple instances" - print "-- of the table are being processed at the same time." - print "-----------------------------" - print - print "-- suppose we want to find all the records that are in the " - print "-- temperature range of other records. W1 and W2 are aliases " - print "--for weather." - print - print "SELECT W1.city, W1.temp_lo, W1.temp_hi, " - print " W2.city, W2.temp_lo, W2.temp_hi" - print "FROM weather W1, weather W2" - print "WHERE W1.temp_lo < W2.temp_lo " - print " and W1.temp_hi > W2.temp_hi" - print - print pgcnx.query("""SELECT W1.city, W1.temp_lo, W1.temp_hi, - W2.city, W2.temp_lo, W2.temp_hi FROM weather W1, weather W2 - WHERE W1.temp_lo < W2.temp_lo and W1.temp_hi > W2.temp_hi""") - print - print "-- let's join two tables. The following joins the weather table" - print "-- and the cities table." - print - print "SELECT city, location, prcp, date" - print "FROM weather, cities" - print "WHERE name = city" - print - print pgcnx.query("""SELECT city, location, prcp, date FROM weather, cities - WHERE name = city""") - print - print "-- since the column names are all different, we don't have to " - print "-- specify the table name. If you want to be clear, you can do " - print "-- the following. They give identical results, of course." - print - print "SELECT w.city, c.location, w.prcp, w.date" - print "FROM weather w, cities c" - print "WHERE c.name = w.city;" - print - print pgcnx.query("""SELECT w.city, c.location, w.prcp, w.date - FROM weather w, cities c WHERE c.name = w.city""") - -# data updating commands -def update_data(pgcnx): - print "-----------------------------" - print "-- Updating data:" - print "-- an UPDATE statement is used for updating data. 
" - print "-----------------------------" - print - print "-- suppose you discover the temperature readings are all off by" - print "-- 2 degrees as of Nov 28, you may update the data as follow:" - print - print "UPDATE weather" - print " SET temp_hi = temp_hi - 2, temp_lo = temp_lo - 2" - print " WHERE date > '11/28/1994'" - print - pgcnx.query("""UPDATE weather - SET temp_hi = temp_hi - 2, temp_lo = temp_lo - 2 - WHERE date > '11/28/1994'""") - print - print "SELECT * from weather" - print pgcnx.query("SELECT * from weather") - -# data deletion commands -def delete_data(pgcnx): - print "-----------------------------" - print "-- Deleting data:" - print "-- a DELETE statement is used for deleting rows from a " - print "-- table." - print "-----------------------------" - print - print "-- suppose you are no longer interested in the weather of " - print "-- Hayward, you can do the following to delete those rows from" - print "-- the table" - print - print "DELETE FROM weather WHERE city = 'Hayward'" - pgcnx.query("DELETE FROM weather WHERE city = 'Hayward'") - print - print "SELECT * from weather" - print - print pgcnx.query("SELECT * from weather") - print - print "-- you can also delete all the rows in a table by doing the " - print "-- following. (This is different from DROP TABLE which removes " - print "-- the table in addition to the removing the rows.)" - print - print "DELETE FROM weather" - pgcnx.query("DELETE FROM weather") - print - print "SELECT * from weather" - print pgcnx.query("SELECT * from weather") - -# table removal commands -def remove_table(pgcnx): - print "-----------------------------" - print "-- Removing the tables:" - print "-- DROP TABLE is used to remove tables. After you have" - print "-- done this, you can no longer use those tables." - print "-----------------------------" - print - print "DROP TABLE weather, cities, temptab" - pgcnx.query("DROP TABLE weather, cities, temptab") - -# main demo function -def demo(pgcnx): - create_table(pgcnx) - wait_key() - insert_data(pgcnx) - wait_key() - select_data1(pgcnx) - select_data2(pgcnx) - create_aggregate(pgcnx) - join_table(pgcnx) - update_data(pgcnx) - delete_data(pgcnx) - remove_table(pgcnx) diff --git a/tutorial/func.py b/tutorial/func.py deleted file mode 100755 index af2b412b..00000000 --- a/tutorial/func.py +++ /dev/null @@ -1,205 +0,0 @@ -# func.py - demonstrate the use of SQL functions -# inspired from the PostgreSQL tutorial -# adapted to Python 1995 by Pascal ANDRE - -print """ -__________________________________________________________________ -MODULE FUNC.PY : SQL FUNCTION DEFINITION TUTORIAL - -This module is designed for being imported from python prompt - -In order to run the samples included here, first create a connection -using : cnx = func.DB(...) - -The "..." should be replaced with whatever arguments you need to open an -existing database. Usually all you need is the name of the database and, -in fact, if it is the same as your login name, you can leave it empty. - -then start the demo with: func.demo(cnx) -__________________________________________________________________ -""" - -from pg import DB -import sys - -# waits for a key -def wait_key(): - print "Press " - sys.stdin.read(1) - -# basic functions declaration -def base_func(pgcnx): - print "-----------------------------" - print "-- Creating SQL Functions on Base Types" - print "-- a CREATE FUNCTION statement lets you create a new " - print "-- function that can be used in expressions (in SELECT, " - print "-- INSERT, etc.). 
We will start with functions that " - print "-- return values of base types." - print "-----------------------------" - print - print "--" - print "-- let's create a simple SQL function that takes no arguments" - print "-- and returns 1" - print - print "CREATE FUNCTION one() RETURNS int4" - print " AS 'SELECT 1 as ONE' LANGUAGE 'sql'" - pgcnx.query("""CREATE FUNCTION one() RETURNS int4 - AS 'SELECT 1 as ONE' LANGUAGE 'sql'""") - wait_key() - print - print "--" - print "-- functions can be used in any expressions (eg. in the target" - print "-- list or qualifications)" - print - print "SELECT one() AS answer" - print pgcnx.query("SELECT one() AS answer") - print - print "--" - print "-- here's how you create a function that takes arguments. The" - print "-- following function returns the sum of its two arguments:" - print - print "CREATE FUNCTION add_em(int4, int4) RETURNS int4" - print " AS 'SELECT $1 + $2' LANGUAGE 'sql'" - pgcnx.query("""CREATE FUNCTION add_em(int4, int4) RETURNS int4 - AS 'SELECT $1 + $2' LANGUAGE 'sql'""") - print - print "SELECT add_em(1, 2) AS answer" - print pgcnx.query("SELECT add_em(1, 2) AS answer") - -# functions on composite types -def comp_func(pgcnx): - print "-----------------------------" - print "-- Creating SQL Functions on Composite Types" - print "-- it is also possible to create functions that return" - print "-- values of composite types." - print "-----------------------------" - print - print "-- before we create more sophisticated functions, let's " - print "-- populate an EMP table" - print - print "CREATE TABLE EMP (" - print " name text," - print " salary int4," - print " age int4," - print " dept varchar(16)" - print ")" - pgcnx.query("""CREATE TABLE EMP ( - name text, - salary int4, - age int4, - dept varchar(16))""") - print - print "INSERT INTO EMP VALUES ('Sam', 1200, 16, 'toy')" - print "INSERT INTO EMP VALUES ('Claire', 5000, 32, 'shoe')" - print "INSERT INTO EMP VALUES ('Andy', -1000, 2, 'candy')" - print "INSERT INTO EMP VALUES ('Bill', 4200, 36, 'shoe')" - print "INSERT INTO EMP VALUES ('Ginger', 4800, 30, 'candy')" - pgcnx.query("INSERT INTO EMP VALUES ('Sam', 1200, 16, 'toy')") - pgcnx.query("INSERT INTO EMP VALUES ('Claire', 5000, 32, 'shoe')") - pgcnx.query("INSERT INTO EMP VALUES ('Andy', -1000, 2, 'candy')") - pgcnx.query("INSERT INTO EMP VALUES ('Bill', 4200, 36, 'shoe')") - pgcnx.query("INSERT INTO EMP VALUES ('Ginger', 4800, 30, 'candy')") - wait_key() - print - print "-- the argument of a function can also be a tuple. For " - print "-- instance, double_salary takes a tuple of the EMP table" - print - print "CREATE FUNCTION double_salary(EMP) RETURNS int4" - print " AS 'SELECT $1.salary * 2 AS salary' LANGUAGE 'sql'" - pgcnx.query("""CREATE FUNCTION double_salary(EMP) RETURNS int4 - AS 'SELECT $1.salary * 2 AS salary' LANGUAGE 'sql'""") - print - print "SELECT name, double_salary(EMP) AS dream" - print "FROM EMP" - print "WHERE EMP.dept = 'toy'" - print pgcnx.query("""SELECT name, double_salary(EMP) AS dream - FROM EMP WHERE EMP.dept = 'toy'""") - print - print "-- the return value of a function can also be a tuple. However," - print "-- make sure that the expressions in the target list is in the " - print "-- same order as the columns of EMP." 
- print - print "CREATE FUNCTION new_emp() RETURNS EMP" - print " AS 'SELECT \'None\'::text AS name," - print " 1000 AS salary," - print " 25 AS age," - print " \'none\'::varchar(16) AS dept'" - print " LANGUAGE 'sql'" - pgcnx.query("""CREATE FUNCTION new_emp() RETURNS EMP - AS 'SELECT \\\'None\\\'::text AS name, - 1000 AS salary, - 25 AS age, - \\\'none\\\'::varchar(16) AS dept' - LANGUAGE 'sql'""") - wait_key() - print - print "-- you can then project a column out of resulting the tuple by" - print "-- using the \"function notation\" for projection columns. " - print "-- (ie. bar(foo) is equivalent to foo.bar) Note that we don't" - print "-- support new_emp().name at this moment." - print - print "SELECT name(new_emp()) AS nobody" - print pgcnx.query("SELECT name(new_emp()) AS nobody") - print - print "-- let's try one more function that returns tuples" - print "CREATE FUNCTION high_pay() RETURNS setof EMP" - print " AS 'SELECT * FROM EMP where salary > 1500'" - print " LANGUAGE 'sql'" - pgcnx.query("""CREATE FUNCTION high_pay() RETURNS setof EMP - AS 'SELECT * FROM EMP where salary > 1500' - LANGUAGE 'sql'""") - print - print "SELECT name(high_pay()) AS overpaid" - print pgcnx.query("SELECT name(high_pay()) AS overpaid") - -# function with multiple SQL commands -def mult_func(pgcnx): - print "-----------------------------" - print "-- Creating SQL Functions with multiple SQL statements" - print "-- you can also create functions that do more than just a" - print "-- SELECT." - print "-----------------------------" - print - print "-- you may have noticed that Andy has a negative salary. We'll" - print "-- create a function that removes employees with negative " - print "-- salaries." - print - print "SELECT * FROM EMP" - print pgcnx.query("SELECT * FROM EMP") - print - print "CREATE FUNCTION clean_EMP () RETURNS int4" - print " AS 'DELETE FROM EMP WHERE EMP.salary <= 0" - print " SELECT 1 AS ignore_this'" - print " LANGUAGE 'sql'" - pgcnx.query("CREATE FUNCTION clean_EMP () RETURNS int4 AS 'DELETE FROM EMP WHERE EMP.salary <= 0; SELECT 1 AS ignore_this' LANGUAGE 'sql'") - print - print "SELECT clean_EMP()" - print pgcnx.query("SELECT clean_EMP()") - print - print "SELECT * FROM EMP" - print pgcnx.query("SELECT * FROM EMP") - -# base cleanup -def demo_cleanup(pgcnx): - print "-- remove functions that were created in this file" - print - print "DROP FUNCTION clean_EMP()" - print "DROP FUNCTION high_pay()" - print "DROP FUNCTION new_emp()" - print "DROP FUNCTION add_em(int4, int4)" - print "DROP FUNCTION one()" - print - print "DROP TABLE EMP CASCADE" - pgcnx.query("DROP FUNCTION clean_EMP()") - pgcnx.query("DROP FUNCTION high_pay()") - pgcnx.query("DROP FUNCTION new_emp()") - pgcnx.query("DROP FUNCTION add_em(int4, int4)") - pgcnx.query("DROP FUNCTION one()") - pgcnx.query("DROP TABLE EMP CASCADE") - -# main demo function -def demo(pgcnx): - base_func(pgcnx) - comp_func(pgcnx) - mult_func(pgcnx) - demo_cleanup(pgcnx) diff --git a/tutorial/syscat.py b/tutorial/syscat.py deleted file mode 100755 index 1ab1d584..00000000 --- a/tutorial/syscat.py +++ /dev/null @@ -1,149 +0,0 @@ -# syscat.py - parses some system catalogs -# inspired from the PostgreSQL tutorial -# adapted to Python 1995 by Pascal ANDRE - -print """ -__________________________________________________________________ -MODULE SYSCAT.PY : PARSES SOME POSTGRESQL SYSTEM CATALOGS - -This module is designed for being imported from python prompt - -In order to run the samples included here, first create a connection -using : cnx = 
syscat.DB(...) - -The "..." should be replaced with whatever arguments you need to open an -existing database. Usually all you need is the name of the database and, -in fact, if it is the same as your login name, you can leave it empty. - -then start the demo with: syscat.demo(cnx) - -Some results may be empty, depending on your base status." - -__________________________________________________________________ -""" - -from pg import DB -import sys - -# waits for a key -def wait_key(): - print "Press " - sys.stdin.read(1) - -# lists all simple indices -def list_simple_ind(pgcnx): - result = pgcnx.query("""SELECT bc.relname AS class_name, - ic.relname AS index_name, a.attname - FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a - WHERE i.indrelid = bc.oid AND i.indexrelid = bc.oid - AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid - AND i.indproc = '0'::oid AND a.attisdropped = 'f' - ORDER BY class_name, index_name, attname""") - return result - -# list all user defined attributes and their type in user-defined classes -def list_all_attr(pgcnx): - result = pgcnx.query("""SELECT c.relname, a.attname, t.typname - FROM pg_class c, pg_attribute a, pg_type t - WHERE c.relkind = 'r' and c.relname !~ '^pg_' - AND c.relname !~ '^Inv' and a.attnum > 0 - AND a.attrelid = c.oid and a.atttypid = t.oid - AND a.attisdropped = 'f' - ORDER BY relname, attname""") - return result - -# list all user defined base type -def list_user_base_type(pgcnx): - result = pgcnx.query("""SELECT u.usename, t.typname - FROM pg_type t, pg_user u - WHERE u.usesysid = int2in(int4out(t.typowner)) - AND t.typrelid = '0'::oid and t.typelem = '0'::oid - AND u.usename <> 'postgres' order by usename, typname""") - return result - -# list all right-unary operators -def list_right_unary_operator(pgcnx): - result = pgcnx.query("""SELECT o.oprname AS right_unary, - lt.typname AS operand, result.typname AS return_type - FROM pg_operator o, pg_type lt, pg_type result - WHERE o.oprkind='r' and o.oprleft = lt.oid - AND o.oprresult = result.oid - ORDER BY operand""") - return result - -# list all left-unary operators -def list_left_unary_operator(pgcnx): - result = pgcnx.query("""SELECT o.oprname AS left_unary, - rt.typname AS operand, result.typname AS return_type - FROM pg_operator o, pg_type rt, pg_type result - WHERE o.oprkind='l' AND o.oprright = rt.oid - AND o.oprresult = result.oid - ORDER BY operand""") - return result - -# list all binary operators -def list_binary_operator(pgcnx): - result = pgcnx.query("""SELECT o.oprname AS binary_op, - rt.typname AS right_opr, lt.typname AS left_opr, - result.typname AS return_type - FROM pg_operator o, pg_type rt, pg_type lt, pg_type result - WHERE o.oprkind = 'b' AND o.oprright = rt.oid - AND o.oprleft = lt.oid AND o.oprresult = result.oid""") - return result - -# returns the name, args and return type from all function of lang l -def list_lang_func(pgcnx, l): - result = pgcnx.query("""SELECT p.proname, p.pronargs, t.typname - FROM pg_proc p, pg_language l, pg_type t - WHERE p.prolang = l.oid AND p.prorettype = t.oid - AND l.lanname = '%s' - ORDER BY proname""" % l) - return result - -# lists all the aggregate functions and the type to which they can be applied -def list_agg_func(pgcnx): - result = pgcnx.query("""SELECT p.proname, t.typname - FROM pg_aggregate a, pg_proc p, pg_type t - WHERE a.aggfnoid = p.oid - and p.proargtypes[0] = t.oid - ORDER BY proname, typname""") - return result - -# lists all the operator classes that can be used with each access method as -# well as the 
operators that can be used with the respective operator classes -def list_op_class(pgcnx): - result = pgcnx.query("""SELECT am.amname, opc.opcname, opr.oprname - FROM pg_am am, pg_amop amop, pg_opclass opc, pg_operator opr - WHERE amop.amopid = am.oid and amop.amopclaid = opc.oid - AND amop.amopopr = opr.oid order by amname, opcname, oprname""") - return result - -# demo function - runs all examples -def demo(pgcnx): - import sys, os - save_stdout = sys.stdout - sys.stdout = os.popen("more", "w") - print "Listing simple indices ..." - print list_simple_ind(pgcnx) - print "Listing all attributes ..." - print list_all_attr(pgcnx) - print "Listing all user-defined base types ..." - print list_user_base_type(pgcnx) - print "Listing all left-unary operators defined ..." - print list_left_unary_operator(pgcnx) - print "Listing all right-unary operators defined ..." - print list_right_unary_operator(pgcnx) - print "Listing all binary operators ..." - print list_binary_operator(pgcnx) - print "Listing C external function linked ..." - print list_lang_func(pgcnx, 'C') - print "Listing C internal functions ..." - print list_lang_func(pgcnx, 'internal') - print "Listing SQL functions defined ..." - print list_lang_func(pgcnx, 'sql') - print "Listing 'aggregate functions' ..." - print list_agg_func(pgcnx) - print "Listing 'operator classes' ..." - print list_op_class(pgcnx) - del sys.stdout - sys.stdout = save_stdout
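
The removed tutorial/syscat.py demonstrated how to inspect the PostgreSQL system catalogs through the classic pg.DB interface. A minimal sketch of one such catalog query is shown below; it is only an illustration, not part of the patch itself, and it assumes the local "unittest" database that the new test modules connect to (Python 2 syntax, matching the 2.6/2.7 environments listed in tox.ini):

    #! /usr/bin/python
    # Illustrative sketch only, not part of the patch above:
    # list user-defined tables and their column types via the system
    # catalogs, in the spirit of list_all_attr() from the removed
    # tutorial/syscat.py. The "unittest" database name is an assumption
    # taken from the test configuration.

    from pg import DB

    db = DB(dbname='unittest')
    q = db.query("""SELECT c.relname, a.attname, t.typname
        FROM pg_class c, pg_attribute a, pg_type t
        WHERE c.relkind = 'r' AND c.relname !~ '^pg_'
          AND a.attnum > 0 AND a.attisdropped = 'f'
          AND a.attrelid = c.oid AND a.atttypid = t.oid
        ORDER BY c.relname, a.attname""")
    print q  # the query result prints as a formatted table
    db.close()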