diff --git a/.github/README.md b/.github/README.md new file mode 100644 index 0000000000000..4f9cb6e604ac1 --- /dev/null +++ b/.github/README.md @@ -0,0 +1,267 @@ +# PostgreSQL DSQL Client (pdsql) + +A PostgreSQL command-line client with built-in AWS DSQL authentication support. Connect to AWS DSQL databases with automatic token generation - no manual token management required. + +## 🚀 Quick Installation + +Install `pdsql` with a single command: + +```bash +curl -sSL https://raw.githubusercontent.com/marcbowes/postgres/refs/heads/master/scripts/install.sh | sh +``` + +This installer automatically detects your platform (macOS/Linux) and architecture, downloads the appropriate package, and installs `pdsql` to your local environment. + +### Manual Installation + +If you prefer to download manually: + +1. Visit the [GitHub Releases page](https://github.com/marcbowes/postgres/releases) +2. Download the appropriate package for your platform: + - **macOS Intel**: `postgres-dsql-macos-x64.zip` + - **macOS Apple Silicon**: `postgres-dsql-macos-arm64.zip` + - **Linux x64**: `postgres-dsql-linux-x64.zip` + - **Linux ARM64**: `postgres-dsql-linux-arm64.zip` +3. 
Extract and run: + ```bash + unzip postgres-dsql-*.zip + cd postgres-dsql + ./bin/pdsql --version + ``` + +### Package Manager Installation (Linux) + +For Linux users, we also provide native packages: + +**Debian/Ubuntu (.deb)**: +```bash +wget https://github.com/marcbowes/postgres/releases/latest/download/postgres-dsql_1.0.0-1_amd64.deb +sudo apt install ./postgres-dsql_1.0.0-1_amd64.deb +``` + +**RHEL/Fedora (.rpm)**: +```bash +wget https://github.com/marcbowes/postgres/releases/latest/download/postgres-dsql-1.0.0-1.x86_64.rpm +sudo dnf install postgres-dsql-1.0.0-1.x86_64.rpm +``` + +## 🔧 Usage + +### Basic Connection + +Connect to an AWS DSQL database: + +```bash +pdsql --host=your-dsql-endpoint.example.com --user=admin --port=5432 --dbname=postgres +``` + +### Connection String Format + +You can also use PostgreSQL connection strings: + +```bash +pdsql "host=your-dsql-endpoint.example.com user=admin port=5432 dbname=postgres" +``` + +### Key Features + +- **Automatic Authentication**: No need to manually generate or manage tokens +- **Secure by Default**: Automatically enforces SSL connections +- **Token Auto-Renewal**: Handles token expiration transparently +- **Standard psql Interface**: All familiar psql commands and features work + +### How It Works + +When you connect with `pdsql`: + +1. **SSL Required**: Automatically enforces secure connections +2. **Token Generation**: Generates temporary AWS authentication tokens automatically +3. **Admin Privileges**: When connecting as `admin` user, full admin privileges are granted +4. **Auto-Renewal**: New tokens are generated for each connection attempt +5. **Short-Lived Tokens**: Tokens expire after 5 seconds for enhanced security + +## 🔐 AWS Credentials Setup + +`pdsql` uses your existing AWS credentials. 
Ensure you have credentials configured through one of these methods: + +### AWS CLI (Recommended) +```bash +aws configure +``` + +### Environment Variables +```bash +export AWS_ACCESS_KEY_ID=your_access_key +export AWS_SECRET_ACCESS_KEY=your_secret_key +export AWS_REGION=us-east-1 # Optional +``` + +### AWS Credentials File +Create `~/.aws/credentials`: +```ini +[default] +aws_access_key_id = your_access_key +aws_secret_access_key = your_secret_key +``` + +### IAM Roles (EC2/ECS/Lambda) +If running on AWS infrastructure, `pdsql` will automatically use IAM roles. + +## 📋 Examples + +### Interactive Session +```bash +# Start an interactive session +pdsql --host=workgroup.123456789012.us-east-1.dsql.amazonaws.com --user=admin + +# Once connected, you can run SQL commands: +postgres=> \l +postgres=> CREATE TABLE users (id serial, name text); +postgres=> INSERT INTO users (name) VALUES ('Alice'); +postgres=> SELECT * FROM users; +``` + +### One-liner Queries +```bash +# Execute a single query +pdsql --host=your-endpoint.dsql.amazonaws.com --user=admin -c "SELECT version();" + +# Execute SQL from a file +pdsql --host=your-endpoint.dsql.amazonaws.com --user=admin -f queries.sql +``` + +### Connection with Options +```bash +# Connect with specific database and additional options +pdsql --host=your-endpoint.dsql.amazonaws.com \ + --user=admin \ + --port=5432 \ + --dbname=postgres \ + --echo-queries \ + --no-password +``` + +## 🆘 Troubleshooting + +### Connection Issues + +**Error: "could not connect to server"** +- Verify your DSQL endpoint URL is correct +- Ensure your security group allows connections on port 5432 +- Check that your AWS credentials are properly configured + +### Authentication Issues + +**Error: "authentication failed"** +- Verify your AWS credentials have the necessary DSQL permissions +- Ensure you're connecting to the correct DSQL workgroup +- Check that your IAM user/role has `dsql:DbConnect` permissions + +### AWS Credentials + +**Error: "Unable to 
locate credentials"** +- Run `aws configure` to set up credentials +- Or set `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables +- Verify your credentials work: `aws sts get-caller-identity` + +## 🔍 Debug Logging + +For troubleshooting authentication and connection issues, enable detailed AWS SDK logging: + +### Environment Variables + +- **`AWS_LOG_LEVEL`**: Controls verbosity (NONE, FATAL, ERROR, WARN, INFO, DEBUG, TRACE) +- **`AWS_LOG_FILE`**: Controls output destination (stdout, stderr, or file path) + +### Basic Debugging + +Enable debug logging to stderr (default): +```bash +AWS_LOG_LEVEL=DEBUG pdsql --host=your-endpoint.dsql.amazonaws.com --user=admin +``` + +### Detailed Tracing + +Enable maximum verbosity for deep debugging: +```bash +AWS_LOG_LEVEL=TRACE pdsql --host=your-endpoint.dsql.amazonaws.com --user=admin +``` + +### Log to File + +Save logs to a file for analysis: +```bash +AWS_LOG_LEVEL=DEBUG AWS_LOG_FILE=/tmp/dsql-debug.log pdsql --host=your-endpoint.dsql.amazonaws.com --user=admin +``` + +### Log to stdout + +Send logs to stdout (useful for piping): +```bash +AWS_LOG_LEVEL=INFO AWS_LOG_FILE=stdout pdsql --host=your-endpoint.dsql.amazonaws.com --user=admin +``` + +### What the Logs Show + +The debug logs will reveal: +- **Token Generation**: Process of creating DSQL authentication tokens +- **AWS Region Detection**: How the region is determined from hostname or environment +- **Credentials Provider Chain**: Which credential sources are tried (environment, files, IAM roles, IMDS) +- **HTTP Infrastructure**: Event loops and network setup for IMDS on EC2 +- **Error Details**: Specific AWS SDK errors with error codes + +### Example Log Output + +``` +[INFO] [2025-06-28T03:33:55Z] Starting DSQL token generation for endpoint: your-endpoint.dsql.amazonaws.com +[DEBUG] [2025-06-28T03:33:55Z] Using AWS_REGION from environment: us-west-2 +[DEBUG] [2025-06-28T03:33:55Z] Creating credentials provider chain with bootstrap for IMDS +[INFO] 
[2025-06-28T03:33:55Z] Token generation successful +``` + +### Disable Logging + +To disable all logging: +```bash +AWS_LOG_LEVEL=NONE pdsql --host=your-endpoint.dsql.amazonaws.com --user=admin +``` + +### Getting Help + +For additional help: +```bash +pdsql --help +``` + +## 🔄 Updates + +To update to the latest version, simply run the installation command again: + +```bash +curl -sSL https://raw.githubusercontent.com/marcbowes/postgres/refs/heads/master/scripts/install.sh | sh +``` + +## 🏗️ Building from Source + +If you need to build from source or contribute to development, see our [Development Guide](README_PACKAGING.md) for detailed build instructions. + +### Quick Build + +```bash +git clone https://github.com/marcbowes/postgres.git +cd postgres +git submodule update --init --recursive +./scripts/build-dsql.sh +``` + +## 📄 License + +This project is based on PostgreSQL and maintains compatibility with the PostgreSQL license. + +## 🤝 Contributing + +Contributions are welcome! Please see our contributing guidelines and feel free to submit issues or pull requests. + +--- + +**Note**: This tool is specifically designed for AWS DSQL connections. For regular PostgreSQL connections, use the standard `psql` client. 
diff --git a/.github/workflows/build-dsql.yml b/.github/workflows/build-dsql.yml new file mode 100644 index 0000000000000..d51943511bf0e --- /dev/null +++ b/.github/workflows/build-dsql.yml @@ -0,0 +1,133 @@ +name: Build and Package DSQL + +on: + workflow_dispatch: # Allow manual triggering + push: + branches: ["*"] # Run on all branch pushes (CI) + tags: ['v*'] # Run on version tags (Release) + pull_request: + branches: ["*"] # Run on all pull requests (CI) + +jobs: + build: + strategy: + matrix: + os: [macos-latest, ubuntu-22.04] + runs-on: ${{ matrix.os }} + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: "recursive" + + - name: Set up macOS + if: runner.os == 'macOS' + run: | + brew update + brew install openssl@3 icu4c zip readline go + + - name: Set up Ubuntu + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y \ + flex \ + bison \ + libreadline-dev \ + zlib1g-dev \ + libssl-dev \ + libicu-dev \ + build-essential \ + cmake \ + zip \ + rpm \ + patchelf + + - name: Build DSQL + run: | + chmod +x scripts/build-dsql.sh + ./scripts/build-dsql.sh + + - name: Package for Distribution + run: | + chmod +x scripts/package.sh + ./scripts/package.sh + + - name: Test Package + run: | + chmod +x scripts/test-packaging.sh + ./scripts/test-packaging.sh --test-only + + - name: Upload Build Artifact + uses: actions/upload-artifact@v4 + with: + name: postgres-dsql-${{ matrix.os }}-${{ runner.arch }} + path: build/postgres-dsql.zip + + + - name: Upload RPM Artifact + if: runner.os == 'Linux' + uses: actions/upload-artifact@v4 + with: + name: postgres-dsql-rpm-${{ matrix.os }}-${{ runner.arch }} + path: "build/*.rpm" + + - name: Upload DEB Artifact + if: runner.os == 'Linux' + uses: actions/upload-artifact@v4 + with: + name: postgres-dsql-deb-${{ matrix.os }}-${{ runner.arch }} + path: "build/*.deb" + + release: + needs: build + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/tags/v') + steps: + - 
name: Download all artifacts + uses: actions/download-artifact@v4 + with: + path: artifacts + + - name: Display structure of downloaded files + run: ls -R artifacts + + - name: Prepare release files + run: | + mkdir -p release-files + + # Rename ZIP files to be unique + find artifacts -name "postgres-dsql.zip" | while read file; do + dir=$(dirname "$file") + artifact_name=$(basename "$dir") + # Extract platform info from artifact name + if [[ "$artifact_name" == *"macos"* ]]; then + if [[ "$artifact_name" == *"ARM64"* ]]; then + cp "$file" "release-files/postgres-dsql-macos-arm64.zip" + else + cp "$file" "release-files/postgres-dsql-macos-x64.zip" + fi + elif [[ "$artifact_name" == *"ubuntu"* ]]; then + if [[ "$artifact_name" == *"ARM64"* ]]; then + cp "$file" "release-files/postgres-dsql-linux-arm64.zip" + else + cp "$file" "release-files/postgres-dsql-linux-x64.zip" + fi + fi + done + + # Copy other files with original names + find artifacts -name "*.rpm" -exec cp {} release-files/ \; + find artifacts -name "*.deb" -exec cp {} release-files/ \; + + echo "Release files prepared:" + ls -la release-files/ + + - name: Create Release + uses: softprops/action-gh-release@v1 + with: + files: release-files/* + generate_release_notes: true + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 4e911395fe3ba..85567083dd7e4 100644 --- a/.gitignore +++ b/.gitignore @@ -43,3 +43,5 @@ lib*.pc /Release/ /tmp_install/ /portlock/ +# Build artifacts +build/ diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000000..0f3bdbe01c3a8 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "aws-dsql-auth"] + path = aws-dsql-auth + url = https://github.com/marcbowes/aws-dsql-auth.git diff --git a/aws-dsql-auth b/aws-dsql-auth new file mode 160000 index 0000000000000..3c53ec315ed61 --- /dev/null +++ b/aws-dsql-auth @@ -0,0 +1 @@ +Subproject commit 3c53ec315ed619715c56875e81476119c365ddac diff --git a/scripts/build-dsql.sh 
b/scripts/build-dsql.sh new file mode 100755 index 0000000000000..2a2f9f6197cab --- /dev/null +++ b/scripts/build-dsql.sh @@ -0,0 +1,115 @@ +#!/bin/bash +set -e + +echo "Building PostgreSQL with DSQL Authentication support" +echo "===================================================" + +# Determine OS type for library path configuration +if [[ "$OSTYPE" == "darwin"* ]]; then + echo "Detected macOS" + LIBRARY_PATH_VAR="DYLD_LIBRARY_PATH" + + # Base configuration with OpenSSL and readline + OS_SPECIFIC_CONFIG="--with-ssl=openssl --with-includes=/opt/homebrew/opt/openssl/include:/opt/homebrew/opt/readline/include --with-libraries=/opt/homebrew/opt/openssl/lib:/opt/homebrew/opt/readline/lib" + + # Check for ICU4C in Homebrew + if [ -d "/opt/homebrew/opt/icu4c" ]; then + echo " Detected Homebrew ICU4C installation" + # Use the Homebrew-maintained symlink to the current version + export PKG_CONFIG_PATH="/opt/homebrew/opt/icu4c/lib/pkgconfig:$PKG_CONFIG_PATH" + echo " Added ICU4C to build configuration" + elif [ -d "/usr/local/opt/icu4c" ]; then + # For Intel Macs with Homebrew installed in /usr/local + echo " Detected Homebrew ICU4C installation in /usr/local" + export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig:$PKG_CONFIG_PATH" + echo " Added ICU4C to build configuration" + else + echo " Warning: Homebrew ICU4C not detected, configure may fail if ICU is required" + fi +elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + echo "Detected Linux system" + LIBRARY_PATH_VAR="LD_LIBRARY_PATH" + + # Base configuration for Linux with system packages + OS_SPECIFIC_CONFIG="--with-ssl=openssl --with-icu" +else + echo "Detected Unix system (assuming Linux-like)" + LIBRARY_PATH_VAR="LD_LIBRARY_PATH" + OS_SPECIFIC_CONFIG="--with-ssl=openssl" +fi + +# Step 1: Initialize and build aws-dsql-auth +echo "Step 1: Setting up AWS DSQL Auth library..." + +# Check if aws-dsql-auth submodules are initialized +if [ ! 
-d "aws-dsql-auth/aws-sdk/aws-c-common/.git" ]; then + echo " Initializing aws-dsql-auth submodules..." + cd aws-dsql-auth + git submodule update --init --recursive + cd .. +else + echo " aws-dsql-auth submodules already initialized." +fi + +if [ ! -d "aws-dsql-auth/build/install/" ]; then + # Build aws-dsql-auth + echo " Building aws-dsql-auth library..." + cd aws-dsql-auth + ./build.sh + cd .. + echo " AWS DSQL Auth library built successfully!" +else + echo " aws-dsql-auth already built." +fi + +# Step 2: Configure PostgreSQL with SSL support +echo "Step 2: Configuring PostgreSQL with SSL support..." +if [ ! -f "config.status" ]; then + echo " Running configure ..." + ./configure $OS_SPECIFIC_CONFIG +else + echo " PostgreSQL already configured. If you need to reconfigure, run './configure $OS_SPECIFIC_CONFIG' manually." +fi + +# Step 3: Build libpq (PostgreSQL client library) +echo "Step 3: Building libpq..." +make -C src/interfaces/libpq +echo " libpq built successfully!" + +# Step 4: Build psql +echo "Step 4: Building psql..." +make -C src/bin/psql +echo " psql built successfully!" + +# Step 5: Build pgbench +echo "Step 5: Building pgbench..." +make -C src/bin/pgbench +echo " pgbench built successfully!" + +# Final instructions +echo "" +echo "Build completed successfully!" 
+echo "To run psql with DSQL authentication, use the following command:" +echo "" +echo " $LIBRARY_PATH_VAR=$(pwd)/src/interfaces/libpq \\" +echo " ./src/bin/psql/psql --dsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres" +echo "" +echo "Or with connection string format:" +echo "" +echo " $LIBRARY_PATH_VAR=$(pwd)/src/interfaces/libpq \\" +echo " ./src/bin/psql/psql --dsql \"dbname=postgres user=admin host=your-dsql-endpoint.example.com\"" +echo "" +echo "To run pgbench with DSQL authentication, use the following command:" +echo "" +echo " $LIBRARY_PATH_VAR=$(pwd)/src/interfaces/libpq \\" +echo " ./src/bin/pgbench/pgbench --dsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres" +echo "" +echo "For example, to run a simple benchmark test:" +echo "" +echo " $LIBRARY_PATH_VAR=$(pwd)/src/interfaces/libpq \\" +echo " ./src/bin/pgbench/pgbench --dsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres --initialize --scale=1" +echo "" +echo " $LIBRARY_PATH_VAR=$(pwd)/src/interfaces/libpq \\" +echo " ./src/bin/pgbench/pgbench --dsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres --time=60 --client=10" +echo "" +echo "Note: You need to have AWS credentials configured in your environment for DSQL authentication to work." 
diff --git a/scripts/install.sh b/scripts/install.sh new file mode 100755 index 0000000000000..0dcbba2d306d3 --- /dev/null +++ b/scripts/install.sh @@ -0,0 +1,265 @@ +#!/bin/bash +set -e + +REPO="marcbowes/postgres" +INSTALL_PATH="$HOME/.local" + +# Function to display error messages and exit +error_exit() { + echo "Error: $1" >&2 + exit 1 +} + +# Function to detect OS +detect_os() { + if [[ "$OSTYPE" == "darwin"* ]]; then + echo "macos" + elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + echo "linux" + else + error_exit "Unsupported operating system: $OSTYPE" + fi +} + +# Function to detect architecture +detect_arch() { + local arch=$(uname -m) + case "$arch" in + x86_64) + echo "X64" + ;; + aarch64|arm64) + echo "ARM64" + ;; + *) + error_exit "Unsupported architecture: $arch" + ;; + esac +} + +# Function to detect Linux distribution +detect_linux_distro() { + if command -v apt-get >/dev/null 2>&1; then + echo "debian" + elif command -v yum >/dev/null 2>&1 || command -v dnf >/dev/null 2>&1; then + echo "rhel" + else + echo "unknown" + fi +} + +# Function to install via package manager (Linux) +install_via_package() { + local os="$1" + local arch="$2" + local distro="$3" + + echo "Attempting package manager installation..." + + # Get latest release information + echo "Fetching latest release information..." + RELEASE_INFO=$(curl -s "https://api.github.com/repos/$REPO/releases/latest") + if [[ -z "$RELEASE_INFO" || "$RELEASE_INFO" == *"Not Found"* ]]; then + error_exit "Could not fetch release information. Check your internet connection." 
+ fi + + TAG_NAME=$(echo "$RELEASE_INFO" | grep -o '"tag_name": *"[^"]*"' | cut -d'"' -f4) + echo "Latest release found: $TAG_NAME" + + # Determine package type and download URL + local package_type="" + local download_url="" + local package_file="" + + if [[ "$distro" == "debian" ]]; then + package_type="deb" + # Convert arch format for DEB (X64 -> amd64, ARM64 -> arm64) + local deb_arch="" + if [[ "$arch" == "X64" ]]; then + deb_arch="amd64" + elif [[ "$arch" == "ARM64" ]]; then + deb_arch="arm64" + fi + package_file="postgres-dsql_1.0.0-1_${deb_arch}.deb" + elif [[ "$distro" == "rhel" ]]; then + package_type="rpm" + # Convert arch format for RPM (X64 -> x86_64, ARM64 -> aarch64) + local rpm_arch="" + if [[ "$arch" == "X64" ]]; then + rpm_arch="x86_64" + elif [[ "$arch" == "ARM64" ]]; then + rpm_arch="aarch64" + fi + package_file="postgres-dsql-1.0.0-1.${rpm_arch}.rpm" + else + echo "Unknown Linux distribution, falling back to ZIP installation..." + return 1 + fi + + # Find download URL for the package + download_url=$(echo "$RELEASE_INFO" | grep -o "\"browser_download_url\": *\"[^\"]*${package_file}\"" | cut -d'"' -f4) + if [[ -z "$download_url" ]]; then + echo "Package ${package_file} not found in release, falling back to ZIP installation..." + return 1 + fi + + # Download and install package + echo "Downloading ${package_type} package..." + TEMP_DIR=$(mktemp -d) + local temp_package="$TEMP_DIR/$package_file" + curl -L "$download_url" -o "$temp_package" + + echo "Installing package (may require sudo password)..." 
+ if [[ "$package_type" == "deb" ]]; then + if command -v apt >/dev/null 2>&1; then + sudo apt install -y "$temp_package" + else + sudo dpkg -i "$temp_package" + # Fix dependencies if needed + sudo apt-get install -f -y 2>/dev/null || true + fi + elif [[ "$package_type" == "rpm" ]]; then + if command -v dnf >/dev/null 2>&1; then + sudo dnf install -y "$temp_package" + elif command -v yum >/dev/null 2>&1; then + sudo yum install -y "$temp_package" + else + sudo rpm -ivh "$temp_package" + fi + fi + + # Clean up + rm -rf "$TEMP_DIR" + + echo "Package installation completed successfully!" + echo "PostgreSQL DSQL (pdsql) is now available system-wide." + return 0 +} + +# Function to install via ZIP extraction +install_via_zip() { + local os="$1" + local arch="$2" + + echo "Installing via ZIP extraction..." + + # Get latest release information + echo "Fetching latest release information..." + RELEASE_INFO=$(curl -s "https://api.github.com/repos/$REPO/releases/latest") + if [[ -z "$RELEASE_INFO" || "$RELEASE_INFO" == *"Not Found"* ]]; then + error_exit "Could not fetch release information. Check your internet connection." + fi + + TAG_NAME=$(echo "$RELEASE_INFO" | grep -o '"tag_name": *"[^"]*"' | cut -d'"' -f4) + echo "Latest release found: $TAG_NAME" + + # Find appropriate ZIP file based on OS and architecture + local zip_filename="" + local arch_suffix="" + + # Convert architecture format + if [[ "$arch" == "X64" ]]; then + arch_suffix="x64" + elif [[ "$arch" == "ARM64" ]]; then + arch_suffix="arm64" + fi + + if [[ "$os" == "macos" ]]; then + zip_filename="postgres-dsql-macos-${arch_suffix}.zip" + elif [[ "$os" == "linux" ]]; then + zip_filename="postgres-dsql-linux-${arch_suffix}.zip" + fi + + # Find download URL for the platform-specific zip file + DOWNLOAD_URL=$(echo "$RELEASE_INFO" | grep -o "\"browser_download_url\": *\"[^\"]*${zip_filename}\"" | cut -d'"' -f4) + if [[ -z "$DOWNLOAD_URL" ]]; then + error_exit "${zip_filename} not found in the latest release." 
+ fi + + # Create directories if they don't exist + mkdir -p "$INSTALL_PATH/bin" + mkdir -p "$INSTALL_PATH/lib" + + # Download the release + echo "Downloading release from $DOWNLOAD_URL..." + TEMP_DIR=$(mktemp -d) + curl -L "$DOWNLOAD_URL" -o "$TEMP_DIR/postgres-dsql.zip" + + # Extract the release + echo "Extracting files to $INSTALL_PATH..." + unzip -o "$TEMP_DIR/postgres-dsql.zip" -d "$TEMP_DIR" + + # Copy files to install location + cp -r "$TEMP_DIR/postgres-dsql/bin/"* "$INSTALL_PATH/bin/" + cp -r "$TEMP_DIR/postgres-dsql/lib/"* "$INSTALL_PATH/lib/" + + # Clean up temp files + rm -rf "$TEMP_DIR" + + # Make the binaries executable + chmod +x "$INSTALL_PATH/bin/pdsql" + chmod +x "$INSTALL_PATH/bin/pgbench" + + echo "ZIP installation completed successfully!" + echo "PostgreSQL DSQL tools installed to:" + echo " - pdsql (DSQL client): $INSTALL_PATH/bin/pdsql" + echo " - pgbench (benchmark tool): $INSTALL_PATH/bin/pgbench" + + # Check if installation path is in PATH + if [[ ":$PATH:" != *":$INSTALL_PATH/bin:"* ]]; then + echo "" + echo "NOTICE: Your PATH environment variable doesn't contain $INSTALL_PATH/bin" + echo "To add it to your PATH, add the following line to your shell configuration file:" + echo "" + echo " export PATH=\"$INSTALL_PATH/bin:\$PATH\"" + echo "" + echo "Shell configuration files:" + echo " - Bash: $HOME/.bashrc or $HOME/.bash_profile" + echo " - Zsh: $HOME/.zshrc" + echo " - Fish: $HOME/.config/fish/config.fish" + echo "" + echo "Then, reload your shell configuration or restart your terminal." + fi +} + +# Main installation logic +main() { + echo "PostgreSQL DSQL Universal Installer" + echo "===================================" + + # Detect system information + OS=$(detect_os) + ARCH=$(detect_arch) + + echo "Detected system: $OS $ARCH" + + if [[ "$OS" == "linux" ]]; then + DISTRO=$(detect_linux_distro) + echo "Detected Linux distribution type: $DISTRO" + + # Try package manager installation first, fall back to ZIP if it fails + if ! 
install_via_package "$OS" "$ARCH" "$DISTRO"; then + echo "Package installation failed or unavailable, trying ZIP installation..." + install_via_zip "$OS" "$ARCH" + fi + else + # macOS - use ZIP installation + install_via_zip "$OS" "$ARCH" + fi + + echo "" + echo "Installation completed! To verify, run:" + echo "" + echo " pdsql --version" + echo "" + echo "For usage help, run:" + echo "" + echo " pdsql --help" + echo "" + echo "Example DSQL connection:" + echo "" + echo " pdsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres" +} + +# Run main function +main "$@" diff --git a/scripts/package.sh b/scripts/package.sh new file mode 100755 index 0000000000000..262b88dd67ddd --- /dev/null +++ b/scripts/package.sh @@ -0,0 +1,375 @@ +#!/bin/bash +set -e + +# Package PostgreSQL DSQL client for distribution +# This script creates a standalone distribution with psql (renamed to pdsql), +# pgbench, and libpq that can be used without additional dependencies + +echo "Packaging PostgreSQL DSQL client" +echo "================================" + +# Path setup +ROOT_DIR=$(pwd) +BUILD_DIR="$ROOT_DIR/build" +DIST_NAME="postgres-dsql" +DIST_DIR="$BUILD_DIR/$DIST_NAME" +SRC_PSQL_BIN="$ROOT_DIR/src/bin/psql/psql" +SRC_PGBENCH_BIN="$ROOT_DIR/src/bin/pgbench/pgbench" +PSQL_BINARY_NAME="pdsql" +PGBENCH_BINARY_NAME="pgbench" + +# Detect OS and set appropriate library paths +if [[ "$OSTYPE" == "darwin"* ]]; then + echo "Detected macOS" + SRC_LIB="$ROOT_DIR/src/interfaces/libpq/libpq.5.dylib" + LIB_EXT="dylib" + PLATFORM="macos" +elif [[ "$OSTYPE" == "linux-gnu"* ]]; then + echo "Detected Linux" + SRC_LIB="$ROOT_DIR/src/interfaces/libpq/libpq.so.5" + LIB_EXT="so" + PLATFORM="linux" +else + echo "Error: Unsupported operating system: $OSTYPE" + echo "This script supports macOS and Linux only" + exit 1 +fi + +# Check if the build artifacts exist +if [ ! 
-f "$SRC_PSQL_BIN" ]; then + echo "Error: psql binary not found at $SRC_PSQL_BIN" + echo "Please run scripts/build-dsql.sh first" + exit 1 +fi + +if [ ! -f "$SRC_PGBENCH_BIN" ]; then + echo "Error: pgbench binary not found at $SRC_PGBENCH_BIN" + echo "Please run scripts/build-dsql.sh first" + exit 1 +fi + +if [ ! -f "$SRC_LIB" ]; then + echo "Error: libpq library not found at $SRC_LIB" + echo "Please run scripts/build-dsql.sh first" + exit 1 +fi + +# Create build directory +mkdir -p "$BUILD_DIR" + +# Clean any previous packaging attempts +if [ -d "$DIST_DIR" ]; then + echo "Cleaning previous packaging directory..." + rm -rf "$DIST_DIR" +fi + +# Create directory structure +mkdir -p "$DIST_DIR/bin" +mkdir -p "$DIST_DIR/lib" + +# Copy binaries and libraries +echo "Copying psql to $DIST_DIR/bin/$PSQL_BINARY_NAME" +cp "$SRC_PSQL_BIN" "$DIST_DIR/bin/$PSQL_BINARY_NAME" + +echo "Copying pgbench to $DIST_DIR/bin/$PGBENCH_BINARY_NAME" +cp "$SRC_PGBENCH_BIN" "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" + +echo "Copying libpq to $DIST_DIR/lib/" +cp "$SRC_LIB" "$DIST_DIR/lib/" + +# Handle platform-specific library setup +if [[ "$PLATFORM" == "macos" ]]; then + # Copy additional dylib if it exists + cp "$ROOT_DIR/src/interfaces/libpq/libpq.dylib" "$DIST_DIR/lib/" 2>/dev/null || true + + # Set up correct library paths in the binaries + echo "Updating library paths in $PSQL_BINARY_NAME binary..." + LIBRARY_PATH=$(otool -L "$DIST_DIR/bin/$PSQL_BINARY_NAME" | grep libpq | awk '{print $1}') + install_name_tool -change "$LIBRARY_PATH" "@loader_path/../lib/libpq.5.dylib" "$DIST_DIR/bin/$PSQL_BINARY_NAME" + + echo "Updating library paths in $PGBENCH_BINARY_NAME binary..." 
+ LIBRARY_PATH=$(otool -L "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" | grep libpq | awk '{print $1}') + install_name_tool -change "$LIBRARY_PATH" "@loader_path/../lib/libpq.5.dylib" "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" + + # Fix library itself to refer to itself by relative path + install_name_tool -id "@loader_path/libpq.5.dylib" "$DIST_DIR/lib/libpq.5.dylib" + + # Verify the changes + echo "Verifying library path changes:" + otool -L "$DIST_DIR/bin/$PSQL_BINARY_NAME" | grep libpq + otool -L "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" | grep libpq + otool -L "$DIST_DIR/lib/libpq.5.dylib" | grep libpq + +elif [[ "$PLATFORM" == "linux" ]]; then + # Copy additional .so files if they exist + cp "$ROOT_DIR/src/interfaces/libpq/libpq.so" "$DIST_DIR/lib/" 2>/dev/null || true + + # Set up RPATH for the binaries to find libraries in ../lib + echo "Setting RPATH for $PSQL_BINARY_NAME binary..." + patchelf --set-rpath '$ORIGIN/../lib' "$DIST_DIR/bin/$PSQL_BINARY_NAME" 2>/dev/null || { + echo "Warning: patchelf not available. Installing patchelf..." + if command -v apt-get >/dev/null 2>&1; then + sudo apt-get update && sudo apt-get install -y patchelf + patchelf --set-rpath '$ORIGIN/../lib' "$DIST_DIR/bin/$PSQL_BINARY_NAME" + patchelf --set-rpath '$ORIGIN/../lib' "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" + elif command -v yum >/dev/null 2>&1; then + sudo yum install -y patchelf + patchelf --set-rpath '$ORIGIN/../lib' "$DIST_DIR/bin/$PSQL_BINARY_NAME" + patchelf --set-rpath '$ORIGIN/../lib' "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" + else + echo "Warning: Could not install patchelf. The binary may not find libraries correctly." + fi + } + + echo "Setting RPATH for $PGBENCH_BINARY_NAME binary..." 
+ patchelf --set-rpath '$ORIGIN/../lib' "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" 2>/dev/null || echo "patchelf already handled above" + + # Verify the changes + echo "Verifying RPATH changes:" + ldd "$DIST_DIR/bin/$PSQL_BINARY_NAME" | grep libpq || echo "libpq dependency check complete for pdsql" + ldd "$DIST_DIR/bin/$PGBENCH_BINARY_NAME" | grep libpq || echo "libpq dependency check complete for pgbench" +fi + +# Create a ZIP archive +echo "Creating ZIP archive..." +ZIP_NAME="${DIST_NAME}.zip" +ZIP_PATH="$BUILD_DIR/$ZIP_NAME" +rm -f "$ZIP_PATH" +(cd "$BUILD_DIR" && zip -r "$ZIP_NAME" "$DIST_NAME") + +echo "Package created at $ZIP_PATH" + +# Create platform-specific packages for Linux only +if [[ "$PLATFORM" == "linux" ]]; then + echo "Creating RPM package..." + + # Create RPM build directory structure + RPM_BUILD_DIR="$BUILD_DIR/rpmbuild" + mkdir -p "$RPM_BUILD_DIR"/{BUILD,RPMS,SOURCES,SPECS,SRPMS} + + # Detect architecture for RPM + ARCH=$(uname -m) + case "$ARCH" in + x86_64) + RPM_ARCH="x86_64" + ;; + aarch64) + RPM_ARCH="aarch64" + ;; + arm64) + RPM_ARCH="aarch64" + ;; + *) + RPM_ARCH="$ARCH" + ;; + esac + + # Create spec file + SPEC_FILE="$RPM_BUILD_DIR/SPECS/postgres-dsql.spec" + cat > "$SPEC_FILE" << EOF +Name: postgres-dsql +Version: 1.0.0 +Release: 1%{?dist} +Summary: PostgreSQL DSQL client (pdsql) - AWS DSQL authentication enabled psql +License: PostgreSQL +URL: https://github.com/your-org/postgres-dsql +BuildArch: $RPM_ARCH + +%description +PostgreSQL DSQL client provides pdsql, a PostgreSQL client with AWS DSQL +authentication support. This package installs alongside existing PostgreSQL +installations without conflicts by using different binary and library names. 
+ +%prep +# No prep needed - files are already prepared + +%build +# No build needed - binaries are already built + +%install +mkdir -p %{buildroot}/opt/postgres-dsql/bin +mkdir -p %{buildroot}/opt/postgres-dsql/lib +mkdir -p %{buildroot}/usr/bin + +# Install binaries and libraries to /opt to avoid conflicts +cp %{_sourcedir}/bin/pdsql %{buildroot}/opt/postgres-dsql/bin/ +cp %{_sourcedir}/bin/pgbench %{buildroot}/opt/postgres-dsql/bin/ +cp %{_sourcedir}/lib/* %{buildroot}/opt/postgres-dsql/lib/ + +# Create symlinks in /usr/bin for easy access +ln -s /opt/postgres-dsql/bin/pdsql %{buildroot}/usr/bin/pdsql +ln -s /opt/postgres-dsql/bin/pgbench %{buildroot}/usr/bin/pgbench + +%files +/opt/postgres-dsql/bin/pdsql +/opt/postgres-dsql/bin/pgbench +/opt/postgres-dsql/lib/* +/usr/bin/pdsql +/usr/bin/pgbench + +%post +echo "PostgreSQL DSQL client installed successfully!" +echo "Use 'pdsql' command to connect to AWS DSQL databases." +echo "Example: pdsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres" +echo "" +echo "Use 'pgbench' command to run PostgreSQL benchmarks against DSQL databases." +echo "Example: pgbench --dsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres --initialize --scale=1" + +%changelog +* $(date +'%a %b %d %Y') Build System - 1.0.0-1 +- Initial RPM package for PostgreSQL DSQL client +EOF + + # Copy files to SOURCES directory with the structure expected by the spec + mkdir -p "$RPM_BUILD_DIR/SOURCES/bin" + mkdir -p "$RPM_BUILD_DIR/SOURCES/lib" + cp "$DIST_DIR/bin/pdsql" "$RPM_BUILD_DIR/SOURCES/bin/" + cp "$DIST_DIR/bin/pgbench" "$RPM_BUILD_DIR/SOURCES/bin/" + cp "$DIST_DIR/lib"/* "$RPM_BUILD_DIR/SOURCES/lib/" + + # Build the RPM + echo "Building RPM package..." 
+ if command -v rpmbuild >/dev/null 2>&1; then + rpmbuild --define "_topdir $RPM_BUILD_DIR" -bb "$SPEC_FILE" + + # Find and copy the generated RPM + RPM_FILE=$(find "$RPM_BUILD_DIR/RPMS" -name "*.rpm" | head -1) + if [ -n "$RPM_FILE" ]; then + cp "$RPM_FILE" "$BUILD_DIR/" + RPM_NAME=$(basename "$RPM_FILE") + echo "RPM package created at $BUILD_DIR/$RPM_NAME" + else + echo "Warning: RPM file not found after build" + fi + else + echo "Warning: rpmbuild not available. Installing rpm-build..." + if command -v yum >/dev/null 2>&1; then + sudo yum install -y rpm-build + elif command -v dnf >/dev/null 2>&1; then + sudo dnf install -y rpm-build + elif command -v apt-get >/dev/null 2>&1; then + sudo apt-get update && sudo apt-get install -y rpm + else + echo "Error: Could not install rpm-build. RPM package not created." + echo "Please install rpm-build manually and re-run this script." + fi + + # Retry RPM build if rpmbuild is now available + if command -v rpmbuild >/dev/null 2>&1; then + rpmbuild --define "_topdir $RPM_BUILD_DIR" -bb "$SPEC_FILE" + RPM_FILE=$(find "$RPM_BUILD_DIR/RPMS" -name "*.rpm" | head -1) + if [ -n "$RPM_FILE" ]; then + cp "$RPM_FILE" "$BUILD_DIR/" + RPM_NAME=$(basename "$RPM_FILE") + echo "RPM package created at $BUILD_DIR/$RPM_NAME" + fi + fi + fi + + echo "" + echo "RPM Installation Instructions:" + echo " sudo rpm -ivh $RPM_NAME" + echo " # Or to upgrade: sudo rpm -Uvh $RPM_NAME" + echo "" + echo "RPM Removal Instructions:" + echo " sudo rpm -e postgres-dsql" + echo "" + + # Create DEB package + echo "Creating DEB package..." 
+ + # Detect architecture for DEB + case "$ARCH" in + x86_64) + DEB_ARCH="amd64" + ;; + aarch64) + DEB_ARCH="arm64" + ;; + arm64) + DEB_ARCH="arm64" + ;; + *) + DEB_ARCH="$ARCH" + ;; + esac + + # Create DEB build directory structure + DEB_BUILD_DIR="$BUILD_DIR/debbuild" + DEB_PKG_DIR="$DEB_BUILD_DIR/postgres-dsql_1.0.0-1_$DEB_ARCH" + mkdir -p "$DEB_PKG_DIR"/{DEBIAN,opt/postgres-dsql/{bin,lib},usr/bin} + + # Copy files + cp "$DIST_DIR/bin/pdsql" "$DEB_PKG_DIR/opt/postgres-dsql/bin/" + cp "$DIST_DIR/bin/pgbench" "$DEB_PKG_DIR/opt/postgres-dsql/bin/" + cp "$DIST_DIR/lib"/* "$DEB_PKG_DIR/opt/postgres-dsql/lib/" + + # Create symlinks + ln -s /opt/postgres-dsql/bin/pdsql "$DEB_PKG_DIR/usr/bin/pdsql" + ln -s /opt/postgres-dsql/bin/pgbench "$DEB_PKG_DIR/usr/bin/pgbench" + + # Create control file + cat > "$DEB_PKG_DIR/DEBIAN/control" << EOF +Package: postgres-dsql +Version: 1.0.0-1 +Section: database +Priority: optional +Architecture: $DEB_ARCH +Maintainer: Build System +Description: PostgreSQL DSQL client (pdsql) - AWS DSQL authentication enabled psql + PostgreSQL DSQL client provides pdsql, a PostgreSQL client with AWS DSQL + authentication support. This package installs alongside existing PostgreSQL + installations without conflicts by using different binary and library names. +Homepage: https://github.com/your-org/postgres-dsql +EOF + + # Create postinst script + cat > "$DEB_PKG_DIR/DEBIAN/postinst" << 'EOF' +#!/bin/bash +echo "PostgreSQL DSQL client installed successfully!" +echo "Use 'pdsql' command to connect to AWS DSQL databases." +echo "Example: pdsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres" +echo "" +echo "Use 'pgbench' command to run PostgreSQL benchmarks against DSQL databases." +echo "Example: pgbench --dsql --host=your-dsql-endpoint.example.com --user=admin --dbname=postgres --initialize --scale=1" +EOF + chmod 755 "$DEB_PKG_DIR/DEBIAN/postinst" + + # Build the DEB package + echo "Building DEB package..." 
+ if command -v dpkg-deb >/dev/null 2>&1; then + dpkg-deb --build "$DEB_PKG_DIR" + DEB_FILE="$DEB_PKG_DIR.deb" + if [ -f "$DEB_FILE" ]; then + mv "$DEB_FILE" "$BUILD_DIR/" + DEB_NAME=$(basename "$DEB_FILE") + echo "DEB package created at $BUILD_DIR/$DEB_NAME" + else + echo "Warning: DEB file not found after build" + fi + else + echo "Warning: dpkg-deb not available. DEB package not created." + fi + + echo "" + echo "DEB Installation Instructions:" + echo " sudo dpkg -i $DEB_NAME" + echo " # Or: sudo apt install ./$DEB_NAME" + echo "" + echo "DEB Removal Instructions:" + echo " sudo apt remove postgres-dsql" + echo "" +fi + +echo "Done!" + +# For testing, you can: +# unzip -o build/postgres-dsql.zip -d /tmp +# /tmp/postgres-dsql/bin/pdsql --version +# +# On Linux, you may also need to ensure the library path is set: +# LD_LIBRARY_PATH=/tmp/postgres-dsql/lib /tmp/postgres-dsql/bin/pdsql --version +# +# For RPM testing: +# sudo rpm -ivh build/postgres-dsql-*.rpm +# pdsql --version diff --git a/scripts/test-packaging.sh b/scripts/test-packaging.sh new file mode 100755 index 0000000000000..92dd60a0bd126 --- /dev/null +++ b/scripts/test-packaging.sh @@ -0,0 +1,53 @@ +#!/bin/bash +set -e + +# Test script to verify local packaging +echo "Testing local packaging workflow" +echo "===============================" + +# Check if we should skip building and packaging +SKIP_BUILD_PACKAGE=0 +if [ "$1" == "--test-only" ]; then + SKIP_BUILD_PACKAGE=1 +fi + +if [ $SKIP_BUILD_PACKAGE -eq 0 ]; then + # Build DSQL if needed + if [ ! -f "src/bin/psql/psql" ]; then + echo "Building DSQL first..." + ./scripts/build-dsql.sh + else + echo "DSQL already built, skipping build step" + fi + + # Package the build + echo "Packaging DSQL..." + ./scripts/package.sh +else + echo "Skipping build and package steps, testing only..." +fi + +# Test the packaged binary +echo "Testing packaged binary..." 
+if [ -f "build/postgres-dsql.zip" ]; then + # Clean any previous test + rm -rf /tmp/postgres-dsql + + # Extract and test + unzip -o build/postgres-dsql.zip -d /tmp + + echo "Running pdsql --version to verify:" + /tmp/postgres-dsql/bin/pdsql --version + + if [ $? -eq 0 ]; then + echo "✅ Test passed! Package is working correctly." + else + echo "❌ Test failed! Please check the package." + exit 1 + fi +else + echo "❌ Package file not found at build/postgres-dsql.zip!" + exit 1 +fi + +echo "All tests completed successfully." diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index 497a936c141f3..49adc6dfcc1fe 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -68,6 +68,7 @@ #include "pgbench.h" #include "port/pg_bitutils.h" #include "portability/instr_time.h" +#include "fe-dsql-auth.h" /* X/Open (XSI) requires to provide M_PI, but core POSIX does not */ #ifndef M_PI @@ -771,6 +772,8 @@ static bool verbose_errors = false; /* print verbose messages of all errors */ static bool exit_on_abort = false; /* exit when any client is aborted */ +static bool dsql = false; /* --dsql command line option */ + /* Builtin test scripts */ typedef struct BuiltinScript { @@ -849,6 +852,10 @@ static const PsqlScanCallbacks pgbench_callbacks = { NULL, /* don't need get_variable functionality */ }; +static bool is_dsql() { + return strcmp(getenv("PGDSQL"), "1") == 0; +} + static char get_table_relkind(PGconn *con, const char *table) { @@ -4852,26 +4859,26 @@ initCreateTables(PGconn *con) static const struct ddlinfo DDLs[] = { { "pgbench_history", - "tid int,bid int,aid int,delta int,mtime timestamp,filler char(22)", - "tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)", + "id uuid primary key default gen_random_uuid(),tid int,bid int,aid int,delta int,mtime timestamp,filler char(22)", + "id uuid primary key default gen_random_uuid(),tid int,bid int,aid bigint,delta int,mtime timestamp,filler char(22)", 0 }, { "pgbench_tellers", - 
"tid int not null,bid int,tbalance int,filler char(84)", - "tid int not null,bid int,tbalance int,filler char(84)", + "tid int primary key,bid int,tbalance int,filler char(84)", + "tid int primary key,bid int,tbalance int,filler char(84)", 1 }, { "pgbench_accounts", - "aid int not null,bid int,abalance int,filler char(84)", - "aid bigint not null,bid int,abalance int,filler char(84)", + "aid int primary key,bid int,abalance int,filler char(84)", + "aid bigint primary key,bid int,abalance int,filler char(84)", 1 }, { "pgbench_branches", - "bid int not null,bbalance int,filler char(88)", - "bid int not null,bbalance int,filler char(88)", + "bid int primary key,bbalance int,filler char(88)", + "bid int primary key,bbalance int,filler char(88)", 1 } }; @@ -4892,14 +4899,16 @@ initCreateTables(PGconn *con) ddl->table, (scale >= SCALE_32BIT_THRESHOLD) ? ddl->bigcols : ddl->smcols); - /* Partition pgbench_accounts table */ - if (partition_method != PART_NONE && strcmp(ddl->table, "pgbench_accounts") == 0) - appendPQExpBuffer(&query, - " partition by %s (aid)", PARTITION_METHOD[partition_method]); - else if (ddl->declare_fillfactor) - { - /* fillfactor is only expected on actual tables */ - appendPQExpBuffer(&query, " with (fillfactor=%d)", fillfactor); + if (!is_dsql()) { + /* Partition pgbench_accounts table */ + if (partition_method != PART_NONE && strcmp(ddl->table, "pgbench_accounts") == 0) + appendPQExpBuffer(&query, + " partition by %s (aid)", PARTITION_METHOD[partition_method]); + else if (ddl->declare_fillfactor) + { + /* fillfactor is only expected on actual tables */ + appendPQExpBuffer(&query, " with (fillfactor=%d)", fillfactor); + } } if (tablespace != NULL) @@ -4926,11 +4935,17 @@ initCreateTables(PGconn *con) static void initTruncateTables(PGconn *con) { - executeStatement(con, "truncate table " + if (!is_dsql()) { + executeStatement(con, "truncate table " "pgbench_accounts, " "pgbench_branches, " "pgbench_history, " "pgbench_tellers"); + } else { + // 
TODO: DSQL mode needs to do deletes in batches. + // This is not implemented because it's relatively easy to simply + // drop all tables and re-init with -i. + } } static void @@ -4984,8 +4999,9 @@ initPopulateTable(PGconn *con, const char *table, int64 base, initPQExpBuffer(&sql); /* Use COPY with FREEZE on v14 and later for all ordinary tables */ + // XXX: DSQL doesn't support (or need) freeze. if ((PQserverVersion(con) >= 140000) && - get_table_relkind(con, table) == RELKIND_RELATION) + get_table_relkind(con, table) == RELKIND_RELATION && !is_dsql()) copy_statement_fmt = "copy %s from stdin with (freeze on)"; @@ -4995,16 +5011,22 @@ initPopulateTable(PGconn *con, const char *table, int64 base, else if (n == -1) pg_fatal("invalid format string"); - res = PQexec(con, copy_statement); - - if (PQresultStatus(res) != PGRES_COPY_IN) - pg_fatal("unexpected copy in result: %s", PQerrorMessage(con)); - PQclear(res); - start = pg_time_now(); for (k = 0; k < total; k++) { + if (k == 0) { + if (is_dsql()) { + executeStatement(con, "begin"); + } + + res = PQexec(con, copy_statement); + + if (PQresultStatus(res) != PGRES_COPY_IN) + pg_fatal("unexpected copy in result: %s", PQerrorMessage(con)); + PQclear(res); + } + int64 j = k + 1; init_row(&sql, k); @@ -5014,6 +5036,21 @@ initPopulateTable(PGconn *con, const char *table, int64 base, if (CancelRequested) break; + // XXX: Start a new transaction every 1000 rows + if (is_dsql() && (k > 0 && k % 1000 == 0)) { + if (PQputline(con, "\\.\n")) + pg_fatal("very last PQputline failed"); + if (PQendcopy(con)) + pg_fatal("PQendcopy failed"); + + executeStatement(con, "commit"); + executeStatement(con, "begin"); + res = PQexec(con, copy_statement); + if (PQresultStatus(res) != PGRES_COPY_IN) + pg_fatal("unexpected copy in result: %s", PQerrorMessage(con)); + PQclear(res); + } + /* * If we want to stick with the original logging, print a message each * 100k inserted rows. 
@@ -5076,6 +5113,10 @@ initPopulateTable(PGconn *con, const char *table, int64 base, if (PQendcopy(con)) pg_fatal("PQendcopy failed"); + if (is_dsql()) { + executeStatement(con, "commit"); + } + termPQExpBuffer(&sql); } @@ -5090,11 +5131,14 @@ initGenerateDataClientSide(PGconn *con) { fprintf(stderr, "generating data (client-side)...\n"); + // XXX: On DSQL we do batch inserts within populate. + if (!is_dsql()) { /* * we do all of this in one transaction to enable the backend's * data-loading optimizations */ executeStatement(con, "begin"); + } /* truncate away any old data */ initTruncateTables(con); @@ -5107,7 +5151,9 @@ initGenerateDataClientSide(PGconn *con) initPopulateTable(con, "pgbench_tellers", ntellers, initTeller); initPopulateTable(con, "pgbench_accounts", naccounts, initAccount); + if (!is_dsql()) { executeStatement(con, "commit"); + } } /* @@ -5178,6 +5224,11 @@ initVacuum(PGconn *con) static void initCreatePKeys(PGconn *con) { + // Schema has been updated to always have PKs. 
+ if (is_dsql()) { + return; + } + static const char *const DDLINDEXes[] = { "alter table pgbench_branches add primary key (bid)", "alter table pgbench_tellers add primary key (tid)", @@ -6705,6 +6756,7 @@ main(int argc, char **argv) {"verbose-errors", no_argument, NULL, 15}, {"exit-on-abort", no_argument, NULL, 16}, {"debug", no_argument, NULL, 17}, + {"dsql", no_argument, NULL, 18}, {NULL, 0, NULL, 0} }; @@ -7058,6 +7110,9 @@ main(int argc, char **argv) case 17: /* debug */ pg_logging_increase_verbosity(); break; + case 18: /* dsql */ + dsql = true; + break; default: /* getopt_long already emitted a complaint */ pg_log_error_hint("Try \"%s --help\" for more information.", progname); @@ -7065,6 +7120,36 @@ main(int argc, char **argv) } } + if (dsql) + { + setenv("PGDSQL", "1", 1); + is_no_vacuum = true; + foreign_keys = false; + + /* Initialize DSQL token generator */ + if (dsql_initialize_token_generator() != 0) + { + pg_fatal("Failed to initialize DSQL token generator"); + } + + /* Validate AWS credentials */ + { + char *err_msg = NULL; + if (dsql_validate_aws_credentials(&err_msg) != 0) + { + if (err_msg) + { + pg_fatal("DSQL credential validation failed: %s", err_msg); + free(err_msg); + } + else + { + pg_fatal("DSQL credential validation failed"); + } + } + } + } + /* set default script if none */ if (num_scripts == 0 && !is_init_mode) { diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 83e84a778411a..6b2886544b798 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -4038,6 +4038,11 @@ do_connect(enum trivalue reuse_previous_specification, success = false; } + /* When in DSQL mode, we never reuse passwords (as tokens may have expired). */ + if (pset.dsql) { + keep_password = false; + } + /* * If the user asked to be prompted for a password, ask for one now. 
If * not, use the password from the old connection, provided the username @@ -4048,7 +4053,7 @@ do_connect(enum trivalue reuse_previous_specification, * the postmaster's log. But libpq offers no API that would let us obtain * a password and then continue with the first connection attempt. */ - if (pset.getPassword == TRI_YES && success) + if (pset.getPassword == TRI_YES && success && !pset.dsql) { bool canceled = false; diff --git a/src/bin/psql/common.h b/src/bin/psql/common.h index 7f1a23de1e82d..d587604a7bcd3 100644 --- a/src/bin/psql/common.h +++ b/src/bin/psql/common.h @@ -14,7 +14,6 @@ #include "fe_utils/print.h" #include "fe_utils/psqlscan.h" #include "libpq-fe.h" - extern bool openQueryOutputFile(const char *fname, FILE **fout, bool *is_pipe); extern bool setQFout(const char *fname); diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c index db6adec8b692b..e43f90982137e 100644 --- a/src/bin/psql/help.c +++ b/src/bin/psql/help.c @@ -57,9 +57,9 @@ usage(unsigned short int pager) */ initPQExpBuffer(&buf); - HELP0("psql is the PostgreSQL interactive terminal.\n\n"); + HELP0("psql (with DSQL support) is the PostgreSQL interactive terminal.\n\n"); HELP0("Usage:\n"); - HELP0(" psql [OPTION]... [DBNAME [USERNAME]]\n\n"); + HELP0(" pdsql [OPTION]... 
[DBNAME [USERNAME]]\n\n"); HELP0("General options:\n"); HELP0(" -c, --command=COMMAND run only single command (SQL or internal) and exit\n"); @@ -113,6 +113,7 @@ usage(unsigned short int pager) HELP0(" -U, --username=USERNAME database user name\n"); HELP0(" -w, --no-password never prompt for password\n"); HELP0(" -W, --password force password prompt (should happen automatically)\n"); + HELP0(" --dsql enable Aurora DSQL mode\n"); HELP0("\nFor more information, type \"\\?\" (for internal commands) or \"\\help\" (for SQL\n" "commands) from within psql, or consult the psql section in the PostgreSQL\n" diff --git a/src/bin/psql/settings.h b/src/bin/psql/settings.h index fd82303f776c4..75997b5f068f9 100644 --- a/src/bin/psql/settings.h +++ b/src/bin/psql/settings.h @@ -169,6 +169,7 @@ typedef struct _psqlSettings bool singlestep; bool hide_compression; bool hide_tableam; + bool dsql; /* --dsql command line option */ int fetch_count; int histsize; int ignoreeof; diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c index 249b6aa516902..abdcb57abf589 100644 --- a/src/bin/psql/startup.c +++ b/src/bin/psql/startup.c @@ -6,6 +6,7 @@ * src/bin/psql/startup.c */ #include "postgres_fe.h" +#include "libpq-fe.h" #ifndef WIN32 #include @@ -26,6 +27,8 @@ #include "mainloop.h" #include "settings.h" +#include "fe-dsql-auth.h" + /* * Global psql options */ @@ -186,6 +189,7 @@ main(int argc, char *argv[]) pset.notty = (!isatty(fileno(stdin)) || !isatty(fileno(stdout))); pset.getPassword = TRI_DEFAULT; + pset.dsql = false; /* Initialize dsql flag to false */ EstablishVariableSpace(); @@ -210,7 +214,40 @@ main(int argc, char *argv[]) SetVariable(pset.vars, "PIPELINE_COMMAND_COUNT", "0"); SetVariable(pset.vars, "PIPELINE_RESULT_COUNT", "0"); + /* Automatically enable dsql mode if the program is named "pdsql" */ + if (strcmp(pset.progname, "pdsql") == 0) + pset.dsql = true; + parse_psql_options(argc, argv, &options); + + if (pset.dsql) + { + setenv("PGDSQL", "1", 1); + 
pset.getPassword = TRI_NO; + + /* Initialize DSQL token generator */ + if (dsql_initialize_token_generator() != 0) + { + pg_fatal("Failed to initialize DSQL token generator"); + } + + /* Validate AWS credentials */ + { + char *err_msg = NULL; + if (dsql_validate_aws_credentials(&err_msg) != 0) + { + if (err_msg) + { + pg_fatal("DSQL credential validation failed: %s", err_msg); + free(err_msg); + } + else + { + pg_fatal("DSQL credential validation failed"); + } + } + } + } /* * If no action was specified and we're in non-interactive mode, treat it @@ -274,6 +311,7 @@ main(int argc, char *argv[]) values[7] = NULL; new_pass = false; + pset.db = PQconnectdbParams(keywords, values, true); free(keywords); free(values); @@ -525,6 +563,7 @@ parse_psql_options(int argc, char *argv[], struct adhoc_opts *options) {"no-psqlrc", no_argument, NULL, 'X'}, {"help", optional_argument, NULL, 1}, {"csv", no_argument, NULL, 2}, + {"dsql", no_argument, NULL, 3}, {NULL, 0, NULL, 0} }; @@ -718,6 +757,9 @@ parse_psql_options(int argc, char *argv[], struct adhoc_opts *options) case 2: pset.popt.topt.format = PRINT_CSV; break; + case 3: + pset.dsql = true; + break; default: unknown_option: /* getopt_long already emitted a complaint */ diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile index 853aab4b1b886..883aaa78da118 100644 --- a/src/interfaces/libpq/Makefile +++ b/src/interfaces/libpq/Makefile @@ -44,7 +44,8 @@ OBJS = \ legacy-pqsignal.o \ libpq-events.o \ pqexpbuffer.o \ - fe-auth.o + fe-auth.o \ + fe-dsql-auth.o # File shared across all SSL implementations supported. 
ifneq ($(with_ssl),no) @@ -79,6 +80,35 @@ OBJS += \ win32.o endif +AWS_DSQL_AUTH_CPPFLAGS := $(addprefix -I,$(shell find $(top_srcdir)/aws-dsql-auth/build/install -type d -name include)) +AWS_DSQL_AUTH_ALL_LIBS := $(shell find $(top_srcdir)/aws-dsql-auth/build/install/ -type f -name '*.a') + +# Custom rule for fe-dsql-auth.o that hides all symbols except the public API +fe-dsql-auth.o: fe-dsql-auth.c + $(CC) $(CFLAGS) $(CPPFLAGS) $(AWS_DSQL_AUTH_CPPFLAGS) -c fe-dsql-auth.c -o fe-dsql-auth-temp.o +ifeq ($(PORTNAME), linux) + # Link with AWS libraries using start-group to resolve dependencies + $(LD) -r -o fe-dsql-auth-with-aws.o fe-dsql-auth-temp.o --start-group $(AWS_DSQL_AUTH_ALL_LIBS) --end-group + # Create a list of symbols to keep (only the public API from fe-dsql-auth.h) + echo "dsql_initialize_token_generator" > keep-symbols.txt + echo "dsql_generate_token" >> keep-symbols.txt + echo "dsql_validate_aws_credentials" >> keep-symbols.txt + echo "dsql_cleanup" >> keep-symbols.txt + # Hide all symbols except the ones we want to keep + objcopy --keep-global-symbols=keep-symbols.txt fe-dsql-auth-with-aws.o $@ + rm -f fe-dsql-auth-temp.o fe-dsql-auth-with-aws.o keep-symbols.txt +else ifeq ($(PORTNAME), darwin) + # macOS: Use ld with exported symbols list + echo "_dsql_initialize_token_generator" > exported-symbols.txt + echo "_dsql_generate_token" >> exported-symbols.txt + echo "_dsql_validate_aws_credentials" >> exported-symbols.txt + echo "_dsql_cleanup" >> exported-symbols.txt + $(LD) -r -o $@ fe-dsql-auth-temp.o -exported_symbols_list exported-symbols.txt $(AWS_DSQL_AUTH_ALL_LIBS) + rm -f fe-dsql-auth-temp.o exported-symbols.txt +else + $(LD) -r -o $@ fe-dsql-auth-temp.o --whole-archive $(AWS_DSQL_AUTH_ALL_LIBS) + rm -f fe-dsql-auth-temp.o +endif # Add libraries that libpq depends (or might depend) on into the # shared library link. 
(The order in which you list them here doesn't @@ -88,6 +118,11 @@ endif SHLIB_LINK_INTERNAL = -lpgcommon_shlib -lpgport_shlib ifneq ($(PORTNAME), win32) SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi_krb5 -lgss -lgssapi -lssl -lsocket -lnsl -lresolv -lintl -lm, $(LIBS)) $(LDAP_LIBS_FE) $(PTHREAD_LIBS) + +# Add macOS-specific frameworks +ifeq ($(PORTNAME),darwin) +SHLIB_LINK += -framework CoreFoundation -framework Security -framework Network +endif else SHLIB_LINK += $(filter -lcrypt -ldes -lcom_err -lcrypto -lk5crypto -lkrb5 -lgssapi32 -lssl -lsocket -lnsl -lresolv -lintl -lm $(PTHREAD_LIBS), $(LIBS)) $(LDAP_LIBS_FE) endif @@ -144,7 +179,7 @@ $(stlib): $(OBJS_STATIC) libpq-refs-stamp: $(shlib) ifneq ($(enable_coverage), yes) ifeq (,$(filter solaris,$(PORTNAME))) - @if nm -A -u $< 2>/dev/null | grep -v -e __cxa_atexit -e __tsan_func_exit | grep exit; then \ + @if nm -A -u $< 2>/dev/null | grep -v -e __cxa_atexit -e __tsan_func_exit | grep -v s2n_disable_atexit | grep exit; then \ echo 'libpq must not be calling any function which invokes exit'; exit 1; \ fi endif @@ -168,6 +203,7 @@ install: all installdirs install-lib $(INSTALL_DATA) $(srcdir)/libpq-events.h '$(DESTDIR)$(includedir)' $(INSTALL_DATA) $(srcdir)/libpq-int.h '$(DESTDIR)$(includedir_internal)' $(INSTALL_DATA) $(srcdir)/fe-auth-sasl.h '$(DESTDIR)$(includedir_internal)' + $(INSTALL_DATA) $(srcdir)/fe-dsql-auth.h '$(DESTDIR)$(includedir_internal)' $(INSTALL_DATA) $(srcdir)/pqexpbuffer.h '$(DESTDIR)$(includedir_internal)' $(INSTALL_DATA) $(srcdir)/pg_service.conf.sample '$(DESTDIR)$(datadir)/pg_service.conf.sample' @@ -190,12 +226,13 @@ uninstall: uninstall-lib rm -f '$(DESTDIR)$(includedir)/libpq-events.h' rm -f '$(DESTDIR)$(includedir_internal)/libpq-int.h' rm -f '$(DESTDIR)$(includedir_internal)/fe-auth-sasl.h' + rm -f '$(DESTDIR)$(includedir_internal)/fe-dsql-auth.h' rm -f '$(DESTDIR)$(includedir_internal)/pqexpbuffer.h' rm -f 
'$(DESTDIR)$(datadir)/pg_service.conf.sample' clean distclean: clean-lib $(MAKE) -C test $@ rm -rf tmp_check - rm -f $(OBJS) $(OBJS_SHLIB) $(OBJS_STATIC) pthread.h libpq-refs-stamp + rm -f $(OBJS) $(OBJS_SHLIB) $(OBJS_STATIC) pthread.h libpq-refs-stamp fe-dsql-auth-temp.o # Might be left over from a Win32 client-only build rm -f pg_config_paths.h diff --git a/src/interfaces/libpq/exports.txt b/src/interfaces/libpq/exports.txt index 0625cf39e9af3..7ccf0b80cbd03 100644 --- a/src/interfaces/libpq/exports.txt +++ b/src/interfaces/libpq/exports.txt @@ -211,3 +211,7 @@ PQgetAuthDataHook 208 PQdefaultAuthDataHook 209 PQfullProtocolVersion 210 appendPQExpBufferVA 211 +dsql_initialize_token_generator 212 +dsql_generate_token 213 +dsql_validate_aws_credentials 214 +dsql_cleanup 215 diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index ccb01aad36109..55cd08889ca0e 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -23,10 +23,12 @@ #include #include "common/base64.h" +#include "common/logging.h" #include "common/ip.h" #include "common/link-canary.h" #include "common/scram-common.h" #include "common/string.h" +#include "fe-dsql-auth.h" #include "fe-auth.h" #include "fe-auth-oauth.h" #include "libpq-fe.h" @@ -221,7 +223,7 @@ static const internalPQconninfoOption PQconninfoOptions[] = { "Connect-timeout", "", 10, /* strlen(INT32_MAX) == 10 */ offsetof(struct pg_conn, connect_timeout)}, - {"dbname", "PGDATABASE", NULL, NULL, + {"dbname", "PGDATABASE", "postgres", NULL, "Database-Name", "", 20, offsetof(struct pg_conn, dbName)}, @@ -279,11 +281,11 @@ static const internalPQconninfoOption PQconninfoOptions[] = { * parameters have no effect on non-SSL connections, so there is no reason * to exclude them since none of them are mandatory. 
*/ - {"sslmode", "PGSSLMODE", DefaultSSLMode, NULL, + {"sslmode", "PGSSLMODE", "verify-full", NULL, "SSL-Mode", "", 12, /* sizeof("verify-full") == 12 */ offsetof(struct pg_conn, sslmode)}, - {"sslnegotiation", "PGSSLNEGOTIATION", DefaultSSLNegotiation, NULL, + {"sslnegotiation", "PGSSLNEGOTIATION", "direct", NULL, "SSL-Negotiation", "", 9, /* sizeof("postgres") == 9 */ offsetof(struct pg_conn, sslnegotiation)}, @@ -307,7 +309,7 @@ static const internalPQconninfoOption PQconninfoOptions[] = { "SSL-Client-Key-Password", "*", 20, offsetof(struct pg_conn, sslpassword)}, - {"sslrootcert", "PGSSLROOTCERT", NULL, NULL, + {"sslrootcert", "PGSSLROOTCERT", "system", NULL, "SSL-Root-Certificate", "", 64, offsetof(struct pg_conn, sslrootcert)}, @@ -879,6 +881,12 @@ PQconnectStartParams(const char *const *keywords, if (conn == NULL) return NULL; + if (strcmp(getenv("PGDSQL"), "1") == 0) { + conn->is_dsql = true; + conn->sslmode = strdup("direct"); + conn->auth_required = true; + } + /* * Parse the conninfo arrays */ @@ -1418,11 +1426,39 @@ pqConnectOptions2(PGconn *conn) goto oom_error; } + /* + * In DSQL mode, generate auth tokens. + */ + if (conn->is_dsql) + { + for (i = 0; i < conn->nconnhost; i++) + { + bool is_admin; + char *token; + char *err_msg = NULL; + const char *pwhost = conn->connhost[i].host; + if (pwhost == NULL || pwhost[0] == '\0') + pwhost = conn->connhost[i].hostaddr; + + is_admin = strcmp("admin", conn->pguser) == 0; + token = dsql_generate_token(pwhost, is_admin, &err_msg); + if (!token) + { + libpq_append_conn_error(conn, "DSQL token generation failed for host=%s: %s", + pwhost, err_msg ? err_msg : "unknown error"); + if (err_msg) + free(err_msg); + } + else + conn->connhost[i].password = token; + } + } + /* * If password was not given, try to look it up in password file. Note * that the result might be different for each host/port pair. 
*/ - if (conn->pgpass == NULL || conn->pgpass[0] == '\0') + if ((conn->pgpass == NULL || conn->pgpass[0] == '\0') && !conn->is_dsql) { /* If password file wasn't specified, use ~/PGPASSFILE */ if (conn->pgpassfile == NULL || conn->pgpassfile[0] == '\0') diff --git a/src/interfaces/libpq/fe-dsql-auth.c b/src/interfaces/libpq/fe-dsql-auth.c new file mode 100644 index 0000000000000..6163302a38cab --- /dev/null +++ b/src/interfaces/libpq/fe-dsql-auth.c @@ -0,0 +1,486 @@ +/* + * fe-dsql-auth.c + * + * Support for AWS DSQL authentication token generation + * + * Copyright (c) 2025 PostgreSQL Global Development Group + */ +#include "postgres_fe.h" + +#include "fe-dsql-auth.h" +#include "libpq-int.h" + +#include +#include +#include +#include + +/* Include AWS DSQL Auth library functions */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static bool aws_libs_initialized = false; +static struct aws_logger dsql_logger; +static bool dsql_logger_initialized = false; + +/* HTTP infrastructure for IMDS */ +static struct aws_event_loop_group *s_el_group = NULL; +static struct aws_host_resolver *s_host_resolver = NULL; +static struct aws_client_bootstrap *s_client_bootstrap = NULL; + +/* + * DSQL Token Generator - holds state for efficient token generation + */ +struct dsql_token_generator { + struct aws_allocator *allocator; + struct aws_credentials_provider *credentials_provider; +}; + +/* Global token generator instance */ +static struct dsql_token_generator s_token_generator = {0}; + +/* + * Initialize DSQL logging + */ +static void +initialize_dsql_logging(void) +{ + if (!dsql_logger_initialized) + { + struct aws_allocator *allocator = aws_default_allocator(); + struct aws_logger_standard_options logger_options = { + .level = AWS_LOG_LEVEL_NONE, /* Can be controlled by environment variable */ + .file = stderr /* Log to stderr by default */ + }; + + /* Check for AWS_LOG_LEVEL environment 
variable */ + const char *log_level_str = getenv("AWS_LOG_LEVEL"); + if (log_level_str != NULL) + { + enum aws_log_level level; + if (aws_string_to_log_level(log_level_str, &level) == AWS_OP_SUCCESS) + { + logger_options.level = level; + } + } + + /* Check for AWS_LOG_FILE environment variable for file output */ + const char *log_file_str = getenv("AWS_LOG_FILE"); + if (log_file_str != NULL && strlen(log_file_str) > 0) + { + if (strcmp(log_file_str, "stdout") == 0) + { + logger_options.file = stdout; + } + else if (strcmp(log_file_str, "stderr") == 0) + { + logger_options.file = stderr; + } + else + { + /* Use as a filename */ + logger_options.filename = log_file_str; + logger_options.file = NULL; + } + } + + if (aws_logger_init_standard(&dsql_logger, allocator, &logger_options) == AWS_OP_SUCCESS) + { + aws_logger_set(&dsql_logger); + dsql_logger_initialized = true; + } + } +} + +/* + * Initialize AWS libraries if not already initialized + */ +static void +initialize_aws_libs(void) +{ + if (!aws_libs_initialized) + { + struct aws_allocator *allocator; + struct aws_host_resolver_default_options resolver_options; + struct aws_client_bootstrap_options bootstrap_options; + + allocator = aws_default_allocator(); + aws_common_library_init(allocator); + aws_io_library_init(allocator); + aws_http_library_init(allocator); + aws_auth_library_init(allocator); + aws_sdkutils_library_init(allocator); + + /* Initialize HTTP infrastructure for IMDS */ + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "Initializing HTTP infrastructure for IMDS"); + + s_el_group = aws_event_loop_group_new_default(allocator, 1, NULL); + if (!s_el_group) { + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to create event loop group"); + goto error; + } + + AWS_ZERO_STRUCT(resolver_options); + resolver_options.el_group = s_el_group; + resolver_options.max_entries = 8; + s_host_resolver = aws_host_resolver_new_default(allocator, &resolver_options); + if (!s_host_resolver) { + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, 
"Failed to create host resolver"); + goto error; + } + + AWS_ZERO_STRUCT(bootstrap_options); + bootstrap_options.event_loop_group = s_el_group; + bootstrap_options.host_resolver = s_host_resolver; + s_client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); + if (!s_client_bootstrap) { + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to create client bootstrap"); + goto error; + } + + aws_libs_initialized = true; + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "AWS libraries and HTTP infrastructure initialized successfully"); + + /* Note: We cannot use atexit() in libpq as it's not allowed to call exit-related functions. + * The cleanup will be handled by explicit calls at application shutdown or by the OS. + */ + return; + + error: + /* Clean up on error */ + if (s_client_bootstrap) { + aws_client_bootstrap_release(s_client_bootstrap); + s_client_bootstrap = NULL; + } + if (s_host_resolver) { + aws_host_resolver_release(s_host_resolver); + s_host_resolver = NULL; + } + if (s_el_group) { + aws_event_loop_group_release(s_el_group); + s_el_group = NULL; + } + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to initialize AWS libraries"); + } +} + +/* + * Initialize the DSQL token generator with long-lived components + */ +static int +initialize_token_generator(void) +{ + struct aws_credentials_provider_chain_default_options credentials_options; + + if (s_token_generator.allocator != NULL) { + /* Already initialized */ + return AWS_OP_SUCCESS; + } + + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "Initializing DSQL token generator"); + + s_token_generator.allocator = aws_default_allocator(); + + /* Create credentials provider with client bootstrap for IMDS */ + AWS_ZERO_STRUCT(credentials_options); + credentials_options.bootstrap = s_client_bootstrap; + + s_token_generator.credentials_provider = aws_credentials_provider_new_chain_default( + s_token_generator.allocator, &credentials_options); + + if (!s_token_generator.credentials_provider) { + 
AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to create credentials provider for token generator"); + s_token_generator.allocator = NULL; + return AWS_OP_ERR; + } + + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "DSQL token generator initialized successfully"); + return AWS_OP_SUCCESS; +} + +/* + * Public initialization function for the DSQL token generator + */ +int +dsql_initialize_token_generator(void) +{ + /* Initialize AWS libraries and logging */ + initialize_aws_libs(); + initialize_dsql_logging(); + + return initialize_token_generator(); +} + +/* + * Clean up the DSQL token generator + */ +static void +cleanup_token_generator(void) +{ + if (s_token_generator.allocator != NULL) { + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "Cleaning up DSQL token generator"); + + if (s_token_generator.credentials_provider) { + aws_credentials_provider_release(s_token_generator.credentials_provider); + s_token_generator.credentials_provider = NULL; + } + + s_token_generator.allocator = NULL; + } +} + +/* + * Clean up DSQL authentication resources + */ +void +dsql_cleanup(void) +{ + cleanup_token_generator(); + + if (dsql_logger_initialized) + { + aws_logger_set(NULL); + aws_logger_clean_up(&dsql_logger); + dsql_logger_initialized = false; + } + + if (aws_libs_initialized) + { + aws_sdkutils_library_clean_up(); + aws_auth_library_clean_up(); + aws_io_library_clean_up(); + aws_common_library_clean_up(); + aws_libs_initialized = false; + } +} + +/* + * Generate a DSQL authentication token for the specified endpoint. + * Uses a local auth_config for thread safety and cached credentials provider for efficiency. + * Returns a newly allocated string containing the token. 
+ */ +char * +dsql_generate_token(const char *endpoint, bool admin, char **err_msg) +{ + struct aws_dsql_auth_config auth_config = {0}; + struct aws_dsql_auth_token auth_token = {0}; + struct aws_string *aws_region = NULL; + char *token = NULL; + int aws_error; + const char *env_region; + const char *token_str; + + /* Check if token generator is initialized */ + if (s_token_generator.allocator == NULL) { + if (err_msg) + *err_msg = strdup("Token generator not initialized"); + return NULL; + } + + AWS_LOGF_INFO(AWS_LS_AUTH_GENERAL, "Starting DSQL token generation for endpoint: %s", endpoint); + + /* Initialize a local auth config for thread safety */ + if (aws_dsql_auth_config_init(&auth_config) != AWS_OP_SUCCESS) { + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to initialize local auth config"); + if (err_msg) + *err_msg = strdup("Failed to initialize auth config"); + return NULL; + } + + /* Set hostname on the local auth config */ + aws_dsql_auth_config_set_hostname(&auth_config, endpoint); + + /* Try to get region from environment variable first */ + env_region = getenv("AWS_REGION"); + if (env_region != NULL && env_region[0] != '\0') + { + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "Using AWS_REGION from environment: %s", env_region); + aws_region = aws_string_new_from_c_str(s_token_generator.allocator, env_region); + if (!aws_region) { + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to create region string from AWS_REGION"); + if (err_msg) + *err_msg = strdup("Failed to create region string from AWS_REGION"); + goto cleanup; + } + } + else + { + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "AWS_REGION not set, attempting to infer from hostname: %s", endpoint); + /* Try to infer region from hostname */ + if (aws_dsql_auth_config_infer_region(s_token_generator.allocator, &auth_config, &aws_region) != AWS_OP_SUCCESS || + aws_region == NULL) + { + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to infer AWS region from hostname: %s", endpoint); + if (err_msg) + *err_msg = strdup("Failed 
to infer AWS region from hostname. Please set AWS_REGION environment variable."); + goto cleanup; + } + AWS_LOGF_INFO(AWS_LS_AUTH_GENERAL, "Inferred region: %s", aws_string_c_str(aws_region)); + } + aws_dsql_auth_config_set_region(&auth_config, aws_region); + + /* Set the cached credentials provider */ + aws_dsql_auth_config_set_credentials_provider(&auth_config, s_token_generator.credentials_provider); + + /* Set expiration time to 5 seconds for shorter token lifetime */ + aws_dsql_auth_config_set_expires_in(&auth_config, 5); /* 5 seconds */ + + /* Generate the token using local auth config and cached components */ + AWS_ZERO_STRUCT(auth_token); + if (aws_dsql_auth_token_generate(&auth_config, admin, s_token_generator.allocator, &auth_token) != AWS_OP_SUCCESS) + { + aws_error = aws_last_error(); + if (err_msg) + *err_msg = strdup(aws_error_str(aws_error)); + goto cleanup; + } + + /* Get the token string */ + token_str = aws_dsql_auth_token_get_str(&auth_token); + if (token_str) + { + token = strdup(token_str); + AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "DSQL token generated successfully using local auth config and cached credentials"); + } + else + { + AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to get token string from generated token"); + if (err_msg) + *err_msg = strdup("Failed to get token string"); + } + +cleanup: + aws_dsql_auth_token_clean_up(&auth_token); + aws_dsql_auth_config_clean_up(&auth_config); + + if (aws_region) + { + aws_string_destroy(aws_region); + } + + return token; +} + +/* Synchronous credential retrieval state */ +struct credential_validation_state { + struct aws_credentials *credentials; + int error_code; + bool completed; + struct aws_mutex mutex; + struct aws_condition_variable condition_variable; +}; + +/* Callback for synchronous credential retrieval */ +static void +s_on_credentials_acquired(struct aws_credentials *credentials, int error_code, void *user_data) +{ + struct credential_validation_state *state = (struct 
credential_validation_state *)user_data;

	aws_mutex_lock(&state->mutex);

	/* Publish the whole result triple under the mutex so the waiting
	 * thread observes a consistent (credentials, error_code, completed). */
	state->credentials = credentials;
	state->error_code = error_code;
	state->completed = true;

	if (credentials) {
		/* Take our own reference: the provider's reference may go away when
		 * this callback returns; the waiter releases ours after checking. */
		aws_credentials_acquire(credentials);
		AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "Credentials acquired successfully for validation");
	} else {
		AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Credentials acquisition failed with error: %d", error_code);
	}

	/* Notify while still holding the mutex; the waiter re-checks
	 * state->completed, so spurious wakeups are harmless. */
	aws_condition_variable_notify_one(&state->condition_variable);
	aws_mutex_unlock(&state->mutex);
}

/*
 * Validate AWS credentials early for DSQL authentication.
 * This initializes the token generator and validates that credentials can be obtained.
 * Returns AWS_OP_SUCCESS on success, AWS_OP_ERR on failure.
 *
 * Implementation note: turns the provider's asynchronous callback into a
 * synchronous call with a mutex + condition variable, blocking the calling
 * thread until s_on_credentials_acquired() fires.
 */
int
dsql_validate_aws_credentials(char **err_msg)
{
	/* Shared with the async callback; lives on this stack frame, which is
	 * safe because we block below until the callback has completed. */
	struct credential_validation_state state = {0};
	int result = AWS_OP_ERR;

	/* Check if token generator is initialized */
	if (s_token_generator.allocator == NULL) {
		AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Token generator not initialized during credential validation");
		if (err_msg)
			*err_msg = strdup("Token generator not initialized");
		return AWS_OP_ERR;
	}

	AWS_LOGF_INFO(AWS_LS_AUTH_GENERAL, "Validating AWS credentials for DSQL authentication");

	/* Initialize synchronization primitives */
	if (aws_mutex_init(&state.mutex) != AWS_OP_SUCCESS) {
		AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to initialize mutex for credential validation");
		if (err_msg)
			*err_msg = strdup("Failed to initialize synchronization");
		return AWS_OP_ERR;
	}

	if (aws_condition_variable_init(&state.condition_variable) != AWS_OP_SUCCESS) {
		AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to initialize condition variable for credential validation");
		/* Unwind the mutex created above before bailing out */
		aws_mutex_clean_up(&state.mutex);
		if (err_msg)
			*err_msg = strdup("Failed to initialize synchronization");
		return AWS_OP_ERR;
	}

	/* Actually retrieve credentials to validate they exist and are accessible */
	AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "Attempting to retrieve AWS credentials for validation");

	if (aws_credentials_provider_get_credentials(
			s_token_generator.credentials_provider,
			s_on_credentials_acquired,
			&state) != AWS_OP_SUCCESS) {

		/* Initiation itself failed; the callback will never run */
		AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to initiate credentials retrieval");
		if (err_msg)
			*err_msg = strdup("Failed to initiate credentials retrieval");
		goto cleanup;
	}

	/* Wait for credentials retrieval to complete.  The predicate loop
	 * guards against spurious wakeups; there is deliberately no timeout,
	 * so this blocks until the provider answers. */
	aws_mutex_lock(&state.mutex);
	while (!state.completed) {
		aws_condition_variable_wait(&state.condition_variable, &state.mutex);
	}
	aws_mutex_unlock(&state.mutex);

	/* Check if credentials were successfully retrieved.  error_code comes
	 * from the callback; 0 (== AWS_OP_SUCCESS) means success. */
	if (state.credentials && state.error_code == AWS_OP_SUCCESS) {
		AWS_LOGF_INFO(AWS_LS_AUTH_GENERAL, "AWS credentials validation completed successfully");
		AWS_LOGF_DEBUG(AWS_LS_AUTH_GENERAL, "Token generator ready for DSQL authentication");
		result = AWS_OP_SUCCESS;

		/* Drop the reference the callback took for us */
		aws_credentials_release(state.credentials);
	} else {
		AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Failed to retrieve AWS credentials: %s",
				aws_error_str(state.error_code));
		if (err_msg) {
			/* NOTE(review): aws_error_str presumably returns a static string
			 * or NULL for unknown codes — the NULL guard below covers that. */
			const char *error_str = aws_error_str(state.error_code);
			*err_msg = strdup(error_str ?
error_str : "Unknown credential retrieval error"); + } + } + +cleanup: + aws_condition_variable_clean_up(&state.condition_variable); + aws_mutex_clean_up(&state.mutex); + + return result; +} diff --git a/src/interfaces/libpq/fe-dsql-auth.h b/src/interfaces/libpq/fe-dsql-auth.h new file mode 100644 index 0000000000000..d24c3a600c7d7 --- /dev/null +++ b/src/interfaces/libpq/fe-dsql-auth.h @@ -0,0 +1,25 @@ +/* + * fe-dsql-auth.h + * + * Support for AWS DSQL authentication token generation + * + * Copyright (c) 2025 PostgreSQL Global Development Group + */ +#ifndef FE_DSQL_AUTH_H +#define FE_DSQL_AUTH_H + +#include + +/* Initialize the DSQL token generator */ +int dsql_initialize_token_generator(void); + +/* Generate a DSQL authentication token for the specified endpoint */ +char *dsql_generate_token(const char *endpoint, bool admin, char **err_msg); + +/* Initialize and validate AWS credentials early (for startup validation) */ +int dsql_validate_aws_credentials(char **err_msg); + +/* Clean up DSQL authentication resources */ +void dsql_cleanup(void); + +#endif /* FE_DSQL_AUTH_H */ diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index a6cfd7f5c9d83..44e4f9f7fd4ea 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -520,6 +520,7 @@ struct pg_conn char current_auth_response; /* used by pqTraceOutputMessage to * know which auth response we're * sending */ + bool is_dsql; /* when true, auth tokens are auto-generated */ /* Callbacks for external async authentication */ PostgresPollingStatusType (*async_auth) (PGconn *conn);