diff --git a/.flake8 b/.flake8 index f5990a83..0fc0cadc 100644 --- a/.flake8 +++ b/.flake8 @@ -4,5 +4,5 @@ # Line break before operand needs to be ignored for line lengths # greater than max-line-length. Best practice shows W504 ignore = E722, W504 -exclude = optimizely/lib/pymmh3.py,*virtualenv* +exclude = optimizely/lib/pymmh3.py,*virtualenv*,tests/testapp/application.py max-line-length = 120 diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml new file mode 100644 index 00000000..7619ca51 --- /dev/null +++ b/.github/workflows/integration_test.yml @@ -0,0 +1,58 @@ +name: Reusable action of running integration of production suite + +on: + workflow_call: + inputs: + FULLSTACK_TEST_REPO: + required: false + type: string + secrets: + CI_USER_TOKEN: + required: true + TRAVIS_COM_TOKEN: + required: true +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + # You should create a personal access token and store it in your repository + token: ${{ secrets.CI_USER_TOKEN }} + repository: 'optimizely/travisci-tools' + path: 'home/runner/travisci-tools' + ref: 'master' + - name: set SDK Branch if PR + env: + HEAD_REF: ${{ github.head_ref }} + if: ${{ github.event_name == 'pull_request' }} + run: | + echo "SDK_BRANCH=$HEAD_REF" >> $GITHUB_ENV + - name: set SDK Branch if not pull request + env: + REF_NAME: ${{ github.ref_name }} + if: ${{ github.event_name != 'pull_request' }} + run: | + echo "SDK_BRANCH=${REF_NAME}" >> $GITHUB_ENV + echo "TRAVIS_BRANCH=${REF_NAME}" >> $GITHUB_ENV + - name: Trigger build + env: + SDK: python + FULLSTACK_TEST_REPO: ${{ inputs.FULLSTACK_TEST_REPO }} + BUILD_NUMBER: ${{ github.run_id }} + TESTAPP_BRANCH: master + GITHUB_TOKEN: ${{ secrets.CI_USER_TOKEN }} + EVENT_TYPE: ${{ github.event_name }} + GITHUB_CONTEXT: ${{ toJson(github) }} + #REPO_SLUG: ${{ github.repository }} + PULL_REQUEST_SLUG: ${{ github.repository }} + UPSTREAM_REPO: ${{ github.repository }} + PULL_REQUEST_SHA: ${{ github.event.pull_request.head.sha }} + PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} + UPSTREAM_SHA: ${{ github.sha }} + TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} + EVENT_MESSAGE: ${{ github.event.message }} + HOME: 'home/runner' + run: | + echo "$GITHUB_CONTEXT" + home/runner/travisci-tools/trigger-script-with-status-update.sh diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml new file mode 100644 index 00000000..0699f84c --- /dev/null +++ b/.github/workflows/python.yml @@ -0,0 +1,117 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: build + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + lint_markdown_files: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '2.6' + bundler-cache: true # runs 'bundle install' and caches installed gems automatically + - name: Install gem + run: | + gem install awesome_bot + - name: Run tests + run: find . 
-type f -name '*.md' -exec awesome_bot {} \; + + linting: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.12 + uses: actions/setup-python@v4 + with: + python-version: '3.12' + # flake8 version should be same as the version in requirements/test.txt + # to avoid lint errors on CI + - name: pip install flake8 + run: pip install flake8>=4.1.0 + - name: Lint with flake8 + run: | + flake8 + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + + integration_tests: + uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master + secrets: + CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} + TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} + + fullstack_production_suite: + uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master + with: + FULLSTACK_TEST_REPO: ProdTesting + secrets: + CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} + TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} + + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: + - "pypy-3.8" + - "pypy-3.9" + - "pypy-3.10" + - "3.8" + - "3.9" + - "3.10" + - "3.11" + - "3.12" + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements/core.txt;pip install -r requirements/test.txt + - name: Test with pytest + run: | + pytest --cov=optimizely + + type-check: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: + - "pypy-3.8" + - "pypy-3.9" + - "pypy-3.10" + - "3.8" + - "3.9" + - "3.10" + - "3.11" + - "3.12" + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements/typing.txt + - name: Type check with mypy + run: | + mypy . --exclude "tests/testapp" + mypy . 
--exclude "tests/" --strict diff --git a/.github/workflows/source_clear_cron.yml b/.github/workflows/source_clear_cron.yml new file mode 100644 index 00000000..862b4a3f --- /dev/null +++ b/.github/workflows/source_clear_cron.yml @@ -0,0 +1,16 @@ +name: Source clear + +on: + schedule: + # Runs "weekly" + - cron: '0 0 * * 0' + +jobs: + source_clear: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Source clear scan + env: + SRCCLR_API_TOKEN: ${{ secrets.SRCCLR_API_TOKEN }} + run: curl -sSL https://download.sourceclear.com/ci.sh | bash -s – scan diff --git a/.github/workflows/ticket_reference_check.yml b/.github/workflows/ticket_reference_check.yml new file mode 100644 index 00000000..3d58f804 --- /dev/null +++ b/.github/workflows/ticket_reference_check.yml @@ -0,0 +1,16 @@ +name: Jira ticket reference check + +on: + pull_request: + types: [opened, edited, reopened, synchronize] + +jobs: + + jira_ticket_reference_check: + runs-on: ubuntu-latest + + steps: + - name: Check for Jira ticket reference + uses: optimizely/github-action-ticket-reference-checker-public@master + with: + bodyRegex: 'FSSDK-(?\d+)' diff --git a/.gitignore b/.gitignore index 961aa6ad..00ad86a4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,8 @@ MANIFEST .idea/* .*virtualenv/* +.mypy_cache +.vscode/* # Output of building package *.egg-info @@ -25,3 +27,4 @@ datafile.json # Sphinx documentation docs/build/ + diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index dc008188..00000000 --- a/.travis.yml +++ /dev/null @@ -1,84 +0,0 @@ -dist: focal -language: python -python: - - "pypy3.7-7.3.5" - - "3.7" - - "3.8" - - "3.9" - - "3.10.0" -before_install: "python -m pip install --upgrade pip" -install: "pip install -r requirements/core.txt;pip install -r requirements/test.txt" -script: "pytest --cov=optimizely" -after_success: - - coveralls - -# Linting and Integration tests need to run first to reset the PR build status to pending. -stages: - - 'Source Clear' - - 'Lint markdown files' - - 'Linting' - - 'Integration tests' - - 'Full stack production tests' - - 'Test' - -jobs: - include: - - stage: 'Lint markdown files' - os: linux - language: generic - install: gem install awesome_bot - script: - - find . 
-type f -name '*.md' -exec awesome_bot {} \; - notifications: - email: false - - - stage: 'Linting' - language: python - python: "3.9" - # flake8 version should be same as the version in requirements/test.txt - # to avoid lint errors on CI - install: "pip install flake8>=4.1.0" - script: "flake8" - after_success: travis_terminate 0 - - - &integrationtest - stage: 'Integration tests' - merge_mode: replace - env: SDK=python SDK_BRANCH=$TRAVIS_PULL_REQUEST_BRANCH - cache: false - language: minimal - install: skip - before_script: - - mkdir $HOME/travisci-tools && pushd $HOME/travisci-tools && git init && git pull https://$CI_USER_TOKEN@github.com/optimizely/travisci-tools.git && popd - script: - - $HOME/travisci-tools/trigger-script-with-status-update.sh - after_success: travis_terminate 0 - - - <<: *integrationtest - stage: 'Full stack production tests' - env: - SDK=python - SDK_BRANCH=$TRAVIS_PULL_REQUEST_BRANCH - FULLSTACK_TEST_REPO=ProdTesting - - stage: 'Test' - python: "pypy3.7-7.3.5" -# before_install: -# - pip install "cryptography>=1.3.4" - - stage: 'Test' - python: "3.7" - - stage: 'Test' - python: "3.8" - - stage: 'Test' - python: "3.9" - - stage: 'Test' - python: "3.10.0" - - - stage: 'Source Clear' - if: type = cron - addons: - srcclr: true - before_install: skip - install: skip - before_script: skip - script: skip - after_success: skip diff --git a/CHANGELOG.md b/CHANGELOG.md index 892d8ad3..d0cd8b71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,117 @@ # Optimizely Python SDK Changelog +## 5.2.0 +February 26, 2025 + +Python threads have been named. + +`PollingConfigManager` now has another optional parameter `retries` that will control how many times the SDK will attempt to get the datafile if the connection fails. Previously, the SDK would only try once. Now it defaults to a maximum of three attempts. When sending event data, the SDK will attempt to send event data up to three times, whereas before it would only attempt once. + +## 5.1.0 +November 27th, 2024 + +Added support for batch processing in DecideAll and DecideForKeys, enabling more efficient handling of multiple decisions in the User Profile Service. ([#440](https://github.com/optimizely/python-sdk/pull/440)) + +## 5.0.1 +June 26th, 2024 + +We removed redundant dependencies pyOpenSSL and cryptography ([#435](https://github.com/optimizely/python-sdk/pull/435), [#436](https://github.com/optimizely/python-sdk/pull/436)). + +## 5.0.0 +January 18th, 2024 + +### New Features + +The 5.0.0 release introduces a new primary feature, [Advanced Audience Targeting]( https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) enabled through integration with [Optimizely Data Platform (ODP)](https://docs.developers.optimizely.com/optimizely-data-platform/docs) ([#395](https://github.com/optimizely/python-sdk/pull/395), [#398](https://github.com/optimizely/python-sdk/pull/398), [#402](https://github.com/optimizely/python-sdk/pull/402), [#403](https://github.com/optimizely/python-sdk/pull/403), [#405](https://github.com/optimizely/python-sdk/pull/405)). + +You can use ODP, a high-performance [Customer Data Platform (CDP)]( https://www.optimizely.com/optimization-glossary/customer-data-platform/), to easily create complex real-time segments (RTS) using first-party and 50+ third-party data sources out of the box. 
You can create custom schemas that support the user attributes important for your business, and stitch together user behavior done on different devices to better understand and target your customers for personalized user experiences. ODP can be used as a single source of truth for these segments in any Optimizely or 3rd party tool. + +With ODP accounts integrated into Optimizely projects, you can build audiences using segments pre-defined in ODP. The SDK will fetch the segments for given users and make decisions using the segments. For access to ODP audience targeting in your Feature Experimentation account, please contact your Optimizely Customer Success Manager. + +This version includes the following changes: + +* New API added to `OptimizelyUserContext`: + + * `fetchQualifiedSegments()`: this API will retrieve user segments from the ODP server. The fetched segments will be used for audience evaluation. The fetched data will be stored in the local cache to avoid repeated network delays. + * When an `OptimizelyUserContext` is created, the SDK will automatically send an identify request to the ODP server to facilitate observing user activities. + +* New APIs added to `OptimizelyClient`: + + * `sendOdpEvent()`: customers can build/send arbitrary ODP events that will bind user identifiers and data to user profiles in ODP. + +For details, refer to our documentation pages: + +* [Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) +* [Server SDK Support](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-for-server-side-sdks) +* [Initialize Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/initialize-sdk-python) +* [OptimizelyUserContext Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/wip-fsodp-optimizelyusercontext-python) +* [Advanced Audience Targeting segment qualification methods](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-segment-qualification-methods-python) +* [Send Optimizely Data Platform data using Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/send-odp-data-using-advanced-audience-targeting-python) + +### Logging + +* Add warning to polling intervals below 30 seconds ([#428](https://github.com/optimizely/python-sdk/pull/428)) +* Add warning to duplicate experiment keys ([#430](https://github.com/optimizely/python-sdk/pull/430)) + +### Enhancements +* Added `py.typed` to enable external usage of mypy type annotations. + +### Breaking Changes +* Updated minimum supported Python version from 3.7 -> 3.8 +* `ODPManager` in the SDK is enabled by default. Unless an ODP account is integrated into the Optimizely projects, most `ODPManager` functions will be ignored. If needed, `ODPManager` can be disabled when `OptimizelyClient` is instantiated. +* `BaseConfigManager` abstract class now requires a get_sdk_key method. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* `PollingConfigManager` requires either the sdk_key parameter or datafile containing an sdkKey. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* Asynchronous `BatchEventProcessor` is now the default event processor. 
([#378](https://github.com/optimizely/python-sdk/pull/378)) + +## 5.0.0-beta +Apr 28th, 2023 + +### New Features + +The 5.0.0-beta release introduces a new primary feature, [Advanced Audience Targeting]( https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) enabled through integration with [Optimizely Data Platform (ODP)](https://docs.developers.optimizely.com/optimizely-data-platform/docs) ([#395](https://github.com/optimizely/python-sdk/pull/395), [#398](https://github.com/optimizely/python-sdk/pull/398), [#402](https://github.com/optimizely/python-sdk/pull/402), [#403](https://github.com/optimizely/python-sdk/pull/403), [#405](https://github.com/optimizely/python-sdk/pull/405)). + +You can use ODP, a high-performance [Customer Data Platform (CDP)]( https://www.optimizely.com/optimization-glossary/customer-data-platform/), to easily create complex real-time segments (RTS) using first-party and 50+ third-party data sources out of the box. You can create custom schemas that support the user attributes important for your business, and stitch together user behavior done on different devices to better understand and target your customers for personalized user experiences. ODP can be used as a single source of truth for these segments in any Optimizely or 3rd party tool. + +With ODP accounts integrated into Optimizely projects, you can build audiences using segments pre-defined in ODP. The SDK will fetch the segments for given users and make decisions using the segments. For access to ODP audience targeting in your Feature Experimentation account, please contact your Optimizely Customer Success Manager. + +This version includes the following changes: + +* New API added to `OptimizelyUserContext`: + + * `fetchQualifiedSegments()`: this API will retrieve user segments from the ODP server. The fetched segments will be used for audience evaluation. The fetched data will be stored in the local cache to avoid repeated network delays. + * When an `OptimizelyUserContext` is created, the SDK will automatically send an identify request to the ODP server to facilitate observing user activities. + +* New APIs added to `OptimizelyClient`: + + * `sendOdpEvent()`: customers can build/send arbitrary ODP events that will bind user identifiers and data to user profiles in ODP. + +For details, refer to our documentation pages: + +* [Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) +* [Server SDK Support](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-for-server-side-sdks) +* [Initialize Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/initialize-sdk-python) +* [OptimizelyUserContext Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/wip-fsodp-optimizelyusercontext-python) +* [Advanced Audience Targeting segment qualification methods](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-segment-qualification-methods-python) +* [Send Optimizely Data Platform data using Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/send-odp-data-using-advanced-audience-targeting-python) + +### Breaking Changes + +* `ODPManager` in the SDK is enabled by default. 
Unless an ODP account is integrated into the Optimizely projects, most `ODPManager` functions will be ignored. If needed, `ODPManager` can be disabled when `OptimizelyClient` is instantiated. +* `BaseConfigManager` abstract class now requires a get_sdk_key method. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* `PollingConfigManager` requires either the sdk_key parameter or datafile containing an sdkKey. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* Asynchronous `BatchEventProcessor` is now the default event processor. ([#378](https://github.com/optimizely/python-sdk/pull/378)) + +## 4.1.1 +March 10th, 2023 + +We updated our README.md and other non-functional code to reflect that this SDK supports both Optimizely Feature Experimentation and Optimizely Full Stack. ([#420](https://github.com/optimizely/python-sdk/pull/420)) + +## 4.1.0 +July 7th, 2022 + +### Bug Fixes +* Fix invalid datafile returned from `ProjectConfig.to_datafile` and `OptimizelyConfig.get_datafile` ([#321](https://github.com/optimizely/python-sdk/pull/321), [#384](https://github.com/optimizely/python-sdk/pull/384)) + ## 4.0.0 January 12th, 2022 @@ -22,10 +134,10 @@ January 12th, 2022 September 16th, 2021 ### New Features -* Added new public properties to OptimizelyConfig. +* Added new public properties to OptimizelyConfig. - sdk_key and environment_key [#338] (https://github.com/optimizely/python-sdk/pull/338) - attributes and events [#339] (https://github.com/optimizely/python-sdk/pull/339) - - experiment_rules, delivery_rules, audiences and audiences in OptimizelyExperiment + - experiment_rules, delivery_rules, audiences and audiences in OptimizelyExperiment - [#342] (https://github.com/optimizely/python-sdk/pull/342) - [#351] (https://github.com/optimizely/python-sdk/pull/351/files) * For details please refer to our documentation page: @@ -150,7 +262,7 @@ October 28th, 2019 * To configure event batching, set the `batch_size` and `flush_interval` properties when initializing instance of [BatchEventProcessor](https://github.com/optimizely/python-sdk/blob/3.3.x/optimizely/event/event_processor.py#L45). * Event batching is disabled by default. You can pass in instance of `BatchEventProcessor` when creating `Optimizely` instance to enable event batching. * Users can subscribe to `LogEvent` notification to be notified of whenever a payload consisting of a batch of user events is handed off to the event dispatcher to send to Optimizely's backend. -* Introduced blocking timeout in `PollingConfigManager`. By default, calls to `get_config` will block for maximum of 10 seconds until config is available. +* Introduced blocking timeout in `PollingConfigManager`. By default, calls to `get_config` will block for maximum of 10 seconds until config is available. ### Bug Fixes: * Fixed incorrect log message when numeric metric is not used. ([#217](https://github.com/optimizely/python-sdk/pull/217)) diff --git a/LICENSE b/LICENSE index 532cbad9..1b91d409 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - © Optimizely 2016 + Copyright 2016 Optimizely Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
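The ODP-related changelog entries above name the new APIs; the sketch below shows one plausible end-to-end use of them. It assumes the snake_case method names the Python SDK exposes (`create_user_context`, `fetch_qualified_segments`, `send_odp_event`) for the camelCase APIs listed above; the SDK key, user ID, identifiers, and event payload are placeholders.

```python
# Hedged usage sketch of the ODP APIs introduced in 5.0.0.
# '<YOUR_SDK_KEY>' and the user/event values below are placeholders.
from optimizely import optimizely

client = optimizely.Optimizely(sdk_key='<YOUR_SDK_KEY>')

# Creating a user context automatically sends an identify event to the ODP server.
user = client.create_user_context('user123', {'device': 'iphone'})

# Fetch the user's ODP segments; fetched segments are cached locally
# and used for subsequent audience evaluation.
if user.fetch_qualified_segments():
    print(user.qualified_segments)

# Build and send an arbitrary ODP event bound to this user's identifiers.
client.send_odp_event(
    type='fullstack',
    action='purchase',
    identifiers={'fs_user_id': 'user123'},
    data={'total': 49.99},
)
```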
diff --git a/README.md b/README.md index b2cae17b..e0aeafb6 100644 --- a/README.md +++ b/README.md @@ -1,30 +1,29 @@ # Optimizely Python SDK [![PyPI version](https://badge.fury.io/py/optimizely-sdk.svg)](https://pypi.org/project/optimizely-sdk) -[![Build Status](https://travis-ci.org/optimizely/python-sdk.svg?branch=master)](https://travis-ci.org/optimizely/python-sdk) +[![Build Status](https://github.com/optimizely/python-sdk/actions/workflows/python.yml/badge.svg?branch=master)](https://github.com/optimizely/python-sdk/actions/workflows/python.yml?query=branch%3Amaster) [![Coverage Status](https://coveralls.io/repos/github/optimizely/python-sdk/badge.svg)](https://coveralls.io/github/optimizely/python-sdk) -[![Documentation Status](https://readthedocs.org/projects/optimizely-python-sdk/badge/?version=latest)](https://optimizely-python-sdk.readthedocs.io/en/latest/?badge=latest) [![Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0) -This repository houses the official Python SDK for use with Optimizely -Full Stack and Optimizely Rollouts. +This repository houses the Python SDK for use with Optimizely Feature Experimentation and Optimizely Full Stack (legacy). -Optimizely Full Stack is A/B testing and feature flag management for -product development teams. Experiment in any application. Make every -feature on your roadmap an opportunity to learn. Learn more at -, or see the [Full -Stack -documentation](https://docs.developers.optimizely.com/full-stack/docs). +Optimizely Feature Experimentation is an A/B testing and feature management tool for product development teams that enables you to experiment at every step. Using Optimizely Feature Experimentation allows for every feature on your roadmap to be an opportunity to discover hidden insights. Learn more at [Optimizely.com](https://www.optimizely.com/products/experiment/feature-experimentation/), or see the [developer documentation](https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/welcome). -Optimizely Rollouts is free feature flags for development teams. Easily -roll out and roll back features in any application without code deploys. -Mitigate risk for every feature on your roadmap. Learn more at -, or see the [Rollouts -documentation](https://docs.developers.optimizely.com/rollouts/docs). +Optimizely Rollouts is [free feature flags](https://www.optimizely.com/free-feature-flagging/) for development teams. You can easily roll out and roll back features in any application without code deploys, mitigating risk for every feature on your roadmap. -## Getting Started +## Get Started -### Installing the SDK +Refer to the [Python SDK's developer documentation](https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/python-sdk) for detailed instructions on getting started with using the SDK. + +### Requirements + +Version `5.0+`: Python 3.8+, PyPy 3.8+ + +Version `4.0+`: Python 3.7+, PyPy 3.7+ + +Version `3.0+`: Python 2.7+, PyPy 3.4+ + +### Install the SDK The SDK is available through [PyPi](https://pypi.python.org/pypi?name=optimizely-sdk&:action=display). 
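The README hunks below describe three initialization paths (a hard-coded datafile, an sdk_key, or a config manager instance); here is a compact sketch of all three, where `datafile.json` and `<YOUR_SDK_KEY>` are placeholders.

```python
# Sketch of the three initialization options the README describes below.
from optimizely import optimizely
from optimizely.config_manager import PollingConfigManager

# 1. With a hard-coded datafile string.
with open('datafile.json') as datafile:
    client = optimizely.Optimizely(datafile=datafile.read())

# 2. With an sdk_key; the SDK polls the Optimizely CDN for datafile updates.
client = optimizely.Optimizely(sdk_key='<YOUR_SDK_KEY>')

# 3. With an explicit config manager (any BaseConfigManager implementation).
custom_config_manager = PollingConfigManager(sdk_key='<YOUR_SDK_KEY>', update_interval=60)
client = optimizely.Optimizely(config_manager=custom_config_manager)
```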
@@ -32,17 +31,14 @@ To install: pip install optimizely-sdk -Note: -If you are running the SDK with PyPy or PyPy3 and you are experiencing issues, install this cryptography package **first** and then optimizely-sdk package: - - pip install "cryptography>=1.3.4,<=3.1.1" - ### Feature Management Access To access the Feature Management configuration in the Optimizely -dashboard, please contact your Optimizely account executive. +dashboard, please contact your Optimizely customer success manager. + +## Use the Python SDK -### Using the SDK +### Initialization You can initialize the Optimizely instance in three ways: with a datafile, by providing an sdk_key, or by providing an implementation of [BaseConfigManager](https://github.com/optimizely/python-sdk/tree/master/optimizely/config_manager.py#L32). @@ -84,7 +80,7 @@ Each method is described below. config_manager=custom_config_manager ) -#### PollingConfigManager +### PollingConfigManager The [PollingConfigManager](https://github.com/optimizely/python-sdk/blob/master/optimizely/config_manager.py#L150) asynchronously polls for datafiles from a specified URL at regular intervals by making HTTP requests. @@ -125,7 +121,7 @@ used to form the target URL. You may also provide your own logger, error_handler, or notification_center. -#### AuthDatafilePollingConfigManager +### AuthDatafilePollingConfigManager The [AuthDatafilePollingConfigManager](https://github.com/optimizely/python-sdk/blob/master/optimizely/config_manager.py#L375) implements `PollingConfigManager` and asynchronously polls for authenticated datafiles from a specified URL at regular intervals @@ -142,7 +138,7 @@ your project and generate an access token for your datafile. **datafile_access_token** The datafile_access_token is attached to the outbound HTTP request header to authorize the request and fetch the datafile. -#### Advanced configuration +### Advanced configuration The following properties can be set to override the default configurations for [PollingConfigManager](#pollingconfigmanager) and [AuthDatafilePollingConfigManager](#authdatafilepollingconfigmanager). @@ -163,10 +159,10 @@ notifications, use: notification_center.add_notification_listener(NotificationTypes.OPTIMIZELY_CONFIG_UPDATE, update_callback) ``` -For Further details see the Optimizely [Full Stack documentation](https://docs.developers.optimizely.com/full-stack/docs) +For further details see the Optimizely [Feature Experimentation documentation](https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/welcome) to learn how to set up your first Python project and use the SDK. -## Development +## SDK Development ### Building the SDK Build and install the SDK with pip, using the following command: pip install -e . -### Unit tests +### Unit Tests #### Running all tests @@ -225,9 +221,36 @@ would be: Please see [CONTRIBUTING](https://github.com/optimizely/python-sdk/blob/master/CONTRIBUTING.md). 
-### Additional Code -This software incorporates code from the following open source repos: -requests (Apache-2.0 License: https://github.com/psf/requests/blob/master/LICENSE) -pyOpenSSL (Apache-2.0 License https://github.com/pyca/pyopenssl/blob/main/LICENSE) -cryptography (Apache-2.0 https://github.com/pyca/cryptography/blob/main/LICENSE.APACHE) -idna (BSD 3-Clause License https://github.com/kjd/idna/blob/master/LICENSE.md) +### Credits + +This software incorporates code from the following open source projects: + +requests (Apache-2.0 License: https://github.com/psf/requests/blob/master/LICENSE) + +idna (BSD 3-Clause License https://github.com/kjd/idna/blob/master/LICENSE.md) + +### Other Optimizely SDKs + +- Agent - https://github.com/optimizely/agent + +- Android - https://github.com/optimizely/android-sdk + +- C# - https://github.com/optimizely/csharp-sdk + +- Flutter - https://github.com/optimizely/optimizely-flutter-sdk + +- Go - https://github.com/optimizely/go-sdk + +- Java - https://github.com/optimizely/java-sdk + +- JavaScript - https://github.com/optimizely/javascript-sdk + +- PHP - https://github.com/optimizely/php-sdk + +- Python - https://github.com/optimizely/python-sdk + +- React - https://github.com/optimizely/react-sdk + +- Ruby - https://github.com/optimizely/ruby-sdk + +- Swift - https://github.com/optimizely/swift-sdk diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..5de83593 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,15 @@ +[mypy] +# regex to exclude: +# - docs folder +# - setup.py +# https://mypy.readthedocs.io/en/stable/config_file.html#confval-exclude +exclude = (?x)( + ^docs/ + | ^setup\.py$ + ) +show_error_codes = True +pretty = True + +# suppress error on conditional import of typing_extensions module +[mypy-optimizely.helpers.types] +no_warn_unused_ignores = True diff --git a/optimizely/bucketer.py b/optimizely/bucketer.py index dcfec3ea..38da3798 100644 --- a/optimizely/bucketer.py +++ b/optimizely/bucketer.py @@ -1,4 +1,4 @@ -# Copyright 2016-2017, 2019-2021 Optimizely +# Copyright 2016-2017, 2019-2022 Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,28 +11,44 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Optional, TYPE_CHECKING import math +from sys import version_info from .lib import pymmh3 as mmh3 -MAX_TRAFFIC_VALUE = 10000 -UNSIGNED_MAX_32_BIT_VALUE = 0xFFFFFFFF -MAX_HASH_VALUE = math.pow(2, 32) -HASH_SEED = 1 -BUCKETING_ID_TEMPLATE = '{bucketing_id}{parent_id}' -GROUP_POLICIES = ['random'] +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -class Bucketer(object): +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from .project_config import ProjectConfig + from .entities import Experiment, Variation + from .helpers.types import TrafficAllocation + + +MAX_TRAFFIC_VALUE: Final = 10000 +UNSIGNED_MAX_32_BIT_VALUE: Final = 0xFFFFFFFF +MAX_HASH_VALUE: Final = math.pow(2, 32) +HASH_SEED: Final = 1 +BUCKETING_ID_TEMPLATE: Final = '{bucketing_id}{parent_id}' +GROUP_POLICIES: Final = ['random'] + + +class Bucketer: """ Optimizely bucketing algorithm that evenly distributes visitors. 
""" - def __init__(self): + def __init__(self) -> None: """ Bucketer init method to set bucketing seed and logger instance. """ self.bucket_seed = HASH_SEED - def _generate_unsigned_hash_code_32_bit(self, bucketing_id): + def _generate_unsigned_hash_code_32_bit(self, bucketing_id: str) -> int: """ Helper method to retrieve hash code. Args: @@ -45,7 +61,7 @@ def _generate_unsigned_hash_code_32_bit(self, bucketing_id): # Adjusting MurmurHash code to be unsigned return mmh3.hash(bucketing_id, self.bucket_seed) & UNSIGNED_MAX_32_BIT_VALUE - def _generate_bucket_value(self, bucketing_id): + def _generate_bucket_value(self, bucketing_id: str) -> int: """ Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE). Args: @@ -58,7 +74,10 @@ def _generate_bucket_value(self, bucketing_id): ratio = float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE return math.floor(ratio * MAX_TRAFFIC_VALUE) - def find_bucket(self, project_config, bucketing_id, parent_id, traffic_allocations): + def find_bucket( + self, project_config: ProjectConfig, bucketing_id: str, + parent_id: Optional[str], traffic_allocations: list[TrafficAllocation] + ) -> Optional[str]: """ Determine entity based on bucket value and traffic allocations. Args: @@ -72,19 +91,21 @@ def find_bucket(self, project_config, bucketing_id, parent_id, traffic_allocatio """ bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id) bucketing_number = self._generate_bucket_value(bucketing_key) - message = 'Assigned bucket %s to user with bucketing ID "%s".' % (bucketing_number, bucketing_id) project_config.logger.debug( - message + f'Assigned bucket {bucketing_number} to user with bucketing ID "{bucketing_id}".' ) for traffic_allocation in traffic_allocations: current_end_of_range = traffic_allocation.get('endOfRange') - if bucketing_number < current_end_of_range: + if current_end_of_range is not None and bucketing_number < current_end_of_range: return traffic_allocation.get('entityId') return None - def bucket(self, project_config, experiment, user_id, bucketing_id): + def bucket( + self, project_config: ProjectConfig, + experiment: Experiment, user_id: str, bucketing_id: str + ) -> tuple[Optional[Variation], list[str]]: """ For a given experiment and bucketing ID determines variation to be shown to user. Args: @@ -98,7 +119,7 @@ def bucket(self, project_config, experiment, user_id, bucketing_id): and array of log messages representing decision making. */. """ - decide_reasons = [] + decide_reasons: list[str] = [] if not experiment: return None, decide_reasons @@ -115,24 +136,19 @@ def bucket(self, project_config, experiment, user_id, bucketing_id): ) if not user_experiment_id: - message = 'User "%s" is in no experiment.' % user_id + message = f'User "{user_id}" is in no experiment.' project_config.logger.info(message) decide_reasons.append(message) return None, decide_reasons if user_experiment_id != experiment.id: - message = 'User "%s" is not in experiment "%s" of group %s.' \ - % (user_id, experiment.key, experiment.groupId) - project_config.logger.info( - message - ) + message = f'User "{user_id}" is not in experiment "{experiment.key}" of group {experiment.groupId}.' + project_config.logger.info(message) decide_reasons.append(message) return None, decide_reasons - message = 'User "%s" is in experiment %s of group %s.' 
% (user_id, experiment.key, experiment.groupId) - project_config.logger.info( - message - ) + message = f'User "{user_id}" is in experiment {experiment.key} of group {experiment.groupId}.' + project_config.logger.info(message) decide_reasons.append(message) # Bucket user if not in white-list and in group (if any) diff --git a/optimizely/cmab/cmab_client.py b/optimizely/cmab/cmab_client.py new file mode 100644 index 00000000..dfcffa78 --- /dev/null +++ b/optimizely/cmab/cmab_client.py @@ -0,0 +1,193 @@ +# Copyright 2025 Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import time +import requests +import math +from typing import Dict, Any, Optional +from optimizely import logger as _logging +from optimizely.helpers.enums import Errors +from optimizely.exceptions import CmabFetchError, CmabInvalidResponseError + +# Default constants for CMAB requests +DEFAULT_MAX_RETRIES = 3 +DEFAULT_INITIAL_BACKOFF = 0.1 # in seconds (100 ms) +DEFAULT_MAX_BACKOFF = 10 # in seconds +DEFAULT_BACKOFF_MULTIPLIER = 2.0 +MAX_WAIT_TIME = 10.0 + + +class CmabRetryConfig: + """Configuration for retrying CMAB requests. + + Contains parameters for maximum retries, backoff intervals, and multipliers. + """ + def __init__( + self, + max_retries: int = DEFAULT_MAX_RETRIES, + initial_backoff: float = DEFAULT_INITIAL_BACKOFF, + max_backoff: float = DEFAULT_MAX_BACKOFF, + backoff_multiplier: float = DEFAULT_BACKOFF_MULTIPLIER, + ): + self.max_retries = max_retries + self.initial_backoff = initial_backoff + self.max_backoff = max_backoff + self.backoff_multiplier = backoff_multiplier + + +class DefaultCmabClient: + """Client for interacting with the CMAB service. + + Provides methods to fetch decisions with optional retry logic. + """ + def __init__(self, http_client: Optional[requests.Session] = None, + retry_config: Optional[CmabRetryConfig] = None, + logger: Optional[_logging.Logger] = None): + """Initialize the CMAB client. + + Args: + http_client (Optional[requests.Session]): HTTP client for making requests. + retry_config (Optional[CmabRetryConfig]): Configuration for retry logic. + logger (Optional[_logging.Logger]): Logger for logging messages. + """ + self.http_client = http_client or requests.Session() + self.retry_config = retry_config + self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) + + def fetch_decision( + self, + rule_id: str, + user_id: str, + attributes: Dict[str, Any], + cmab_uuid: str, + timeout: float = MAX_WAIT_TIME + ) -> str: + """Fetch a decision from the CMAB prediction service. + + Args: + rule_id (str): The rule ID for the experiment. + user_id (str): The user ID for the request. + attributes (Dict[str, Any]): User attributes for the request. + cmab_uuid (str): Unique identifier for the CMAB request. + timeout (float): Maximum wait time for request to respond in seconds. Defaults to 10 seconds. + + Returns: + str: The variation ID. 
+ """ + url = f"https://prediction.cmab.optimizely.com/predict/{rule_id}" + cmab_attributes = [ + {"id": key, "value": value, "type": "custom_attribute"} + for key, value in attributes.items() + ] + + request_body = { + "instances": [{ + "visitorId": user_id, + "experimentId": rule_id, + "attributes": cmab_attributes, + "cmabUUID": cmab_uuid, + }] + } + if self.retry_config: + variation_id = self._do_fetch_with_retry(url, request_body, self.retry_config, timeout) + else: + variation_id = self._do_fetch(url, request_body, timeout) + return variation_id + + def _do_fetch(self, url: str, request_body: Dict[str, Any], timeout: float) -> str: + """Perform a single fetch request to the CMAB prediction service. + + Args: + url (str): The endpoint URL. + request_body (Dict[str, Any]): The request payload. + timeout (float): Maximum wait time for request to respond in seconds. + Returns: + str: The variation ID + """ + headers = {'Content-Type': 'application/json'} + try: + response = self.http_client.post(url, data=json.dumps(request_body), headers=headers, timeout=timeout) + except requests.exceptions.RequestException as e: + error_message = Errors.CMAB_FETCH_FAILED.format(str(e)) + self.logger.error(error_message) + raise CmabFetchError(error_message) + + if not 200 <= response.status_code < 300: + error_message = Errors.CMAB_FETCH_FAILED.format(str(response.status_code)) + self.logger.error(error_message) + raise CmabFetchError(error_message) + + try: + body = response.json() + except json.JSONDecodeError: + error_message = Errors.INVALID_CMAB_FETCH_RESPONSE + self.logger.error(error_message) + raise CmabInvalidResponseError(error_message) + + if not self.validate_response(body): + error_message = Errors.INVALID_CMAB_FETCH_RESPONSE + self.logger.error(error_message) + raise CmabInvalidResponseError(error_message) + + return str(body['predictions'][0]['variation_id']) + + def validate_response(self, body: Dict[str, Any]) -> bool: + """Validate the response structure from the CMAB service. + + Args: + body (Dict[str, Any]): The response body to validate. + + Returns: + bool: True if the response is valid, False otherwise. + """ + return ( + isinstance(body, dict) and + 'predictions' in body and + isinstance(body['predictions'], list) and + len(body['predictions']) > 0 and + isinstance(body['predictions'][0], dict) and + "variation_id" in body["predictions"][0] + ) + + def _do_fetch_with_retry( + self, + url: str, + request_body: Dict[str, Any], + retry_config: CmabRetryConfig, + timeout: float + ) -> str: + """Perform a fetch request with retry logic. + + Args: + url (str): The endpoint URL. + request_body (Dict[str, Any]): The request payload. + retry_config (CmabRetryConfig): Configuration for retry logic. + timeout (float): Maximum wait time for request to respond in seconds. 
+ Returns: + str: The variation ID + """ + backoff = retry_config.initial_backoff + for attempt in range(retry_config.max_retries + 1): + try: + variation_id = self._do_fetch(url, request_body, timeout) + return variation_id + except Exception: + if attempt < retry_config.max_retries: + self.logger.info(f"Retrying CMAB request (attempt: {attempt + 1}) after {backoff} seconds...") + time.sleep(backoff) + backoff = min(backoff * math.pow(retry_config.backoff_multiplier, attempt + 1), + retry_config.max_backoff) + + error_message = Errors.CMAB_FETCH_FAILED.format('Exhausted all retries for CMAB request.') + self.logger.error(error_message) + raise CmabFetchError(error_message)
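A minimal sketch of wiring up the client defined above with retries enabled; the rule ID, user ID, and attributes are placeholders, and `fetch_decision` raises `CmabFetchError` once retries are exhausted, as shown above.

```python
# Hedged sketch: using DefaultCmabClient with the retry config from this diff.
# The rule/user IDs and attributes below are placeholders.
import uuid
from optimizely.cmab.cmab_client import DefaultCmabClient, CmabRetryConfig

client = DefaultCmabClient(retry_config=CmabRetryConfig(max_retries=3))
variation_id = client.fetch_decision(
    rule_id='exp_123',
    user_id='user456',
    attributes={'age': 25, 'location': 'san francisco'},
    cmab_uuid=str(uuid.uuid4()),
)
```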
diff --git a/optimizely/cmab/cmab_service.py b/optimizely/cmab/cmab_service.py new file mode 100644 index 00000000..418280b8 --- /dev/null +++ b/optimizely/cmab/cmab_service.py @@ -0,0 +1,106 @@ +# Copyright 2025 Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import uuid +import json +import hashlib + +from typing import Optional, List, TypedDict +from optimizely.cmab.cmab_client import DefaultCmabClient +from optimizely.odp.lru_cache import LRUCache +from optimizely.optimizely_user_context import OptimizelyUserContext, UserAttributes +from optimizely.project_config import ProjectConfig +from optimizely.decision.optimizely_decide_option import OptimizelyDecideOption +from optimizely import logger as _logging + + +class CmabDecision(TypedDict): + variation_id: str + cmab_uuid: str + + +class CmabCacheValue(TypedDict): + attributes_hash: str + variation_id: str + cmab_uuid: str + + +class DefaultCmabService: + def __init__(self, cmab_cache: LRUCache[str, CmabCacheValue], + cmab_client: DefaultCmabClient, logger: Optional[_logging.Logger] = None): + self.cmab_cache = cmab_cache + self.cmab_client = cmab_client + self.logger = logger + + def get_decision(self, project_config: ProjectConfig, user_context: OptimizelyUserContext, + rule_id: str, options: List[str]) -> CmabDecision: + + filtered_attributes = self._filter_attributes(project_config, user_context, rule_id) + + if OptimizelyDecideOption.IGNORE_CMAB_CACHE in options: + return self._fetch_decision(rule_id, user_context.user_id, filtered_attributes) + + if OptimizelyDecideOption.RESET_CMAB_CACHE in options: + self.cmab_cache.reset() + + cache_key = self._get_cache_key(user_context.user_id, rule_id) + + if OptimizelyDecideOption.INVALIDATE_USER_CMAB_CACHE in options: + self.cmab_cache.remove(cache_key) + + cached_value = self.cmab_cache.lookup(cache_key) + + attributes_hash = self._hash_attributes(filtered_attributes) + + if cached_value: + if cached_value['attributes_hash'] == attributes_hash: + return CmabDecision(variation_id=cached_value['variation_id'], cmab_uuid=cached_value['cmab_uuid']) + else: + self.cmab_cache.remove(cache_key) + + cmab_decision = self._fetch_decision(rule_id, user_context.user_id, filtered_attributes) + self.cmab_cache.save(cache_key, { + 'attributes_hash': attributes_hash, + 'variation_id': cmab_decision['variation_id'], + 'cmab_uuid': cmab_decision['cmab_uuid'], + }) + return cmab_decision + + def _fetch_decision(self, rule_id: str, user_id: str, attributes: UserAttributes) -> CmabDecision: + cmab_uuid = str(uuid.uuid4()) + variation_id = self.cmab_client.fetch_decision(rule_id, user_id, attributes, cmab_uuid) + cmab_decision = CmabDecision(variation_id=variation_id, cmab_uuid=cmab_uuid) + return cmab_decision + + def _filter_attributes(self, project_config: ProjectConfig, + user_context: OptimizelyUserContext, rule_id: str) -> UserAttributes: + user_attributes = user_context.get_user_attributes() + filtered_user_attributes = UserAttributes({}) + + experiment = project_config.experiment_id_map.get(rule_id) + if not experiment or not experiment.cmab: + return filtered_user_attributes + + cmab_attribute_ids = experiment.cmab['attributeIds'] + for attribute_id in cmab_attribute_ids: + attribute = project_config.attribute_id_map.get(attribute_id) + if attribute and attribute.key in user_attributes: + filtered_user_attributes[attribute.key] = user_attributes[attribute.key] + + return filtered_user_attributes + + def _get_cache_key(self, user_id: str, rule_id: str) -> str: + return f"{len(user_id)}-{user_id}-{rule_id}" + + def _hash_attributes(self, attributes: UserAttributes) -> str: + sorted_attrs = json.dumps(attributes, sort_keys=True) + return hashlib.md5(sorted_attrs.encode()).hexdigest()
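One design note on `_hash_attributes` above: serializing with `json.dumps(..., sort_keys=True)` before hashing makes the cache check insensitive to attribute ordering, so the same user with the same attributes always hits the same cache entry. A tiny self-contained sketch of that behavior:

```python
# Sketch: sort_keys=True canonicalizes the dict before hashing, so two
# attribute dicts with different insertion order produce the same digest.
import hashlib
import json

def hash_attributes(attributes: dict) -> str:
    return hashlib.md5(json.dumps(attributes, sort_keys=True).encode()).hexdigest()

assert hash_attributes({'a': 1, 'b': 2}) == hash_attributes({'b': 2, 'a': 1})
```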
diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index b0f959bf..3dce2741 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -1,4 +1,4 @@ -# Copyright 2019-2020, Optimizely +# Copyright 2019-2020, 2022-2023, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,30 +11,42 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abc +from __future__ import annotations +from abc import ABC, abstractmethod import numbers +from typing import TYPE_CHECKING, Any, Optional import requests import threading -import time from requests import codes as http_status_codes from requests import exceptions as requests_exceptions +from requests.adapters import HTTPAdapter +from urllib3.util.retry import Retry from . import exceptions as optimizely_exceptions from . import logger as optimizely_logger from . import project_config -from .error_handler import NoOpErrorHandler +from .error_handler import NoOpErrorHandler, BaseErrorHandler from .notification_center import NotificationCenter +from .notification_center_registry import _NotificationCenterRegistry from .helpers import enums from .helpers import validator -from .optimizely_config import OptimizelyConfigService +from .optimizely_config import OptimizelyConfig, OptimizelyConfigService -ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from requests.models import CaseInsensitiveDict class BaseConfigManager(ABC): """ Base class for Optimizely's config manager. """ - def __init__(self, logger=None, error_handler=None, notification_center=None): + def __init__( + self, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + notification_center: Optional[NotificationCenter] = None + ): """ Initialize config manager. Args: @@ -45,9 +57,10 @@ def __init__(self, logger=None, error_handler=None, notification_center=None): self.logger = optimizely_logger.adapt_logger(logger or optimizely_logger.NoOpLogger()) self.error_handler = error_handler or NoOpErrorHandler() self.notification_center = notification_center or NotificationCenter(self.logger) + self.optimizely_config: Optional[OptimizelyConfig] self._validate_instantiation_options() - def _validate_instantiation_options(self): + def _validate_instantiation_options(self) -> None: """ Helper method to validate all parameters. Raises: @@ -62,18 +75,30 @@ def _validate_instantiation_options(self): if not validator.is_notification_center_valid(self.notification_center): raise optimizely_exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('notification_center')) - @abc.abstractmethod - def get_config(self): + @abstractmethod + def get_config(self) -> Optional[project_config.ProjectConfig]: """ Get config for use by optimizely.Optimizely. The config should be an instance of project_config.ProjectConfig.""" pass + @abstractmethod + def get_sdk_key(self) -> Optional[str]: + """ Get sdk_key for use by optimizely.Optimizely. + The sdk_key should uniquely identify the datafile for a project and environment combination. + """ + pass + class StaticConfigManager(BaseConfigManager): """ Config manager that returns ProjectConfig based on provided datafile. """ def __init__( - self, datafile=None, logger=None, error_handler=None, notification_center=None, skip_json_validation=False, + self, + datafile: Optional[str] = None, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + notification_center: Optional[NotificationCenter] = None, + skip_json_validation: Optional[bool] = False, ): """ Initialize config manager. Datafile has to be provided to use. @@ -86,15 +111,19 @@ def __init__( validation upon object invocation. By default JSON schema validation will be performed. """ - super(StaticConfigManager, self).__init__( + super().__init__( logger=logger, error_handler=error_handler, notification_center=notification_center, ) - self._config = None - self.optimizely_config = None + self._config: project_config.ProjectConfig = None  # type: ignore[assignment] + self.optimizely_config: Optional[OptimizelyConfig] = None + self._sdk_key: Optional[str] = None self.validate_schema = not skip_json_validation self._set_config(datafile) - def _set_config(self, datafile): + def get_sdk_key(self) -> Optional[str]: + return self._sdk_key + + def _set_config(self, datafile: Optional[str | bytes]) -> None: """ Looks up and sets datafile and config based on response body. 
Args: @@ -107,10 +136,11 @@ def _set_config(self, datafile): return error_msg = None - error_to_handle = None + error_to_handle: Optional[Exception] = None config = None try: + assert datafile is not None config = project_config.ProjectConfig(datafile, self.logger, self.error_handler) except optimizely_exceptions.UnsupportedDatafileVersionException as error: error_msg = error.args[0] @@ -119,9 +149,9 @@ def _set_config(self, datafile): error_msg = enums.Errors.INVALID_INPUT.format('datafile') error_to_handle = optimizely_exceptions.InvalidInputException(error_msg) finally: - if error_msg: + if error_msg or config is None: self.logger.error(error_msg) - self.error_handler.handle_error(error_to_handle) + self.error_handler.handle_error(error_to_handle or Exception('Unknown Error')) return previous_revision = self._config.get_revision() if self._config else None @@ -130,14 +160,22 @@ def _set_config(self, datafile): return self._config = config - self.optimizely_config = OptimizelyConfigService(config).get_config() + self._sdk_key = self._sdk_key or config.sdk_key + self.optimizely_config = OptimizelyConfigService(config, self.logger).get_config() self.notification_center.send_notifications(enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE) + + internal_notification_center = _NotificationCenterRegistry.get_notification_center( + self._sdk_key, self.logger + ) + if internal_notification_center: + internal_notification_center.send_notifications(enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE) + self.logger.debug( 'Received new datafile and updated config. ' - 'Old revision number: {}. New revision number: {}.'.format(previous_revision, config.get_revision()) + f'Old revision number: {previous_revision}. New revision number: {config.get_revision()}.' ) - def get_config(self): + def get_config(self) -> Optional[project_config.ProjectConfig]: """ Returns instance of ProjectConfig. Returns: @@ -154,22 +192,24 @@ class PollingConfigManager(StaticConfigManager): def __init__( self, - sdk_key=None, - datafile=None, - update_interval=None, - blocking_timeout=None, - url=None, - url_template=None, - logger=None, - error_handler=None, - notification_center=None, - skip_json_validation=False, + sdk_key: Optional[str] = None, + datafile: Optional[str] = None, + update_interval: Optional[float] = None, + blocking_timeout: Optional[int] = None, + url: Optional[str] = None, + url_template: Optional[str] = None, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + notification_center: Optional[NotificationCenter] = None, + skip_json_validation: Optional[bool] = False, + retries: Optional[int] = 3, ): - """ Initialize config manager. One of sdk_key or url has to be set to be able to use. + """ Initialize config manager. One of sdk_key or datafile has to be set to be able to use. Args: - sdk_key: Optional string uniquely identifying the datafile. - datafile: Optional JSON string representing the project. + sdk_key: Optional string uniquely identifying the datafile. If not provided, datafile must + contain a sdk_key. + datafile: Optional JSON string representing the project. If not provided, sdk_key is required. update_interval: Optional floating point number representing time interval in seconds at which to request datafile and set ProjectConfig. blocking_timeout: Optional Time in seconds to block the get_config call until config object @@ -185,26 +225,32 @@ def __init__( JSON schema validation will be performed. 
""" + self.retries = retries self._config_ready_event = threading.Event() - super(PollingConfigManager, self).__init__( + super().__init__( datafile=datafile, logger=logger, error_handler=error_handler, notification_center=notification_center, skip_json_validation=skip_json_validation, ) + self._sdk_key = sdk_key or self._sdk_key + + if self._sdk_key is None: + raise optimizely_exceptions.InvalidInputException(enums.Errors.MISSING_SDK_KEY) + self.datafile_url = self.get_datafile_url( - sdk_key, url, url_template or self.DATAFILE_URL_TEMPLATE + self._sdk_key, url, url_template or self.DATAFILE_URL_TEMPLATE ) self.set_update_interval(update_interval) self.set_blocking_timeout(blocking_timeout) - self.last_modified = None - self._polling_thread = threading.Thread(target=self._run) - self._polling_thread.setDaemon(True) + self.last_modified: Optional[str] = None + self.stopped = threading.Event() + self._initialize_thread() self._polling_thread.start() @staticmethod - def get_datafile_url(sdk_key, url, url_template): + def get_datafile_url(sdk_key: Optional[str], url: Optional[str], url_template: Optional[str]) -> str: """ Helper method to determine URL from where to fetch the datafile. Args: @@ -228,25 +274,26 @@ def get_datafile_url(sdk_key, url, url_template): # Return URL if one is provided or use template and SDK key to get it. if url is None: try: + assert url_template is not None return url_template.format(sdk_key=sdk_key) - except (AttributeError, KeyError): + except (AssertionError, AttributeError, KeyError): raise optimizely_exceptions.InvalidInputException( - 'Invalid url_template {} provided.'.format(url_template) + f'Invalid url_template {url_template} provided.' ) return url - def _set_config(self, datafile): + def _set_config(self, datafile: Optional[str | bytes]) -> None: """ Looks up and sets datafile and config based on response body. Args: datafile: JSON string representing the Optimizely project. """ if datafile or self._config_ready_event.is_set(): - super(PollingConfigManager, self)._set_config(datafile=datafile) + super()._set_config(datafile=datafile) self._config_ready_event.set() - def get_config(self): + def get_config(self) -> Optional[project_config.ProjectConfig]: """ Returns instance of ProjectConfig. Returns immediately if project config is ready otherwise blocks maximum for value of blocking_timeout in seconds. @@ -257,7 +304,7 @@ def get_config(self): self._config_ready_event.wait(self.blocking_timeout) return self._config - def set_update_interval(self, update_interval): + def set_update_interval(self, update_interval: Optional[int | float]) -> None: """ Helper method to set frequency at which datafile has to be polled and ProjectConfig updated. Args: @@ -265,25 +312,29 @@ def set_update_interval(self, update_interval): """ if update_interval is None: update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL - self.logger.debug('Setting config update interval to default value {}.'.format(update_interval)) + self.logger.debug(f'Setting config update interval to default value {update_interval}.') if not isinstance(update_interval, (int, float)): raise optimizely_exceptions.InvalidInputException( - 'Invalid update_interval "{}" provided.'.format(update_interval) + f'Invalid update_interval "{update_interval}" provided.' ) # If polling interval is less than or equal to 0 then set it to default update interval. if update_interval <= 0: self.logger.debug( - 'update_interval value {} too small. 
Defaulting to {}'.format( - update_interval, enums.ConfigManager.DEFAULT_UPDATE_INTERVAL - ) + f'update_interval value {update_interval} too small. ' + f'Defaulting to {enums.ConfigManager.DEFAULT_UPDATE_INTERVAL}' ) update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL + if update_interval < 30: + self.logger.warning( + 'Polling intervals below 30 seconds are not recommended.' + ) + self.update_interval = update_interval - def set_blocking_timeout(self, blocking_timeout): + def set_blocking_timeout(self, blocking_timeout: Optional[int | float]) -> None: """ Helper method to set time in seconds to block the config call until config has been initialized. Args: @@ -291,25 +342,24 @@ def set_blocking_timeout(self, blocking_timeout): """ if blocking_timeout is None: blocking_timeout = enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT - self.logger.debug('Setting config blocking timeout to default value {}.'.format(blocking_timeout)) + self.logger.debug(f'Setting config blocking timeout to default value {blocking_timeout}.') if not isinstance(blocking_timeout, (numbers.Integral, float)): raise optimizely_exceptions.InvalidInputException( - 'Invalid blocking timeout "{}" provided.'.format(blocking_timeout) + f'Invalid blocking timeout "{blocking_timeout}" provided.' ) # If blocking timeout is less than 0 then set it to default blocking timeout. if blocking_timeout < 0: self.logger.debug( - 'blocking timeout value {} too small. Defaulting to {}'.format( - blocking_timeout, enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT - ) + f'blocking timeout value {blocking_timeout} too small. ' + f'Defaulting to {enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT}' ) blocking_timeout = enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT self.blocking_timeout = blocking_timeout - def set_last_modified(self, response_headers): + def set_last_modified(self, response_headers: CaseInsensitiveDict[str]) -> None: """ Looks up and sets last modified time based on Last-Modified header in the response. Args: @@ -317,7 +367,7 @@ def set_last_modified(self, response_headers): """ self.last_modified = response_headers.get(enums.HTTPHeaders.LAST_MODIFIED) - def _handle_response(self, response): + def _handle_response(self, response: requests.Response) -> None: """ Helper method to handle response containing datafile. Args: @@ -326,18 +376,18 @@ def _handle_response(self, response): try: response.raise_for_status() except requests_exceptions.RequestException as err: - self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err))) + self.logger.error(f'Fetching datafile from {self.datafile_url} failed. Error: {err}') return # Leave datafile and config unchanged if it has not been modified. if response.status_code == http_status_codes.not_modified: - self.logger.debug('Not updating config as datafile has not updated since {}.'.format(self.last_modified)) + self.logger.debug(f'Not updating config as datafile has not updated since {self.last_modified}.') return self.set_last_modified(response.headers) self._set_config(response.content) - def fetch_datafile(self): + def fetch_datafile(self) -> None: """ Fetch datafile and set ProjectConfig. 
""" request_headers = {} @@ -345,37 +395,58 @@ def fetch_datafile(self): request_headers[enums.HTTPHeaders.IF_MODIFIED_SINCE] = self.last_modified try: - response = requests.get( - self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT, - ) + session = requests.Session() + + retries = Retry(total=self.retries, + backoff_factor=0.1, + status_forcelist=[500, 502, 503, 504]) + adapter = HTTPAdapter(max_retries=retries) + + session.mount('http://', adapter) + session.mount("https://", adapter) + response = session.get(self.datafile_url, + headers=request_headers, + timeout=enums.ConfigManager.REQUEST_TIMEOUT) except requests_exceptions.RequestException as err: - self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err))) + self.logger.error(f'Fetching datafile from {self.datafile_url} failed. Error: {err}') return self._handle_response(response) @property - def is_running(self): + def is_running(self) -> bool: """ Check if polling thread is alive or not. """ return self._polling_thread.is_alive() - def _run(self): + def stop(self) -> None: + """ Stop the polling thread and briefly wait for it to exit. """ + if self.is_running: + self.stopped.set() + # no need to wait too long as this exists to avoid interfering with tests + self._polling_thread.join(timeout=0.2) + + def _run(self) -> None: """ Triggered as part of the thread which fetches the datafile and sleeps until next update interval. """ try: - while self.is_running: + while True: self.fetch_datafile() - time.sleep(self.update_interval) - except (OSError, OverflowError) as err: + if self.stopped.wait(self.update_interval): + self.stopped.clear() + break + except Exception as err: self.logger.error( - 'Error in time.sleep. ' 'Provided update_interval value may be too big. Error: {}'.format(str(err)) + f'Thread for background datafile polling failed. Error: {err}' ) raise - def start(self): + def start(self) -> None: """ Start the config manager and the thread to periodically fetch datafile. """ if not self.is_running: self._polling_thread.start() + def _initialize_thread(self) -> None: + self._polling_thread = threading.Thread(target=self._run, name="PollThread", daemon=True) + class AuthDatafilePollingConfigManager(PollingConfigManager): """ Config manager that polls for authenticated datafile using access token. """ @@ -384,11 +455,11 @@ class AuthDatafilePollingConfigManager(PollingConfigManager): def __init__( self, - datafile_access_token, - *args, - **kwargs + datafile_access_token: str, + *args: Any, + **kwargs: Any ): - """ Initialize config manager. One of sdk_key or url has to be set to be able to use. + """ Initialize config manager. One of sdk_key or datafile has to be set to be able to use. Args: datafile_access_token: String to be attached to the request header to fetch the authenticated datafile. @@ -396,16 +467,16 @@ def __init__( **kwargs: Refer to keyword arguments descriptions in PollingConfigManager. """ self._set_datafile_access_token(datafile_access_token) - super(AuthDatafilePollingConfigManager, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) - def _set_datafile_access_token(self, datafile_access_token): + def _set_datafile_access_token(self, datafile_access_token: str) -> None: """ Checks for valid access token input and sets it. 
""" if not datafile_access_token: raise optimizely_exceptions.InvalidInputException( 'datafile_access_token cannot be empty or None.') self.datafile_access_token = datafile_access_token - def fetch_datafile(self): + def fetch_datafile(self) -> None: """ Fetch authenticated datafile and set ProjectConfig. """ request_headers = { enums.HTTPHeaders.AUTHORIZATION: enums.ConfigManager.AUTHORIZATION_HEADER_DATA_TEMPLATE.format( @@ -417,11 +488,20 @@ def fetch_datafile(self): request_headers[enums.HTTPHeaders.IF_MODIFIED_SINCE] = self.last_modified try: - response = requests.get( - self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT, - ) + session = requests.Session() + + retries = Retry(total=self.retries, + backoff_factor=0.1, + status_forcelist=[500, 502, 503, 504]) + adapter = HTTPAdapter(max_retries=retries) + + session.mount('http://', adapter) + session.mount("https://", adapter) + response = session.get(self.datafile_url, + headers=request_headers, + timeout=enums.ConfigManager.REQUEST_TIMEOUT) except requests_exceptions.RequestException as err: - self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err))) + self.logger.error(f'Fetching datafile from {self.datafile_url} failed. Error: {err}') return self._handle_response(response) diff --git a/optimizely/decision/optimizely_decide_option.py b/optimizely/decision/optimizely_decide_option.py index 4eb8e7e5..8cffcfec 100644 --- a/optimizely/decision/optimizely_decide_option.py +++ b/optimizely/decision/optimizely_decide_option.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,10 +11,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +from sys import version_info -class OptimizelyDecideOption(object): - DISABLE_DECISION_EVENT = 'DISABLE_DECISION_EVENT' - ENABLED_FLAGS_ONLY = 'ENABLED_FLAGS_ONLY' - IGNORE_USER_PROFILE_SERVICE = 'IGNORE_USER_PROFILE_SERVICE' - INCLUDE_REASONS = 'INCLUDE_REASONS' - EXCLUDE_VARIABLES = 'EXCLUDE_VARIABLES' +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +class OptimizelyDecideOption: + DISABLE_DECISION_EVENT: Final = 'DISABLE_DECISION_EVENT' + ENABLED_FLAGS_ONLY: Final = 'ENABLED_FLAGS_ONLY' + IGNORE_USER_PROFILE_SERVICE: Final = 'IGNORE_USER_PROFILE_SERVICE' + INCLUDE_REASONS: Final = 'INCLUDE_REASONS' + EXCLUDE_VARIABLES: Final = 'EXCLUDE_VARIABLES' + IGNORE_CMAB_CACHE: Final = "IGNORE_CMAB_CACHE" + RESET_CMAB_CACHE: Final = "RESET_CMAB_CACHE" + INVALIDATE_USER_CMAB_CACHE: Final = "INVALIDATE_USER_CMAB_CACHE" diff --git a/optimizely/decision/optimizely_decision.py b/optimizely/decision/optimizely_decision.py index 781ab2bb..7ae3f136 100644 --- a/optimizely/decision/optimizely_decision.py +++ b/optimizely/decision/optimizely_decision.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,25 +11,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations +from typing import Optional, Any, TYPE_CHECKING -class OptimizelyDecision(object): - def __init__(self, variation_key=None, enabled=None, - variables=None, rule_key=None, flag_key=None, user_context=None, reasons=None): +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from optimizely.optimizely_user_context import OptimizelyUserContext + + +class OptimizelyDecision: + def __init__( + self, + variation_key: Optional[str] = None, + enabled: bool = False, + variables: Optional[dict[str, Any]] = None, + rule_key: Optional[str] = None, + flag_key: Optional[str] = None, + user_context: Optional[OptimizelyUserContext] = None, + reasons: Optional[list[str]] = None + ): self.variation_key = variation_key - self.enabled = enabled or False + self.enabled = enabled self.variables = variables or {} self.rule_key = rule_key self.flag_key = flag_key self.user_context = user_context self.reasons = reasons or [] - def as_json(self): + def as_json(self) -> dict[str, Any]: return { 'variation_key': self.variation_key, 'enabled': self.enabled, 'variables': self.variables, 'rule_key': self.rule_key, 'flag_key': self.flag_key, - 'user_context': self.user_context.as_json(), + 'user_context': self.user_context.as_json() if self.user_context else None, 'reasons': self.reasons } diff --git a/optimizely/decision/optimizely_decision_message.py b/optimizely/decision/optimizely_decision_message.py index 5b1ab417..20231ea5 100644 --- a/optimizely/decision/optimizely_decision_message.py +++ b/optimizely/decision/optimizely_decision_message.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,8 +11,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +from sys import version_info -class OptimizelyDecisionMessage(object): - SDK_NOT_READY = 'Optimizely SDK not configured properly yet.' - FLAG_KEY_INVALID = 'No flag was found for key "{}".' - VARIABLE_VALUE_INVALID = 'Variable value for key "{}" is invalid or wrong type.' +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +class OptimizelyDecisionMessage: + SDK_NOT_READY: Final = 'Optimizely SDK not configured properly yet.' + FLAG_KEY_INVALID: Final = 'No flag was found for key "{}".' + VARIABLE_VALUE_INVALID: Final = 'Variable value for key "{}" is invalid or wrong type.' diff --git a/optimizely/decision_service.py b/optimizely/decision_service.py index 3aff4719..df85464e 100644 --- a/optimizely/decision_service.py +++ b/optimizely/decision_service.py @@ -11,26 +11,37 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import namedtuple - -from six import string_types +from __future__ import annotations +from typing import TYPE_CHECKING, NamedTuple, Optional, Sequence from . import bucketer +from . 
import entities from .decision.optimizely_decide_option import OptimizelyDecideOption from .helpers import audience as audience_helper from .helpers import enums from .helpers import experiment as experiment_helper from .helpers import validator -from .optimizely_user_context import OptimizelyUserContext -from .user_profile import UserProfile +from .optimizely_user_context import OptimizelyUserContext, UserAttributes +from .user_profile import UserProfile, UserProfileService, UserProfileTracker + +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from .project_config import ProjectConfig + from .logger import Logger + -Decision = namedtuple('Decision', 'experiment variation source') +class Decision(NamedTuple): + """Named tuple containing selected experiment, variation and source. + None if no experiment/variation was selected.""" + experiment: Optional[entities.Experiment] + variation: Optional[entities.Variation] + source: Optional[str] -class DecisionService(object): +class DecisionService: """ Class encapsulating all decision related capabilities. """ - def __init__(self, logger, user_profile_service): + def __init__(self, logger: Logger, user_profile_service: Optional[UserProfileService]): self.bucketer = bucketer.Bucketer() self.logger = logger self.user_profile_service = user_profile_service @@ -39,9 +50,9 @@ def __init__(self, logger, user_profile_service): # This contains all the forced variations set by the user # by calling set_forced_variation (it is not the same as the # whitelisting forcedVariations data structure). - self.forced_variation_map = {} + self.forced_variation_map: dict[str, dict[str, str]] = {} - def _get_bucketing_id(self, user_id, attributes): + def _get_bucketing_id(self, user_id: str, attributes: Optional[UserAttributes]) -> tuple[str, list[str]]: """ Helper method to determine bucketing ID for the user. Args: @@ -52,12 +63,12 @@ def _get_bucketing_id(self, user_id, attributes): String representing bucketing ID if it is a String type in attributes else return user ID array of log messages representing decision making. """ - decide_reasons = [] - attributes = attributes or {} + decide_reasons: list[str] = [] + attributes = attributes or UserAttributes({}) bucketing_id = attributes.get(enums.ControlAttributes.BUCKETING_ID) if bucketing_id is not None: - if isinstance(bucketing_id, string_types): + if isinstance(bucketing_id, str): return bucketing_id, decide_reasons message = 'Bucketing ID attribute is not a string. Defaulted to user_id.' self.logger.warning(message) @@ -65,7 +76,10 @@ def _get_bucketing_id(self, user_id, attributes): return user_id, decide_reasons - def set_forced_variation(self, project_config, experiment_key, user_id, variation_key): + def set_forced_variation( + self, project_config: ProjectConfig, experiment_key: str, + user_id: str, variation_key: Optional[str] + ) -> bool: """ Sets users to a map of experiments to forced variations. Args: @@ -85,20 +99,19 @@ def set_forced_variation(self, project_config, experiment_key, user_id, variatio experiment_id = experiment.id if variation_key is None: if user_id in self.forced_variation_map: - experiment_to_variation_map = self.forced_variation_map.get(user_id) + experiment_to_variation_map = self.forced_variation_map[user_id] if experiment_id in experiment_to_variation_map: del self.forced_variation_map[user_id][experiment_id] self.logger.debug( - 'Variation mapped to experiment "%s" has been removed for user "%s".' 
- % (experiment_key, user_id) + f'Variation mapped to experiment "{experiment_key}" has been removed for user "{user_id}".' ) else: self.logger.debug( - 'Nothing to remove. Variation mapped to experiment "%s" for user "%s" does not exist.' - % (experiment_key, user_id) + f'Nothing to remove. Variation mapped to experiment "{experiment_key}" for ' + f'user "{user_id}" does not exist.' ) else: - self.logger.debug('Nothing to remove. User "%s" does not exist in the forced variation map.' % user_id) + self.logger.debug(f'Nothing to remove. User "{user_id}" does not exist in the forced variation map.') return True if not validator.is_non_empty_string(variation_key): @@ -118,12 +131,14 @@ def set_forced_variation(self, project_config, experiment_key, user_id, variatio self.forced_variation_map[user_id][experiment_id] = variation_id self.logger.debug( - 'Set variation "%s" for experiment "%s" and user "%s" in the forced variation map.' - % (variation_id, experiment_id, user_id) + f'Set variation "{variation_id}" for experiment "{experiment_id}" and ' + f'user "{user_id}" in the forced variation map.' ) return True - def get_forced_variation(self, project_config, experiment_key, user_id): + def get_forced_variation( + self, project_config: ProjectConfig, experiment_key: str, user_id: str + ) -> tuple[Optional[entities.Variation], list[str]]: """ Gets the forced variation key for the given user and experiment. Args: @@ -135,9 +150,9 @@ def get_forced_variation(self, project_config, experiment_key, user_id): The variation which the given user and experiment should be forced into and array of log messages representing decision making. """ - decide_reasons = [] + decide_reasons: list[str] = [] if user_id not in self.forced_variation_map: - message = 'User "%s" is not in the forced variation map.' % user_id + message = f'User "{user_id}" is not in the forced variation map.' self.logger.debug(message) return None, decide_reasons @@ -149,28 +164,30 @@ def get_forced_variation(self, project_config, experiment_key, user_id): experiment_to_variation_map = self.forced_variation_map.get(user_id) if not experiment_to_variation_map: - message = 'No experiment "%s" mapped to user "%s" in the forced variation map.' % (experiment_key, user_id) - self.logger.debug( - message - ) + message = f'No experiment "{experiment_key}" mapped to user "{user_id}" in the forced variation map.' + self.logger.debug(message) return None, decide_reasons variation_id = experiment_to_variation_map.get(experiment.id) if variation_id is None: - message = 'No variation mapped to experiment "%s" in the forced variation map.' % experiment_key + message = f'No variation mapped to experiment "{experiment_key}" in the forced variation map.' 
self.logger.debug(message) return None, decide_reasons variation = project_config.get_variation_from_id(experiment_key, variation_id) - message = 'Variation "%s" is mapped to experiment "%s" and user "%s" in the forced variation map' \ - % (variation.key, experiment_key, user_id) - self.logger.debug( - message - ) + # this case is logged in get_variation_from_id + if variation is None: + return None, decide_reasons + + message = f'Variation "{variation.key}" is mapped to experiment "{experiment_key}" and ' \ + f'user "{user_id}" in the forced variation map' + self.logger.debug(message) decide_reasons.append(message) return variation, decide_reasons - def get_whitelisted_variation(self, project_config, experiment, user_id): + def get_whitelisted_variation( + self, project_config: ProjectConfig, experiment: entities.Experiment, user_id: str + ) -> tuple[Optional[entities.Variation], list[str]]: """ Determine if a user is forced into a variation (through whitelisting) for the given experiment and return that variation. @@ -187,11 +204,11 @@ def get_whitelisted_variation(self, project_config, experiment, user_id): forced_variations = experiment.forcedVariations if forced_variations and user_id in forced_variations: - forced_variation_key = forced_variations.get(user_id) + forced_variation_key = forced_variations[user_id] forced_variation = project_config.get_variation_from_key(experiment.key, forced_variation_key) if forced_variation: - message = 'User "%s" is forced in variation "%s".' % (user_id, forced_variation_key) + message = f'User "{user_id}" is forced in variation "{forced_variation_key}".' self.logger.info(message) decide_reasons.append(message) @@ -199,7 +216,9 @@ def get_whitelisted_variation(self, project_config, experiment, user_id): return None, decide_reasons - def get_stored_variation(self, project_config, experiment, user_profile): + def get_stored_variation( + self, project_config: ProjectConfig, experiment: entities.Experiment, user_profile: UserProfile + ) -> Optional[entities.Variation]: """ Determine if the user has a stored variation available for the given experiment and return that. Args: @@ -216,16 +235,22 @@ def get_stored_variation(self, project_config, experiment, user_profile): if variation_id: variation = project_config.get_variation_from_id(experiment.key, variation_id) if variation: - message = 'Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' \ - % (user_id, variation.key, experiment.key) - self.logger.info( - message - ) + message = f'Found a stored decision. User "{user_id}" is in ' \ + f'variation "{variation.key}" of experiment "{experiment.key}".' + self.logger.info(message) return variation return None - def get_variation(self, project_config, experiment, user_context, options=None): + def get_variation( + self, + project_config: ProjectConfig, + experiment: entities.Experiment, + user_context: OptimizelyUserContext, + user_profile_tracker: Optional[UserProfileTracker], + reasons: list[str] = [], + options: Optional[Sequence[str]] = None + ) -> tuple[Optional[entities.Variation], list[str]]: """ Top-level function to help determine variation user should be put in. First, check if experiment is running. @@ -237,7 +262,9 @@ def get_variation(self, project_config, experiment, user_context, options=None): Args: project_config: Instance of ProjectConfig. experiment: Experiment for which user variation needs to be determined. - user_context: contains user id and attributes + user_context: contains user id and attributes. 
+ user_profile_tracker: tracker for reading and updating user profile of the user. + reasons: Decision reasons. options: Decide options. Returns: @@ -245,7 +272,6 @@ def get_variation(self, project_config, experiment, user_context, options=None): And an array of log messages representing decision making. """ user_id = user_context.user_id - attributes = user_context.get_user_attributes() if options: ignore_user_profile = OptimizelyDecideOption.IGNORE_USER_PROFILE_SERVICE in options @@ -253,14 +279,17 @@ def get_variation(self, project_config, experiment, user_context, options=None): ignore_user_profile = False decide_reasons = [] + if reasons is not None: + decide_reasons += reasons # Check if experiment is running if not experiment_helper.is_experiment_running(experiment): - message = 'Experiment "%s" is not running.' % experiment.key + message = f'Experiment "{experiment.key}" is not running.' self.logger.info(message) decide_reasons.append(message) return None, decide_reasons # Check if the user is forced into a variation + variation: Optional[entities.Variation] variation, reasons_received = self.get_forced_variation(project_config, experiment.key, user_id) decide_reasons += reasons_received if variation: @@ -273,23 +302,14 @@ def get_variation(self, project_config, experiment, user_context, options=None): return variation, decide_reasons # Check to see if user has a decision available for the given experiment - user_profile = UserProfile(user_id) - if not ignore_user_profile and self.user_profile_service: - try: - retrieved_profile = self.user_profile_service.lookup(user_id) - except: - self.logger.exception('Unable to retrieve user profile for user "{}" as lookup failed.'.format(user_id)) - retrieved_profile = None - - if validator.is_user_profile_valid(retrieved_profile): - user_profile = UserProfile(**retrieved_profile) - variation = self.get_stored_variation(project_config, experiment, user_profile) - if variation: - message = 'Returning previously activated variation ID "{}" of experiment ' \ - '"{}" for user "{}" from user profile.'.format(variation, experiment, user_id) - self.logger.info(message) - decide_reasons.append(message) - return variation, decide_reasons + if user_profile_tracker is not None and not ignore_user_profile: + variation = self.get_stored_variation(project_config, experiment, user_profile_tracker.get_user_profile()) + if variation: + message = f'Returning previously activated variation ID "{variation}" of experiment ' \ + f'"{experiment}" for user "{user_id}" from user profile.' + self.logger.info(message) + decide_reasons.append(message) + return variation, decide_reasons else: self.logger.warning('User profile has invalid format.') @@ -299,41 +319,38 @@ def get_variation(self, project_config, experiment, user_context, options=None): project_config, audience_conditions, enums.ExperimentAudienceEvaluationLogs, experiment.key, - attributes, self.logger) + user_context, self.logger) decide_reasons += reasons_received if not user_meets_audience_conditions: - message = 'User "{}" does not meet conditions to be in experiment "{}".'.format(user_id, experiment.key) - self.logger.info( - message - ) + message = f'User "{user_id}" does not meet conditions to be in experiment "{experiment.key}".' 
+ self.logger.info(message) decide_reasons.append(message) return None, decide_reasons # Determine bucketing ID to be used - bucketing_id, bucketing_id_reasons = self._get_bucketing_id(user_id, attributes) + bucketing_id, bucketing_id_reasons = self._get_bucketing_id(user_id, user_context.get_user_attributes()) decide_reasons += bucketing_id_reasons variation, bucket_reasons = self.bucketer.bucket(project_config, experiment, user_id, bucketing_id) decide_reasons += bucket_reasons - if variation: - message = 'User "%s" is in variation "%s" of experiment %s.' % (user_id, variation.key, experiment.key) - self.logger.info( - message - ) + if isinstance(variation, entities.Variation): + message = f'User "{user_id}" is in variation "{variation.key}" of experiment {experiment.key}.' + self.logger.info(message) decide_reasons.append(message) # Store this new decision and return the variation for the user - if not ignore_user_profile and self.user_profile_service: + if user_profile_tracker is not None and not ignore_user_profile: try: - user_profile.save_variation_for_experiment(experiment.id, variation.id) - self.user_profile_service.save(user_profile.__dict__) + user_profile_tracker.update_user_profile(experiment, variation) except: - self.logger.exception('Unable to save user profile for user "{}".'.format(user_id)) + self.logger.exception(f'Unable to save user profile for user "{user_id}".') return variation, decide_reasons - message = 'User "%s" is in no variation.' % user_id + message = f'User "{user_id}" is in no variation.' self.logger.info(message) decide_reasons.append(message) return None, decide_reasons - def get_variation_for_rollout(self, project_config, feature, user): + def get_variation_for_rollout( + self, project_config: ProjectConfig, feature: entities.FeatureFlag, user_context: OptimizelyUserContext + ) -> tuple[Decision, list[str]]: """ Determine which experiment/variation the user is in for a given rollout. Returns the variation of the first experiment the user qualifies for. @@ -348,9 +365,9 @@ def get_variation_for_rollout(self, project_config, feature, user): Decision namedtuple consisting of experiment and variation for the user and array of log messages representing decision making. """ - decide_reasons = [] - user_id = user.user_id - attributes = user.get_user_attributes() + decide_reasons: list[str] = [] + user_id = user_context.user_id + attributes = user_context.get_user_attributes() if not feature or not feature.rolloutId: return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons @@ -358,7 +375,7 @@ def get_variation_for_rollout(self, project_config, feature, user): rollout = project_config.get_rollout_from_id(feature.rolloutId) if not rollout: - message = 'There is no rollout of feature {}.'.format(feature.key) + message = f'There is no rollout of feature {feature.key}.' self.logger.debug(message) decide_reasons.append(message) return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons @@ -366,7 +383,7 @@ def get_variation_for_rollout(self, project_config, feature, user): rollout_rules = project_config.get_rollout_experiments(rollout) if not rollout_rules: - message = 'Rollout {} has no experiments.'.format(rollout.id) + message = f'Rollout {rollout.id} has no experiments.' 
self.logger.debug(message) decide_reasons.append(message) return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons @@ -379,7 +396,7 @@ def get_variation_for_rollout(self, project_config, feature, user): rule = rollout_rules[index] optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(feature.key, rule.key) forced_decision_variation, reasons_received = self.validated_forced_decision( - project_config, optimizely_decision_context, user) + project_config, optimizely_decision_context, user_context) decide_reasons += reasons_received if forced_decision_variation: @@ -393,16 +410,19 @@ def get_variation_for_rollout(self, project_config, feature, user): logging_key = "Everyone Else" if everyone_else else str(index + 1) rollout_rule = project_config.get_experiment_from_id(rule.id) + # error is logged in get_experiment_from_id + if rollout_rule is None: + continue audience_conditions = rollout_rule.get_audience_conditions_or_ids() audience_decision_response, reasons_received_audience = audience_helper.does_user_meet_audience_conditions( project_config, audience_conditions, enums.RolloutRuleAudienceEvaluationLogs, - logging_key, attributes, self.logger) + logging_key, user_context, self.logger) decide_reasons += reasons_received_audience if audience_decision_response: - message = 'User "{}" meets audience conditions for targeting rule {}.'.format(user_id, logging_key) + message = f'User "{user_id}" meets audience conditions for targeting rule {logging_key}.' self.logger.debug(message) decide_reasons.append(message) @@ -411,7 +431,7 @@ def get_variation_for_rollout(self, project_config, feature, user): decide_reasons.extend(bucket_reasons) if bucketed_variation: - message = 'User "{}" bucketed into a targeting rule {}.'.format(user_id, logging_key) + message = f'User "{user_id}" bucketed into a targeting rule {logging_key}.' self.logger.debug(message) decide_reasons.append(message) return Decision(experiment=rule, variation=bucketed_variation, @@ -419,8 +439,8 @@ def get_variation_for_rollout(self, project_config, feature, user): elif not everyone_else: # skip this logging for EveryoneElse since this has a message not for everyone_else - message = 'User "{}" not bucketed into a targeting rule {}. ' \ - 'Checking "Everyone Else" rule now.'.format(user_id, logging_key) + message = f'User "{user_id}" not bucketed into a targeting rule {logging_key}. ' \ + 'Checking "Everyone Else" rule now.' self.logger.debug(message) decide_reasons.append(message) @@ -428,8 +448,7 @@ def get_variation_for_rollout(self, project_config, feature, user): skip_to_everyone_else = True else: - message = 'User "{}" does not meet audience conditions for targeting rule {}.'.format( - user_id, logging_key) + message = f'User "{user_id}" does not meet audience conditions for targeting rule {logging_key}.' self.logger.debug(message) decide_reasons.append(message) @@ -438,59 +457,32 @@ def get_variation_for_rollout(self, project_config, feature, user): return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons - def get_variation_for_feature(self, project_config, feature, user_context, options=None): + def get_variation_for_feature( + self, + project_config: ProjectConfig, + feature: entities.FeatureFlag, + user_context: OptimizelyUserContext, + options: Optional[list[str]] = None + ) -> tuple[Decision, list[str]]: """ Returns the experiment/variation the user is bucketed in for the given feature. Args: project_config: Instance of ProjectConfig. 
feature: Feature for which we are determining if it is enabled or not for the given user. - user: user context for user. - attributes: Dict representing user attributes. + user_context: user context for user. options: Decide options. Returns: Decision namedtuple consisting of experiment and variation for the user. """ - decide_reasons = [] - - # Check if the feature flag is under an experiment and the the user is bucketed into one of these experiments - if feature.experimentIds: - # Evaluate each experiment ID and return the first bucketed experiment variation - for experiment in feature.experimentIds: - experiment = project_config.get_experiment_from_id(experiment) - decision_variation = None - - if experiment: - optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(feature.key, - experiment.key) - - forced_decision_variation, reasons_received = self.validated_forced_decision( - project_config, optimizely_decision_context, user_context) - decide_reasons += reasons_received - - if forced_decision_variation: - decision_variation = forced_decision_variation - else: - decision_variation, variation_reasons = self.get_variation(project_config, - experiment, user_context, options) - decide_reasons += variation_reasons - - if decision_variation: - message = 'User "{}" bucketed into a experiment "{}" of feature "{}".'.format( - user_context.user_id, experiment.key, feature.key) - self.logger.debug(message) - return Decision(experiment, decision_variation, - enums.DecisionSources.FEATURE_TEST), decide_reasons - - message = 'User "{}" is not bucketed into any of the experiments on the feature "{}".'.format( - user_context.user_id, feature.key) - self.logger.debug(message) - variation, rollout_variation_reasons = self.get_variation_for_rollout(project_config, feature, user_context) - if rollout_variation_reasons: - decide_reasons += rollout_variation_reasons - return variation, decide_reasons - - def validated_forced_decision(self, project_config, decision_context, user_context): + return self.get_variations_for_feature_list(project_config, [feature], user_context, options)[0] + + def validated_forced_decision( + self, + project_config: ProjectConfig, + decision_context: OptimizelyUserContext.OptimizelyDecisionContext, + user_context: OptimizelyUserContext + ) -> tuple[Optional[entities.Variation], list[str]]: """ Gets forced decisions based on flag key, rule key and variation. @@ -502,7 +494,7 @@ def validated_forced_decision(self, project_config, decision_context, user_conte Returns: Variation of the forced decision. """ - reasons = [] + reasons: list[str] = [] forced_decision = user_context.get_forced_decision(decision_context) @@ -547,3 +539,91 @@ def validated_forced_decision(self, project_config, decision_context, user_conte user_context.logger.info(user_has_forced_decision_but_invalid) return None, reasons + + def get_variations_for_feature_list( + self, + project_config: ProjectConfig, + features: list[entities.FeatureFlag], + user_context: OptimizelyUserContext, + options: Optional[Sequence[str]] = None + ) -> list[tuple[Decision, list[str]]]: + """ + Returns the list of experiment/variation the user is bucketed in for the given list of features. + Args: + project_config: Instance of ProjectConfig. + features: List of features for which we are determining if it is enabled or not for the given user. + user_context: user context for user. + options: Decide options. + + Returns: + List of Decision namedtuple consisting of experiment and variation for the user. 
+ """ + decide_reasons: list[str] = [] + + if options: + ignore_ups = OptimizelyDecideOption.IGNORE_USER_PROFILE_SERVICE in options + else: + ignore_ups = False + + user_profile_tracker: Optional[UserProfileTracker] = None + if self.user_profile_service is not None and not ignore_ups: + user_profile_tracker = UserProfileTracker(user_context.user_id, self.user_profile_service, self.logger) + user_profile_tracker.load_user_profile(decide_reasons, None) + + decisions = [] + + for feature in features: + feature_reasons = decide_reasons.copy() + experiment_decision_found = False # Track if an experiment decision was made for the feature + + # Check if the feature flag is under an experiment + if feature.experimentIds: + for experiment_id in feature.experimentIds: + experiment = project_config.get_experiment_from_id(experiment_id) + decision_variation = None + + if experiment: + optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext( + feature.key, experiment.key) + forced_decision_variation, reasons_received = self.validated_forced_decision( + project_config, optimizely_decision_context, user_context) + feature_reasons.extend(reasons_received) + + if forced_decision_variation: + decision_variation = forced_decision_variation + else: + decision_variation, variation_reasons = self.get_variation( + project_config, experiment, user_context, user_profile_tracker, feature_reasons, options + ) + feature_reasons.extend(variation_reasons) + + if decision_variation: + self.logger.debug( + f'User "{user_context.user_id}" ' + f'bucketed into experiment "{experiment.key}" of feature "{feature.key}".' + ) + decision = Decision(experiment, decision_variation, enums.DecisionSources.FEATURE_TEST) + decisions.append((decision, feature_reasons)) + experiment_decision_found = True # Mark that a decision was found + break # Stop after the first successful experiment decision + + # Only process rollout if no experiment decision was found + if not experiment_decision_found: + rollout_decision, rollout_reasons = self.get_variation_for_rollout(project_config, + feature, + user_context) + if rollout_reasons: + feature_reasons.extend(rollout_reasons) + if rollout_decision: + self.logger.debug(f'User "{user_context.user_id}" ' + f'bucketed into rollout for feature "{feature.key}".') + else: + self.logger.debug(f'User "{user_context.user_id}" ' + f'not bucketed into any rollout for feature "{feature.key}".') + + decisions.append((rollout_decision, feature_reasons)) + + if self.user_profile_service is not None and user_profile_tracker is not None and ignore_ups is False: + user_profile_tracker.save_user_profile() + + return decisions diff --git a/optimizely/entities.py b/optimizely/entities.py index 15576568..7d257656 100644 --- a/optimizely/entities.py +++ b/optimizely/entities.py @@ -1,4 +1,4 @@ -# Copyright 2016-2021, Optimizely +# Copyright 2016-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,30 +10,61 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations +from typing import TYPE_CHECKING, Any, Optional, Sequence +from sys import version_info +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -class BaseEntity(object): - def __eq__(self, other): + +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from .helpers.types import ExperimentDict, TrafficAllocation, VariableDict, VariationDict, CmabDict + + +class BaseEntity: + def __eq__(self, other: object) -> bool: return self.__dict__ == other.__dict__ class Attribute(BaseEntity): - def __init__(self, id, key, **kwargs): + def __init__(self, id: str, key: str, **kwargs: Any): self.id = id self.key = key class Audience(BaseEntity): - def __init__(self, id, name, conditions, conditionStructure=None, conditionList=None, **kwargs): + def __init__( + self, + id: str, + name: str, + conditions: str, + conditionStructure: Optional[list[str | list[str]]] = None, + conditionList: Optional[list[str | list[str]]] = None, + **kwargs: Any + ): self.id = id self.name = name self.conditions = conditions self.conditionStructure = conditionStructure self.conditionList = conditionList + def get_segments(self) -> list[str]: + """ Extract all audience segments used in this audience's conditions. + + Returns: + List of segment names. + """ + if not self.conditionList: + return [] + return list({c[1] for c in self.conditionList if c[3] == 'qualified'}) + class Event(BaseEntity): - def __init__(self, id, key, experimentIds, **kwargs): + def __init__(self, id: str, key: str, experimentIds: list[str], **kwargs: Any): self.id = id self.key = key self.experimentIds = experimentIds @@ -42,18 +73,19 @@ def __init__(self, id, key, experimentIds, **kwargs): class Experiment(BaseEntity): def __init__( self, - id, - key, - status, - audienceIds, - variations, - forcedVariations, - trafficAllocation, - layerId, - audienceConditions=None, - groupId=None, - groupPolicy=None, - **kwargs + id: str, + key: str, + status: str, + audienceIds: list[str], + variations: list[VariationDict], + forcedVariations: dict[str, str], + trafficAllocation: list[TrafficAllocation], + layerId: str, + audienceConditions: Optional[Sequence[str | list[str]]] = None, + groupId: Optional[str] = None, + groupPolicy: Optional[str] = None, + cmab: Optional[CmabDict] = None, + **kwargs: Any ): self.id = id self.key = key @@ -66,16 +98,17 @@ def __init__( self.layerId = layerId self.groupId = groupId self.groupPolicy = groupPolicy + self.cmab = cmab - def get_audience_conditions_or_ids(self): + def get_audience_conditions_or_ids(self) -> Sequence[str | list[str]]: """ Returns audienceConditions if present, otherwise audienceIds. """ return self.audienceConditions if self.audienceConditions is not None else self.audienceIds - def __str__(self): + def __str__(self) -> str: return self.key @staticmethod - def get_default(): + def get_default() -> Experiment: """ returns an empty experiment object. 
""" experiment = Experiment( id='', @@ -93,17 +126,23 @@ def get_default(): class FeatureFlag(BaseEntity): - def __init__(self, id, key, experimentIds, rolloutId, variables, groupId=None, **kwargs): + def __init__( + self, id: str, key: str, experimentIds: list[str], rolloutId: str, + variables: list[VariableDict], groupId: Optional[str] = None, **kwargs: Any + ): self.id = id self.key = key self.experimentIds = experimentIds self.rolloutId = rolloutId - self.variables = variables + self.variables: dict[str, Variable] = variables # type: ignore[assignment] self.groupId = groupId class Group(BaseEntity): - def __init__(self, id, policy, experiments, trafficAllocation, **kwargs): + def __init__( + self, id: str, policy: str, experiments: list[Experiment], + trafficAllocation: list[TrafficAllocation], **kwargs: Any + ): self.id = id self.policy = policy self.experiments = experiments @@ -112,20 +151,20 @@ def __init__(self, id, policy, experiments, trafficAllocation, **kwargs): class Layer(BaseEntity): """Layer acts as rollout.""" - def __init__(self, id, experiments, **kwargs): + def __init__(self, id: str, experiments: list[ExperimentDict], **kwargs: Any): self.id = id self.experiments = experiments class Variable(BaseEntity): - class Type(object): - BOOLEAN = 'boolean' - DOUBLE = 'double' - INTEGER = 'integer' - JSON = 'json' - STRING = 'string' - - def __init__(self, id, key, type, defaultValue, **kwargs): + class Type: + BOOLEAN: Final = 'boolean' + DOUBLE: Final = 'double' + INTEGER: Final = 'integer' + JSON: Final = 'json' + STRING: Final = 'string' + + def __init__(self, id: str, key: str, type: str, defaultValue: Any, **kwargs: Any): self.id = id self.key = key self.type = type @@ -134,15 +173,24 @@ def __init__(self, id, key, type, defaultValue, **kwargs): class Variation(BaseEntity): class VariableUsage(BaseEntity): - def __init__(self, id, value, **kwards): + def __init__(self, id: str, value: str, **kwargs: Any): self.id = id self.value = value - def __init__(self, id, key, featureEnabled=False, variables=None, **kwargs): + def __init__( + self, id: str, key: str, featureEnabled: bool = False, variables: Optional[list[Variable]] = None, **kwargs: Any + ): self.id = id self.key = key self.featureEnabled = featureEnabled self.variables = variables or [] - def __str__(self): + def __str__(self) -> str: return self.key + + +class Integration(BaseEntity): + def __init__(self, key: str, host: Optional[str] = None, publicKey: Optional[str] = None, **kwargs: Any): + self.key = key + self.host = host + self.publicKey = publicKey diff --git a/optimizely/error_handler.py b/optimizely/error_handler.py index ed88625e..69411fb0 100644 --- a/optimizely/error_handler.py +++ b/optimizely/error_handler.py @@ -1,4 +1,4 @@ -# Copyright 2016, Optimizely +# Copyright 2016, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,12 +12,12 @@ # limitations under the License. -class BaseErrorHandler(object): +class BaseErrorHandler: """ Class encapsulating exception handling functionality. Override with your own exception handler providing handle_error method. """ @staticmethod - def handle_error(*args): + def handle_error(error: Exception) -> None: pass @@ -29,5 +29,5 @@ class RaiseExceptionErrorHandler(BaseErrorHandler): """ Class providing handle_error method which raises provided exception. 
""" @staticmethod - def handle_error(error): + def handle_error(error: Exception) -> None: raise error diff --git a/optimizely/event/event_factory.py b/optimizely/event/event_factory.py index 54155358..8a4bb0cf 100644 --- a/optimizely/event/event_factory.py +++ b/optimizely/event/event_factory.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,6 +11,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, Sequence, cast, List +from sys import version_info +from optimizely import entities from optimizely.helpers import enums from optimizely.helpers import event_tag_utils from optimizely.helpers import validator @@ -18,22 +22,37 @@ from . import payload from . import user_event -CUSTOM_ATTRIBUTE_FEATURE_TYPE = 'custom' +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.project_config import ProjectConfig + from optimizely.optimizely_user_context import UserAttributes + from optimizely.logger import Logger -class EventFactory(object): +CUSTOM_ATTRIBUTE_FEATURE_TYPE: Final = 'custom' + + +class EventFactory: """ EventFactory builds LogEvent object from a given UserEvent. This class serves to separate concerns between events in the SDK and the API used to record the events via the Optimizely Events API ("https://developers.optimizely.com/x/events/api/index.html") """ - EVENT_ENDPOINT = 'https://logx.optimizely.com/v1/events' - HTTP_VERB = 'POST' - HTTP_HEADERS = {'Content-Type': 'application/json'} - ACTIVATE_EVENT_KEY = 'campaign_activated' + EVENT_ENDPOINT: Final = 'https://logx.optimizely.com/v1/events' + HTTP_VERB: Final = 'POST' + HTTP_HEADERS: Final = {'Content-Type': 'application/json'} + ACTIVATE_EVENT_KEY: Final = 'campaign_activated' @classmethod - def create_log_event(cls, user_events, logger): + def create_log_event( + cls, + user_events: Sequence[Optional[user_event.UserEvent]] | Optional[user_event.UserEvent], + logger: Logger + ) -> Optional[log_event.LogEvent]: """ Create LogEvent instance. Args: @@ -45,7 +64,7 @@ def create_log_event(cls, user_events, logger): """ if not isinstance(user_events, list): - user_events = [user_events] + user_events = cast(List[Optional[user_event.UserEvent]], [user_events]) visitors = [] @@ -58,7 +77,12 @@ def create_log_event(cls, user_events, logger): if len(visitors) == 0: return None - user_context = user_events[0].event_context + first_event = user_events[0] + + if not first_event: + return None + + user_context = first_event.event_context event_batch = payload.EventBatch( user_context.account_id, user_context.project_id, @@ -76,7 +100,7 @@ def create_log_event(cls, user_events, logger): return log_event.LogEvent(cls.EVENT_ENDPOINT, event_params, cls.HTTP_VERB, cls.HTTP_HEADERS) @classmethod - def _create_visitor(cls, event, logger): + def _create_visitor(cls, event: Optional[user_event.UserEvent], logger: Logger) -> Optional[payload.Visitor]: """ Helper method to create Visitor instance for event_batch. 
Args: @@ -91,7 +115,7 @@ def _create_visitor(cls, event, logger): if isinstance(event, user_event.ImpressionEvent): experiment_layerId, experiment_id, variation_id, variation_key = '', '', '', '' - if event.variation: + if isinstance(event.variation, entities.Variation): variation_id = event.variation.id variation_key = event.variation.key @@ -111,7 +135,7 @@ def _create_visitor(cls, event, logger): return visitor - elif isinstance(event, user_event.ConversionEvent): + elif isinstance(event, user_event.ConversionEvent) and event.event: revenue = event_tag_utils.get_revenue_value(event.event_tags) value = event_tag_utils.get_numeric_value(event.event_tags, logger) @@ -130,7 +154,9 @@ def _create_visitor(cls, event, logger): return None @staticmethod - def build_attribute_list(attributes, project_config): + def build_attribute_list( + attributes: Optional[UserAttributes], project_config: ProjectConfig + ) -> list[payload.VisitorAttribute]: """ Create Visitor Attribute List. Args: @@ -141,7 +167,7 @@ def build_attribute_list(attributes, project_config): List consisting of valid attributes for the user. Empty otherwise. """ - attributes_list = [] + attributes_list: list[payload.VisitorAttribute] = [] if project_config is None: return attributes_list diff --git a/optimizely/event/event_processor.py b/optimizely/event/event_processor.py index f6dfa312..05f5e078 100644 --- a/optimizely/event/event_processor.py +++ b/optimizely/event/event_processor.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021 Optimizely +# Copyright 2019-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,30 +11,37 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abc +from __future__ import annotations +from abc import ABC, abstractmethod import numbers import threading import time +from typing import Optional from datetime import timedelta -from six.moves import queue +import queue +from sys import version_info from optimizely import logger as _logging from optimizely import notification_center as _notification_center -from optimizely.event_dispatcher import EventDispatcher as default_event_dispatcher +from optimizely.event_dispatcher import EventDispatcher, CustomEventDispatcher from optimizely.helpers import enums from optimizely.helpers import validator from .event_factory import EventFactory from .user_event import UserEvent -ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore class BaseEventProcessor(ABC): """ Class encapsulating event processing. Override with your own implementation. """ - @abc.abstractmethod - def process(self, user_event): + @abstractmethod + def process(self, user_event: UserEvent) -> None: """ Method to provide intermediary processing stage within event production. Args: user_event: UserEvent instance that needs to be processed and dispatched. @@ -51,24 +58,28 @@ class BatchEventProcessor(BaseEventProcessor): maximum duration before the resulting LogEvent is sent to the EventDispatcher. 
""" - _DEFAULT_QUEUE_CAPACITY = 1000 - _DEFAULT_BATCH_SIZE = 10 - _DEFAULT_FLUSH_INTERVAL = 30 - _DEFAULT_TIMEOUT_INTERVAL = 5 - _SHUTDOWN_SIGNAL = object() - _FLUSH_SIGNAL = object() - LOCK = threading.Lock() + class Signal: + '''Used to create unique objects for sending signals to event queue.''' + pass + + _DEFAULT_QUEUE_CAPACITY: Final = 1000 + _DEFAULT_BATCH_SIZE: Final = 10 + _DEFAULT_FLUSH_INTERVAL: Final = 30 + _DEFAULT_TIMEOUT_INTERVAL: Final = 5 + _SHUTDOWN_SIGNAL: Final = Signal() + _FLUSH_SIGNAL: Final = Signal() + LOCK: Final = threading.Lock() def __init__( self, - event_dispatcher, - logger=None, - start_on_init=False, - event_queue=None, - batch_size=None, - flush_interval=None, - timeout_interval=None, - notification_center=None, + event_dispatcher: Optional[type[EventDispatcher] | CustomEventDispatcher] = None, + logger: Optional[_logging.Logger] = None, + start_on_init: bool = False, + event_queue: Optional[queue.Queue[UserEvent | Signal]] = None, + batch_size: Optional[int] = None, + flush_interval: Optional[float] = None, + timeout_interval: Optional[float] = None, + notification_center: Optional[_notification_center.NotificationCenter] = None, ): """ BatchEventProcessor init method to configure event batching. @@ -86,43 +97,48 @@ def __init__( thread. notification_center: Optional instance of notification_center.NotificationCenter. """ - self.event_dispatcher = event_dispatcher or default_event_dispatcher + self.event_dispatcher = event_dispatcher or EventDispatcher self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) self.event_queue = event_queue or queue.Queue(maxsize=self._DEFAULT_QUEUE_CAPACITY) - self.batch_size = ( - batch_size + self.batch_size: int = ( + batch_size # type: ignore[assignment] if self._validate_instantiation_props(batch_size, 'batch_size', self._DEFAULT_BATCH_SIZE) else self._DEFAULT_BATCH_SIZE ) - self.flush_interval = ( - timedelta(seconds=flush_interval) + self.flush_interval: timedelta = ( + timedelta(seconds=flush_interval) # type: ignore[arg-type] if self._validate_instantiation_props(flush_interval, 'flush_interval', self._DEFAULT_FLUSH_INTERVAL) else timedelta(seconds=self._DEFAULT_FLUSH_INTERVAL) ) - self.timeout_interval = ( - timedelta(seconds=timeout_interval) + self.timeout_interval: timedelta = ( + timedelta(seconds=timeout_interval) # type: ignore[arg-type] if self._validate_instantiation_props(timeout_interval, 'timeout_interval', self._DEFAULT_TIMEOUT_INTERVAL) else timedelta(seconds=self._DEFAULT_TIMEOUT_INTERVAL) ) self.notification_center = notification_center or _notification_center.NotificationCenter(self.logger) - self._current_batch = list() + self._current_batch: list[UserEvent] = [] if not validator.is_notification_center_valid(self.notification_center): self.logger.error(enums.Errors.INVALID_INPUT.format('notification_center')) self.logger.debug('Creating notification center for use.') self.notification_center = _notification_center.NotificationCenter(self.logger) - self.executor = None + self.executor: Optional[threading.Thread] = None if start_on_init is True: self.start() @property - def is_running(self): + def is_running(self) -> bool: """ Property to check if consumer thread is alive or not. 
""" return self.executor.is_alive() if self.executor else False - def _validate_instantiation_props(self, prop, prop_name, default_value): + def _validate_instantiation_props( + self, + prop: Optional[numbers.Integral | int | float], + prop_name: str, + default_value: numbers.Integral | int | float + ) -> bool: """ Method to determine if instantiation properties like batch_size, flush_interval and timeout_interval are valid. @@ -145,11 +161,11 @@ def _validate_instantiation_props(self, prop, prop_name, default_value): is_valid = False if is_valid is False: - self.logger.info('Using default value {} for {}.'.format(default_value, prop_name)) + self.logger.info(f'Using default value {default_value} for {prop_name}.') return is_valid - def _get_time(self, _time=None): + def _get_time(self, _time: Optional[float] = None) -> float: """ Method to return time as float in seconds. If _time is None, uses current time. Args: @@ -163,18 +179,17 @@ def _get_time(self, _time=None): return _time - def start(self): + def start(self) -> None: """ Starts the batch processing thread to batch events. """ if hasattr(self, 'executor') and self.is_running: self.logger.warning('BatchEventProcessor already started.') return self.flushing_interval_deadline = self._get_time() + self._get_time(self.flush_interval.total_seconds()) - self.executor = threading.Thread(target=self._run) - self.executor.setDaemon(True) + self.executor = threading.Thread(target=self._run, name="EventThread", daemon=True) self.executor.start() - def _run(self): + def _run(self) -> None: """ Triggered as part of the thread which batches events or flushes event_queue and hangs on get for flush interval if queue is empty. """ @@ -211,25 +226,25 @@ def _run(self): self._add_to_batch(item) except Exception as exception: - self.logger.error('Uncaught exception processing buffer. Error: ' + str(exception)) + self.logger.error(f'Uncaught exception processing buffer. Error: {exception}') finally: self.logger.info('Exiting processing loop. Attempting to flush pending events.') self._flush_batch() - def flush(self): + def flush(self) -> None: """ Adds flush signal to event_queue. """ self.event_queue.put(self._FLUSH_SIGNAL) - def _flush_batch(self): + def _flush_batch(self) -> None: """ Flushes current batch by dispatching event. """ batch_len = len(self._current_batch) if batch_len == 0: self.logger.debug('Nothing to flush.') return - self.logger.debug('Flushing batch size ' + str(batch_len)) + self.logger.debug(f'Flushing batch size {batch_len}') with self.LOCK: to_process_batch = list(self._current_batch) @@ -239,12 +254,16 @@ def _flush_batch(self): self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event) + if log_event is None: + self.logger.exception('Error dispatching event: Cannot dispatch None event.') + return + try: self.event_dispatcher.dispatch_event(log_event) except Exception as e: - self.logger.error('Error dispatching event: ' + str(log_event) + ' ' + str(e)) + self.logger.error(f'Error dispatching event: {log_event} {e}') - def process(self, user_event): + def process(self, user_event: UserEvent) -> None: """ Method to process the user_event by putting it in event_queue. Args: @@ -255,17 +274,17 @@ def process(self, user_event): return self.logger.debug( - 'Received event of type {} for user {}.'.format(type(user_event).__name__, user_event.user_id) + f'Received event of type {type(user_event).__name__} for user {user_event.user_id}.' 
) try: self.event_queue.put_nowait(user_event) except queue.Full: self.logger.warning( - 'Payload not accepted by the queue. Current size: {}'.format(str(self.event_queue.qsize())) + f'Payload not accepted by the queue. Current size: {self.event_queue.qsize()}' ) - def _add_to_batch(self, user_event): + def _add_to_batch(self, user_event: UserEvent) -> None: """ Method to append received user event to current batch. Args: @@ -285,7 +304,7 @@ def _add_to_batch(self, user_event): self.logger.debug('Flushing on batch size.') self._flush_batch() - def _should_split(self, user_event): + def _should_split(self, user_event: UserEvent) -> bool: """ Method to check if current event batch should split into two. Args: @@ -310,7 +329,7 @@ def _should_split(self, user_event): return False - def stop(self): + def stop(self) -> None: """ Stops and disposes batch event processor. """ self.event_queue.put(self._SHUTDOWN_SIGNAL) self.logger.warning('Stopping Scheduler.') @@ -319,7 +338,7 @@ def stop(self): self.executor.join(self.timeout_interval.total_seconds()) if self.is_running: - self.logger.error('Timeout exceeded while attempting to close for ' + str(self.timeout_interval) + ' ms.') + self.logger.error(f'Timeout exceeded while attempting to close for {self.timeout_interval} ms.') class ForwardingEventProcessor(BaseEventProcessor): @@ -329,7 +348,12 @@ class ForwardingEventProcessor(BaseEventProcessor): The ForwardingEventProcessor sends the LogEvent to EventDispatcher as soon as it is received. """ - def __init__(self, event_dispatcher, logger=None, notification_center=None): + def __init__( + self, + event_dispatcher: Optional[type[EventDispatcher] | CustomEventDispatcher], + logger: Optional[_logging.Logger] = None, + notification_center: Optional[_notification_center.NotificationCenter] = None + ): """ ForwardingEventProcessor init method to configure event dispatching. Args: @@ -337,7 +361,7 @@ def __init__(self, event_dispatcher, logger=None, notification_center=None): logger: Optional component which provides a log method to log messages. By default nothing would be logged. notification_center: Optional instance of notification_center.NotificationCenter. """ - self.event_dispatcher = event_dispatcher or default_event_dispatcher + self.event_dispatcher = event_dispatcher or EventDispatcher self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) self.notification_center = notification_center or _notification_center.NotificationCenter(self.logger) @@ -345,7 +369,7 @@ def __init__(self, event_dispatcher, logger=None, notification_center=None): self.logger.error(enums.Errors.INVALID_INPUT.format('notification_center')) self.notification_center = _notification_center.NotificationCenter() - def process(self, user_event): + def process(self, user_event: UserEvent) -> None: """ Method to process the user_event by dispatching it. Args: @@ -356,14 +380,18 @@ def process(self, user_event): return self.logger.debug( - 'Received event of type {} for user {}.'.format(type(user_event).__name__, user_event.user_id) + f'Received event of type {type(user_event).__name__} for user {user_event.user_id}.' 
) log_event = EventFactory.create_log_event(user_event, self.logger) self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event) + if log_event is None: + self.logger.exception('Error dispatching event: Cannot dispatch None event.') + return + try: self.event_dispatcher.dispatch_event(log_event) except Exception as e: - self.logger.exception('Error dispatching event: ' + str(log_event) + ' ' + str(e)) + self.logger.exception(f'Error dispatching event: {log_event} {e}') diff --git a/optimizely/event/log_event.py b/optimizely/event/log_event.py index 1c5ce71d..7c0beeb6 100644 --- a/optimizely/event/log_event.py +++ b/optimizely/event/log_event.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,15 +11,32 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Optional, Any +from sys import version_info +from optimizely import event_builder -class LogEvent(object): + +if version_info < (3, 8): + from typing_extensions import Literal +else: + from typing import Literal # type: ignore + + +class LogEvent(event_builder.Event): """ Representation of an event which can be sent to Optimizely events API. """ - def __init__(self, url, params, http_verb=None, headers=None): + def __init__( + self, + url: str, + params: dict[str, Any], + http_verb: Optional[Literal['POST', 'GET']] = None, + headers: Optional[dict[str, str]] = None + ): self.url = url self.params = params self.http_verb = http_verb or 'POST' self.headers = headers - def __str__(self): - return str(self.__class__) + ": " + str(self.__dict__) + def __str__(self) -> str: + return f'{self.__class__}: {self.__dict__}' diff --git a/optimizely/event/payload.py b/optimizely/event/payload.py index b7e51a24..ac6f35e4 100644 --- a/optimizely/event/payload.py +++ b/optimizely/event/payload.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,22 +11,29 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import json +from numbers import Integral +from typing import TYPE_CHECKING, Any, Optional -class EventBatch(object): +if TYPE_CHECKING: + from optimizely.helpers.event_tag_utils import EventTags + + +class EventBatch: """ Class respresenting Event Batch. 
""" def __init__( self, - account_id, - project_id, - revision, - client_name, - client_version, - anonymize_ip, - enrich_decisions=True, - visitors=None, + account_id: str, + project_id: str, + revision: str, + client_name: str, + client_version: str, + anonymize_ip: bool, + enrich_decisions: bool = True, + visitors: Optional[list[Visitor]] = None, ): self.account_id = account_id self.project_id = project_id @@ -37,11 +44,11 @@ def __init__( self.enrich_decisions = enrich_decisions self.visitors = visitors or [] - def __eq__(self, other): + def __eq__(self, other: object) -> bool: batch_obj = self.get_event_params() return batch_obj == other - def _dict_clean(self, obj): + def _dict_clean(self, obj: list[tuple[str, Any]]) -> dict[str, Any]: """ Helper method to remove keys from dictionary with None values. """ result = {} @@ -52,26 +59,29 @@ def _dict_clean(self, obj): result[k] = v return result - def get_event_params(self): + def get_event_params(self) -> dict[str, Any]: """ Method to return valid params for LogEvent payload. """ - return json.loads(json.dumps(self.__dict__, default=lambda o: o.__dict__), object_pairs_hook=self._dict_clean,) + return json.loads( # type: ignore[no-any-return] + json.dumps(self.__dict__, default=lambda o: o.__dict__), + object_pairs_hook=self._dict_clean, + ) -class Decision(object): +class Decision: """ Class respresenting Decision. """ - def __init__(self, campaign_id, experiment_id, variation_id, metadata): + def __init__(self, campaign_id: str, experiment_id: str, variation_id: str, metadata: Metadata): self.campaign_id = campaign_id self.experiment_id = experiment_id self.variation_id = variation_id self.metadata = metadata -class Metadata(object): +class Metadata: """ Class respresenting Metadata. """ - def __init__(self, flag_key, rule_key, rule_type, variation_key, enabled): + def __init__(self, flag_key: str, rule_key: str, rule_type: str, variation_key: str, enabled: bool): self.flag_key = flag_key self.rule_key = rule_key self.rule_type = rule_type @@ -79,18 +89,27 @@ def __init__(self, flag_key, rule_key, rule_type, variation_key, enabled): self.enabled = enabled -class Snapshot(object): +class Snapshot: """ Class representing Snapshot. """ - def __init__(self, events, decisions=None): + def __init__(self, events: list[SnapshotEvent], decisions: Optional[list[Decision]] = None): self.events = events self.decisions = decisions -class SnapshotEvent(object): +class SnapshotEvent: """ Class representing Snapshot Event. """ - def __init__(self, entity_id, uuid, key, timestamp, revenue=None, value=None, tags=None): + def __init__( + self, + entity_id: str, + uuid: str, + key: str, + timestamp: int, + revenue: Optional[Integral] = None, + value: Any = None, + tags: Optional[EventTags] = None + ): self.entity_id = entity_id self.uuid = uuid self.key = key @@ -100,19 +119,19 @@ def __init__(self, entity_id, uuid, key, timestamp, revenue=None, value=None, ta self.tags = tags -class Visitor(object): +class Visitor: """ Class representing Visitor. """ - def __init__(self, snapshots, attributes, visitor_id): + def __init__(self, snapshots: list[Snapshot], attributes: list[VisitorAttribute], visitor_id: str): self.snapshots = snapshots self.attributes = attributes self.visitor_id = visitor_id -class VisitorAttribute(object): +class VisitorAttribute: """ Class representing Visitor Attribute. 
""" - def __init__(self, entity_id, key, attribute_type, value): + def __init__(self, entity_id: str, key: str, attribute_type: str, value: Any): self.entity_id = entity_id self.key = key self.type = attribute_type diff --git a/optimizely/event/user_event.py b/optimizely/event/user_event.py index 0c4e021a..9cdb623a 100644 --- a/optimizely/event/user_event.py +++ b/optimizely/event/user_event.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,19 +10,38 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from __future__ import annotations import time import uuid +from typing import TYPE_CHECKING, Optional +from sys import version_info from optimizely import version -CLIENT_NAME = 'python-sdk' + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.entities import Experiment, Variation, Event + from optimizely.event.payload import VisitorAttribute + from optimizely.helpers.event_tag_utils import EventTags -class UserEvent(object): +CLIENT_NAME: Final = 'python-sdk' + + +class UserEvent: """ Class respresenting User Event. """ - def __init__(self, event_context, user_id, visitor_attributes, bot_filtering=None): + def __init__( + self, event_context: EventContext, user_id: str, + visitor_attributes: list[VisitorAttribute], bot_filtering: Optional[bool] = None + ): self.event_context = event_context self.user_id = user_id self.visitor_attributes = visitor_attributes @@ -30,10 +49,10 @@ def __init__(self, event_context, user_id, visitor_attributes, bot_filtering=Non self.uuid = self._get_uuid() self.timestamp = self._get_time() - def _get_time(self): + def _get_time(self) -> int: return int(round(time.time() * 1000)) - def _get_uuid(self): + def _get_uuid(self) -> str: return str(uuid.uuid4()) @@ -41,10 +60,19 @@ class ImpressionEvent(UserEvent): """ Class representing Impression Event. """ def __init__( - self, event_context, user_id, experiment, visitor_attributes, variation, flag_key, - rule_key, rule_type, enabled, bot_filtering=None + self, + event_context: EventContext, + user_id: str, + experiment: Experiment, + visitor_attributes: list[VisitorAttribute], + variation: Optional[Variation], + flag_key: str, + rule_key: str, + rule_type: str, + enabled: bool, + bot_filtering: Optional[bool] = None ): - super(ImpressionEvent, self).__init__(event_context, user_id, visitor_attributes, bot_filtering) + super().__init__(event_context, user_id, visitor_attributes, bot_filtering) self.experiment = experiment self.variation = variation self.flag_key = flag_key @@ -57,17 +85,19 @@ class ConversionEvent(UserEvent): """ Class representing Conversion Event. 
""" def __init__( - self, event_context, event, user_id, visitor_attributes, event_tags, bot_filtering=None, + self, event_context: EventContext, event: Optional[Event], user_id: str, + visitor_attributes: list[VisitorAttribute], event_tags: Optional[EventTags], + bot_filtering: Optional[bool] = None, ): - super(ConversionEvent, self).__init__(event_context, user_id, visitor_attributes, bot_filtering) + super().__init__(event_context, user_id, visitor_attributes, bot_filtering) self.event = event self.event_tags = event_tags -class EventContext(object): +class EventContext: """ Class respresenting User Event Context. """ - def __init__(self, account_id, project_id, revision, anonymize_ip): + def __init__(self, account_id: str, project_id: str, revision: str, anonymize_ip: bool): self.account_id = account_id self.project_id = project_id self.revision = revision diff --git a/optimizely/event/user_event_factory.py b/optimizely/event/user_event_factory.py index fb5c70ed..ef07d06b 100644 --- a/optimizely/event/user_event_factory.py +++ b/optimizely/event/user_event_factory.py @@ -1,4 +1,4 @@ -# Copyright 2019, 2021 Optimizely +# Copyright 2019, 2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,19 +11,37 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Optional +from optimizely.helpers.event_tag_utils import EventTags from . import event_factory from . import user_event from optimizely.helpers import enums -class UserEventFactory(object): +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.optimizely_user_context import UserAttributes + from optimizely.project_config import ProjectConfig + from optimizely.entities import Experiment, Variation + + +class UserEventFactory: """ UserEventFactory builds impression and conversion events from a given UserEvent. """ @classmethod def create_impression_event( - cls, project_config, activated_experiment, variation_id, flag_key, rule_key, rule_type, - enabled, user_id, user_attributes - ): + cls, + project_config: ProjectConfig, + activated_experiment: Experiment, + variation_id: Optional[str], + flag_key: str, + rule_key: str, + rule_type: str, + enabled: bool, + user_id: str, + user_attributes: Optional[UserAttributes] + ) -> Optional[user_event.ImpressionEvent]: """ Create impression Event to be sent to the logging endpoint. Args: @@ -35,7 +53,7 @@ def create_impression_event( rule_type: type for the source. enabled: boolean representing if feature is enabled user_id: ID for user. - attributes: Dict representing user attributes and values which need to be recorded. + user_attributes: Dict representing user attributes and values which need to be recorded. Returns: Event object encapsulating the impression event. 
None if: @@ -45,7 +63,8 @@ def create_impression_event( if not activated_experiment and rule_type is not enums.DecisionSources.ROLLOUT: return None - variation, experiment_id = None, None + variation: Optional[Variation] = None + experiment_id = None if activated_experiment: experiment_id = activated_experiment.id @@ -74,14 +93,21 @@ def create_impression_event( ) @classmethod - def create_conversion_event(cls, project_config, event_key, user_id, user_attributes, event_tags): + def create_conversion_event( + cls, + project_config: ProjectConfig, + event_key: str, + user_id: str, + user_attributes: Optional[UserAttributes], + event_tags: Optional[EventTags] + ) -> Optional[user_event.ConversionEvent]: """ Create conversion Event to be sent to the logging endpoint. Args: project_config: Instance of ProjectConfig. event_key: Key representing the event which needs to be recorded. user_id: ID for user. - attributes: Dict representing user attributes and values. + user_attributes: Dict representing user attributes and values. event_tags: Dict representing metadata associated with the event. Returns: diff --git a/optimizely/event_builder.py b/optimizely/event_builder.py index befe2700..ecabf14c 100644 --- a/optimizely/event_builder.py +++ b/optimizely/event_builder.py @@ -1,4 +1,4 @@ -# Copyright 2016-2019, Optimizely +# Copyright 2016-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,58 +11,80 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import time +from typing import TYPE_CHECKING, Any, Optional import uuid +from sys import version_info from . import version from .helpers import enums from .helpers import event_tag_utils from .helpers import validator +if version_info < (3, 8): + from typing_extensions import Final, Literal +else: + from typing import Final, Literal # type: ignore -class Event(object): +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .entities import Experiment + from .optimizely_user_context import UserAttributes + from .project_config import ProjectConfig + + +class Event: """ Representation of an event which can be sent to the Optimizely logging endpoint. """ - def __init__(self, url, params, http_verb=None, headers=None): + def __init__( + self, + url: str, + params: dict[str, Any], + http_verb: Optional[Literal['POST', 'GET']] = None, + headers: Optional[dict[str, str]] = None + ): self.url = url self.params = params self.http_verb = http_verb or 'GET' self.headers = headers -class EventBuilder(object): +class EventBuilder: """ Class which encapsulates methods to build events for tracking impressions and conversions using the new V3 event API (batch). 
""" - EVENTS_URL = 'https://logx.optimizely.com/v1/events' - HTTP_VERB = 'POST' - HTTP_HEADERS = {'Content-Type': 'application/json'} - - class EventParams(object): - ACCOUNT_ID = 'account_id' - PROJECT_ID = 'project_id' - EXPERIMENT_ID = 'experiment_id' - CAMPAIGN_ID = 'campaign_id' - VARIATION_ID = 'variation_id' - END_USER_ID = 'visitor_id' - ENRICH_DECISIONS = 'enrich_decisions' - EVENTS = 'events' - EVENT_ID = 'entity_id' - ATTRIBUTES = 'attributes' - DECISIONS = 'decisions' - TIME = 'timestamp' - KEY = 'key' - TAGS = 'tags' - UUID = 'uuid' - USERS = 'visitors' - SNAPSHOTS = 'snapshots' - SOURCE_SDK_TYPE = 'client_name' - SOURCE_SDK_VERSION = 'client_version' - CUSTOM = 'custom' - ANONYMIZE_IP = 'anonymize_ip' - REVISION = 'revision' - - def _get_attributes_data(self, project_config, attributes): + EVENTS_URL: Final = 'https://logx.optimizely.com/v1/events' + HTTP_VERB: Final = 'POST' + HTTP_HEADERS: Final = {'Content-Type': 'application/json'} + + class EventParams: + ACCOUNT_ID: Final = 'account_id' + PROJECT_ID: Final = 'project_id' + EXPERIMENT_ID: Final = 'experiment_id' + CAMPAIGN_ID: Final = 'campaign_id' + VARIATION_ID: Final = 'variation_id' + END_USER_ID: Final = 'visitor_id' + ENRICH_DECISIONS: Final = 'enrich_decisions' + EVENTS: Final = 'events' + EVENT_ID: Final = 'entity_id' + ATTRIBUTES: Final = 'attributes' + DECISIONS: Final = 'decisions' + TIME: Final = 'timestamp' + KEY: Final = 'key' + TAGS: Final = 'tags' + UUID: Final = 'uuid' + USERS: Final = 'visitors' + SNAPSHOTS: Final = 'snapshots' + SOURCE_SDK_TYPE: Final = 'client_name' + SOURCE_SDK_VERSION: Final = 'client_version' + CUSTOM: Final = 'custom' + ANONYMIZE_IP: Final = 'anonymize_ip' + REVISION: Final = 'revision' + + def _get_attributes_data( + self, project_config: ProjectConfig, attributes: UserAttributes + ) -> list[dict[str, Any]]: """ Get attribute(s) information. Args: @@ -105,7 +127,7 @@ def _get_attributes_data(self, project_config, attributes): return params - def _get_time(self): + def _get_time(self) -> int: """ Get time in milliseconds to be added. Returns: @@ -114,7 +136,9 @@ def _get_time(self): return int(round(time.time() * 1000)) - def _get_common_params(self, project_config, user_id, attributes): + def _get_common_params( + self, project_config: ProjectConfig, user_id: str, attributes: UserAttributes + ) -> dict[str, Any]: """ Get params which are used same in both conversion and impression events. Args: @@ -125,7 +149,7 @@ def _get_common_params(self, project_config, user_id, attributes): Returns: Dict consisting of parameters common to both impression and conversion events. """ - common_params = { + common_params: dict[str, Any] = { self.EventParams.PROJECT_ID: project_config.get_project_id(), self.EventParams.ACCOUNT_ID: project_config.get_account_id(), } @@ -149,7 +173,9 @@ def _get_common_params(self, project_config, user_id, attributes): return common_params - def _get_required_params_for_impression(self, experiment, variation_id): + def _get_required_params_for_impression( + self, experiment: Experiment, variation_id: str + ) -> dict[str, list[dict[str, str | int]]]: """ Get parameters that are required for the impression event to register. Args: @@ -159,7 +185,7 @@ def _get_required_params_for_impression(self, experiment, variation_id): Returns: Dict consisting of decisions and events info for impression event. 
""" - snapshot = {} + snapshot: dict[str, list[dict[str, str | int]]] = {} snapshot[self.EventParams.DECISIONS] = [ { @@ -180,7 +206,9 @@ def _get_required_params_for_impression(self, experiment, variation_id): return snapshot - def _get_required_params_for_conversion(self, project_config, event_key, event_tags): + def _get_required_params_for_conversion( + self, project_config: ProjectConfig, event_key: str, event_tags: event_tag_utils.EventTags + ) -> dict[str, list[dict[str, Any]]]: """ Get parameters that are required for the conversion event to register. Args: @@ -192,9 +220,10 @@ def _get_required_params_for_conversion(self, project_config, event_key, event_t Dict consisting of the decisions and events info for conversion event. """ snapshot = {} + event = project_config.get_event(event_key) - event_dict = { - self.EventParams.EVENT_ID: project_config.get_event(event_key).id, + event_dict: dict[str, Any] = { + self.EventParams.EVENT_ID: event.id if event else None, self.EventParams.TIME: self._get_time(), self.EventParams.KEY: event_key, self.EventParams.UUID: str(uuid.uuid4()), @@ -215,7 +244,10 @@ def _get_required_params_for_conversion(self, project_config, event_key, event_t snapshot[self.EventParams.EVENTS] = [event_dict] return snapshot - def create_impression_event(self, project_config, experiment, variation_id, user_id, attributes): + def create_impression_event( + self, project_config: ProjectConfig, experiment: Experiment, + variation_id: str, user_id: str, attributes: UserAttributes + ) -> Event: """ Create impression Event to be sent to the logging endpoint. Args: @@ -236,7 +268,10 @@ def create_impression_event(self, project_config, experiment, variation_id, user return Event(self.EVENTS_URL, params, http_verb=self.HTTP_VERB, headers=self.HTTP_HEADERS) - def create_conversion_event(self, project_config, event_key, user_id, attributes, event_tags): + def create_conversion_event( + self, project_config: ProjectConfig, event_key: str, + user_id: str, attributes: UserAttributes, event_tags: event_tag_utils.EventTags + ) -> Event: """ Create conversion Event to be sent to the logging endpoint. Args: diff --git a/optimizely/event_dispatcher.py b/optimizely/event_dispatcher.py index f21b47a1..767fbb7d 100644 --- a/optimizely/event_dispatcher.py +++ b/optimizely/event_dispatcher.py @@ -1,4 +1,4 @@ -# Copyright 2016, Optimizely +# Copyright 2016, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -13,31 +13,57 @@ import json import logging -import requests +from sys import version_info +import requests from requests import exceptions as request_exception +from requests.adapters import HTTPAdapter +from urllib3.util.retry import Retry + +from . import event_builder +from .helpers.enums import HTTPVerbs, EventDispatchConfig + +if version_info < (3, 8): + from typing_extensions import Protocol +else: + from typing import Protocol # type: ignore + -from .helpers import enums +class CustomEventDispatcher(Protocol): + """Interface for a custom event dispatcher and required method `dispatch_event`. """ -REQUEST_TIMEOUT = 10 + def dispatch_event(self, event: event_builder.Event) -> None: + ... -class EventDispatcher(object): +class EventDispatcher: + @staticmethod - def dispatch_event(event): + def dispatch_event(event: event_builder.Event) -> None: """ Dispatch the event being represented by the Event object. 
        Args:
          event: Object holding information about the request to be dispatched to the Optimizely backend.
        """
-        try:
-            if event.http_verb == enums.HTTPVerbs.GET:
-                requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status()
-            elif event.http_verb == enums.HTTPVerbs.POST:
-                requests.post(
-                    event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT,
+        try:
+            session = requests.Session()
+
+            retries = Retry(total=EventDispatchConfig.RETRIES,
+                            backoff_factor=0.1,
+                            status_forcelist=[500, 502, 503, 504])
+            adapter = HTTPAdapter(max_retries=retries)
+
+            session.mount('http://', adapter)
+            session.mount("https://", adapter)
+
+            if event.http_verb == HTTPVerbs.GET:
+                session.get(event.url, params=event.params,
+                            timeout=EventDispatchConfig.REQUEST_TIMEOUT).raise_for_status()
+            elif event.http_verb == HTTPVerbs.POST:
+                session.post(
+                    event.url, data=json.dumps(event.params), headers=event.headers,
+                    timeout=EventDispatchConfig.REQUEST_TIMEOUT,
                 ).raise_for_status()
         except request_exception.RequestException as error:
-            logging.error('Dispatch event failed. Error: %s' % str(error))
+            logging.error(f'Dispatch event failed. Error: {error}')
diff --git a/optimizely/exceptions.py b/optimizely/exceptions.py
index d6003ab1..b17b1397 100644
--- a/optimizely/exceptions.py
+++ b/optimizely/exceptions.py
@@ -64,3 +64,39 @@ class UnsupportedDatafileVersionException(Exception):
     """ Raised when provided version in datafile is not supported. """

     pass
+
+
+class OdpNotEnabled(Exception):
+    """ Raised when Optimizely Data Platform (ODP) is not enabled. """
+
+    pass
+
+
+class OdpNotIntegrated(Exception):
+    """ Raised when Optimizely Data Platform (ODP) is not integrated. """
+
+    pass
+
+
+class OdpInvalidData(Exception):
+    """ Raised when passing invalid ODP data. """
+
+    pass
+
+
+class CmabError(Exception):
+    """Base exception for CMAB client errors."""
+
+    pass
+
+
+class CmabFetchError(CmabError):
+    """Exception raised when CMAB fetch fails."""
+
+    pass
+
+
+class CmabInvalidResponseError(CmabError):
+    """Exception raised when CMAB response is invalid."""
+
+    pass
diff --git a/optimizely/helpers/audience.py b/optimizely/helpers/audience.py
index e9914c66..190a38f8 100644
--- a/optimizely/helpers/audience.py
+++ b/optimizely/helpers/audience.py
@@ -1,4 +1,4 @@
-# Copyright 2016, 2018-2021, Optimizely
+# Copyright 2016, 2018-2022, Optimizely
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -11,18 +11,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import annotations
 import json
+from typing import TYPE_CHECKING, Optional, Sequence, Type

 from . import condition as condition_helper
 from . import condition_tree_evaluator
-
-
-def does_user_meet_audience_conditions(config,
-                                       audience_conditions,
-                                       audience_logs,
-                                       logging_key,
-                                       attributes,
-                                       logger):
+from optimizely import optimizely_user_context
+
+if TYPE_CHECKING:
+    # prevent circular dependency by skipping import at runtime
+    from optimizely.project_config import ProjectConfig
+    from optimizely.logger import Logger
+    from optimizely.helpers.enums import ExperimentAudienceEvaluationLogs, RolloutRuleAudienceEvaluationLogs
+
+
+def does_user_meet_audience_conditions(
+    config: ProjectConfig,
+    audience_conditions: Optional[Sequence[str | list[str]]],
+    audience_logs: Type[ExperimentAudienceEvaluationLogs | RolloutRuleAudienceEvaluationLogs],
+    logging_key: str,
+    user_context: optimizely_user_context.OptimizelyUserContext,
+    logger: Logger
+) -> tuple[bool, list[str]]:
     """ Determine for given experiment if user satisfies the audiences for the experiment.

     Args:
@@ -51,18 +62,17 @@
         return True, decide_reasons

-    if attributes is None:
-        attributes = {}
-
-    def evaluate_custom_attr(audience_id, index):
+    def evaluate_custom_attr(audience_id: str, index: int) -> Optional[bool]:
         audience = config.get_audience(audience_id)
+        if not audience or audience.conditionList is None:
+            return None
         custom_attr_condition_evaluator = condition_helper.CustomAttributeConditionEvaluator(
-            audience.conditionList, attributes, logger
+            audience.conditionList, user_context, logger
         )

         return custom_attr_condition_evaluator.evaluate(index)

-    def evaluate_audience(audience_id):
+    def evaluate_audience(audience_id: str) -> Optional[bool]:
         audience = config.get_audience(audience_id)

         if audience is None:
diff --git a/optimizely/helpers/condition.py b/optimizely/helpers/condition.py
index 57ec558c..58000a90 100644
--- a/optimizely/helpers/condition.py
+++ b/optimizely/helpers/condition.py
@@ -1,4 +1,4 @@
-# Copyright 2016, 2018-2020, Optimizely
+# Copyright 2016, 2018-2020, 2022, Optimizely
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -11,50 +11,70 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import annotations
 import json
 import numbers
-
-from six import string_types
+from typing import TYPE_CHECKING, Any, Callable, Optional
+from sys import version_info

 from . import validator
+from optimizely import optimizely_user_context
 from .enums import CommonAudienceEvaluationLogs as audience_logs
 from .enums import Errors
 from .enums import VersionType


-class ConditionOperatorTypes(object):
-    AND = 'and'
-    OR = 'or'
-    NOT = 'not'
+if TYPE_CHECKING:
+    # prevent circular dependency by skipping import at runtime
+    from optimizely.logger import Logger
+
+
+if version_info < (3, 8):
+    from typing_extensions import Literal, Final
+else:
+    from typing import Literal, Final  # type: ignore
+
+
+class ConditionOperatorTypes:
+    AND: Final = 'and'
+    OR: Final = 'or'
+    NOT: Final = 'not'
     operators = [AND, OR, NOT]


-class ConditionMatchTypes(object):
-    EXACT = 'exact'
-    EXISTS = 'exists'
-    GREATER_THAN = 'gt'
-    GREATER_THAN_OR_EQUAL = 'ge'
-    LESS_THAN = 'lt'
-    LESS_THAN_OR_EQUAL = 'le'
-    SEMVER_EQ = 'semver_eq'
-    SEMVER_GE = 'semver_ge'
-    SEMVER_GT = 'semver_gt'
-    SEMVER_LE = 'semver_le'
-    SEMVER_LT = 'semver_lt'
-    SUBSTRING = 'substring'
+class ConditionMatchTypes:
+    EXACT: Final = 'exact'
+    EXISTS: Final = 'exists'
+    GREATER_THAN: Final = 'gt'
+    GREATER_THAN_OR_EQUAL: Final = 'ge'
+    LESS_THAN: Final = 'lt'
+    LESS_THAN_OR_EQUAL: Final = 'le'
+    SEMVER_EQ: Final = 'semver_eq'
+    SEMVER_GE: Final = 'semver_ge'
+    SEMVER_GT: Final = 'semver_gt'
+    SEMVER_LE: Final = 'semver_le'
+    SEMVER_LT: Final = 'semver_lt'
+    SUBSTRING: Final = 'substring'
+    QUALIFIED: Final = 'qualified'


-class CustomAttributeConditionEvaluator(object):
+class CustomAttributeConditionEvaluator:
     """ Class encapsulating methods to be used in audience leaf condition evaluation. """

-    CUSTOM_ATTRIBUTE_CONDITION_TYPE = 'custom_attribute'
+    CONDITION_TYPES: Final = ('custom_attribute', 'third_party_dimension')

-    def __init__(self, condition_data, attributes, logger):
+    def __init__(
+        self,
+        condition_data: list[str | list[str]],
+        user_context: optimizely_user_context.OptimizelyUserContext,
+        logger: Logger
+    ):
         self.condition_data = condition_data
-        self.attributes = attributes or {}
+        self.user_context = user_context
+        self.attributes = user_context.get_user_attributes()
         self.logger = logger

-    def _get_condition_json(self, index):
+    def _get_condition_json(self, index: int) -> str:
         """ Method to generate json for logging audience condition.

         Args:
@@ -73,7 +93,7 @@

         return json.dumps(condition_log)

-    def is_value_type_valid_for_exact_conditions(self, value):
+    def is_value_type_valid_for_exact_conditions(self, value: Any) -> bool:
         """ Method to validate if the value is valid for exact match type evaluation.

         Args:
@@ -83,18 +103,18 @@
           Boolean: True if value is a string, boolean, or number. Otherwise False.
         """
         # No need to check for bool since bool is a subclass of int
-        if isinstance(value, string_types) or isinstance(value, (numbers.Integral, float)):
+        if isinstance(value, str) or isinstance(value, (numbers.Integral, float)):
             return True

         return False

-    def is_value_a_number(self, value):
+    def is_value_a_number(self, value: Any) -> bool:
         if isinstance(value, (numbers.Integral, float)) and not isinstance(value, bool):
             return True

         return False

-    def is_pre_release_version(self, version):
+    def is_pre_release_version(self, version: str) -> bool:
         """ Method to check if given version is pre-release.
Criteria for pre-release includes: - Version includes "-" @@ -114,7 +134,7 @@ def is_pre_release_version(self, version): return True return False - def is_build_version(self, version): + def is_build_version(self, version: str) -> bool: """ Method to check given version is a build version. Criteria for build version includes: - Version includes "+" @@ -134,7 +154,7 @@ def is_build_version(self, version): return True return False - def has_white_space(self, version): + def has_white_space(self, version: str) -> bool: """ Method to check if the given version contains " " (white space) Args: @@ -147,7 +167,9 @@ def has_white_space(self, version): """ return ' ' in version - def compare_user_version_with_target_version(self, target_version, user_version): + def compare_user_version_with_target_version( + self, target_version: str, user_version: str + ) -> Optional[Literal[0] | Literal[1] | Literal[-1]]: """ Method to compare user version with target version. Args: @@ -200,7 +222,7 @@ def compare_user_version_with_target_version(self, target_version, user_version) return -1 return 0 - def exact_evaluator(self, index): + def exact_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given exact match condition for the user attributes. Args: @@ -240,7 +262,7 @@ def exact_evaluator(self, index): return condition_value == user_value - def exists_evaluator(self, index): + def exists_evaluator(self, index: int) -> bool: """ Evaluate the given exists match condition for the user attributes. Args: @@ -253,7 +275,7 @@ def exists_evaluator(self, index): attr_name = self.condition_data[index][0] return self.attributes.get(attr_name) is not None - def greater_than_evaluator(self, index): + def greater_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given greater than match condition for the user attributes. Args: @@ -285,9 +307,9 @@ def greater_than_evaluator(self, index): ) return None - return user_value > condition_value + return user_value > condition_value # type: ignore[operator] - def greater_than_or_equal_evaluator(self, index): + def greater_than_or_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given greater than or equal to match condition for the user attributes. Args: @@ -319,9 +341,9 @@ def greater_than_or_equal_evaluator(self, index): ) return None - return user_value >= condition_value + return user_value >= condition_value # type: ignore[operator] - def less_than_evaluator(self, index): + def less_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given less than match condition for the user attributes. Args: @@ -353,9 +375,9 @@ def less_than_evaluator(self, index): ) return None - return user_value < condition_value + return user_value < condition_value # type: ignore[operator] - def less_than_or_equal_evaluator(self, index): + def less_than_or_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given less than or equal to match condition for the user attributes. Args: @@ -387,9 +409,9 @@ def less_than_or_equal_evaluator(self, index): ) return None - return user_value <= condition_value + return user_value <= condition_value # type: ignore[operator] - def substring_evaluator(self, index): + def substring_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given substring match condition for the given user attributes. 
Args: @@ -405,11 +427,11 @@ def substring_evaluator(self, index): condition_value = self.condition_data[index][1] user_value = self.attributes.get(condition_name) - if not isinstance(condition_value, string_types): + if not isinstance(condition_value, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index),)) return None - if not isinstance(user_value, string_types): + if not isinstance(user_value, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format(self._get_condition_json(index), type(user_value), condition_name) ) @@ -417,7 +439,7 @@ def substring_evaluator(self, index): return condition_value in user_value - def semver_equal_evaluator(self, index): + def semver_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version equal match target version for the user version. Args: @@ -435,11 +457,11 @@ def semver_equal_evaluator(self, index): target_version = self.condition_data[index][1] user_version = self.attributes.get(condition_name) - if not isinstance(target_version, string_types): + if not isinstance(target_version, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), )) return None - if not isinstance(user_version, string_types): + if not isinstance(user_version, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format( self._get_condition_json(index), type(user_version), condition_name @@ -453,7 +475,7 @@ def semver_equal_evaluator(self, index): return result == 0 - def semver_greater_than_evaluator(self, index): + def semver_greater_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version greater than match target version for the user version. Args: @@ -470,11 +492,11 @@ def semver_greater_than_evaluator(self, index): target_version = self.condition_data[index][1] user_version = self.attributes.get(condition_name) - if not isinstance(target_version, string_types): + if not isinstance(target_version, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), )) return None - if not isinstance(user_version, string_types): + if not isinstance(user_version, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format( self._get_condition_json(index), type(user_version), condition_name @@ -488,7 +510,7 @@ def semver_greater_than_evaluator(self, index): return result > 0 - def semver_less_than_evaluator(self, index): + def semver_less_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version less than match target version for the user version. 
        Args:
@@ -505,11 +527,11 @@
         target_version = self.condition_data[index][1]
         user_version = self.attributes.get(condition_name)

-        if not isinstance(target_version, string_types):
+        if not isinstance(target_version, str):
             self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), ))
             return None

-        if not isinstance(user_version, string_types):
+        if not isinstance(user_version, str):
             self.logger.warning(
                 audience_logs.UNEXPECTED_TYPE.format(
                     self._get_condition_json(index), type(user_version), condition_name
@@ -523,7 +545,7 @@

         return result < 0

-    def semver_less_than_or_equal_evaluator(self, index):
+    def semver_less_than_or_equal_evaluator(self, index: int) -> Optional[bool]:
         """ Evaluate the given semantic version less than or equal to match target version for the user version.

         Args:
@@ -540,11 +562,11 @@
         target_version = self.condition_data[index][1]
         user_version = self.attributes.get(condition_name)

-        if not isinstance(target_version, string_types):
+        if not isinstance(target_version, str):
             self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), ))
             return None

-        if not isinstance(user_version, string_types):
+        if not isinstance(user_version, str):
             self.logger.warning(
                 audience_logs.UNEXPECTED_TYPE.format(
                     self._get_condition_json(index), type(user_version), condition_name
@@ -558,7 +580,7 @@

         return result <= 0

-    def semver_greater_than_or_equal_evaluator(self, index):
+    def semver_greater_than_or_equal_evaluator(self, index: int) -> Optional[bool]:
         """ Evaluate the given semantic version greater than or equal to match target version for the user version.

         Args:
@@ -575,11 +597,11 @@
         target_version = self.condition_data[index][1]
         user_version = self.attributes.get(condition_name)

-        if not isinstance(target_version, string_types):
+        if not isinstance(target_version, str):
             self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), ))
             return None

-        if not isinstance(user_version, string_types):
+        if not isinstance(user_version, str):
             self.logger.warning(
                 audience_logs.UNEXPECTED_TYPE.format(
                     self._get_condition_json(index), type(user_version), condition_name
@@ -593,7 +615,27 @@

         return result >= 0

-    EVALUATORS_BY_MATCH_TYPE = {
+    def qualified_evaluator(self, index: int) -> Optional[bool]:
+        """ Check if the user is qualified for the given segment.
+
+        Args:
+          index: Index of the condition to be evaluated.
+
+        Returns:
+          Boolean:
+            - True if the user is qualified.
+            - False if the user is not qualified.
+          None: if the condition value isn't a string.
+ """ + condition_value = self.condition_data[index][1] + + if not isinstance(condition_value, str): + self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index),)) + return None + + return self.user_context.is_qualified_for(condition_value) + + EVALUATORS_BY_MATCH_TYPE: dict[str, Callable[[CustomAttributeConditionEvaluator, int], Optional[bool]]] = { ConditionMatchTypes.EXACT: exact_evaluator, ConditionMatchTypes.EXISTS: exists_evaluator, ConditionMatchTypes.GREATER_THAN: greater_than_evaluator, @@ -605,10 +647,11 @@ def semver_greater_than_or_equal_evaluator(self, index): ConditionMatchTypes.SEMVER_GT: semver_greater_than_evaluator, ConditionMatchTypes.SEMVER_LE: semver_less_than_or_equal_evaluator, ConditionMatchTypes.SEMVER_LT: semver_less_than_evaluator, - ConditionMatchTypes.SUBSTRING: substring_evaluator + ConditionMatchTypes.SUBSTRING: substring_evaluator, + ConditionMatchTypes.QUALIFIED: qualified_evaluator } - def split_version(self, version): + def split_version(self, version: str) -> Optional[list[str]]: """ Method to split the given version. Args: @@ -621,7 +664,7 @@ def split_version(self, version): - if the given version is invalid in format """ target_prefix = version - target_suffix = "" + target_suffix = [] target_parts = [] # check that version shouldn't have white space @@ -662,7 +705,7 @@ def split_version(self, version): target_version_parts.extend(target_suffix) return target_version_parts - def evaluate(self, index): + def evaluate(self, index: int) -> Optional[bool]: """ Given a custom attribute audience condition and user attributes, evaluate the condition against the attributes. @@ -676,7 +719,7 @@ def evaluate(self, index): None: if the user attributes and condition can't be evaluated. """ - if self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE: + if self.condition_data[index][2] not in self.CONDITION_TYPES: self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index))) return None @@ -688,7 +731,7 @@ def evaluate(self, index): self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index))) return None - if condition_match != ConditionMatchTypes.EXISTS: + if condition_match not in (ConditionMatchTypes.EXISTS, ConditionMatchTypes.QUALIFIED): attribute_key = self.condition_data[index][0] if attribute_key not in self.attributes: self.logger.debug( @@ -705,16 +748,16 @@ def evaluate(self, index): return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index) -class ConditionDecoder(object): +class ConditionDecoder: """ Class which provides an object_hook method for decoding dict objects into a list when given a condition_decoder. """ - def __init__(self, condition_decoder): - self.condition_list = [] + def __init__(self, condition_decoder: Callable[[dict[str, str]], list[Optional[str]]]): + self.condition_list: list[Optional[str] | list[str]] = [] self.index = -1 self.decoder = condition_decoder - def object_hook(self, object_dict): + def object_hook(self, object_dict: dict[str, str]) -> int: """ Hook which when passed into a json.JSONDecoder will replace each dict in a json string with its index and convert the dict to an object as defined by the passed in condition_decoder. 
The newly created condition object is @@ -727,12 +770,12 @@ def object_hook(self, object_dict): An index which will be used as the placeholder in the condition_structure """ instance = self.decoder(object_dict) - self.condition_list.append(instance) + self.condition_list.append(instance) # type: ignore[arg-type] self.index += 1 return self.index -def _audience_condition_deserializer(obj_dict): +def _audience_condition_deserializer(obj_dict: dict[str, str]) -> list[Optional[str]]: """ Deserializer defining how dict objects need to be decoded for audience conditions. Args: @@ -749,7 +792,7 @@ def _audience_condition_deserializer(obj_dict): ] -def loads(conditions_string): +def loads(conditions_string: str) -> tuple[list[str | list[str]], list[Optional[list[str] | str]]]: """ Deserializes the conditions property into its corresponding components: the condition_structure and the condition_list. diff --git a/optimizely/helpers/condition_tree_evaluator.py b/optimizely/helpers/condition_tree_evaluator.py index c0fe7b87..1e9a95c0 100644 --- a/optimizely/helpers/condition_tree_evaluator.py +++ b/optimizely/helpers/condition_tree_evaluator.py @@ -1,4 +1,4 @@ -# Copyright 2018-2019, Optimizely +# Copyright 2018-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,10 +11,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Any, Callable, Optional, Sequence + from .condition import ConditionOperatorTypes -def and_evaluator(conditions, leaf_evaluator): +LeafEvaluator = Callable[[Any], Optional[bool]] + + +def and_evaluator(conditions: Sequence[str | list[str]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Evaluates a list of conditions as if the evaluator had been applied to each entry and the results AND-ed together. @@ -40,7 +46,7 @@ def and_evaluator(conditions, leaf_evaluator): return None if saw_null_result else True -def or_evaluator(conditions, leaf_evaluator): +def or_evaluator(conditions: Sequence[str | list[str]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Evaluates a list of conditions as if the evaluator had been applied to each entry and the results OR-ed together. @@ -66,7 +72,7 @@ def or_evaluator(conditions, leaf_evaluator): return None if saw_null_result else False -def not_evaluator(conditions, leaf_evaluator): +def not_evaluator(conditions: Sequence[str | list[str]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Evaluates a list of conditions as if the evaluator had been applied to a single entry and NOT was applied to the result. @@ -94,7 +100,7 @@ def not_evaluator(conditions, leaf_evaluator): } -def evaluate(conditions, leaf_evaluator): +def evaluate(conditions: Optional[Sequence[str | list[str]]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Top level method to evaluate conditions. 
Args: diff --git a/optimizely/helpers/constants.py b/optimizely/helpers/constants.py index 06803152..06f2cb93 100644 --- a/optimizely/helpers/constants.py +++ b/optimizely/helpers/constants.py @@ -149,6 +149,14 @@ }, "version": {"type": "string"}, "revision": {"type": "string"}, + "integrations": { + "type": "array", + "items": { + "type": "object", + "properties": {"key": {"type": "string"}, "host": {"type": "string"}, "publicKey": {"type": "string"}}, + "required": ["key"], + } + } }, "required": [ "projectId", diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index aed202eb..2d6febab 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -12,141 +12,161 @@ # limitations under the License. import logging +from sys import version_info +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -class CommonAudienceEvaluationLogs(object): - AUDIENCE_EVALUATION_RESULT = 'Audience "{}" evaluated to {}.' - EVALUATING_AUDIENCE = 'Starting to evaluate audience "{}" with conditions: {}.' - INFINITE_ATTRIBUTE_VALUE = ( + +class CommonAudienceEvaluationLogs: + AUDIENCE_EVALUATION_RESULT: Final = 'Audience "{}" evaluated to {}.' + EVALUATING_AUDIENCE: Final = 'Starting to evaluate audience "{}" with conditions: {}.' + INFINITE_ATTRIBUTE_VALUE: Final = ( 'Audience condition "{}" evaluated to UNKNOWN because the number value ' 'for user attribute "{}" is not in the range [-2^53, +2^53].' ) - MISSING_ATTRIBUTE_VALUE = ( + MISSING_ATTRIBUTE_VALUE: Final = ( 'Audience condition {} evaluated to UNKNOWN because no value was passed for ' 'user attribute "{}".' ) - NULL_ATTRIBUTE_VALUE = ( + NULL_ATTRIBUTE_VALUE: Final = ( 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed ' 'for user attribute "{}".' ) - UNEXPECTED_TYPE = ( + UNEXPECTED_TYPE: Final = ( 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed ' 'for user attribute "{}".' ) - UNKNOWN_CONDITION_TYPE = ( + UNKNOWN_CONDITION_TYPE: Final = ( 'Audience condition "{}" uses an unknown condition type. You may need to upgrade to a ' 'newer release of the Optimizely SDK.' ) - UNKNOWN_CONDITION_VALUE = ( + UNKNOWN_CONDITION_VALUE: Final = ( 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' 'newer release of the Optimizely SDK.' ) - UNKNOWN_MATCH_TYPE = ( + UNKNOWN_MATCH_TYPE: Final = ( 'Audience condition "{}" uses an unknown match type. You may need to upgrade to a ' 'newer release of the Optimizely SDK.' ) class ExperimentAudienceEvaluationLogs(CommonAudienceEvaluationLogs): - AUDIENCE_EVALUATION_RESULT_COMBINED = 'Audiences for experiment "{}" collectively evaluated to {}.' - EVALUATING_AUDIENCES_COMBINED = 'Evaluating audiences for experiment "{}": {}.' + AUDIENCE_EVALUATION_RESULT_COMBINED: Final = 'Audiences for experiment "{}" collectively evaluated to {}.' + EVALUATING_AUDIENCES_COMBINED: Final = 'Evaluating audiences for experiment "{}": {}.' class RolloutRuleAudienceEvaluationLogs(CommonAudienceEvaluationLogs): - AUDIENCE_EVALUATION_RESULT_COMBINED = 'Audiences for rule {} collectively evaluated to {}.' - EVALUATING_AUDIENCES_COMBINED = 'Evaluating audiences for rule {}: {}.' + AUDIENCE_EVALUATION_RESULT_COMBINED: Final = 'Audiences for rule {} collectively evaluated to {}.' + EVALUATING_AUDIENCES_COMBINED: Final = 'Evaluating audiences for rule {}: {}.' 
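
The constant classes converted in this file all move from plain assignments to `Final`-annotated ones. A minimal sketch of what that buys, outside this diff, assuming mypy is available (plus `typing_extensions` on Python < 3.8); the class and its values are taken from the diff, while the failing assignment is illustrative only:

    from sys import version_info

    if version_info < (3, 8):
        from typing_extensions import Final
    else:
        from typing import Final


    class DecisionSources:
        EXPERIMENT: Final = 'experiment'
        FEATURE_TEST: Final = 'feature-test'
        ROLLOUT: Final = 'rollout'


    # Runtime behavior is unchanged, but mypy now rejects reassignment
    # of a Final attribute, catching accidental mutation of SDK constants.
    DecisionSources.ROLLOUT = 'override'  # flagged by mypy; still succeeds at runtime
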
-class ConfigManager(object): - AUTHENTICATED_DATAFILE_URL_TEMPLATE = 'https://config.optimizely.com/datafiles/auth/{sdk_key}.json' - AUTHORIZATION_HEADER_DATA_TEMPLATE = 'Bearer {datafile_access_token}' - DATAFILE_URL_TEMPLATE = 'https://cdn.optimizely.com/datafiles/{sdk_key}.json' +class ConfigManager: + AUTHENTICATED_DATAFILE_URL_TEMPLATE: Final = 'https://config.optimizely.com/datafiles/auth/{sdk_key}.json' + AUTHORIZATION_HEADER_DATA_TEMPLATE: Final = 'Bearer {datafile_access_token}' + DATAFILE_URL_TEMPLATE: Final = 'https://cdn.optimizely.com/datafiles/{sdk_key}.json' # Default time in seconds to block the 'get_config' method call until 'config' instance has been initialized. - DEFAULT_BLOCKING_TIMEOUT = 10 + DEFAULT_BLOCKING_TIMEOUT: Final = 10 # Default config update interval of 5 minutes - DEFAULT_UPDATE_INTERVAL = 5 * 60 + DEFAULT_UPDATE_INTERVAL: Final = 5 * 60 # Time in seconds before which request for datafile times out - REQUEST_TIMEOUT = 10 - - -class ControlAttributes(object): - BOT_FILTERING = '$opt_bot_filtering' - BUCKETING_ID = '$opt_bucketing_id' - USER_AGENT = '$opt_user_agent' - - -class DatafileVersions(object): - V2 = '2' - V3 = '3' - V4 = '4' - - -class DecisionNotificationTypes(object): - AB_TEST = 'ab-test' - ALL_FEATURE_VARIABLES = 'all-feature-variables' - FEATURE = 'feature' - FEATURE_TEST = 'feature-test' - FEATURE_VARIABLE = 'feature-variable' - FLAG = 'flag' - - -class DecisionSources(object): - EXPERIMENT = 'experiment' - FEATURE_TEST = 'feature-test' - ROLLOUT = 'rollout' - - -class Errors(object): - INVALID_ATTRIBUTE = 'Provided attribute is not in datafile.' - INVALID_ATTRIBUTE_FORMAT = 'Attributes provided are in an invalid format.' - INVALID_AUDIENCE = 'Provided audience is not in datafile.' - INVALID_EVENT_TAG_FORMAT = 'Event tags provided are in an invalid format.' - INVALID_EXPERIMENT_KEY = 'Provided experiment is not in datafile.' - INVALID_EVENT_KEY = 'Provided event is not in datafile.' - INVALID_FEATURE_KEY = 'Provided feature key is not in the datafile.' - INVALID_GROUP_ID = 'Provided group is not in datafile.' - INVALID_INPUT = 'Provided "{}" is in an invalid format.' - INVALID_OPTIMIZELY = 'Optimizely instance is not valid. Failing "{}".' - INVALID_PROJECT_CONFIG = 'Invalid config. Optimizely instance is not valid. Failing "{}".' - INVALID_VARIATION = 'Provided variation is not in datafile.' - INVALID_VARIABLE_KEY = 'Provided variable key is not in the feature flag.' - NONE_FEATURE_KEY_PARAMETER = '"None" is an invalid value for feature key.' - NONE_USER_ID_PARAMETER = '"None" is an invalid value for user ID.' - NONE_VARIABLE_KEY_PARAMETER = '"None" is an invalid value for variable key.' - UNSUPPORTED_DATAFILE_VERSION = 'This version of the Python SDK does not support the given datafile version: "{}".' - - -class ForcedDecisionLogs(object): - USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED = 'Variation ({}) is mapped to flag ({}), rule ({}) and user ({}) ' \ - 'in the forced decision map.' - USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED = 'Variation ({}) is mapped to flag ({}) and user ({}) ' \ - 'in the forced decision map.' - USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED_BUT_INVALID = 'Invalid variation is mapped to flag ({}), rule ({}) ' \ - 'and user ({}) in the forced decision map.' - USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED_BUT_INVALID = 'Invalid variation is mapped to flag ({}) ' \ - 'and user ({}) in the forced decision map.' 
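
The deleted `ForcedDecisionLogs` entries above carry their messages as backslash-continued strings; the replacements later in this hunk switch to parenthesized literals, which Python concatenates at compile time. A small sketch of the two equivalent spellings (the string is taken from the diff, the names are illustrative):

    OLD_STYLE = 'Variation ({}) is mapped to flag ({}) and user ({}) ' \
                'in the forced decision map.'

    NEW_STYLE = (
        'Variation ({}) is mapped to flag ({}) and user ({}) '
        'in the forced decision map.')

    # Adjacent string literals join into one string, so both spellings are equal.
    assert OLD_STYLE == NEW_STYLE
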
- - -class HTTPHeaders(object): - AUTHORIZATION = 'Authorization' - IF_MODIFIED_SINCE = 'If-Modified-Since' - LAST_MODIFIED = 'Last-Modified' - - -class HTTPVerbs(object): - GET = 'GET' - POST = 'POST' - - -class LogLevels(object): - NOTSET = logging.NOTSET - DEBUG = logging.DEBUG - INFO = logging.INFO - WARNING = logging.WARNING - ERROR = logging.ERROR - CRITICAL = logging.CRITICAL - - -class NotificationTypes(object): + REQUEST_TIMEOUT: Final = 10 + + +class ControlAttributes: + BOT_FILTERING: Final = '$opt_bot_filtering' + BUCKETING_ID: Final = '$opt_bucketing_id' + USER_AGENT: Final = '$opt_user_agent' + + +class DatafileVersions: + V2: Final = '2' + V3: Final = '3' + V4: Final = '4' + + +class DecisionNotificationTypes: + AB_TEST: Final = 'ab-test' + ALL_FEATURE_VARIABLES: Final = 'all-feature-variables' + FEATURE: Final = 'feature' + FEATURE_TEST: Final = 'feature-test' + FEATURE_VARIABLE: Final = 'feature-variable' + FLAG: Final = 'flag' + + +class DecisionSources: + EXPERIMENT: Final = 'experiment' + FEATURE_TEST: Final = 'feature-test' + ROLLOUT: Final = 'rollout' + + +class Errors: + INVALID_ATTRIBUTE: Final = 'Provided attribute is not in datafile.' + INVALID_ATTRIBUTE_FORMAT: Final = 'Attributes provided are in an invalid format.' + INVALID_AUDIENCE: Final = 'Provided audience is not in datafile.' + INVALID_EVENT_TAG_FORMAT: Final = 'Event tags provided are in an invalid format.' + INVALID_EXPERIMENT_KEY: Final = 'Provided experiment is not in datafile.' + INVALID_EVENT_KEY: Final = 'Provided event is not in datafile.' + INVALID_FEATURE_KEY: Final = 'Provided feature key is not in the datafile.' + INVALID_GROUP_ID: Final = 'Provided group is not in datafile.' + INVALID_INPUT: Final = 'Provided "{}" is in an invalid format.' + INVALID_OPTIMIZELY: Final = 'Optimizely instance is not valid. Failing "{}".' + INVALID_PROJECT_CONFIG: Final = 'Invalid config. Optimizely instance is not valid. Failing "{}".' + INVALID_VARIATION: Final = 'Provided variation is not in datafile.' + INVALID_VARIABLE_KEY: Final = 'Provided variable key is not in the feature flag.' + NONE_FEATURE_KEY_PARAMETER: Final = '"None" is an invalid value for feature key.' + NONE_USER_ID_PARAMETER: Final = '"None" is an invalid value for user ID.' + NONE_VARIABLE_KEY_PARAMETER: Final = '"None" is an invalid value for variable key.' + UNSUPPORTED_DATAFILE_VERSION: Final = ( + 'This version of the Python SDK does not support the given datafile version: "{}".') + FETCH_SEGMENTS_FAILED: Final = 'Audience segments fetch failed ({}).' + ODP_EVENT_FAILED: Final = 'ODP event send failed ({}).' + ODP_NOT_INTEGRATED: Final = 'ODP is not integrated.' + ODP_NOT_ENABLED: Final = 'ODP is not enabled.' + ODP_INVALID_DATA: Final = 'ODP data is not valid.' + ODP_INVALID_ACTION: Final = 'ODP action is not valid (cannot be empty).' + MISSING_SDK_KEY: Final = 'SDK key not provided/cannot be found in the datafile.' 
+ CMAB_FETCH_FAILED: Final = 'CMAB decision fetch failed with status: {}' + INVALID_CMAB_FETCH_RESPONSE: Final = 'Invalid CMAB fetch response' + + +class ForcedDecisionLogs: + USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED: Final = ( + 'Variation ({}) is mapped to flag ({}), rule ({}) and user ({}) ' + 'in the forced decision map.') + USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED: Final = ( + 'Variation ({}) is mapped to flag ({}) and user ({}) ' + 'in the forced decision map.') + USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED_BUT_INVALID: Final = ( + 'Invalid variation is mapped to flag ({}), rule ({}) ' + 'and user ({}) in the forced decision map.') + USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED_BUT_INVALID: Final = ( + 'Invalid variation is mapped to flag ({}) ' + 'and user ({}) in the forced decision map.') + + +class HTTPHeaders: + AUTHORIZATION: Final = 'Authorization' + IF_MODIFIED_SINCE: Final = 'If-Modified-Since' + LAST_MODIFIED: Final = 'Last-Modified' + + +class HTTPVerbs: + GET: Final = 'GET' + POST: Final = 'POST' + + +class LogLevels: + NOTSET: Final = logging.NOTSET + DEBUG: Final = logging.DEBUG + INFO: Final = logging.INFO + WARNING: Final = logging.WARNING + ERROR: Final = logging.ERROR + CRITICAL: Final = logging.CRITICAL + + +class NotificationTypes: """ NotificationTypes for the notification_center.NotificationCenter format is NOTIFICATION TYPE: list of parameters to callback. @@ -165,13 +185,49 @@ class NotificationTypes(object): LogEvent log_event """ - ACTIVATE = 'ACTIVATE:experiment, user_id, attributes, variation, event' - DECISION = 'DECISION:type, user_id, attributes, decision_info' - OPTIMIZELY_CONFIG_UPDATE = 'OPTIMIZELY_CONFIG_UPDATE' - TRACK = 'TRACK:event_key, user_id, attributes, event_tags, event' - LOG_EVENT = 'LOG_EVENT:log_event' + ACTIVATE: Final = 'ACTIVATE:experiment, user_id, attributes, variation, event' + DECISION: Final = 'DECISION:type, user_id, attributes, decision_info' + OPTIMIZELY_CONFIG_UPDATE: Final = 'OPTIMIZELY_CONFIG_UPDATE' + TRACK: Final = 'TRACK:event_key, user_id, attributes, event_tags, event' + LOG_EVENT: Final = 'LOG_EVENT:log_event' + + +class VersionType: + IS_PRE_RELEASE: Final = '-' + IS_BUILD: Final = '+' + + +class EventDispatchConfig: + """Event dispatching configs.""" + REQUEST_TIMEOUT: Final = 10 + RETRIES: Final = 3 + + +class OdpEventApiConfig: + """ODP Events API configs.""" + REQUEST_TIMEOUT: Final = 10 + + +class OdpSegmentApiConfig: + """ODP Segments API configs.""" + REQUEST_TIMEOUT: Final = 10 + + +class OdpEventManagerConfig: + """ODP Event Manager configs.""" + DEFAULT_QUEUE_CAPACITY: Final = 1000 + DEFAULT_BATCH_SIZE: Final = 10 + DEFAULT_FLUSH_INTERVAL: Final = 1 + DEFAULT_RETRY_COUNT: Final = 3 + + +class OdpManagerConfig: + """ODP Manager configs.""" + KEY_FOR_USER_ID: Final = 'fs_user_id' + EVENT_TYPE: Final = 'fullstack' -class VersionType(object): - IS_PRE_RELEASE = '-' - IS_BUILD = '+' +class OdpSegmentsCacheConfig: + """ODP Segment Cache configs.""" + DEFAULT_CAPACITY: Final = 10_000 + DEFAULT_TIMEOUT_SECS: Final = 600 diff --git a/optimizely/helpers/event_tag_utils.py b/optimizely/helpers/event_tag_utils.py index 0a5ae264..0efbafb7 100644 --- a/optimizely/helpers/event_tag_utils.py +++ b/optimizely/helpers/event_tag_utils.py @@ -1,4 +1,4 @@ -# Copyright 2017, Optimizely +# Copyright 2017, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at @@ -11,15 +11,32 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Any, Optional, NewType, Dict from . import enums import math import numbers +from sys import version_info -REVENUE_METRIC_TYPE = 'revenue' -NUMERIC_METRIC_TYPE = 'value' +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -def get_revenue_value(event_tags): +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from optimizely.logger import Logger + + +REVENUE_METRIC_TYPE: Final = 'revenue' +NUMERIC_METRIC_TYPE: Final = 'value' + +# type for tracking event tags (essentially a sub-type of dict) +EventTags = NewType('EventTags', Dict[str, Any]) + + +def get_revenue_value(event_tags: Optional[EventTags]) -> Optional[numbers.Integral]: if event_tags is None: return None @@ -40,7 +57,7 @@ def get_revenue_value(event_tags): return raw_value -def get_numeric_value(event_tags, logger=None): +def get_numeric_value(event_tags: Optional[EventTags], logger: Optional[Logger] = None) -> Optional[float]: """ A smart getter of the numeric value from the event tags. @@ -87,9 +104,7 @@ def get_numeric_value(event_tags, logger=None): if not isinstance(cast_numeric_metric_value, float) or \ math.isnan(cast_numeric_metric_value) or \ math.isinf(cast_numeric_metric_value): - logger_message_debug = 'Provided numeric value {} is in an invalid format.'.format( - numeric_metric_value - ) + logger_message_debug = f'Provided numeric value {numeric_metric_value} is in an invalid format.' numeric_metric_value = None else: # Handle booleans as a special case. @@ -116,15 +131,14 @@ def get_numeric_value(event_tags, logger=None): if logger: logger.log( enums.LogLevels.INFO, - 'The numeric metric value {} will be sent to results.'.format(numeric_metric_value), + f'The numeric metric value {numeric_metric_value} will be sent to results.' ) else: if logger: logger.log( enums.LogLevels.WARNING, - 'The provided numeric metric value {} is in an invalid format and will not be sent to results.'.format( - numeric_metric_value - ), + f'The provided numeric metric value {numeric_metric_value}' + ' is in an invalid format and will not be sent to results.' ) - return numeric_metric_value + return numeric_metric_value # type: ignore[no-any-return] diff --git a/optimizely/helpers/experiment.py b/optimizely/helpers/experiment.py index 45bdd1b5..8a644b43 100644 --- a/optimizely/helpers/experiment.py +++ b/optimizely/helpers/experiment.py @@ -1,4 +1,4 @@ -# Copyright 2016, Optimizely +# Copyright 2016, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,11 +10,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from optimizely.entities import Experiment + ALLOWED_EXPERIMENT_STATUS = ['Running'] -def is_experiment_running(experiment): +def is_experiment_running(experiment: Experiment) -> bool: """ Determine for given experiment if experiment is running.
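# EventTags, introduced above, is a typing.NewType over Dict[str, Any]: it only
# guides the type checker and erases to a plain dict at runtime. A small sketch
# with illustrative tag values:
from optimizely.helpers.event_tag_utils import EventTags, get_numeric_value, get_revenue_value

tags = EventTags({'revenue': 1000, 'value': 49.99})
assert get_revenue_value(tags) == 1000      # integral 'revenue' tag
assert get_numeric_value(tags) == 49.99     # finite numeric 'value' tag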
Args: diff --git a/optimizely/helpers/sdk_settings.py b/optimizely/helpers/sdk_settings.py new file mode 100644 index 00000000..6b31ee9c --- /dev/null +++ b/optimizely/helpers/sdk_settings.py @@ -0,0 +1,65 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Optional + +from optimizely.helpers import enums +from optimizely.odp.lru_cache import OptimizelySegmentsCache +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_segment_manager import OdpSegmentManager + + +class OptimizelySdkSettings: + """Contains configuration used for Optimizely Project initialization.""" + + def __init__( + self, + odp_disabled: bool = False, + segments_cache_size: int = enums.OdpSegmentsCacheConfig.DEFAULT_CAPACITY, + segments_cache_timeout_in_secs: int = enums.OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS, + odp_segments_cache: Optional[OptimizelySegmentsCache] = None, + odp_segment_manager: Optional[OdpSegmentManager] = None, + odp_event_manager: Optional[OdpEventManager] = None, + odp_segment_request_timeout: Optional[int] = None, + odp_event_request_timeout: Optional[int] = None, + odp_event_flush_interval: Optional[int] = None + ) -> None: + """ + Args: + odp_disabled: Set this flag to true (default = False) to disable ODP features. + segments_cache_size: The maximum size of audience segments cache (optional. default = 10,000). + Set to zero to disable caching. + segments_cache_timeout_in_secs: The timeout in seconds of audience segments cache (optional. default = 600). + Set to zero to disable timeout. + odp_segments_cache: A custom odp segments cache. Required methods include: + `save(key, value)`, `lookup(key) -> value`, and `reset()` + odp_segment_manager: A custom odp segment manager. Required method is: + `fetch_qualified_segments(user_key, user_value, options)`. + odp_event_manager: A custom odp event manager. Required method is: + `send_event(type:, action:, identifiers:, data:)` + odp_segment_request_timeout: Time to wait in seconds for fetch_qualified_segments request to + send successfully (optional). + odp_event_request_timeout: Time to wait in seconds for send_odp_events request to send successfully. + odp_event_flush_interval: Time to wait for events to accumulate before sending a batch in seconds (optional). 
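# A sketch of constructing these settings; the sizes and timeouts are
# illustrative, and whether they suit a workload depends on traffic volume.
from optimizely.helpers.sdk_settings import OptimizelySdkSettings

settings = OptimizelySdkSettings(
    segments_cache_size=500,             # cap the segments cache at 500 entries
    segments_cache_timeout_in_secs=60,   # entries go stale after one minute
    odp_segment_request_timeout=5,       # fail segment fetches after 5 seconds
)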
+ """ + + self.odp_disabled = odp_disabled + self.segments_cache_size = segments_cache_size + self.segments_cache_timeout_in_secs = segments_cache_timeout_in_secs + self.segments_cache = odp_segments_cache + self.odp_segment_manager = odp_segment_manager + self.odp_event_manager = odp_event_manager + self.fetch_segments_timeout = odp_segment_request_timeout + self.odp_event_timeout = odp_event_request_timeout + self.odp_flush_interval = odp_event_flush_interval diff --git a/optimizely/helpers/types.py b/optimizely/helpers/types.py new file mode 100644 index 00000000..3cca45de --- /dev/null +++ b/optimizely/helpers/types.py @@ -0,0 +1,117 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from typing import Optional, Any +from sys import version_info + + +if version_info < (3, 8): + from typing_extensions import TypedDict +else: + from typing import TypedDict # type: ignore + + +# Intermediate types for type checking deserialized datafile json before actual class instantiation. +# These aren't used for anything other than type signatures + +class BaseEntity(TypedDict): + pass + + +class BaseDict(BaseEntity): + """Base type for parsed datafile json, before instantiation of class objects.""" + id: str + key: str + + +class EventDict(BaseDict): + """Event dict from parsed datafile json.""" + experimentIds: list[str] + + +class AttributeDict(BaseDict): + """Attribute dict from parsed datafile json.""" + pass + + +class TrafficAllocation(BaseEntity): + """Traffic Allocation dict from parsed datafile json.""" + endOfRange: int + entityId: str + + +class VariableDict(BaseDict): + """Variable dict from parsed datafile json.""" + value: str + type: str + defaultValue: str + subType: str + + +class VariationDict(BaseDict): + """Variation dict from parsed datafile json.""" + variables: list[VariableDict] + featureEnabled: Optional[bool] + + +class ExperimentDict(BaseDict): + """Experiment dict from parsed datafile json.""" + status: str + forcedVariations: dict[str, str] + variations: list[VariationDict] + layerId: str + audienceIds: list[str] + audienceConditions: list[str | list[str]] + trafficAllocation: list[TrafficAllocation] + + +class RolloutDict(BaseEntity): + """Rollout dict from parsed datafile json.""" + id: str + experiments: list[ExperimentDict] + + +class FeatureFlagDict(BaseDict): + """Feature flag dict from parsed datafile json.""" + rolloutId: str + variables: list[VariableDict] + experimentIds: list[str] + + +class GroupDict(BaseEntity): + """Group dict from parsed datafile json.""" + id: str + policy: str + experiments: list[ExperimentDict] + trafficAllocation: list[TrafficAllocation] + + +class AudienceDict(BaseEntity): + """Audience dict from parsed datafile json.""" + id: str + name: str + conditions: list[Any] | str + + +class IntegrationDict(BaseEntity): + """Integration dict from parsed datafile json.""" + key: str + host: str + publicKey: str + + +class CmabDict(BaseEntity): + """Cmab dict from parsed datafile 
json.""" + attributeIds: list[str] + trafficAllocation: int diff --git a/optimizely/helpers/validator.py b/optimizely/helpers/validator.py index 522faccd..b9e4fcc5 100644 --- a/optimizely/helpers/validator.py +++ b/optimizely/helpers/validator.py @@ -1,4 +1,4 @@ -# Copyright 2016-2019, Optimizely +# Copyright 2016-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,18 +11,33 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import json +from typing import TYPE_CHECKING, Any, Optional, Type import jsonschema import math import numbers -from six import string_types from optimizely.notification_center import NotificationCenter from optimizely.user_profile import UserProfile from . import constants - - -def is_datafile_valid(datafile): +from ..odp.lru_cache import OptimizelySegmentsCache +from ..odp.odp_event_manager import OdpEventManager +from ..odp.odp_segment_manager import OdpSegmentManager + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.logger import Logger + from optimizely.event_dispatcher import CustomEventDispatcher + from optimizely.error_handler import BaseErrorHandler + from optimizely.config_manager import BaseConfigManager + from optimizely.event.event_processor import BaseEventProcessor + from optimizely.helpers.event_tag_utils import EventTags + from optimizely.optimizely_user_context import UserAttributes + from optimizely.odp.odp_event import OdpDataDict + + +def is_datafile_valid(datafile: Optional[str | bytes]) -> bool: """ Given a datafile determine if it is valid or not. Args: @@ -31,6 +46,8 @@ def is_datafile_valid(datafile): Returns: Boolean depending upon whether datafile is valid or not. """ + if datafile is None: + return False try: datafile_json = json.loads(datafile) @@ -45,7 +62,7 @@ def is_datafile_valid(datafile): return True -def _has_method(obj, method): +def _has_method(obj: object, method: str) -> bool: """ Given an object determine if it supports the method. Args: @@ -53,13 +70,13 @@ def _has_method(obj, method): method: Method whose presence needs to be determined. Returns: - Boolean depending upon whether the method is available or not. + Boolean depending upon whether the method is available and callable or not. """ - return getattr(obj, method, None) is not None + return callable(getattr(obj, method, None)) -def is_config_manager_valid(config_manager): +def is_config_manager_valid(config_manager: BaseConfigManager) -> bool: """ Given a config_manager determine if it is valid or not i.e. provides a get_config method. Args: @@ -72,7 +89,7 @@ def is_config_manager_valid(config_manager): return _has_method(config_manager, 'get_config') -def is_event_processor_valid(event_processor): +def is_event_processor_valid(event_processor: BaseEventProcessor) -> bool: """ Given an event_processor, determine if it is valid or not i.e. provides a process method. Args: @@ -85,7 +102,7 @@ def is_event_processor_valid(event_processor): return _has_method(event_processor, 'process') -def is_error_handler_valid(error_handler): +def is_error_handler_valid(error_handler: Type[BaseErrorHandler] | BaseErrorHandler) -> bool: """ Given a error_handler determine if it is valid or not i.e. provides a handle_error method. 
Args: @@ -98,7 +115,7 @@ def is_error_handler_valid(error_handler): return _has_method(error_handler, 'handle_error') -def is_event_dispatcher_valid(event_dispatcher): +def is_event_dispatcher_valid(event_dispatcher: Type[CustomEventDispatcher] | CustomEventDispatcher) -> bool: """ Given an event_dispatcher determine if it is valid or not i.e. provides a dispatch_event method. Args: @@ -111,7 +128,7 @@ def is_event_dispatcher_valid(event_dispatcher): return _has_method(event_dispatcher, 'dispatch_event') -def is_logger_valid(logger): +def is_logger_valid(logger: Logger) -> bool: """ Given a logger determine if it is valid or not i.e. provides a log method. Args: @@ -124,7 +141,7 @@ def is_logger_valid(logger): return _has_method(logger, 'log') -def is_notification_center_valid(notification_center): +def is_notification_center_valid(notification_center: NotificationCenter) -> bool: """ Given notification_center determine if it is valid or not. Args: @@ -137,7 +154,7 @@ def is_notification_center_valid(notification_center): return isinstance(notification_center, NotificationCenter) -def are_attributes_valid(attributes): +def are_attributes_valid(attributes: UserAttributes) -> bool: """ Determine if attributes provided are dict or not. Args: @@ -150,7 +167,7 @@ def are_attributes_valid(attributes): return type(attributes) is dict -def are_event_tags_valid(event_tags): +def are_event_tags_valid(event_tags: EventTags) -> bool: """ Determine if event tags provided are dict or not. Args: @@ -163,7 +180,7 @@ def are_event_tags_valid(event_tags): return type(event_tags) is dict -def is_user_profile_valid(user_profile): +def is_user_profile_valid(user_profile: dict[str, Any]) -> bool: """ Determine if provided user profile is valid or not. Args: @@ -196,7 +213,7 @@ def is_user_profile_valid(user_profile): return True -def is_non_empty_string(input_id_key): +def is_non_empty_string(input_id_key: str) -> bool: """ Determine if provided input_id_key is a non-empty string or not. Args: @@ -205,13 +222,13 @@ def is_non_empty_string(input_id_key): Returns: Boolean depending upon whether input is valid or not. """ - if input_id_key and isinstance(input_id_key, string_types): + if input_id_key and isinstance(input_id_key, str): return True return False -def is_attribute_valid(attribute_key, attribute_value): +def is_attribute_valid(attribute_key: str, attribute_value: Any) -> bool: """ Determine if given attribute is valid. Args: @@ -224,10 +241,10 @@ def is_attribute_valid(attribute_key, attribute_value): True otherwise """ - if not isinstance(attribute_key, string_types): + if not isinstance(attribute_key, str): return False - if isinstance(attribute_value, (string_types, bool)): + if isinstance(attribute_value, (str, bool)): return True if isinstance(attribute_value, (numbers.Integral, float)): @@ -236,7 +253,7 @@ def is_attribute_valid(attribute_key, attribute_value): return False -def is_finite_number(value): +def is_finite_number(value: Any) -> bool: """ Validates if the given value is a number, enforces absolute limit of 2^53 and restricts NAN, INF, -INF. @@ -259,13 +276,14 @@ def is_finite_number(value): if math.isnan(value) or math.isinf(value): return False - if abs(value) > (2 ** 53): - return False + if isinstance(value, (int, float)): + if abs(value) > (2 ** 53): + return False return True -def are_values_same_type(first_val, second_val): +def are_values_same_type(first_val: Any, second_val: Any) -> bool: """ Method to verify that both values belong to the same type.
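# is_finite_number enforces the 2**53 absolute limit (the largest integer a
# 64-bit float holds exactly) and rejects NaN, infinities and bools:
from optimizely.helpers.validator import is_finite_number

assert is_finite_number(2 ** 53)            # at the limit: valid
assert not is_finite_number(2 ** 53 + 1)    # beyond the limit: rejected
assert not is_finite_number(float('nan'))
assert not is_finite_number(True)           # bool is excluded despite being an int subclass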
Float and integer are considered as the same type. @@ -281,7 +299,7 @@ second_val_type = type(second_val) # use isinstance to accommodate Python 2 unicode and str types. - if isinstance(first_val, string_types) and isinstance(second_val, string_types): + if isinstance(first_val, str) and isinstance(second_val, str): return True # Compare types if one of the values is bool because bool is a subclass of Integer. @@ -293,3 +311,71 @@ return True return False + + +def are_odp_data_types_valid(data: OdpDataDict) -> bool: + valid_types = (str, int, float, bool, type(None)) + return all(isinstance(v, valid_types) for v in data.values()) + + +def is_segments_cache_valid(segments_cache: Optional[OptimizelySegmentsCache]) -> bool: + """ Given a segments_cache determine if it is valid or not i.e. provides reset, lookup and save methods. + + Args: + segments_cache: Provides cache methods: reset, lookup, save. + + Returns: + Boolean depending upon whether segments_cache is valid or not. + """ + if not _has_method(segments_cache, 'reset'): + return False + + if not _has_method(segments_cache, 'lookup'): + return False + + if not _has_method(segments_cache, 'save'): + return False + + return True + + +def is_segment_manager_valid(segment_manager: Optional[OdpSegmentManager]) -> bool: + """ Given a segment_manager determine if it is valid or not. + + Args: + segment_manager: Provides methods fetch_qualified_segments and reset + + Returns: + Boolean depending upon whether segment_manager is valid or not. + """ + if not _has_method(segment_manager, 'fetch_qualified_segments'): + return False + + if not _has_method(segment_manager, 'reset'): + return False + + return True + + +def is_event_manager_valid(event_manager: Optional[OdpEventManager]) -> bool: + """ Given an event_manager determine if it is valid or not. + + Args: + event_manager: Provides send_event method + + Returns: + Boolean depending upon whether event_manager is valid or not. + """ + if not hasattr(event_manager, 'is_running'): + return False + + if not _has_method(event_manager, 'send_event'): + return False + + if not _has_method(event_manager, 'stop'): + return False + + if not _has_method(event_manager, 'update_config'): + return False + + return True diff --git a/optimizely/lib/pymmh3.py b/optimizely/lib/pymmh3.py index 4997de21..b37bf944 100755 --- a/optimizely/lib/pymmh3.py +++ b/optimizely/lib/pymmh3.py @@ -16,36 +16,21 @@ https://pypi.python.org/pypi/mmh3/2.3.1 ''' +from __future__ import annotations -import sys as _sys - -if _sys.version_info > (3, 0): - - def xrange(a, b, c): - return range(a, b, c) - - def xencode(x): - if isinstance(x, bytes) or isinstance(x, bytearray): - return x - else: - return x.encode() - - -else: - - def xencode(x): +def xencode(x: bytes | bytearray | str) -> bytes | bytearray: + if isinstance(x, bytes) or isinstance(x, bytearray): return x + else: + return x.encode() -del _sys - - -def hash(key, seed=0x0): +def hash(key: str | bytearray, seed: int = 0x0) -> int: ''' Implements 32bit murmur3 hash. ''' key = bytearray(xencode(key)) - def fmix(h): + def fmix(h: int) -> int: h ^= h >> 16 h = (h * 0x85EBCA6B) & 0xFFFFFFFF h ^= h >> 13 @@ -62,7 +47,7 @@ def fmix(h): c2 = 0x1B873593 # body - for block_start in xrange(0, nblocks * 4, 4): + for block_start in range(0, nblocks * 4, 4): # ??? big endian?
k1 = key[block_start + 3] << 24 | key[block_start + 2] << 16 | key[block_start + 1] << 8 | key[block_start + 0] @@ -100,13 +85,13 @@ def fmix(h): return -((unsigned_val ^ 0xFFFFFFFF) + 1) -def hash128(key, seed=0x0, x64arch=True): +def hash128(key: bytes, seed: int = 0x0, x64arch: bool = True) -> int: ''' Implements 128bit murmur3 hash. ''' - def hash128_x64(key, seed): + def hash128_x64(key: bytes, seed: int) -> int: ''' Implements 128bit murmur3 hash for x64. ''' - def fmix(k): + def fmix(k: int) -> int: k ^= k >> 33 k = (k * 0xFF51AFD7ED558CCD) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33 @@ -124,7 +109,7 @@ def fmix(k): c2 = 0x4CF5AD432745937F # body - for block_start in xrange(0, nblocks * 8, 8): + for block_start in range(0, nblocks * 8, 8): # ??? big endian? k1 = ( key[2 * block_start + 7] << 56 | @@ -231,10 +216,10 @@ def fmix(k): return h2 << 64 | h1 - def hash128_x86(key, seed): + def hash128_x86(key: bytes, seed: int) -> int: ''' Implements 128bit murmur3 hash for x86. ''' - def fmix(h): + def fmix(h: int) -> int: h ^= h >> 16 h = (h * 0x85EBCA6B) & 0xFFFFFFFF h ^= h >> 13 @@ -256,7 +241,7 @@ def fmix(h): c4 = 0xA1E38B93 # body - for block_start in xrange(0, nblocks * 16, 16): + for block_start in range(0, nblocks * 16, 16): k1 = ( key[block_start + 3] << 24 | key[block_start + 2] << 16 | @@ -422,7 +407,7 @@ def fmix(h): return hash128_x86(key, seed) -def hash64(key, seed=0x0, x64arch=True): +def hash64(key: bytes, seed: int = 0x0, x64arch: bool = True) -> tuple[int, int]: ''' Implements 64bit murmur3 hash. Returns a tuple. ''' hash_128 = hash128(key, seed, x64arch) @@ -442,14 +427,14 @@ def hash64(key, seed=0x0, x64arch=True): return (int(signed_val1), int(signed_val2)) -def hash_bytes(key, seed=0x0, x64arch=True): +def hash_bytes(key: bytes, seed: int = 0x0, x64arch: bool = True) -> str: ''' Implements 128bit murmur3 hash. Returns a byte string. ''' hash_128 = hash128(key, seed, x64arch) bytestring = '' - for i in xrange(0, 16, 1): + for i in range(0, 16, 1): lsbyte = hash_128 & 0xFF bytestring = bytestring + str(chr(lsbyte)) hash_128 = hash_128 >> 8 @@ -459,6 +444,7 @@ def hash_bytes(key, seed=0x0, x64arch=True): if __name__ == "__main__": import argparse + import sys parser = argparse.ArgumentParser('pymurmur3', 'pymurmur [options] "string to hash"') parser.add_argument('--seed', type=int, default=0) @@ -467,4 +453,4 @@ def hash_bytes(key, seed=0x0, x64arch=True): opts = parser.parse_args() for str_to_hash in opts.strings: - sys.stdout.write('"%s" = 0x%08X\n' % (str_to_hash, hash(str_to_hash))) + sys.stdout.write(f'"{str_to_hash}" = 0x{hash(str_to_hash):08X}\n') diff --git a/optimizely/logger.py b/optimizely/logger.py index 4754e347..33d3660c 100644 --- a/optimizely/logger.py +++ b/optimizely/logger.py @@ -1,4 +1,4 @@ -# Copyright 2016, 2018-2019, Optimizely +# Copyright 2016, 2018-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,15 +11,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
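# With the py2 xrange/xencode shims removed above, pymmh3 is now plain Python 3.
# Its output is deterministic for a given key and seed, which is what bucketing
# relies on. Inputs here are illustrative:
from optimizely.lib import pymmh3

h = pymmh3.hash('experiment_key.user_1', seed=1)
assert h == pymmh3.hash('experiment_key.user_1', seed=1)   # stable across calls
assert -2**31 <= h < 2**31                                 # signed 32-bit result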
import logging +from typing import Any, Optional, Union import warnings +from sys import version_info from .helpers import enums +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -_DEFAULT_LOG_FORMAT = '%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s:%(message)s' +_DEFAULT_LOG_FORMAT: Final = '%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s:%(message)s' -def reset_logger(name, level=None, handler=None): + +def reset_logger(name: str, level: Optional[int] = None, handler: Optional[logging.Handler] = None) -> logging.Logger: """ Make a standard python logger object with default formatter, handler, etc. @@ -52,18 +59,42 @@ def reset_logger(name, level=None, handler=None): return logger -class BaseLogger(object): +class BaseLogger: """ Class encapsulating logging functionality. Override with your own logger providing log method. """ @staticmethod - def log(*args): + def log(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def error(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def warning(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def info(*args: Any) -> None: pass # pragma: no cover + @staticmethod + def debug(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def exception(*args: Any) -> None: + pass # pragma: no cover + + +# type alias for optimizely logger +Logger = Union[logging.Logger, BaseLogger] + class NoOpLogger(BaseLogger): """ Class providing log method which logs nothing. """ - def __init__(self): + def __init__(self) -> None: self.logger = reset_logger( name='.'.join([__name__, self.__class__.__name__]), level=logging.NOTSET, handler=logging.NullHandler(), ) @@ -72,21 +103,21 @@ def __init__(self): class SimpleLogger(BaseLogger): """ Class providing log method which logs to stdout. """ - def __init__(self, min_level=enums.LogLevels.INFO): + def __init__(self, min_level: int = enums.LogLevels.INFO): self.level = min_level self.logger = reset_logger(name='.'.join([__name__, self.__class__.__name__]), level=min_level) - def log(self, log_level, message): + def log(self, log_level: int, message: object) -> None: # type: ignore[override] # Log a deprecation/runtime warning. # Clients should be using standard loggers instead of this wrapper. - warning = '{} is deprecated. Please use standard python loggers.'.format(self.__class__) + warning = f'{self.__class__} is deprecated. Please use standard python loggers.' warnings.warn(warning, DeprecationWarning) # Log the message. self.logger.log(log_level, message) -def adapt_logger(logger): +def adapt_logger(logger: Logger) -> Logger: """ Adapt our custom logger.BaseLogger object into a standard logging.Logger object. diff --git a/optimizely/notification_center.py b/optimizely/notification_center.py index 539088a8..322a5862 100644 --- a/optimizely/notification_center.py +++ b/optimizely/notification_center.py @@ -1,4 +1,4 @@ -# Copyright 2017-2019, Optimizely +# Copyright 2017-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,27 +11,35 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Any, Callable, Optional from .helpers import enums from . 
import logger as optimizely_logger +from sys import version_info +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -NOTIFICATION_TYPES = tuple( + +NOTIFICATION_TYPES: Final = tuple( getattr(enums.NotificationTypes, attr) for attr in dir(enums.NotificationTypes) if not attr.startswith('__') ) -class NotificationCenter(object): +class NotificationCenter: """ Class encapsulating methods to manage notifications and their listeners. The enums.NotificationTypes includes predefined notifications.""" - def __init__(self, logger=None): + def __init__(self, logger: Optional[optimizely_logger.Logger] = None): self.listener_id = 1 - self.notification_listeners = {} + self.notification_listeners: dict[str, list[tuple[int, Callable[..., None]]]] = {} for notification_type in NOTIFICATION_TYPES: self.notification_listeners[notification_type] = [] self.logger = optimizely_logger.adapt_logger(logger or optimizely_logger.NoOpLogger()) - def add_notification_listener(self, notification_type, notification_callback): + def add_notification_listener(self, notification_type: str, notification_callback: Callable[..., None]) -> int: """ Add a notification callback to the notification center for a given notification type. Args: @@ -45,7 +53,7 @@ def add_notification_listener(self, notification_type, notification_callback): """ if notification_type not in NOTIFICATION_TYPES: - self.logger.error('Invalid notification_type: {} provided. Not adding listener.'.format(notification_type)) + self.logger.error(f'Invalid notification_type: {notification_type} provided. Not adding listener.') return -1 for _, listener in self.notification_listeners[notification_type]: @@ -59,7 +67,7 @@ def add_notification_listener(self, notification_type, notification_callback): return current_listener_id - def remove_notification_listener(self, notification_id): + def remove_notification_listener(self, notification_id: int) -> bool: """ Remove a previously added notification callback. Args: @@ -77,7 +85,7 @@ def remove_notification_listener(self, notification_id): return False - def clear_notification_listeners(self, notification_type): + def clear_notification_listeners(self, notification_type: str) -> None: """ Remove notification listeners for a certain notification type. Args: @@ -86,11 +94,11 @@ def clear_notification_listeners(self, notification_type): if notification_type not in NOTIFICATION_TYPES: self.logger.error( - 'Invalid notification_type: {} provided. Not removing any listener.'.format(notification_type) + f'Invalid notification_type: {notification_type} provided. Not removing any listener.' ) self.notification_listeners[notification_type] = [] - def clear_notifications(self, notification_type): + def clear_notifications(self, notification_type: str) -> None: """ (DEPRECATED since 3.2.0, use clear_notification_listeners) Remove notification listeners for a certain notification type. @@ -99,17 +107,17 @@ def clear_notifications(self, notification_type): """ self.clear_notification_listeners(notification_type) - def clear_all_notification_listeners(self): + def clear_all_notification_listeners(self) -> None: """ Remove all notification listeners. """ for notification_type in self.notification_listeners.keys(): self.clear_notification_listeners(notification_type) - def clear_all_notifications(self): + def clear_all_notifications(self) -> None: """ (DEPRECATED since 3.2.0, use clear_all_notification_listeners) Remove all notification listeners. 
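# Listener sketch: callbacks are registered per notification type and receive
# whatever positional arguments send_notifications passes through. The DECISION
# payload below is illustrative.
from optimizely.notification_center import NotificationCenter
from optimizely.helpers import enums

def on_decision(decision_type, user_id, attributes, decision_info):
    print(f'{user_id}: {decision_type} -> {decision_info}')

center = NotificationCenter()
listener_id = center.add_notification_listener(enums.NotificationTypes.DECISION, on_decision)
center.send_notifications(enums.NotificationTypes.DECISION, 'flag', 'user_1', {}, {'enabled': True})
center.remove_notification_listener(listener_id)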
""" self.clear_all_notification_listeners() - def send_notifications(self, notification_type, *args): + def send_notifications(self, notification_type: str, *args: Any) -> None: """ Fires off the notification for the specific event. Uses var args to pass in a arbitrary list of parameter according to which notification type was fired. @@ -120,7 +128,7 @@ def send_notifications(self, notification_type, *args): if notification_type not in NOTIFICATION_TYPES: self.logger.error( - 'Invalid notification_type: {} provided. ' 'Not triggering any notification.'.format(notification_type) + f'Invalid notification_type: {notification_type} provided. ' 'Not triggering any notification.' ) return @@ -130,5 +138,5 @@ def send_notifications(self, notification_type, *args): callback(*args) except: self.logger.exception( - 'Unknown problem when sending "{}" type notification.'.format(notification_type) + f'Unknown problem when sending "{notification_type}" type notification.' ) diff --git a/optimizely/notification_center_registry.py b/optimizely/notification_center_registry.py new file mode 100644 index 00000000..b07702ab --- /dev/null +++ b/optimizely/notification_center_registry.py @@ -0,0 +1,64 @@ +# Copyright 2023, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +from threading import Lock +from typing import Optional +from .logger import Logger as OptimizelyLogger +from .notification_center import NotificationCenter +from .helpers.enums import Errors + + +class _NotificationCenterRegistry: + """ Class managing internal notification centers.""" + _notification_centers: dict[str, NotificationCenter] = {} + _lock = Lock() + + @classmethod + def get_notification_center(cls, sdk_key: Optional[str], logger: OptimizelyLogger) -> Optional[NotificationCenter]: + """Returns an internal notification center for the given sdk_key, creating one + if none exists yet. + + Args: + sdk_key: A string sdk key to uniquely identify the notification center. + logger: Optional logger. + + Returns: + None or NotificationCenter + """ + + if not sdk_key: + logger.error(f'{Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + return None + + with cls._lock: + if sdk_key in cls._notification_centers: + notification_center = cls._notification_centers[sdk_key] + else: + notification_center = NotificationCenter(logger) + cls._notification_centers[sdk_key] = notification_center + + return notification_center + + @classmethod + def remove_notification_center(cls, sdk_key: str) -> None: + """Remove a previously added notification center and clear all its listeners. + + Args: + sdk_key: The sdk_key of the notification center to remove. 
+ """ + + with cls._lock: + notification_center = cls._notification_centers.pop(sdk_key, None) + if notification_center: + notification_center.clear_all_notification_listeners() diff --git a/optimizely/odp/__init__.py b/optimizely/odp/__init__.py new file mode 100644 index 00000000..cd898c0e --- /dev/null +++ b/optimizely/odp/__init__.py @@ -0,0 +1,12 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/optimizely/odp/lru_cache.py b/optimizely/odp/lru_cache.py new file mode 100644 index 00000000..073973e6 --- /dev/null +++ b/optimizely/odp/lru_cache.py @@ -0,0 +1,125 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +from dataclasses import dataclass, field +import threading +from time import time +from collections import OrderedDict +from typing import Optional, Generic, TypeVar, Hashable +from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Protocol +else: + from typing import Protocol # type: ignore + +# generic type definitions for LRUCache parameters +K = TypeVar('K', bound=Hashable, contravariant=True) +V = TypeVar('V') + + +class LRUCache(Generic[K, V]): + """Least Recently Used cache that invalidates entries older than the timeout.""" + + def __init__(self, capacity: int, timeout_in_secs: int): + self.lock = threading.Lock() + self.map: OrderedDict[K, CacheElement[V]] = OrderedDict() + self.capacity = capacity + self.timeout = timeout_in_secs + + def lookup(self, key: K) -> Optional[V]: + """Return the non-stale value associated with the provided key and move the + element to the end of the cache. If the selected value is stale, remove it from + the cache and clear the entire cache if stale. + """ + if self.capacity <= 0: + return None + + with self.lock: + if key not in self.map: + return None + + self.map.move_to_end(key) + element = self.map[key] + + if element._is_stale(self.timeout): + del self.map[key] + return None + + return element.value + + def save(self, key: K, value: V) -> None: + """Insert and/or move the provided key/value pair to the most recent end of the cache. + If the cache grows beyond the cache capacity, the least recently used element will be + removed. 
+ """ + if self.capacity <= 0: + return + + with self.lock: + if key in self.map: + self.map.move_to_end(key) + + self.map[key] = CacheElement(value) + + if len(self.map) > self.capacity: + self.map.popitem(last=False) + + def reset(self) -> None: + """ Clear the cache.""" + if self.capacity <= 0: + return + with self.lock: + self.map.clear() + + def peek(self, key: K) -> Optional[V]: + """Returns the value associated with the provided key without updating the cache.""" + if self.capacity <= 0: + return None + with self.lock: + element = self.map.get(key) + return element.value if element is not None else None + + def remove(self, key: K) -> None: + """Remove the element associated with the provided key from the cache.""" + with self.lock: + self.map.pop(key, None) + + +@dataclass +class CacheElement(Generic[V]): + """Individual element for the LRUCache.""" + value: V + timestamp: float = field(default_factory=time) + + def _is_stale(self, timeout: float) -> bool: + """Returns True if the provided timeout has passed since the element's timestamp.""" + if timeout <= 0: + return False + return time() - self.timestamp >= timeout + + +class OptimizelySegmentsCache(Protocol): + """Protocol for implementing custom cache.""" + def reset(self) -> None: + """ Clear the cache.""" + ... + + def lookup(self, key: str) -> Optional[list[str]]: + """Return the value associated with the provided key.""" + ... + + def save(self, key: str, value: list[str]) -> None: + """Save the key/value pair in the cache.""" + ... diff --git a/optimizely/odp/odp_config.py b/optimizely/odp/odp_config.py new file mode 100644 index 00000000..17e435dc --- /dev/null +++ b/optimizely/odp/odp_config.py @@ -0,0 +1,96 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +from enum import Enum + +from typing import Optional +from threading import Lock + + +class OdpConfigState(Enum): + """State of the ODP integration.""" + UNDETERMINED = 1 + INTEGRATED = 2 + NOT_INTEGRATED = 3 + + +class OdpConfig: + """ + Contains configuration used for ODP integration. + + Args: + api_host: The host URL for the ODP audience segments API (optional). + api_key: The public API key for the ODP account from which the audience segments will be fetched (optional). + segments_to_check: A list of all ODP segments used in the current datafile + (associated with api_host/api_key). + """ + def __init__( + self, + api_key: Optional[str] = None, + api_host: Optional[str] = None, + segments_to_check: Optional[list[str]] = None + ) -> None: + self._api_key = api_key + self._api_host = api_host + self._segments_to_check = segments_to_check or [] + self.lock = Lock() + self._odp_state = OdpConfigState.UNDETERMINED + if self._api_host and self._api_key: + self._odp_state = OdpConfigState.INTEGRATED + + def update(self, api_key: Optional[str], api_host: Optional[str], segments_to_check: list[str]) -> bool: + """ + Override the ODP configuration. 
+ + Args: + api_host: The host URL for the ODP audience segments API (optional). + api_key: The public API key for the ODP account from which the audience segments will be fetched (optional). + segments_to_check: A list of all ODP segments used in the current datafile + (associated with api_host/api_key). + + Returns: + True if the provided values were different than the existing values. + """ + + updated = False + with self.lock: + if api_key and api_host: + self._odp_state = OdpConfigState.INTEGRATED + else: + self._odp_state = OdpConfigState.NOT_INTEGRATED + + if self._api_key != api_key or self._api_host != api_host or self._segments_to_check != segments_to_check: + self._api_key = api_key + self._api_host = api_host + self._segments_to_check = segments_to_check + updated = True + + return updated + + def get_api_host(self) -> Optional[str]: + with self.lock: + return self._api_host + + def get_api_key(self) -> Optional[str]: + with self.lock: + return self._api_key + + def get_segments_to_check(self) -> list[str]: + with self.lock: + return self._segments_to_check.copy() + + def odp_state(self) -> OdpConfigState: + """Returns the state of ODP integration (UNDETERMINED, INTEGRATED, or NOT_INTEGRATED).""" + with self.lock: + return self._odp_state diff --git a/optimizely/odp/odp_event.py b/optimizely/odp/odp_event.py new file mode 100644 index 00000000..640b0dc3 --- /dev/null +++ b/optimizely/odp/odp_event.py @@ -0,0 +1,74 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Any, Union, Dict +import uuid +import json +from optimizely import version +from optimizely.helpers.enums import OdpManagerConfig + +OdpDataDict = Dict[str, Union[str, int, float, bool, None]] + + +class OdpEvent: + """ Representation of an odp event which can be sent to the Optimizely odp platform. """ + + def __init__(self, type: str, action: str, identifiers: dict[str, str], data: OdpDataDict) -> None: + self.type = type + self.action = action + self.identifiers = self._convert_identifiers(identifiers) + self.data = self._add_common_event_data(data) + + def __repr__(self) -> str: + return str(self.__dict__) + + def __eq__(self, other: object) -> bool: + if isinstance(other, OdpEvent): + return self.__dict__ == other.__dict__ + elif isinstance(other, dict): + return self.__dict__ == other + else: + return False + + def _add_common_event_data(self, custom_data: OdpDataDict) -> OdpDataDict: + data: OdpDataDict = { + 'idempotence_id': str(uuid.uuid4()), + 'data_source_type': 'sdk', + 'data_source': 'python-sdk', + 'data_source_version': version.__version__ + } + data.update(custom_data) + return data + + def _convert_identifiers(self, identifiers: dict[str, str]) -> dict[str, str]: + """ + Convert incorrect case/separator of identifier key `fs_user_id` + (i.e. `fs-user-id`, `FS_USER_ID`).
+ """ + for key in list(identifiers): + if key == OdpManagerConfig.KEY_FOR_USER_ID: + break + elif key.lower() in ("fs-user-id", OdpManagerConfig.KEY_FOR_USER_ID): + identifiers[OdpManagerConfig.KEY_FOR_USER_ID] = identifiers.pop(key) + break + + return identifiers + + +class OdpEventEncoder(json.JSONEncoder): + def default(self, obj: object) -> Any: + if isinstance(obj, OdpEvent): + return obj.__dict__ + return json.JSONEncoder.default(self, obj) diff --git a/optimizely/odp/odp_event_api_manager.py b/optimizely/odp/odp_event_api_manager.py new file mode 100644 index 00000000..85967415 --- /dev/null +++ b/optimizely/odp/odp_event_api_manager.py @@ -0,0 +1,98 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import json +from typing import Optional + +import requests +from requests.exceptions import RequestException, ConnectionError, Timeout + +from optimizely import logger as optimizely_logger +from optimizely.helpers.enums import Errors, OdpEventApiConfig +from optimizely.odp.odp_event import OdpEvent, OdpEventEncoder + +""" + ODP REST Events API + - https://api.zaius.com/v3/events + - test ODP public API key = "W4WzcEs-ABgXorzY7h1LCQ" + + [Event Request] + curl -i -H 'Content-Type: application/json' -H 'x-api-key: W4WzcEs-ABgXorzY7h1LCQ' -X POST -d + '{"type":"fullstack","action":"identified","identifiers":{"vuid": "123","fs_user_id": "abc"}, + "data":{"idempotence_id":"xyz","source":"swift-sdk"}}' https://api.zaius.com/v3/events + [Event Response] + {"title":"Accepted","status":202,"timestamp":"2022-06-30T20:59:52.046Z"} +""" + + +class OdpEventApiManager: + """Provides an internal service for ODP event REST api access.""" + + def __init__(self, logger: Optional[optimizely_logger.Logger] = None, timeout: Optional[int] = None): + self.logger = logger or optimizely_logger.NoOpLogger() + self.timeout = timeout or OdpEventApiConfig.REQUEST_TIMEOUT + + def send_odp_events(self, + api_key: str, + api_host: str, + events: list[OdpEvent]) -> bool: + """ + Dispatch the event being represented by the OdpEvent object. + + Args: + api_key: public api key + api_host: domain url of the host + events: list of odp events to be sent to optimizely's odp platform. 
+ + Returns: + True if the request should be retried (network or 5xx server error), False otherwise. + """ + should_retry = False + url = f'{api_host}/v3/events' + request_headers = {'content-type': 'application/json', 'x-api-key': api_key} + + try: + payload_dict = json.dumps(events, cls=OdpEventEncoder) + except TypeError as err: + self.logger.error(Errors.ODP_EVENT_FAILED.format(err)) + return should_retry + + try: + response = requests.post(url=url, + headers=request_headers, + data=payload_dict, + timeout=self.timeout) + + response.raise_for_status() + + except (ConnectionError, Timeout): + self.logger.error(Errors.ODP_EVENT_FAILED.format('network error')) + # retry on network errors + should_retry = True + except RequestException as err: + if err.response is not None: + if 400 <= err.response.status_code < 500: + # log 4xx + self.logger.error(Errors.ODP_EVENT_FAILED.format(err.response.text)) + else: + # log 5xx + self.logger.error(Errors.ODP_EVENT_FAILED.format(err)) + # retry on 5xx errors + should_retry = True + else: + # log exceptions without response body (i.e. invalid url) + self.logger.error(Errors.ODP_EVENT_FAILED.format(err)) + + return should_retry diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py new file mode 100644 index 00000000..85512e90 --- /dev/null +++ b/optimizely/odp/odp_event_manager.py @@ -0,0 +1,281 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import time +from enum import Enum +from queue import Empty, Queue, Full +from threading import Thread +from typing import Optional + +from optimizely import logger as _logging +from optimizely.helpers.enums import OdpEventManagerConfig, Errors, OdpManagerConfig +from .odp_config import OdpConfig, OdpConfigState +from .odp_event import OdpEvent, OdpDataDict +from .odp_event_api_manager import OdpEventApiManager + + +class Signal(Enum): + """Enum for sending signals to the event queue.""" + SHUTDOWN = 1 + FLUSH = 2 + UPDATE_CONFIG = 3 + + +class OdpEventManager: + """ + Class that sends batches of ODP events. + + The OdpEventManager maintains a single consumer thread that pulls events off of + the queue and buffers them before events are sent to ODP. + Sends events when the batch size is met or when the flush timeout has elapsed. + Flushes the event queue after specified time (seconds). + """ + + def __init__( + self, + logger: Optional[_logging.Logger] = None, + api_manager: Optional[OdpEventApiManager] = None, + request_timeout: Optional[int] = None, + flush_interval: Optional[int] = None + ): + """OdpEventManager init method to configure event batching. + + Args: + logger: Optional component which provides a log method to log messages. By default nothing would be logged. + api_manager: Optional component which sends events to ODP. + request_timeout: Optional event timeout in seconds - wait time for odp platform to respond before failing.
+ flush_interval: Optional time to wait for events to accumulate before sending the batch in seconds. + """ + self.logger = logger or _logging.NoOpLogger() + self.api_manager = api_manager or OdpEventApiManager(self.logger, request_timeout) + + self.odp_config: Optional[OdpConfig] = None + self.api_key: Optional[str] = None + self.api_host: Optional[str] = None + + self.event_queue: Queue[OdpEvent | Signal] = Queue(OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY) + self.batch_size = 1 if flush_interval == 0 else OdpEventManagerConfig.DEFAULT_BATCH_SIZE + + self.flush_interval = OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL if flush_interval is None \ + else flush_interval + + self._flush_deadline: float = 0 + self.retry_count = OdpEventManagerConfig.DEFAULT_RETRY_COUNT + self._current_batch: list[OdpEvent] = [] + """_current_batch should only be modified by the processing thread, as it is not thread safe""" + self.thread = Thread(target=self._run, name="OdpThread", daemon=True) + self.thread_exception = False + """thread_exception will be True if the processing thread did not exit cleanly""" + + @property + def is_running(self) -> bool: + """Property to check if consumer thread is alive or not.""" + return self.thread.is_alive() + + def start(self, odp_config: OdpConfig) -> None: + """Starts the batch processing thread to batch events.""" + if self.is_running: + self.logger.warning('ODP event queue already started.') + return + + self.odp_config = odp_config + self.api_host = self.odp_config.get_api_host() + self.api_key = self.odp_config.get_api_key() + + self.thread.start() + + def _run(self) -> None: + """Processes the event queue from a child thread. Events are batched until + the batch size is met or until the flush timeout has elapsed. + """ + try: + while True: + timeout = self._get_queue_timeout() + + try: + item = self.event_queue.get(True, timeout) + except Empty: + item = None + + if item == Signal.SHUTDOWN: + self.logger.debug('ODP event queue: received shutdown signal.') + break + + elif item == Signal.FLUSH: + self.logger.debug('ODP event queue: received flush signal.') + self._flush_batch() + self.event_queue.task_done() + + elif item == Signal.UPDATE_CONFIG: + self.logger.debug('ODP event queue: received update config signal.') + self._update_config() + self.event_queue.task_done() + + elif isinstance(item, OdpEvent): + self._add_to_batch(item) + self.event_queue.task_done() + + elif len(self._current_batch) > 0: + self.logger.debug('ODP event queue: flushing on interval.') + self._flush_batch() + + except Exception as exception: + self.thread_exception = True + self.logger.error(f'Uncaught exception processing ODP events. Error: {exception}') + + finally: + self.logger.info('Exiting ODP event processing loop. Attempting to flush pending events.') + self._flush_batch() + if item == Signal.SHUTDOWN: + self.event_queue.task_done() + + def flush(self) -> None: + """Adds flush signal to event_queue.""" + try: + self.event_queue.put_nowait(Signal.FLUSH) + except Full: + self.logger.error("Error flushing ODP event queue") + + def _flush_batch(self) -> None: + """Flushes current batch by dispatching event. 
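# The consumer thread multiplexes one queue for data and control: OdpEvent
# items are batched, while Signal values are handled in-band, so ordering
# between events and control actions is preserved. A condensed stand-in:
from queue import Queue

q: Queue = Queue()
q.put('event-1')    # stands in for an OdpEvent
q.put('FLUSH')      # stands in for Signal.FLUSH
q.put('SHUTDOWN')   # stands in for Signal.SHUTDOWN

batch = []
while True:
    item = q.get()
    if item == 'SHUTDOWN':
        break               # flush remaining work, then exit the thread
    elif item == 'FLUSH':
        batch.clear()       # dispatch the current batch immediately
    else:
        batch.append(item)  # buffer until batch size or flush interval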
+ Should only be called by the processing thread.""" + batch_len = len(self._current_batch) + if batch_len == 0: + self.logger.debug('ODP event queue: nothing to flush.') + return + + if not self.api_key or not self.api_host: + self.logger.debug(Errors.ODP_NOT_INTEGRATED) + self._current_batch.clear() + return + + self.logger.debug(f'ODP event queue: flushing batch size {batch_len}.') + should_retry = False + + for i in range(1 + self.retry_count): + try: + should_retry = self.api_manager.send_odp_events(self.api_key, + self.api_host, + self._current_batch) + except Exception as error: + should_retry = False + self.logger.error(Errors.ODP_EVENT_FAILED.format(f'Error: {error} {self._current_batch}')) + + if not should_retry: + break + if i < self.retry_count: + self.logger.debug('Error dispatching ODP events, scheduled to retry.') + + if should_retry: + self.logger.error(Errors.ODP_EVENT_FAILED.format(f'Failed after {i} retries: {self._current_batch}')) + + self._current_batch.clear() + + def _add_to_batch(self, odp_event: OdpEvent) -> None: + """Appends received ODP event to current batch, flushing if batch is greater than batch size. + Should only be called by the processing thread.""" + if not self._current_batch: + self._set_flush_deadline() + + self._current_batch.append(odp_event) + if len(self._current_batch) >= self.batch_size: + self.logger.debug('ODP event queue: flushing on batch size.') + self._flush_batch() + + def _set_flush_deadline(self) -> None: + """Sets time that next flush will occur.""" + self._flush_deadline = time.time() + self.flush_interval + + def _get_time_till_flush(self) -> float: + """Returns seconds until next flush; no less than 0.""" + return max(0, self._flush_deadline - time.time()) + + def _get_queue_timeout(self) -> Optional[float]: + """Returns seconds until next flush or None if current batch is empty.""" + if len(self._current_batch) == 0: + return None + return self._get_time_till_flush() + + def stop(self) -> None: + """Flushes and then stops ODP event queue.""" + try: + self.event_queue.put_nowait(Signal.SHUTDOWN) + except Full: + self.logger.error('Error stopping ODP event queue.') + return + + self.logger.warning('Stopping ODP event queue.') + + if self.is_running: + self.thread.join() + + if len(self._current_batch) > 0: + self.logger.error(Errors.ODP_EVENT_FAILED.format(self._current_batch)) + + if self.is_running: + self.logger.error('Error stopping ODP event queue.') + + def send_event(self, type: str, action: str, identifiers: dict[str, str], data: OdpDataDict) -> None: + """Create OdpEvent and add it to the event queue.""" + if not self.odp_config: + self.logger.debug('ODP event queue: cannot send before config has been set.') + return + + odp_state = self.odp_config.odp_state() + if odp_state == OdpConfigState.UNDETERMINED: + self.logger.debug('ODP event queue: cannot send before the datafile has loaded.') + return + + if odp_state == OdpConfigState.NOT_INTEGRATED: + self.logger.debug(Errors.ODP_NOT_INTEGRATED) + return + + self.dispatch(OdpEvent(type, action, identifiers, data)) + + def dispatch(self, event: OdpEvent) -> None: + """Add OdpEvent to the event queue.""" + if self.thread_exception: + self.logger.error(Errors.ODP_EVENT_FAILED.format('Queue is down')) + return + + if not self.is_running: + self.logger.warning('ODP event queue is shutdown, not accepting events.') + return + + try: + self.logger.debug('ODP event queue: adding event.') + self.event_queue.put_nowait(event) + except Full: + 
self.logger.warning(Errors.ODP_EVENT_FAILED.format("Queue is full")) + + def identify_user(self, user_id: str) -> None: + self.send_event(OdpManagerConfig.EVENT_TYPE, 'identified', + {OdpManagerConfig.KEY_FOR_USER_ID: user_id}, {}) + + def update_config(self) -> None: + """Adds update config signal to event_queue.""" + try: + self.event_queue.put_nowait(Signal.UPDATE_CONFIG) + except Full: + self.logger.error("Error updating ODP config for the event queue") + + def _update_config(self) -> None: + """Updates the configuration used to send events.""" + if len(self._current_batch) > 0: + self._flush_batch() + + if self.odp_config: + self.api_host = self.odp_config.get_api_host() + self.api_key = self.odp_config.get_api_key() diff --git a/optimizely/odp/odp_manager.py b/optimizely/odp/odp_manager.py new file mode 100644 index 00000000..a6e26253 --- /dev/null +++ b/optimizely/odp/odp_manager.py @@ -0,0 +1,135 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional, Any + +from optimizely import logger as optimizely_logger +from optimizely.helpers.enums import Errors, OdpManagerConfig, OdpSegmentsCacheConfig +from optimizely.helpers.validator import are_odp_data_types_valid +from optimizely.odp.lru_cache import OptimizelySegmentsCache, LRUCache +from optimizely.odp.odp_config import OdpConfig, OdpConfigState +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_segment_manager import OdpSegmentManager + + +class OdpManager: + """Orchestrates segment manager, event manager and odp config.""" + + def __init__( + self, + disable: bool, + segments_cache: Optional[OptimizelySegmentsCache] = None, + segment_manager: Optional[OdpSegmentManager] = None, + event_manager: Optional[OdpEventManager] = None, + fetch_segments_timeout: Optional[int] = None, + odp_event_timeout: Optional[int] = None, + odp_flush_interval: Optional[int] = None, + logger: Optional[optimizely_logger.Logger] = None + ) -> None: + + self.enabled = not disable + self.odp_config = OdpConfig() + self.logger = logger or optimizely_logger.NoOpLogger() + + self.segment_manager = segment_manager + self.event_manager = event_manager + self.fetch_segments_timeout = fetch_segments_timeout + + if not self.enabled: + self.logger.info('ODP is disabled.') + return + + if not self.segment_manager: + if not segments_cache: + segments_cache = LRUCache( + OdpSegmentsCacheConfig.DEFAULT_CAPACITY, + OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS + ) + self.segment_manager = OdpSegmentManager(segments_cache, logger=self.logger, timeout=fetch_segments_timeout) + + self.event_manager = self.event_manager or OdpEventManager(self.logger, request_timeout=odp_event_timeout, + flush_interval=odp_flush_interval) + self.segment_manager.odp_config = self.odp_config + + def fetch_qualified_segments(self, user_id: str, options: list[str]) -> Optional[list[str]]: + if not self.enabled or not self.segment_manager: + 
self.logger.error(Errors.ODP_NOT_ENABLED) + return None + + user_key = OdpManagerConfig.KEY_FOR_USER_ID + user_value = user_id + + return self.segment_manager.fetch_qualified_segments(user_key, user_value, options) + + def identify_user(self, user_id: str) -> None: + if not self.enabled or not self.event_manager: + self.logger.debug('ODP identify event is not dispatched (ODP disabled).') + return + if self.odp_config.odp_state() == OdpConfigState.NOT_INTEGRATED: + self.logger.debug('ODP identify event is not dispatched (ODP not integrated).') + return + + self.event_manager.identify_user(user_id) + + def send_event(self, type: str, action: str, identifiers: dict[str, str], data: dict[str, Any]) -> None: + """ + Send an event to the ODP server. + + Args: + type: The event type. + action: The event action name. + identifiers: A dictionary for identifiers. + data: A dictionary for associated data. The default event data will be added to this data + before sending to the ODP server. + """ + if not self.enabled or not self.event_manager: + self.logger.error(Errors.ODP_NOT_ENABLED) + return + + if self.odp_config.odp_state() == OdpConfigState.NOT_INTEGRATED: + self.logger.error(Errors.ODP_NOT_INTEGRATED) + return + + if not are_odp_data_types_valid(data): + self.logger.error(Errors.ODP_INVALID_DATA) + return + + self.event_manager.send_event(type, action, identifiers, data) + + def update_odp_config(self, api_key: Optional[str], api_host: Optional[str], + segments_to_check: list[str]) -> None: + if not self.enabled: + return + + config_changed = self.odp_config.update(api_key, api_host, segments_to_check) + if not config_changed: + self.logger.debug('Odp config was not changed.') + return + + # reset segments cache when odp integration or segments to check are changed + if self.segment_manager: + self.segment_manager.reset() + + if not self.event_manager: + return + + if self.event_manager.is_running: + self.event_manager.update_config() + elif self.odp_config.odp_state() == OdpConfigState.INTEGRATED: + self.event_manager.start(self.odp_config) + + def close(self) -> None: + if self.enabled and self.event_manager: + self.event_manager.stop() diff --git a/optimizely/odp/odp_segment_api_manager.py b/optimizely/odp/odp_segment_api_manager.py new file mode 100644 index 00000000..1ea191eb --- /dev/null +++ b/optimizely/odp/odp_segment_api_manager.py @@ -0,0 +1,194 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
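For orientation before the file body: the manager added below posts a parameterized GraphQL query rather than interpolating the user id into the query string the way the curl examples in its docstring do. A minimal sketch of the serialized payload it sends to {api_host}/v3/graphql, using the example identifiers from that docstring (assumed values, not real data):

    import json

    user_key = 'fs_user_id'  # or 'vuid' for client device ids
    payload = json.dumps({
        'query': (
            'query($userId: String, $audiences: [String]) {'
            f'customer({user_key}: $userId) '
            '{audiences(subset: $audiences) {edges {node {name state}}}}}'
        ),
        'variables': {
            'userId': 'tester-101',                      # example fs_user_id from the docstring
            'audiences': ['has_email', 'push_on_sale'],  # segments to check
        },
    })
    # fetch_segments() sends this body with the 'content-type: application/json'
    # and 'x-api-key' headers, then keeps only the edges whose state is 'qualified'.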
+
+from __future__ import annotations
+
+import json
+from typing import Optional
+
+import requests
+from requests.exceptions import RequestException, ConnectionError, Timeout, JSONDecodeError
+
+from optimizely import logger as optimizely_logger
+from optimizely.helpers.enums import Errors, OdpSegmentApiConfig
+
+"""
+ ODP GraphQL API
+ - https://api.zaius.com/v3/graphql
+ - test ODP public API key = "W4WzcEs-ABgXorzY7h1LCQ"
+
+
+ [GraphQL Request]
+
+ # fetch info with fs_user_id for ["has_email", "has_email_opted_in", "push_on_sale"] segments
+ curl -i -H 'Content-Type: application/json' -H 'x-api-key: W4WzcEs-ABgXorzY7h1LCQ' -X POST -d
+ '{"query":"query {customer(fs_user_id: \"tester-101\") {audiences(subset:[\"has_email\",
+ \"has_email_opted_in\", \"push_on_sale\"]) {edges {node {name state}}}}}"}' https://api.zaius.com/v3/graphql
+ # fetch info with vuid for ["has_email", "has_email_opted_in", "push_on_sale"] segments
+ curl -i -H 'Content-Type: application/json' -H 'x-api-key: W4WzcEs-ABgXorzY7h1LCQ' -X POST -d
+ '{"query":"query {customer(vuid: \"d66a9d81923d4d2f99d8f64338976322\") {audiences(subset:[\"has_email\",
+ \"has_email_opted_in\", \"push_on_sale\"]) {edges {node {name state}}}}}"}' https://api.zaius.com/v3/graphql
+
+ query MyQuery {
+   customer(vuid: "d66a9d81923d4d2f99d8f64338976322") {
+     audiences(subset:["has_email", "has_email_opted_in", "push_on_sale"]) {
+       edges {
+         node {
+           name
+           state
+         }
+       }
+     }
+   }
+ }
+
+
+ [GraphQL Response]
+ {
+   "data": {
+     "customer": {
+       "audiences": {
+         "edges": [
+           {
+             "node": {
+               "name": "has_email",
+               "state": "qualified"
+             }
+           },
+           {
+             "node": {
+               "name": "has_email_opted_in",
+               "state": "qualified"
+             }
+           },
+           ...
+         ]
+       }
+     }
+   }
+ }
+
+ [GraphQL Error Response]
+ {
+   "errors": [
+     {
+       "message": "Exception while fetching data (/customer) : java.lang.RuntimeException:
+       could not resolve _fs_user_id = asdsdaddddd",
+       "locations": [
+         {
+           "line": 2,
+           "column": 3
+         }
+       ],
+       "path": [
+         "customer"
+       ],
+       "extensions": {
+         "classification": "InvalidIdentifierException"
+       }
+     }
+   ],
+   "data": {
+     "customer": null
+   }
+ }
+"""
+
+
+class OdpSegmentApiManager:
+    """Interface for managing the fetching of audience segments."""
+
+    def __init__(self, logger: Optional[optimizely_logger.Logger] = None, timeout: Optional[int] = None):
+        self.logger = logger or optimizely_logger.NoOpLogger()
+        self.timeout = timeout or OdpSegmentApiConfig.REQUEST_TIMEOUT
+
+    def fetch_segments(self, api_key: str, api_host: str, user_key: str,
+                       user_value: str, segments_to_check: list[str]) -> Optional[list[str]]:
+        """
+        Fetch segments from ODP GraphQL API.
+
+        Args:
+            api_key: public api key
+            api_host: domain url of the host
+            user_key: vuid or fs_user_id (client device id or fullstack id)
+            user_value: value of user_key
+            segments_to_check: list of segments to check
+
+        Returns:
+            Audience segments from GraphQL.
+ """ + url = f'{api_host}/v3/graphql' + request_headers = {'content-type': 'application/json', + 'x-api-key': str(api_key)} + + query = { + 'query': + 'query($userId: String, $audiences: [String]) {' + f'customer({user_key}: $userId) ' + '{audiences(subset: $audiences) {edges {node {name state}}}}}', + 'variables': { + 'userId': str(user_value), + 'audiences': segments_to_check} + } + + try: + payload_dict = json.dumps(query) + except TypeError as err: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(err)) + return None + + try: + response = requests.post(url=url, + headers=request_headers, + data=payload_dict, + timeout=self.timeout) + + response.raise_for_status() + response_dict = response.json() + + # There is no status code with network issues such as ConnectionError or Timeouts + # (i.e. no internet, server can't be reached). + except (ConnectionError, Timeout) as err: + self.logger.debug(f'GraphQL download failed: {err}') + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('network error')) + return None + except JSONDecodeError: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('JSON decode error')) + return None + except RequestException as err: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(err)) + return None + + if response_dict and 'errors' in response_dict: + try: + extensions = response_dict['errors'][0]['extensions'] + error_class = extensions['classification'] + error_code = extensions.get('code') + except (KeyError, IndexError, TypeError): + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('decode error')) + return None + + if error_code == 'INVALID_IDENTIFIER_EXCEPTION': + self.logger.warning(Errors.FETCH_SEGMENTS_FAILED.format('invalid identifier')) + return None + else: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(error_class)) + return None + else: + try: + audiences = response_dict['data']['customer']['audiences']['edges'] + segments = [edge['node']['name'] for edge in audiences if edge['node']['state'] == 'qualified'] + return segments + except (KeyError, TypeError): + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('decode error')) + return None diff --git a/optimizely/odp/odp_segment_manager.py b/optimizely/odp/odp_segment_manager.py new file mode 100644 index 00000000..b0f04b73 --- /dev/null +++ b/optimizely/odp/odp_segment_manager.py @@ -0,0 +1,94 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
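The segment manager added next composes this API manager with the LRU cache and the shared OdpConfig. A minimal usage sketch, assuming the test API key and host from the API manager's docstring above and a reachable ODP backend:

    from optimizely.odp.lru_cache import LRUCache
    from optimizely.odp.odp_config import OdpConfig
    from optimizely.odp.odp_segment_manager import OdpSegmentManager
    from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption

    odp_config = OdpConfig()
    odp_config.update('W4WzcEs-ABgXorzY7h1LCQ', 'https://api.zaius.com', ['has_email'])

    segment_manager = OdpSegmentManager(LRUCache(1000, 600))  # capacity, timeout in seconds
    segment_manager.odp_config = odp_config

    # First call goes to the GraphQL API; the result is cached under 'fs_user_id-$-tester-101'.
    segments = segment_manager.fetch_qualified_segments('fs_user_id', 'tester-101', [])

    # An identical call is answered from the cache, unless the caller opts out:
    segments = segment_manager.fetch_qualified_segments(
        'fs_user_id', 'tester-101', [OptimizelyOdpOption.IGNORE_CACHE])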
+ +from __future__ import annotations + +from typing import Optional + +from optimizely import logger as optimizely_logger +from optimizely.helpers.enums import Errors +from optimizely.odp.odp_config import OdpConfig +from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption +from optimizely.odp.lru_cache import OptimizelySegmentsCache +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager + + +class OdpSegmentManager: + """Schedules connections to ODP for audience segmentation and caches the results.""" + + def __init__( + self, + segments_cache: OptimizelySegmentsCache, + api_manager: Optional[OdpSegmentApiManager] = None, + logger: Optional[optimizely_logger.Logger] = None, + timeout: Optional[int] = None + ) -> None: + + self.odp_config: Optional[OdpConfig] = None + self.segments_cache = segments_cache + self.logger = logger or optimizely_logger.NoOpLogger() + self.api_manager = api_manager or OdpSegmentApiManager(self.logger, timeout) + + def fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str]) -> Optional[list[str]]: + """ + Args: + user_key: The key for identifying the id type. + user_value: The id itself. + options: An array of OptimizelySegmentOptions used to ignore and/or reset the cache. + + Returns: + Qualified segments for the user from the cache or the ODP server if not in the cache. + """ + if self.odp_config: + odp_api_key = self.odp_config.get_api_key() + odp_api_host = self.odp_config.get_api_host() + odp_segments_to_check = self.odp_config.get_segments_to_check() + + if not self.odp_config or not (odp_api_key and odp_api_host): + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('api_key/api_host not defined')) + return None + + if not odp_segments_to_check: + self.logger.debug('No segments are used in the project. Returning empty list.') + return [] + + cache_key = self.make_cache_key(user_key, user_value) + + ignore_cache = OptimizelyOdpOption.IGNORE_CACHE in options + reset_cache = OptimizelyOdpOption.RESET_CACHE in options + + if reset_cache: + self.reset() + + if not ignore_cache and not reset_cache: + segments = self.segments_cache.lookup(cache_key) + if segments: + self.logger.debug('ODP cache hit. Returning segments from cache.') + return segments + self.logger.debug('ODP cache miss.') + + self.logger.debug('Making a call to ODP server.') + + segments = self.api_manager.fetch_segments(odp_api_key, odp_api_host, user_key, user_value, + odp_segments_to_check) + + if segments and not ignore_cache: + self.segments_cache.save(cache_key, segments) + + return segments + + def reset(self) -> None: + self.segments_cache.reset() + + def make_cache_key(self, user_key: str, user_value: str) -> str: + return f'{user_key}-$-{user_value}' diff --git a/optimizely/odp/optimizely_odp_option.py b/optimizely/odp/optimizely_odp_option.py new file mode 100644 index 00000000..ce6eaf00 --- /dev/null +++ b/optimizely/odp/optimizely_odp_option.py @@ -0,0 +1,25 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +class OptimizelyOdpOption: + """Options for the OdpSegmentManager.""" + IGNORE_CACHE: Final = 'IGNORE_CACHE' + RESET_CACHE: Final = 'RESET_CACHE' diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 10464a72..af442224 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1,9 +1,9 @@ -# Copyright 2016-2022, Optimizely +# Copyright 2016-2023, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -11,49 +11,66 @@ # See the License for the specific language governing permissions and # limitations under the License. -from six import string_types +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Optional from . import decision_service from . import entities from . import event_builder from . import exceptions from . import logger as _logging +from . import project_config +from . import user_profile from .config_manager import AuthDatafilePollingConfigManager +from .config_manager import BaseConfigManager from .config_manager import PollingConfigManager from .config_manager import StaticConfigManager from .decision.optimizely_decide_option import OptimizelyDecideOption from .decision.optimizely_decision import OptimizelyDecision from .decision.optimizely_decision_message import OptimizelyDecisionMessage from .decision_service import Decision -from .error_handler import NoOpErrorHandler as noop_error_handler +from .error_handler import NoOpErrorHandler, BaseErrorHandler from .event import event_factory, user_event_factory -from .event.event_processor import ForwardingEventProcessor -from .event_dispatcher import EventDispatcher as default_event_dispatcher +from .event.event_processor import BatchEventProcessor, BaseEventProcessor +from .event_dispatcher import EventDispatcher, CustomEventDispatcher from .helpers import enums, validator +from .helpers.sdk_settings import OptimizelySdkSettings from .helpers.enums import DecisionSources from .notification_center import NotificationCenter -from .optimizely_config import OptimizelyConfigService -from .optimizely_user_context import OptimizelyUserContext +from .notification_center_registry import _NotificationCenterRegistry +from .odp.lru_cache import LRUCache +from .odp.odp_manager import OdpManager +from .optimizely_config import OptimizelyConfig, OptimizelyConfigService +from .optimizely_user_context import OptimizelyUserContext, UserAttributes +from .project_config import ProjectConfig + +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from .user_profile import UserProfileService + from .helpers.event_tag_utils import EventTags -class Optimizely(object): +class Optimizely: """ Class encapsulating all SDK functionality. 
""" def __init__( self, - datafile=None, - event_dispatcher=None, - logger=None, - error_handler=None, - skip_json_validation=False, - user_profile_service=None, - sdk_key=None, - config_manager=None, - notification_center=None, - event_processor=None, - datafile_access_token=None, - default_decide_options=None - ): + datafile: Optional[str] = None, + event_dispatcher: Optional[CustomEventDispatcher] = None, + logger: Optional[_logging.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + skip_json_validation: Optional[bool] = False, + user_profile_service: Optional[UserProfileService] = None, + sdk_key: Optional[str] = None, + config_manager: Optional[BaseConfigManager] = None, + notification_center: Optional[NotificationCenter] = None, + event_processor: Optional[BaseEventProcessor] = None, + datafile_access_token: Optional[str] = None, + default_decide_options: Optional[list[str]] = None, + event_processor_options: Optional[dict[str, Any]] = None, + settings: Optional[OptimizelySdkSettings] = None + ) -> None: """ Optimizely init method for managing Custom projects. Args: @@ -74,23 +91,37 @@ def __init__( config_manager.BaseConfigManager implementation which can be using the same NotificationCenter instance. event_processor: Optional component which processes the given event(s). - By default optimizely.event.event_processor.ForwardingEventProcessor is used - which simply forwards events to the event dispatcher. - To enable event batching configure and use - optimizely.event.event_processor.BatchEventProcessor. + By default optimizely.event.event_processor.BatchEventProcessor is used + which batches events. To simply forward events to the event dispatcher + configure and use optimizely.event.event_processor.ForwardingEventProcessor. datafile_access_token: Optional string used to fetch authenticated datafile for a secure project environment. default_decide_options: Optional list of decide options used with the decide APIs. + event_processor_options: Optional dict of options to be passed to the default batch event processor. + settings: Optional instance of OptimizelySdkSettings for sdk configuration. 
""" self.logger_name = '.'.join([__name__, self.__class__.__name__]) self.is_valid = True - self.event_dispatcher = event_dispatcher or default_event_dispatcher + self.event_dispatcher = event_dispatcher or EventDispatcher self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) - self.error_handler = error_handler or noop_error_handler - self.config_manager = config_manager + self.error_handler = error_handler or NoOpErrorHandler + self.config_manager: BaseConfigManager = config_manager # type: ignore[assignment] self.notification_center = notification_center or NotificationCenter(self.logger) - self.event_processor = event_processor or ForwardingEventProcessor( - self.event_dispatcher, logger=self.logger, notification_center=self.notification_center, + event_processor_defaults = { + 'batch_size': 1, + 'flush_interval': 30, + 'timeout_interval': 5, + 'start_on_init': True + } + if event_processor_options: + event_processor_defaults.update(event_processor_options) + + self.event_processor = event_processor or BatchEventProcessor( + self.event_dispatcher, + logger=self.logger, + notification_center=self.notification_center, + **event_processor_defaults # type: ignore[arg-type] ) + self.default_decide_options: list[str] if default_decide_options is None: self.default_decide_options = [] @@ -103,6 +134,8 @@ def __init__( self.logger.debug('Provided default decide options is not a list.') self.default_decide_options = [] + self.sdk_settings: OptimizelySdkSettings = settings # type: ignore[assignment] + try: self._validate_instantiation_options() except exceptions.InvalidInputException as error: @@ -113,7 +146,7 @@ def __init__( self.logger.exception(str(error)) return - config_manager_options = { + config_manager_options: dict[str, Any] = { 'datafile': datafile, 'logger': self.logger, 'error_handler': self.error_handler, @@ -132,10 +165,14 @@ def __init__( else: self.config_manager = StaticConfigManager(**config_manager_options) + self.odp_manager: OdpManager + self._setup_odp(self.config_manager.get_sdk_key()) + self.event_builder = event_builder.EventBuilder() self.decision_service = decision_service.DecisionService(self.logger, user_profile_service) + self.user_profile_service = user_profile_service - def _validate_instantiation_options(self): + def _validate_instantiation_options(self) -> None: """ Helper method to validate all instantiation parameters. 
Raises: @@ -159,7 +196,26 @@ def _validate_instantiation_options(self): if not validator.is_event_processor_valid(self.event_processor): raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('event_processor')) - def _validate_user_inputs(self, attributes=None, event_tags=None): + if not isinstance(self.sdk_settings, OptimizelySdkSettings): + if self.sdk_settings is not None: + self.logger.debug('Provided sdk_settings is not an OptimizelySdkSettings instance.') + self.sdk_settings = OptimizelySdkSettings() + + if self.sdk_settings.segments_cache: + if not validator.is_segments_cache_valid(self.sdk_settings.segments_cache): + raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('segments_cache')) + + if self.sdk_settings.odp_segment_manager: + if not validator.is_segment_manager_valid(self.sdk_settings.odp_segment_manager): + raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('segment_manager')) + + if self.sdk_settings.odp_event_manager: + if not validator.is_event_manager_valid(self.sdk_settings.odp_event_manager): + raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('event_manager')) + + def _validate_user_inputs( + self, attributes: Optional[UserAttributes] = None, event_tags: Optional[EventTags] = None + ) -> bool: """ Helper method to validate user inputs. Args: @@ -183,8 +239,11 @@ def _validate_user_inputs(self, attributes=None, event_tags=None): return True - def _send_impression_event(self, project_config, experiment, variation, flag_key, rule_key, rule_type, enabled, - user_id, attributes): + def _send_impression_event( + self, project_config: project_config.ProjectConfig, experiment: Optional[entities.Experiment], + variation: Optional[entities.Variation], flag_key: str, rule_key: str, rule_type: str, + enabled: bool, user_id: str, attributes: Optional[UserAttributes] + ) -> None: """ Helper method to send impression event. Args: @@ -206,6 +265,10 @@ def _send_impression_event(self, project_config, experiment, variation, flag_key project_config, experiment, variation_id, flag_key, rule_key, rule_type, enabled, user_id, attributes ) + if user_event is None: + self.logger.error('Cannot process None event.') + return + self.event_processor.process(user_event) # Kept for backward compatibility. @@ -218,9 +281,11 @@ def _send_impression_event(self, project_config, experiment, variation, flag_key ) def _get_feature_variable_for_type( - self, project_config, feature_key, variable_key, variable_type, user_id, attributes - ): - """ Helper method to determine value for a certain variable attached to a feature flag based on type of variable. + self, project_config: project_config.ProjectConfig, feature_key: str, variable_key: str, + variable_type: Optional[str], user_id: str, attributes: Optional[UserAttributes] + ) -> Any: + """ Helper method to determine value for a certain variable attached to a feature flag based on + type of variable. Args: project_config: Instance of ProjectConfig. @@ -244,7 +309,7 @@ def _get_feature_variable_for_type( self.logger.error(enums.Errors.INVALID_INPUT.format('variable_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -263,8 +328,8 @@ def _get_feature_variable_for_type( variable_type = variable_type or variable.type if variable.type != variable_type: self.logger.warning( - 'Requested variable type "%s", but variable is of type "%s". 
' - 'Use correct API to retrieve value. Returning None.' % (variable_type, variable.type) + f'Requested variable type "{variable_type}", but variable is of ' + f'type "{variable.type}". Use correct API to retrieve value. Returning None.' ) return None @@ -272,7 +337,8 @@ def _get_feature_variable_for_type( source_info = {} variable_value = variable.defaultValue - user_context = self.create_user_context(user_id, attributes) + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) + decision, _ = self.decision_service.get_variation_for_feature(project_config, feature_flag, user_context) if decision.variation: @@ -281,24 +347,24 @@ def _get_feature_variable_for_type( if feature_enabled: variable_value = project_config.get_variable_value_for_variation(variable, decision.variation) self.logger.info( - 'Got variable value "%s" for variable "%s" of feature flag "%s".' - % (variable_value, variable_key, feature_key) + f'Got variable value "{variable_value}" for ' + f'variable "{variable_key}" of feature flag "{feature_key}".' ) else: self.logger.info( - 'Feature "%s" is not enabled for user "%s". ' - 'Returning the default variable value "%s".' % (feature_key, user_id, variable_value) + f'Feature "{feature_key}" is not enabled for user "{user_id}". ' + f'Returning the default variable value "{variable_value}".' ) else: self.logger.info( - 'User "%s" is not in any variation or rollout rule. ' - 'Returning default value for variable "%s" of feature flag "%s".' % (user_id, variable_key, feature_key) + f'User "{user_id}" is not in any variation or rollout rule. ' + f'Returning default value for variable "{variable_key}" of feature flag "{feature_key}".' ) if decision.source == enums.DecisionSources.FEATURE_TEST: source_info = { - 'experiment_key': decision.experiment.key, - 'variation_key': decision.variation.key, + 'experiment_key': decision.experiment.key if decision.experiment else None, + 'variation_key': decision.variation.key if decision.variation else None, } try: @@ -325,8 +391,9 @@ def _get_feature_variable_for_type( return actual_value def _get_all_feature_variables_for_type( - self, project_config, feature_key, user_id, attributes, - ): + self, project_config: project_config.ProjectConfig, feature_key: str, + user_id: str, attributes: Optional[UserAttributes], + ) -> Optional[dict[str, Any]]: """ Helper method to determine value for all variables attached to a feature flag. Args: @@ -343,7 +410,7 @@ def _get_all_feature_variables_for_type( self.logger.error(enums.Errors.INVALID_INPUT.format('feature_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -357,7 +424,8 @@ def _get_all_feature_variables_for_type( feature_enabled = False source_info = {} - user_context = self.create_user_context(user_id, attributes) + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) + decision, _ = self.decision_service.get_variation_for_feature(project_config, feature_flag, user_context) if decision.variation: @@ -365,27 +433,26 @@ def _get_all_feature_variables_for_type( feature_enabled = decision.variation.featureEnabled if feature_enabled: self.logger.info( - 'Feature "%s" is enabled for user "%s".' % (feature_key, user_id) + f'Feature "{feature_key}" is enabled for user "{user_id}".' ) else: self.logger.info( - 'Feature "%s" is not enabled for user "%s".' 
% (feature_key, user_id) + f'Feature "{feature_key}" is not enabled for user "{user_id}".' ) else: self.logger.info( - 'User "%s" is not in any variation or rollout rule. ' - 'Returning default value for all variables of feature flag "%s".' % (user_id, feature_key) + f'User "{user_id}" is not in any variation or rollout rule. ' + f'Returning default value for all variables of feature flag "{feature_key}".' ) all_variables = {} - for variable_key in feature_flag.variables: - variable = project_config.get_variable_for_feature(feature_key, variable_key) + for variable_key, variable in feature_flag.variables.items(): variable_value = variable.defaultValue if feature_enabled: variable_value = project_config.get_variable_value_for_variation(variable, decision.variation) self.logger.debug( - 'Got variable value "%s" for variable "%s" of feature flag "%s".' - % (variable_value, variable_key, feature_key) + f'Got variable value "{variable_value}" for ' + f'variable "{variable_key}" of feature flag "{feature_key}".' ) try: @@ -398,8 +465,8 @@ def _get_all_feature_variables_for_type( if decision.source == enums.DecisionSources.FEATURE_TEST: source_info = { - 'experiment_key': decision.experiment.key, - 'variation_key': decision.variation.key, + 'experiment_key': decision.experiment.key if decision.experiment else None, + 'variation_key': decision.variation.key if decision.variation else None, } self.notification_center.send_notifications( @@ -417,7 +484,7 @@ def _get_all_feature_variables_for_type( ) return all_variables - def activate(self, experiment_key, user_id, attributes=None): + def activate(self, experiment_key: str, user_id: str, attributes: Optional[UserAttributes] = None) -> Optional[str]: """ Buckets visitor and sends impression event to Optimizely. Args: @@ -438,7 +505,7 @@ def activate(self, experiment_key, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -450,20 +517,27 @@ def activate(self, experiment_key, user_id, attributes=None): variation_key = self.get_variation(experiment_key, user_id, attributes) if not variation_key: - self.logger.info('Not activating user "%s".' % user_id) + self.logger.info(f'Not activating user "{user_id}".') return None experiment = project_config.get_experiment_from_key(experiment_key) variation = project_config.get_variation_from_key(experiment_key, variation_key) + if not variation or not experiment: + self.logger.info(f'Not activating user "{user_id}".') + return None # Create and dispatch impression event - self.logger.info('Activating user "%s" in experiment "%s".' % (user_id, experiment.key)) + self.logger.info(f'Activating user "{user_id}" in experiment "{experiment.key}".') self._send_impression_event(project_config, experiment, variation, '', experiment.key, enums.DecisionSources.EXPERIMENT, True, user_id, attributes) return variation.key - def track(self, event_key, user_id, attributes=None, event_tags=None): + def track( + self, event_key: str, user_id: str, + attributes: Optional[UserAttributes] = None, + event_tags: Optional[EventTags] = None + ) -> None: """ Send conversion event to Optimizely. 
Args: @@ -481,7 +555,7 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): self.logger.error(enums.Errors.INVALID_INPUT.format('event_key')) return - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return @@ -495,15 +569,19 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): event = project_config.get_event(event_key) if not event: - self.logger.info('Not tracking user "%s" for event "%s".' % (user_id, event_key)) + self.logger.info(f'Not tracking user "{user_id}" for event "{event_key}".') return user_event = user_event_factory.UserEventFactory.create_conversion_event( project_config, event_key, user_id, attributes, event_tags ) + if user_event is None: + self.logger.error('Cannot process None event.') + return + self.event_processor.process(user_event) - self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id)) + self.logger.info(f'Tracking event "{event_key}" for user "{user_id}".') if len(self.notification_center.notification_listeners[enums.NotificationTypes.TRACK]) > 0: log_event = event_factory.EventFactory.create_log_event(user_event, self.logger) @@ -511,7 +589,9 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): enums.NotificationTypes.TRACK, event_key, user_id, attributes, event_tags, log_event.__dict__, ) - def get_variation(self, experiment_key, user_id, attributes=None): + def get_variation( + self, experiment_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[str]: """ Gets variation where user will be bucketed. Args: @@ -532,7 +612,7 @@ def get_variation(self, experiment_key, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -545,15 +625,20 @@ def get_variation(self, experiment_key, user_id, attributes=None): variation_key = None if not experiment: - self.logger.info('Experiment key "%s" is invalid. Not activating user "%s".' % (experiment_key, user_id)) + self.logger.info(f'Experiment key "{experiment_key}" is invalid. Not activating user "{user_id}".') return None if not self._validate_user_inputs(attributes): return None - user_context = self.create_user_context(user_id, attributes) - - variation, _ = self.decision_service.get_variation(project_config, experiment, user_context) + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) + user_profile_tracker = user_profile.UserProfileTracker(user_id, self.user_profile_service, self.logger) + user_profile_tracker.load_user_profile() + variation, _ = self.decision_service.get_variation(project_config, + experiment, + user_context, + user_profile_tracker) + user_profile_tracker.save_user_profile() if variation: variation_key = variation.key @@ -572,7 +657,7 @@ def get_variation(self, experiment_key, user_id, attributes=None): return variation_key - def is_feature_enabled(self, feature_key, user_id, attributes=None): + def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optional[UserAttributes] = None) -> bool: """ Returns true if the feature is enabled for the given user. 
Args: @@ -592,7 +677,7 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_INPUT.format('feature_key')) return False - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return False @@ -610,7 +695,9 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): feature_enabled = False source_info = {} - user_context = self.create_user_context(user_id, attributes) + + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) + decision, _ = self.decision_service.get_variation_for_feature(project_config, feature, user_context) is_source_experiment = decision.source == enums.DecisionSources.FEATURE_TEST is_source_rollout = decision.source == enums.DecisionSources.ROLLOUT @@ -622,24 +709,24 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): if (is_source_rollout or not decision.variation) and project_config.get_send_flag_decisions_value(): self._send_impression_event( project_config, decision.experiment, decision.variation, feature.key, decision.experiment.key if - decision.experiment else '', decision.source, feature_enabled, user_id, attributes + decision.experiment else '', str(decision.source), feature_enabled, user_id, attributes ) # Send event if Decision came from an experiment. - if is_source_experiment and decision.variation: + if is_source_experiment and decision.variation and decision.experiment: source_info = { 'experiment_key': decision.experiment.key, 'variation_key': decision.variation.key, } self._send_impression_event( project_config, decision.experiment, decision.variation, feature.key, decision.experiment.key, - decision.source, feature_enabled, user_id, attributes + str(decision.source), feature_enabled, user_id, attributes ) if feature_enabled: - self.logger.info('Feature "%s" is enabled for user "%s".' % (feature_key, user_id)) + self.logger.info(f'Feature "{feature_key}" is enabled for user "{user_id}".') else: - self.logger.info('Feature "%s" is not enabled for user "%s".' % (feature_key, user_id)) + self.logger.info(f'Feature "{feature_key}" is not enabled for user "{user_id}".') self.notification_center.send_notifications( enums.NotificationTypes.DECISION, @@ -656,7 +743,7 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): return feature_enabled - def get_enabled_features(self, user_id, attributes=None): + def get_enabled_features(self, user_id: str, attributes: Optional[UserAttributes] = None) -> list[str]: """ Returns the list of features that are enabled for the user. Args: @@ -667,12 +754,12 @@ def get_enabled_features(self, user_id, attributes=None): A list of the keys of the features that are enabled for the user. 
""" - enabled_features = [] + enabled_features: list[str] = [] if not self.is_valid: self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('get_enabled_features')) return enabled_features - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return enabled_features @@ -690,7 +777,9 @@ def get_enabled_features(self, user_id, attributes=None): return enabled_features - def get_feature_variable(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Any: """ Returns value for a variable attached to a feature flag. Args: @@ -711,7 +800,9 @@ def get_feature_variable(self, feature_key, variable_key, user_id, attributes=No return self._get_feature_variable_for_type(project_config, feature_key, variable_key, None, user_id, attributes) - def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_boolean( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[bool]: """ Returns value for a certain boolean variable attached to a feature flag. Args: @@ -733,11 +824,13 @@ def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attri self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_boolean')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_double(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_double( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[float]: """ Returns value for a certain double variable attached to a feature flag. Args: @@ -759,11 +852,13 @@ def get_feature_variable_double(self, feature_key, variable_key, user_id, attrib self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_double')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_integer( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[int]: """ Returns value for a certain integer variable attached to a feature flag. Args: @@ -785,11 +880,13 @@ def get_feature_variable_integer(self, feature_key, variable_key, user_id, attri self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_integer')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_string( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[str]: """ Returns value for a certain string variable attached to a feature. 
Args: @@ -811,11 +908,13 @@ def get_feature_variable_string(self, feature_key, variable_key, user_id, attrib self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_string')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_json(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_json( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[dict[str, Any]]: """ Returns value for a certain JSON variable attached to a feature. Args: @@ -837,11 +936,13 @@ def get_feature_variable_json(self, feature_key, variable_key, user_id, attribut self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_json')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_all_feature_variables(self, feature_key, user_id, attributes=None): + def get_all_feature_variables( + self, feature_key: str, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[dict[str, Any]]: """ Returns dictionary of all variables and their corresponding values in the context of a feature. Args: @@ -863,7 +964,7 @@ def get_all_feature_variables(self, feature_key, user_id, attributes=None): project_config, feature_key, user_id, attributes, ) - def set_forced_variation(self, experiment_key, user_id, variation_key): + def set_forced_variation(self, experiment_key: str, user_id: str, variation_key: Optional[str]) -> bool: """ Force a user into a variation for a given experiment. Args: @@ -884,7 +985,7 @@ def set_forced_variation(self, experiment_key, user_id, variation_key): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return False - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return False @@ -895,7 +996,7 @@ def set_forced_variation(self, experiment_key, user_id, variation_key): return self.decision_service.set_forced_variation(project_config, experiment_key, user_id, variation_key) - def get_forced_variation(self, experiment_key, user_id): + def get_forced_variation(self, experiment_key: str, user_id: str) -> Optional[str]: """ Gets the forced variation for a given user and experiment. Args: @@ -914,7 +1015,7 @@ def get_forced_variation(self, experiment_key, user_id): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -926,7 +1027,7 @@ def get_forced_variation(self, experiment_key, user_id): forced_variation, _ = self.decision_service.get_forced_variation(project_config, experiment_key, user_id) return forced_variation.key if forced_variation else None - def get_optimizely_config(self): + def get_optimizely_config(self) -> Optional[OptimizelyConfig]: """ Gets OptimizelyConfig instance for the current project config. 
Returns: @@ -946,9 +1047,11 @@ def get_optimizely_config(self): if hasattr(self.config_manager, 'optimizely_config'): return self.config_manager.optimizely_config - return OptimizelyConfigService(project_config).get_config() + return OptimizelyConfigService(project_config, self.logger).get_config() - def create_user_context(self, user_id, attributes=None): + def create_user_context( + self, user_id: str, attributes: Optional[UserAttributes] = None + ) -> Optional[OptimizelyUserContext]: """ We do not check for is_valid here as a user context can be created successfully even when the SDK is not fully configured. @@ -960,7 +1063,7 @@ def create_user_context(self, user_id, attributes=None): Returns: UserContext instance or None if the user id or attributes are invalid. """ - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -968,9 +1071,12 @@ def create_user_context(self, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_INPUT.format('attributes')) return None - return OptimizelyUserContext(self, self.logger, user_id, attributes) + return OptimizelyUserContext(self, self.logger, user_id, attributes, True) - def _decide(self, user_context, key, decide_options=None): + def _decide( + self, user_context: Optional[OptimizelyUserContext], key: str, + decide_options: Optional[list[str]] = None + ) -> OptimizelyDecision: """ decide calls optimizely decide with feature key provided Args: @@ -995,7 +1101,7 @@ def _decide(self, user_context, key, decide_options=None): return OptimizelyDecision(flag_key=key, user_context=user_context, reasons=reasons) # validate that key is a string - if not isinstance(key, string_types): + if not isinstance(key, str): self.logger.error('Key parameter is invalid') reasons.append(OptimizelyDecisionMessage.FLAG_KEY_INVALID.format(key)) return OptimizelyDecision(flag_key=key, user_context=user_context, reasons=reasons) @@ -1009,7 +1115,7 @@ def _decide(self, user_context, key, decide_options=None): feature_flag = config.get_feature_from_key(key) if feature_flag is None: - self.logger.error("No feature flag was found for key '#{key}'.") + self.logger.error(f"No feature flag was found for key '{key}'.") reasons.append(OptimizelyDecisionMessage.FLAG_KEY_INVALID.format(key)) return OptimizelyDecision(flag_key=key, user_context=user_context, reasons=reasons) @@ -1020,74 +1126,70 @@ def _decide(self, user_context, key, decide_options=None): self.logger.debug('Provided decide options is not an array. Using default decide options.') decide_options = self.default_decide_options - # Create Optimizely Decision Result. 
+        if OptimizelyDecideOption.ENABLED_FLAGS_ONLY in decide_options:
+            decide_options.remove(OptimizelyDecideOption.ENABLED_FLAGS_ONLY)
+
+        decision = self._decide_for_keys(
+            user_context,
+            [key],
+            decide_options,
+            True
+        )[key]
+
+        return decision
+
+    def _create_optimizely_decision(
+        self,
+        user_context: OptimizelyUserContext,
+        flag_key: str,
+        flag_decision: Decision,
+        decision_reasons: Optional[list[str]],
+        decide_options: list[str],
+        project_config: ProjectConfig
+    ) -> OptimizelyDecision:
         user_id = user_context.user_id
-        attributes = user_context.get_user_attributes()
-        variation_key = None
-        variation = None
         feature_enabled = False
-        rule_key = None
-        flag_key = key
+        if flag_decision.variation is not None:
+            if flag_decision.variation.featureEnabled:
+                feature_enabled = True
+
+        self.logger.info(f'Feature "{flag_key}" is enabled for user "{user_id}": {feature_enabled}.')
+
+        # Create Optimizely Decision Result.
+        attributes = user_context.get_user_attributes()
+        rule_key = flag_decision.experiment.key if flag_decision.experiment else None
         all_variables = {}
-        experiment = None
-        decision_source = DecisionSources.ROLLOUT
-        source_info = {}
+        decision_source = flag_decision.source
         decision_event_dispatched = False
 
-        # Check forced decisions first
-        optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(flag_key=key, rule_key=rule_key)
-        forced_decision_response = self.decision_service.validated_forced_decision(config,
-                                                                                   optimizely_decision_context,
-                                                                                   user_context)
-        variation, decision_reasons = forced_decision_response
-        reasons += decision_reasons
-
-        if variation:
-            decision = Decision(None, variation, enums.DecisionSources.FEATURE_TEST)
-        else:
-            # Regular decision
-            decision, decision_reasons = self.decision_service.get_variation_for_feature(config,
-                                                                                         feature_flag,
-                                                                                         user_context, decide_options)
-
-        reasons += decision_reasons
-
-        # Fill in experiment and variation if returned (rollouts can have featureEnabled variables as well.)
- if decision.experiment is not None: - experiment = decision.experiment - source_info["experiment"] = experiment - rule_key = experiment.key if experiment else None - if decision.variation is not None: - variation = decision.variation - variation_key = variation.key - feature_enabled = variation.featureEnabled - decision_source = decision.source - source_info["variation"] = variation + feature_flag = project_config.feature_key_map.get(flag_key) # Send impression event if Decision came from a feature # test and decide options doesn't include disableDecisionEvent if OptimizelyDecideOption.DISABLE_DECISION_EVENT not in decide_options: - if decision_source == DecisionSources.FEATURE_TEST or config.send_flag_decisions: - self._send_impression_event(config, experiment, variation, flag_key, rule_key or '', - decision_source, feature_enabled, + if decision_source == DecisionSources.FEATURE_TEST or project_config.send_flag_decisions: + self._send_impression_event(project_config, + flag_decision.experiment, + flag_decision.variation, + flag_key, rule_key or '', + str(decision_source), feature_enabled, user_id, attributes) decision_event_dispatched = True # Generate all variables map if decide options doesn't include excludeVariables - if OptimizelyDecideOption.EXCLUDE_VARIABLES not in decide_options: - for variable_key in feature_flag.variables: - variable = config.get_variable_for_feature(flag_key, variable_key) + if OptimizelyDecideOption.EXCLUDE_VARIABLES not in decide_options and feature_flag: + for variable_key, variable in feature_flag.variables.items(): variable_value = variable.defaultValue if feature_enabled: - variable_value = config.get_variable_value_for_variation(variable, decision.variation) + variable_value = project_config.get_variable_value_for_variation(variable, flag_decision.variation) self.logger.debug( - 'Got variable value "%s" for variable "%s" of feature flag "%s".' - % (variable_value, variable_key, flag_key) + f'Got variable value "{variable_value}" for ' + f'variable "{variable_key}" of feature flag "{flag_key}".' ) try: - actual_value = config.get_typecast_value(variable_value, variable.type) + actual_value = project_config.get_typecast_value(variable_value, variable.type) except: self.logger.error('Unable to cast value. 
Returning None.') actual_value = None @@ -1095,6 +1197,26 @@ def _decide(self, user_context, key, decide_options=None): all_variables[variable_key] = actual_value should_include_reasons = OptimizelyDecideOption.INCLUDE_REASONS in decide_options + variation_key = ( + flag_decision.variation.key + if flag_decision is not None and flag_decision.variation is not None + else None + ) + + experiment_id = None + variation_id = None + + try: + if flag_decision.experiment is not None: + experiment_id = flag_decision.experiment.id + except AttributeError: + self.logger.warning("flag_decision.experiment has no attribute 'id'") + + try: + if flag_decision.variation is not None: + variation_id = flag_decision.variation.id + except AttributeError: + self.logger.warning("flag_decision.variation has no attribute 'id'") # Send notification self.notification_center.send_notifications( @@ -1108,18 +1230,24 @@ def _decide(self, user_context, key, decide_options=None): 'variables': all_variables, 'variation_key': variation_key, 'rule_key': rule_key, - 'reasons': reasons if should_include_reasons else [], - 'decision_event_dispatched': decision_event_dispatched + 'reasons': decision_reasons if should_include_reasons else [], + 'decision_event_dispatched': decision_event_dispatched, + 'experiment_id': experiment_id, + 'variation_id': variation_id }, ) return OptimizelyDecision(variation_key=variation_key, enabled=feature_enabled, variables=all_variables, rule_key=rule_key, flag_key=flag_key, - user_context=user_context, reasons=reasons if should_include_reasons else [] + user_context=user_context, reasons=decision_reasons if should_include_reasons else [] ) - def _decide_all(self, user_context, decide_options=None): + def _decide_all( + self, + user_context: Optional[OptimizelyUserContext], + decide_options: Optional[list[str]] = None + ) -> dict[str, OptimizelyDecision]: """ decide_all will return a decision for every feature key in the current config Args: @@ -1148,7 +1276,13 @@ def _decide_all(self, user_context, decide_options=None): keys.append(f['key']) return self._decide_for_keys(user_context, keys, decide_options) - def _decide_for_keys(self, user_context, keys, decide_options=None): + def _decide_for_keys( + self, + user_context: Optional[OptimizelyUserContext], + keys: list[str], + decide_options: Optional[list[str]] = None, + ignore_default_options: bool = False + ) -> dict[str, OptimizelyDecision]: """ Args: user_context: UserContent @@ -1168,20 +1302,206 @@ def _decide_for_keys(self, user_context, keys, decide_options=None): return {} # merge decide_options and default_decide_options - merged_decide_options = [] + merged_decide_options: list[str] = [] if isinstance(decide_options, list): merged_decide_options = decide_options[:] - merged_decide_options += self.default_decide_options + if not ignore_default_options: + merged_decide_options += self.default_decide_options else: self.logger.debug('Provided decide options is not an array. 
Using default decide options.') merged_decide_options = self.default_decide_options - enabled_flags_only = OptimizelyDecideOption.ENABLED_FLAGS_ONLY in merged_decide_options + decisions: dict[str, OptimizelyDecision] = {} + valid_keys = [] + decision_reasons_dict = {} + + project_config = self.config_manager.get_config() + flags_without_forced_decision: list[entities.FeatureFlag] = [] + flag_decisions: dict[str, Decision] = {} - decisions = {} + if project_config is None: + return decisions for key in keys: - decision = self._decide(user_context, key, decide_options) - if enabled_flags_only and not decision.enabled: + feature_flag = project_config.feature_key_map.get(key) + if feature_flag is None: + decisions[key] = OptimizelyDecision(None, False, None, None, key, user_context, []) continue - decisions[key] = decision + valid_keys.append(key) + decision_reasons: list[str] = [] + decision_reasons_dict[key] = decision_reasons + + optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(flag_key=key, rule_key=None) + forced_decision_response = self.decision_service.validated_forced_decision(project_config, + optimizely_decision_context, + user_context) + variation, decision_reasons = forced_decision_response + decision_reasons_dict[key] += decision_reasons + + if variation: + decision = Decision(None, variation, enums.DecisionSources.FEATURE_TEST) + flag_decisions[key] = decision + else: + flags_without_forced_decision.append(feature_flag) + + decision_list = self.decision_service.get_variations_for_feature_list( + project_config, + flags_without_forced_decision, + user_context, + merged_decide_options + ) + + for i in range(0, len(flags_without_forced_decision)): + decision = decision_list[i][0] + reasons = decision_list[i][1] + flag_key = flags_without_forced_decision[i].key + flag_decisions[flag_key] = decision + decision_reasons_dict[flag_key] += reasons + + for key in valid_keys: + flag_decision = flag_decisions[key] + decision_reasons = decision_reasons_dict[key] + optimizely_decision = self._create_optimizely_decision( + user_context, + key, + flag_decision, + decision_reasons, + merged_decide_options, + project_config + ) + enabled_flags_only_missing = OptimizelyDecideOption.ENABLED_FLAGS_ONLY not in merged_decide_options + is_enabled = optimizely_decision.enabled + if enabled_flags_only_missing or is_enabled: + decisions[key] = optimizely_decision + return decisions + + def _setup_odp(self, sdk_key: Optional[str]) -> None: + """ + - Make sure odp manager is instantiated with provided parameters or defaults. + - Set up listener to update odp_config when datafile is updated. + - Manually call callback in case datafile was received before the listener was registered. + """ + + # no need to instantiate a cache if a custom cache or segment manager is provided. 
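For reference, the option-merging step in `_decide_for_keys` above can be restated as a standalone sketch; this assumes string option constants and an illustrative client-level default, and none of the names below are the SDK's internals:

```python
# Standalone sketch of the option-merging behavior; names are illustrative.
from __future__ import annotations
from typing import Optional

DEFAULT_DECIDE_OPTIONS = ['ENABLED_FLAGS_ONLY']  # assumed client-level default


def merge_decide_options(
    decide_options: Optional[list[str]],
    ignore_default_options: bool = False,
) -> list[str]:
    # Per-call options come first; the client defaults are appended unless
    # the caller (e.g. a batched decide) asks to ignore them.
    if isinstance(decide_options, list):
        merged = decide_options[:]
        if not ignore_default_options:
            merged += DEFAULT_DECIDE_OPTIONS
        return merged
    # Anything that is not a list falls back to the defaults alone.
    return DEFAULT_DECIDE_OPTIONS[:]


assert merge_decide_options(['INCLUDE_REASONS']) == ['INCLUDE_REASONS', 'ENABLED_FLAGS_ONLY']
assert merge_decide_options('oops') == ['ENABLED_FLAGS_ONLY']
```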
+ if ( + not self.sdk_settings.odp_disabled and + not self.sdk_settings.odp_segment_manager and + not self.sdk_settings.segments_cache + ): + self.sdk_settings.segments_cache = LRUCache( + self.sdk_settings.segments_cache_size, + self.sdk_settings.segments_cache_timeout_in_secs + ) + + self.odp_manager = OdpManager( + self.sdk_settings.odp_disabled, + self.sdk_settings.segments_cache, + self.sdk_settings.odp_segment_manager, + self.sdk_settings.odp_event_manager, + self.sdk_settings.fetch_segments_timeout, + self.sdk_settings.odp_event_timeout, + self.sdk_settings.odp_flush_interval, + self.logger, + ) + + if self.sdk_settings.odp_disabled: + return + + internal_notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, self.logger) + if internal_notification_center: + internal_notification_center.add_notification_listener( + enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE, + self._update_odp_config_on_datafile_update + ) + + self._update_odp_config_on_datafile_update() + + def _update_odp_config_on_datafile_update(self) -> None: + config = None + + if isinstance(self.config_manager, PollingConfigManager): + # can not use get_config here because callback is fired before _config_ready event is set + # and that would be a deadlock + config = self.config_manager._config + elif self.config_manager: + config = self.config_manager.get_config() + + if not config: + return + + self.odp_manager.update_odp_config( + config.public_key_for_odp, + config.host_for_odp, + config.all_segments + ) + + def _identify_user(self, user_id: str) -> None: + if not self.is_valid: + self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('identify_user')) + return + + config = self.config_manager.get_config() + if not config: + self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('identify_user')) + return + + self.odp_manager.identify_user(user_id) + + def _fetch_qualified_segments(self, user_id: str, options: Optional[list[str]] = None) -> Optional[list[str]]: + if not self.is_valid: + self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('fetch_qualified_segments')) + return None + + config = self.config_manager.get_config() + if not config: + self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('fetch_qualified_segments')) + return None + + return self.odp_manager.fetch_qualified_segments(user_id, options or []) + + def send_odp_event( + self, + action: str, + identifiers: dict[str, str], + type: str = enums.OdpManagerConfig.EVENT_TYPE, + data: Optional[dict[str, str | int | float | bool | None]] = None + ) -> None: + """ + Send an event to the ODP server. + + Args: + action: The event action name. Cannot be None or empty string. + identifiers: A dictionary for identifiers. The caller must provide at least one key-value pair. + type: The event type. Default 'fullstack'. + data: An optional dictionary for associated data. The default event data will be added to this data + before sending to the ODP server. 
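The `send_odp_event` docstring above translates into a call like the following; a hedged example, assuming an initialized client, a placeholder SDK key, and an illustrative 'fs_user_id' identifier key:

```python
# Hypothetical caller of the new send_odp_event API; the SDK key, the
# client variable, and the identifier key are assumptions for the example.
from optimizely import optimizely

client = optimizely.Optimizely(sdk_key='<YOUR_SDK_KEY>')

client.send_odp_event(
    action='purchased',                      # required, must be non-empty
    identifiers={'fs_user_id': 'user-123'},  # at least one key-value pair
    data={'revenue': 999, 'coupon': None},   # merged with default event data
)
# type is omitted here, so it falls back to enums.OdpManagerConfig.EVENT_TYPE
# ('fullstack' per the docstring above).
```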
+ """ + if not self.is_valid: + self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('send_odp_event')) + return + + if action is None or action == "": + self.logger.error(enums.Errors.ODP_INVALID_ACTION) + return + + if not identifiers or not isinstance(identifiers, dict): + self.logger.error('ODP events must have at least one key-value pair in identifiers.') + return + + if type is None or type == "": + type = enums.OdpManagerConfig.EVENT_TYPE + + config = self.config_manager.get_config() + if not config: + self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('send_odp_event')) + return + + self.odp_manager.send_event(type, action, identifiers, data or {}) + + def close(self) -> None: + if callable(getattr(self.event_processor, 'stop', None)): + self.event_processor.stop() # type: ignore[attr-defined] + if self.is_valid: + self.odp_manager.close() + if callable(getattr(self.config_manager, 'stop', None)): + self.config_manager.stop() # type: ignore[attr-defined] diff --git a/optimizely/optimizely_config.py b/optimizely/optimizely_config.py index 5e9b58d2..cf443896 100644 --- a/optimizely/optimizely_config.py +++ b/optimizely/optimizely_config.py @@ -1,4 +1,4 @@ -# Copyright 2020-2021, Optimizely +# Copyright 2020-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,16 +11,29 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import copy -from .helpers.condition import ConditionOperatorTypes +from typing import Any, Optional +from .helpers.condition import ConditionOperatorTypes +from .helpers.types import VariationDict, ExperimentDict, RolloutDict, AttributeDict, EventDict from .project_config import ProjectConfig - -class OptimizelyConfig(object): - def __init__(self, revision, experiments_map, features_map, datafile=None, - sdk_key=None, environment_key=None, attributes=None, events=None, - audiences=None): +from .logger import Logger + + +class OptimizelyConfig: + def __init__( + self, revision: str, + experiments_map: dict[str, OptimizelyExperiment], + features_map: dict[str, OptimizelyFeature], + datafile: Optional[str] = None, + sdk_key: Optional[str] = None, + environment_key: Optional[str] = None, + attributes: Optional[list[OptimizelyAttribute]] = None, + events: Optional[list[OptimizelyEvent]] = None, + audiences: Optional[list[OptimizelyAudience]] = None + ): self.revision = revision # This experiments_map is for experiments of legacy projects only. @@ -37,7 +50,7 @@ def __init__(self, revision, experiments_map, features_map, datafile=None, self.events = events or [] self.audiences = audiences or [] - def get_datafile(self): + def get_datafile(self) -> Optional[str]: """ Get the datafile associated with OptimizelyConfig. 
Returns: @@ -46,16 +59,22 @@ def get_datafile(self): return self._datafile -class OptimizelyExperiment(object): - def __init__(self, id, key, variations_map, audiences=''): +class OptimizelyExperiment: + def __init__(self, id: str, key: str, variations_map: dict[str, OptimizelyVariation], audiences: str = ''): self.id = id self.key = key self.variations_map = variations_map self.audiences = audiences -class OptimizelyFeature(object): - def __init__(self, id, key, experiments_map, variables_map): +class OptimizelyFeature: + def __init__( + self, + id: str, + key: str, + experiments_map: dict[str, OptimizelyExperiment], + variables_map: dict[str, OptimizelyVariable] + ): self.id = id self.key = key @@ -64,54 +83,57 @@ def __init__(self, id, key, experiments_map, variables_map): self.experiments_map = experiments_map self.variables_map = variables_map - self.delivery_rules = [] - self.experiment_rules = [] + self.delivery_rules: list[OptimizelyExperiment] = [] + self.experiment_rules: list[OptimizelyExperiment] = [] -class OptimizelyVariation(object): - def __init__(self, id, key, feature_enabled, variables_map): +class OptimizelyVariation: + def __init__( + self, id: str, key: str, feature_enabled: Optional[bool], variables_map: dict[str, OptimizelyVariable] + ): self.id = id self.key = key self.feature_enabled = feature_enabled self.variables_map = variables_map -class OptimizelyVariable(object): - def __init__(self, id, key, variable_type, value): +class OptimizelyVariable: + def __init__(self, id: str, key: str, variable_type: str, value: Any): self.id = id self.key = key self.type = variable_type self.value = value -class OptimizelyAttribute(object): - def __init__(self, id, key): +class OptimizelyAttribute: + def __init__(self, id: str, key: str): self.id = id self.key = key -class OptimizelyEvent(object): - def __init__(self, id, key, experiment_ids): +class OptimizelyEvent: + def __init__(self, id: str, key: str, experiment_ids: list[str]): self.id = id self.key = key self.experiment_ids = experiment_ids -class OptimizelyAudience(object): - def __init__(self, id, name, conditions): +class OptimizelyAudience: + def __init__(self, id: Optional[str], name: Optional[str], conditions: Optional[list[Any] | str]): self.id = id self.name = name self.conditions = conditions -class OptimizelyConfigService(object): +class OptimizelyConfigService: """ Class encapsulating methods to be used in creating instance of OptimizelyConfig. """ - def __init__(self, project_config): + def __init__(self, project_config: ProjectConfig, logger: Logger): """ Args: project_config ProjectConfig """ + self.logger = logger self.is_valid = True if not isinstance(project_config, ProjectConfig): @@ -135,7 +157,7 @@ def __init__(self, project_config): Merging typed_audiences with audiences from project_config. The typed_audiences has higher precedence. 
''' - optly_typed_audiences = [] + optly_typed_audiences: list[OptimizelyAudience] = [] id_lookup_dict = {} for typed_audience in project_config.typed_audiences: optly_audience = OptimizelyAudience( @@ -159,7 +181,7 @@ def __init__(self, project_config): self.audiences = optly_typed_audiences - def replace_ids_with_names(self, conditions, audiences_map): + def replace_ids_with_names(self, conditions: str | list[Any], audiences_map: dict[str, str]) -> str: ''' Gets conditions and audiences_map [id:name] @@ -173,7 +195,7 @@ def __init__(self, project_config): else: return '' - def lookup_name_from_id(self, audience_id, audiences_map): + def lookup_name_from_id(self, audience_id: str, audiences_map: dict[str, str]) -> str: ''' Gets an audience ID and audiences map @@ -189,7 +211,7 @@ def __init__(self, project_config): return name - def stringify_conditions(self, conditions, audiences_map): + def stringify_conditions(self, conditions: str | list[Any], audiences_map: dict[str, str]) -> str: ''' Gets a list of conditions from an entities.Experiment and an audiences_map [id:name] @@ -224,7 +246,7 @@ def __init__(self, project_config): operand = conditions[i].upper() else: # Check if element is a list or not - if type(conditions[i]) == list: + if isinstance(conditions[i], list): # Check if at the end or not to determine where to add the operand # Recursive call to stringify on embedded list if i + 1 < length: @@ -246,7 +268,7 @@ def __init__(self, project_config): return conditions_str or '' - def get_config(self): + def get_config(self) -> Optional[OptimizelyConfig]: """ Gets instance of OptimizelyConfig Returns: @@ -271,7 +293,7 @@ def __init__(self, project_config): self.audiences ) - def _create_lookup_maps(self): + def _create_lookup_maps(self) -> None: """ Creates lookup maps to avoid redundant iteration of config objects. """ self.exp_id_to_feature_map = {} @@ -298,7 +320,9 @@ def __init__(self, project_config): self.feature_key_variable_key_to_variable_map[feature['key']] = variables_key_map self.feature_key_variable_id_to_variable_map[feature['key']] = variables_id_map - def _get_variables_map(self, experiment, variation, feature_id=None): + def _get_variables_map( + self, experiment: ExperimentDict, variation: VariationDict, feature_id: Optional[str] = None + ) -> dict[str, OptimizelyVariable]: """ Gets variables map for given experiment and variation. Args: @@ -308,7 +332,7 @@ def __init__(self, project_config): Returns: dict - Map of variable key to OptimizelyVariable for the given variation. 
""" - variables_map = {} + variables_map: dict[str, OptimizelyVariable] = {} feature_flag = self.exp_id_to_feature_map.get(experiment['id'], None) if feature_flag is None and feature_id is None: @@ -317,18 +341,22 @@ def _get_variables_map(self, experiment, variation, feature_id=None): # set default variables for each variation if feature_id: variables_map = copy.deepcopy(self.feature_id_variable_key_to_feature_variables_map[feature_id]) - else: + elif feature_flag: variables_map = copy.deepcopy(self.feature_key_variable_key_to_variable_map[feature_flag['key']]) # set variation specific variable value if any if variation.get('featureEnabled'): + feature_variables_map = self.feature_key_variable_id_to_variable_map[feature_flag['key']] for variable in variation.get('variables', []): - feature_variable = self.feature_key_variable_id_to_variable_map[feature_flag['key']][variable['id']] - variables_map[feature_variable.key].value = variable['value'] + feature_variable = feature_variables_map.get(variable['id']) + if feature_variable: + variables_map[feature_variable.key].value = variable['value'] return variables_map - def _get_variations_map(self, experiment, feature_id=None): + def _get_variations_map( + self, experiment: ExperimentDict, feature_id: Optional[str] = None + ) -> dict[str, OptimizelyVariation]: """ Gets variation map for the given experiment. Args: @@ -337,7 +365,7 @@ def _get_variations_map(self, experiment, feature_id=None): Returns: dict -- Map of variation key to OptimizelyVariation. """ - variations_map = {} + variations_map: dict[str, OptimizelyVariation] = {} for variation in experiment.get('variations', []): variables_map = self._get_variables_map(experiment, variation, feature_id) @@ -351,7 +379,7 @@ def _get_variations_map(self, experiment, feature_id=None): return variations_map - def _get_all_experiments(self): + def _get_all_experiments(self) -> list[ExperimentDict]: """ Gets all experiments in the project config. Returns: @@ -364,7 +392,7 @@ def _get_all_experiments(self): return experiments - def _get_experiments_maps(self): + def _get_experiments_maps(self) -> tuple[dict[str, OptimizelyExperiment], dict[str, OptimizelyExperiment]]: """ Gets maps for all the experiments in the project config and updates the experiment with updated experiment audiences string. @@ -376,14 +404,22 @@ def _get_experiments_maps(self): # Id map comes in handy to figure out feature experiment. 
experiments_id_map = {} # Audiences map to use for updating experiments with new audience conditions string - audiences_map = {} + audiences_map: dict[str, str] = {} # Build map from OptimizelyAudience array for optly_audience in self.audiences: - audiences_map[optly_audience.id] = optly_audience.name + audience_id = optly_audience.id + audience_name = optly_audience.name + if audience_id is not None: + audiences_map[audience_id] = audience_name if audience_name is not None else '' all_experiments = self._get_all_experiments() + for exp in all_experiments: + # check if experiment key already exists + if exp["key"] in experiments_key_map: + self.logger.warning(f"Duplicate experiment keys found in datafile: {exp['key']}") + optly_exp = OptimizelyExperiment( exp['id'], exp['key'], self._get_variations_map(exp) ) @@ -396,7 +432,7 @@ def __init__(self, project_config): return experiments_key_map, experiments_id_map - def _get_features_map(self, experiments_id_map): + def _get_features_map(self, experiments_id_map: dict[str, OptimizelyExperiment]) -> dict[str, OptimizelyFeature]: """ Gets features map for the project config. Args: @@ -406,7 +442,7 @@ def __init__(self, project_config): dict -- feature key to OptimizelyFeature map """ features_map = {} - experiment_rules = [] + experiment_rules: list[OptimizelyExperiment] = [] for feature in self.feature_flags: @@ -431,7 +467,9 @@ def __init__(self, project_config): return features_map - def _get_delivery_rules(self, rollouts, rollout_id, feature_id): + def _get_delivery_rules( + self, rollouts: list[RolloutDict], rollout_id: Optional[str], feature_id: str + ) -> list[OptimizelyExperiment]: """ Gets an array of rollouts for the project config returns: @@ -440,19 +478,22 @@ def __init__(self, project_config): # Return list for delivery rules delivery_rules = [] # Audiences map to use for updating experiments with new audience conditions string - audiences_map = {} + audiences_map: dict[str, str] = {} # Gets a rollout based on provided rollout_id rollout = [rollout for rollout in rollouts if rollout.get('id') == rollout_id] if rollout: - rollout = rollout[0] + found_rollout = rollout[0] # Build map from OptimizelyAudience array for optly_audience in self.audiences: - audiences_map[optly_audience.id] = optly_audience.name + audience_id = optly_audience.id + audience_name = optly_audience.name + if audience_id is not None: + audiences_map[audience_id] = audience_name if audience_name is not None else '' # Get the experiments for that rollout - experiments = rollout.get('experiments') + experiments = found_rollout.get('experiments') if experiments: for experiment in experiments: optly_exp = OptimizelyExperiment( @@ -465,7 +506,7 @@ def __init__(self, project_config): return delivery_rules - def _get_attributes_list(self, attributes): + def _get_attributes_list(self, attributes: list[AttributeDict]) -> list[OptimizelyAttribute]: """ Gets attributes list for the project config Returns: @@ -482,7 +523,7 @@ def __init__(self, project_config): return attributes_list - def _get_events_list(self, events): + def _get_events_list(self, events: list[EventDict]) -> list[OptimizelyEvent]: """ Gets events list for the project_config Returns: diff --git a/optimizely/optimizely_factory.py b/optimizely/optimizely_factory.py index d9da72ba..ae466979 100644 --- a/optimizely/optimizely_factory.py +++ b/optimizely/optimizely_factory.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# 
Copyright 2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,26 +10,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Optional + +from optimizely.helpers.sdk_settings import OptimizelySdkSettings + from . import logger as optimizely_logger -from .config_manager import PollingConfigManager -from .error_handler import NoOpErrorHandler +from .config_manager import BaseConfigManager, PollingConfigManager +from .error_handler import BaseErrorHandler, NoOpErrorHandler from .event.event_processor import BatchEventProcessor -from .event_dispatcher import EventDispatcher +from .event_dispatcher import EventDispatcher, CustomEventDispatcher from .notification_center import NotificationCenter from .optimizely import Optimizely +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from .user_profile import UserProfileService + -class OptimizelyFactory(object): +class OptimizelyFactory: """ Optimizely factory that provides a basic utility to instantiate the Optimizely SDK with a minimal number of configuration options.""" - max_event_batch_size = None - max_event_flush_interval = None - polling_interval = None - blocking_timeout = None + max_event_batch_size: Optional[int] = None + max_event_flush_interval: Optional[int] = None + polling_interval: Optional[float] = None + blocking_timeout: Optional[int] = None @staticmethod - def set_batch_size(batch_size): + def set_batch_size(batch_size: int) -> int: """ Convenience method for setting the maximum number of events contained within a batch. Args: batch_size: Sets size of event_queue. @@ -39,7 +48,7 @@ def set_batch_size(batch_size): return OptimizelyFactory.max_event_batch_size @staticmethod - def set_flush_interval(flush_interval): + def set_flush_interval(flush_interval: int) -> int: """ Convenience method for setting the maximum time interval in milliseconds between event dispatches. Args: flush_interval: Time interval between event dispatches. @@ -49,7 +58,7 @@ def set_flush_interval(flush_interval): return OptimizelyFactory.max_event_flush_interval @staticmethod - def set_polling_interval(polling_interval): + def set_polling_interval(polling_interval: int) -> int: """ Method to set frequency at which datafile has to be polled. Args: polling_interval: Time in seconds after which to update datafile. @@ -58,7 +67,7 @@ def set_polling_interval(polling_interval): return OptimizelyFactory.polling_interval @staticmethod - def set_blocking_timeout(blocking_timeout): + def set_blocking_timeout(blocking_timeout: int) -> int: """ Method to set time in seconds to block the config call until config has been initialized. Args: blocking_timeout: Time in seconds to block the config call. @@ -67,7 +76,7 @@ def set_blocking_timeout(blocking_timeout): return OptimizelyFactory.blocking_timeout @staticmethod - def default_instance(sdk_key, datafile=None): + def default_instance(sdk_key: str, datafile: Optional[str] = None) -> Optimizely: """ Returns a new optimizely instance. Args: sdk_key: Required string uniquely identifying the fallback datafile corresponding to project. 
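The factory setters above are intended to be called before instantiation; an example, with illustrative values and a placeholder SDK key:

```python
# Example use of the factory's convenience setters; all values are illustrative.
from optimizely.optimizely_factory import OptimizelyFactory

OptimizelyFactory.set_batch_size(10)         # events per batch
OptimizelyFactory.set_flush_interval(1000)   # ms between event dispatches
OptimizelyFactory.set_polling_interval(60)   # seconds between datafile polls
OptimizelyFactory.set_blocking_timeout(10)   # seconds to wait for first config

client = OptimizelyFactory.default_instance('<YOUR_SDK_KEY>')
```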
@@ -77,17 +86,15 @@ def default_instance(sdk_key, datafile=None): logger = optimizely_logger.NoOpLogger() notification_center = NotificationCenter(logger) - config_manager_options = { - 'sdk_key': sdk_key, - 'update_interval': OptimizelyFactory.polling_interval, - 'blocking_timeout': OptimizelyFactory.blocking_timeout, - 'datafile': datafile, - 'logger': logger, - 'error_handler': error_handler, - 'notification_center': notification_center, - } - - config_manager = PollingConfigManager(**config_manager_options) + config_manager = PollingConfigManager( + sdk_key=sdk_key, + update_interval=OptimizelyFactory.polling_interval, + blocking_timeout=OptimizelyFactory.blocking_timeout, + datafile=datafile, + logger=logger, + error_handler=error_handler, + notification_center=notification_center + ) event_processor = BatchEventProcessor( event_dispatcher=EventDispatcher(), @@ -104,15 +111,24 @@ def default_instance(sdk_key, datafile=None): return optimizely @staticmethod - def default_instance_with_config_manager(config_manager): + def default_instance_with_config_manager(config_manager: BaseConfigManager) -> Optimizely: return Optimizely( config_manager=config_manager ) @staticmethod - def custom_instance(sdk_key, datafile=None, event_dispatcher=None, logger=None, error_handler=None, - skip_json_validation=None, user_profile_service=None, config_manager=None, - notification_center=None): + def custom_instance( + sdk_key: str, + datafile: Optional[str] = None, + event_dispatcher: Optional[CustomEventDispatcher] = None, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + skip_json_validation: Optional[bool] = None, + user_profile_service: Optional[UserProfileService] = None, + config_manager: Optional[BaseConfigManager] = None, + notification_center: Optional[NotificationCenter] = None, + settings: Optional[OptimizelySdkSettings] = None + ) -> Optimizely: """ Returns a new optimizely instance. if max_event_batch_size and max_event_flush_interval are None then default batch_size and flush_interval will be used to setup BatchEventProcessor. @@ -131,6 +147,7 @@ def custom_instance(sdk_key, datafile=None, event_dispatcher=None, logger=None, user profiles. config_manager: Optional ConfigManager interface responds to 'config' method. notification_center: Optional Instance of NotificationCenter. + settings: Optional Instance of OptimizelySdkSettings. 
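A minimal sketch of `custom_instance` with the new `settings` parameter; `SimpleLogger` and the `odp_disabled` flag are assumptions based on the imports above, not requirements of the API:

```python
# Hedged example: build a client with a custom logger and ODP disabled.
from optimizely import logger as optimizely_logger
from optimizely.helpers.sdk_settings import OptimizelySdkSettings
from optimizely.optimizely_factory import OptimizelyFactory

client = OptimizelyFactory.custom_instance(
    sdk_key='<YOUR_SDK_KEY>',
    logger=optimizely_logger.SimpleLogger(),
    settings=OptimizelySdkSettings(odp_disabled=True),
)
```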
""" error_handler = error_handler or NoOpErrorHandler() @@ -146,19 +163,18 @@ def custom_instance(sdk_key, datafile=None, event_dispatcher=None, logger=None, notification_center=notification_center, ) - config_manager_options = { - 'sdk_key': sdk_key, - 'update_interval': OptimizelyFactory.polling_interval, - 'blocking_timeout': OptimizelyFactory.blocking_timeout, - 'datafile': datafile, - 'logger': logger, - 'error_handler': error_handler, - 'skip_json_validation': skip_json_validation, - 'notification_center': notification_center, - } - config_manager = config_manager or PollingConfigManager(**config_manager_options) + config_manager = config_manager or PollingConfigManager( + sdk_key=sdk_key, + update_interval=OptimizelyFactory.polling_interval, + blocking_timeout=OptimizelyFactory.blocking_timeout, + datafile=datafile, + logger=logger, + error_handler=error_handler, + skip_json_validation=skip_json_validation, + notification_center=notification_center, + ) return Optimizely( datafile, event_dispatcher, logger, error_handler, skip_json_validation, user_profile_service, - sdk_key, config_manager, notification_center, event_processor + sdk_key, config_manager, notification_center, event_processor, settings=settings ) diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index f096ced5..e88c0f52 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -12,17 +12,38 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from __future__ import annotations import copy import threading +from typing import TYPE_CHECKING, Any, Callable, Optional, NewType, Dict +from optimizely.decision import optimizely_decision -class OptimizelyUserContext(object): +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from . import optimizely + from optimizely.helpers.event_tag_utils import EventTags + from .logger import Logger + + +# type for tracking user attributes (essentially a sub-type of dict) +UserAttributes = NewType('UserAttributes', Dict[str, Any]) + + +class OptimizelyUserContext: """ Representation of an Optimizely User Context using which APIs are to be called. """ - def __init__(self, optimizely_client, logger, user_id, user_attributes=None): + def __init__( + self, + optimizely_client: optimizely.Optimizely, + logger: Logger, + user_id: str, + user_attributes: Optional[UserAttributes] = None, + identify: bool = True + ): """ Create an instance of the Optimizely User Context. Args: @@ -30,6 +51,7 @@ def __init__(self, optimizely_client, logger, user_id, user_attributes=None): logger: logger for logging user_id: user id of this user context user_attributes: user attributes to use for this user context + identify: True to send identify event to ODP. 
Returns: UserContext instance @@ -38,52 +60,68 @@ def __init__(self, optimizely_client, logger, user_id, user_attributes=None): self.client = optimizely_client self.logger = logger self.user_id = user_id + self._qualified_segments: Optional[list[str]] = None if not isinstance(user_attributes, dict): - user_attributes = {} + user_attributes = UserAttributes({}) - self._user_attributes = user_attributes.copy() if user_attributes else {} + self._user_attributes = UserAttributes(user_attributes.copy() if user_attributes else {}) self.lock = threading.Lock() - self.forced_decisions_map = {} + self.forced_decisions_map: dict[ + OptimizelyUserContext.OptimizelyDecisionContext, + OptimizelyUserContext.OptimizelyForcedDecision + ] = {} + + if self.client and identify: + self.client._identify_user(user_id) - # decision context - class OptimizelyDecisionContext(object): + class OptimizelyDecisionContext: """ Using class with attributes here instead of namedtuple because class is extensible, it's easy to add another attribute if we wanted to extend decision context. """ - def __init__(self, flag_key, rule_key=None): + def __init__(self, flag_key: str, rule_key: Optional[str] = None): self.flag_key = flag_key self.rule_key = rule_key - def __hash__(self): + def __hash__(self) -> int: return hash((self.flag_key, self.rule_key)) - def __eq__(self, other): + def __eq__(self, other: OptimizelyUserContext.OptimizelyDecisionContext) -> bool: # type: ignore[override] return (self.flag_key, self.rule_key) == (other.flag_key, other.rule_key) # forced decision - class OptimizelyForcedDecision(object): - def __init__(self, variation_key): + class OptimizelyForcedDecision: + def __init__(self, variation_key: str): self.variation_key = variation_key - def _clone(self): + def _clone(self) -> Optional[OptimizelyUserContext]: if not self.client: return None - user_context = OptimizelyUserContext(self.client, self.logger, self.user_id, self.get_user_attributes()) + user_context = OptimizelyUserContext( + self.client, + self.logger, + self.user_id, + self.get_user_attributes(), + identify=False + ) with self.lock: if self.forced_decisions_map: + # makes sure forced_decisions_map is duplicated without any references user_context.forced_decisions_map = copy.deepcopy(self.forced_decisions_map) + if self._qualified_segments: + # no need to use deepcopy here as qualified_segments does not contain anything other than strings + user_context._qualified_segments = self._qualified_segments.copy() return user_context - def get_user_attributes(self): + def get_user_attributes(self) -> UserAttributes: with self.lock: - return self._user_attributes.copy() + return UserAttributes(self._user_attributes.copy()) - def set_attribute(self, attribute_key, attribute_value): + def set_attribute(self, attribute_key: str, attribute_value: Any) -> None: """ Sets an attribute by key for this user context. 
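A hedged sketch of the user-context flow; `create_user_context` is assumed to be the public entry point that constructs `OptimizelyUserContext` (with the default `identify=True`, so an ODP identify call is made), and the flag keys are hypothetical:

```python
# 'client' is assumed to be an initialized Optimizely instance; flag keys
# and attribute values are hypothetical.
user = client.create_user_context('user-123', {'plan': 'premium'})

user.set_attribute('logged_in', True)
decision = user.decide('my_flag', options=['INCLUDE_REASONS'])
print(decision.enabled, decision.variation_key, decision.reasons)

decisions = user.decide_for_keys(['my_flag', 'other_flag'])
```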
Args: @@ -96,7 +134,9 @@ def set_attribute(self, attribute_key, attribute_value): with self.lock: self._user_attributes[attribute_key] = attribute_value - def decide(self, key, options=None): + def decide( + self, key: str, options: Optional[list[str]] = None + ) -> optimizely_decision.OptimizelyDecision: """ Call decide on contained Optimizely object Args: @@ -111,7 +151,9 @@ def decide(self, key, options=None): return self.client._decide(self._clone(), key, options) - def decide_for_keys(self, keys, options=None): + def decide_for_keys( + self, keys: list[str], options: Optional[list[str]] = None + ) -> dict[str, optimizely_decision.OptimizelyDecision]: """ Call decide_for_keys on contained optimizely object Args: @@ -126,7 +168,7 @@ def decide_for_keys(self, keys, options=None): return self.client._decide_for_keys(self._clone(), keys, options) - def decide_all(self, options=None): + def decide_all(self, options: Optional[list[str]] = None) -> dict[str, optimizely_decision.OptimizelyDecision]: """ Call decide_all on contained optimizely instance Args: @@ -140,16 +182,18 @@ def decide_all(self, options=None): return self.client._decide_all(self._clone(), options) - def track_event(self, event_key, event_tags=None): + def track_event(self, event_key: str, event_tags: Optional[EventTags] = None) -> None: return self.client.track(event_key, self.user_id, self.get_user_attributes(), event_tags) - def as_json(self): + def as_json(self) -> dict[str, Any]: return { 'user_id': self.user_id, 'attributes': self.get_user_attributes(), } - def set_forced_decision(self, decision_context, decision): + def set_forced_decision( + self, decision_context: OptimizelyDecisionContext, decision: OptimizelyForcedDecision + ) -> bool: """ Sets the forced decision for a given decision context. @@ -165,7 +209,7 @@ def set_forced_decision(self, decision_context, decision): return True - def get_forced_decision(self, decision_context): + def get_forced_decision(self, decision_context: OptimizelyDecisionContext) -> Optional[OptimizelyForcedDecision]: """ Gets the forced decision (variation key) for a given decision context. @@ -178,7 +222,7 @@ def get_forced_decision(self, decision_context): forced_decision = self.find_forced_decision(decision_context) return forced_decision - def remove_forced_decision(self, decision_context): + def remove_forced_decision(self, decision_context: OptimizelyDecisionContext) -> bool: """ Removes the forced decision for a given decision context. @@ -186,7 +230,7 @@ def remove_forced_decision(self, decision_context): decision_context: a decision context. Returns: - Returns: true if the forced decision has been removed successfully. + True if the forced decision has been removed successfully. """ with self.lock: if decision_context in self.forced_decisions_map: @@ -195,7 +239,7 @@ def remove_forced_decision(self, decision_context): return False - def remove_all_forced_decisions(self): + def remove_all_forced_decisions(self) -> bool: """ Removes all forced decisions bound to this user context. @@ -207,7 +251,7 @@ def remove_all_forced_decisions(self): return True - def find_forced_decision(self, decision_context): + def find_forced_decision(self, decision_context: OptimizelyDecisionContext) -> Optional[OptimizelyForcedDecision]: """ Gets forced decision from forced decision map. 
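Continuing that example, a round trip through the forced-decision API shown above (flag and variation keys remain hypothetical):

```python
# Set, read back, and remove a forced decision on the user context.
context = OptimizelyUserContext.OptimizelyDecisionContext(flag_key='my_flag', rule_key=None)
forced = OptimizelyUserContext.OptimizelyForcedDecision('variation_b')

assert user.set_forced_decision(context, forced) is True
found = user.get_forced_decision(context)
assert found is not None and found.variation_key == 'variation_b'
assert user.remove_forced_decision(context) is True
```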
@@ -223,3 +267,77 @@ def find_forced_decision(self, decision_context): # must allow None to be returned for the Flags only case return self.forced_decisions_map.get(decision_context) + + def is_qualified_for(self, segment: str) -> bool: + """ + Checks if the provided segment is in the qualified_segments list. + + Args: + segment: a segment name. + + Returns: + True if the segment is in the qualified segments list. + """ + with self.lock: + if self._qualified_segments is not None: + return segment in self._qualified_segments + return False + + def get_qualified_segments(self) -> Optional[list[str]]: + """ + Gets the qualified segments. + + Returns: + A list of qualified segment names. + """ + with self.lock: + if self._qualified_segments is not None: + return self._qualified_segments.copy() + return None + + def set_qualified_segments(self, segments: Optional[list[str]]) -> None: + """ + Replaces any qualified segments with the provided list of segments. + + Args: + segments: a list of segment names. + + Returns: + None. + """ + with self.lock: + self._qualified_segments = None if segments is None else segments.copy() + + def fetch_qualified_segments( + self, + callback: Optional[Callable[[bool], None]] = None, + options: Optional[list[str]] = None + ) -> bool | threading.Thread: + """ + Fetch all qualified segments for the user context. + The fetched segments will be saved and can be accessed using the get/set_qualified_segments methods. + + Args: + callback: An optional function to run after the fetch has completed. The function will be provided + a boolean value indicating if the fetch was successful. If a callback is provided, the fetch + will be run in a separate thread, otherwise it will be run synchronously. + options: An array of OptimizelySegmentOptions used to ignore and/or reset the cache (optional). + + Returns: + A boolean value indicating if the fetch was successful. + """ + def _fetch_qualified_segments() -> bool: + segments = self.client._fetch_qualified_segments(self.user_id, options or []) if self.client else None + self.set_qualified_segments(segments) + success = segments is not None + + if callable(callback): + callback(success) + return success + + if callback: + fetch_thread = threading.Thread(target=_fetch_qualified_segments, name="FetchQualifiedSegmentsThread") + fetch_thread.start() + return fetch_thread + else: + return _fetch_qualified_segments() diff --git a/optimizely/project_config.py b/optimizely/project_config.py index 82da17c9..f774ff8a 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -1,4 +1,4 @@ -# Copyright 2016-2019, 2021, Optimizely +# Copyright 2016-2019, 2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,14 +10,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from __future__ import annotations import json -from collections import OrderedDict +from typing import TYPE_CHECKING, Optional, Type, TypeVar, cast, Any, Iterable, List +from sys import version_info from . import entities from . 
import exceptions from .helpers import condition as condition_helper from .helpers import enums +from .helpers import types + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + +if TYPE_CHECKING: + # prevent circular dependency by skipping import at runtime + from .logger import Logger + SUPPORTED_VERSIONS = [ enums.DatafileVersions.V2, @@ -25,13 +37,15 @@ enums.DatafileVersions.V4, ] -RESERVED_ATTRIBUTE_PREFIX = '$opt_' +RESERVED_ATTRIBUTE_PREFIX: Final = '$opt_' +EntityClass = TypeVar('EntityClass') -class ProjectConfig(object): + +class ProjectConfig: """ Representation of the Optimizely project config. """ - def __init__(self, datafile, logger, error_handler): + def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): """ ProjectConfig init method to load and set project config data. Args: @@ -41,39 +55,54 @@ def __init__(self, datafile, logger, error_handler): """ config = json.loads(datafile) - self._datafile = u'{}'.format(datafile) + self._datafile = datafile.decode('utf-8') if isinstance(datafile, bytes) else datafile self.logger = logger self.error_handler = error_handler - self.version = config.get('version') + self.version: str = config.get('version') if self.version not in SUPPORTED_VERSIONS: raise exceptions.UnsupportedDatafileVersionException( enums.Errors.UNSUPPORTED_DATAFILE_VERSION.format(self.version) ) - self.account_id = config.get('accountId') - self.project_id = config.get('projectId') - self.revision = config.get('revision') - self.sdk_key = config.get('sdkKey', None) - self.environment_key = config.get('environmentKey', None) - self.groups = config.get('groups', []) - self.experiments = config.get('experiments', []) - self.events = config.get('events', []) - self.attributes = config.get('attributes', []) - self.audiences = config.get('audiences', []) - self.typed_audiences = config.get('typedAudiences', []) - self.feature_flags = config.get('featureFlags', []) - self.rollouts = config.get('rollouts', []) - self.anonymize_ip = config.get('anonymizeIP', False) - self.send_flag_decisions = config.get('sendFlagDecisions', False) - self.bot_filtering = config.get('botFiltering', None) + self.account_id: str = config.get('accountId') + self.project_id: str = config.get('projectId') + self.revision: str = config.get('revision') + self.sdk_key: Optional[str] = config.get('sdkKey', None) + self.environment_key: Optional[str] = config.get('environmentKey', None) + self.groups: list[types.GroupDict] = config.get('groups', []) + self.experiments: list[types.ExperimentDict] = config.get('experiments', []) + self.events: list[types.EventDict] = config.get('events', []) + self.attributes: list[types.AttributeDict] = config.get('attributes', []) + self.audiences: list[types.AudienceDict] = config.get('audiences', []) + self.typed_audiences: list[types.AudienceDict] = config.get('typedAudiences', []) + self.feature_flags: list[types.FeatureFlagDict] = config.get('featureFlags', []) + self.rollouts: list[types.RolloutDict] = config.get('rollouts', []) + self.integrations: list[types.IntegrationDict] = config.get('integrations', []) + self.anonymize_ip: bool = config.get('anonymizeIP', False) + self.send_flag_decisions: bool = config.get('sendFlagDecisions', False) + self.bot_filtering: Optional[bool] = config.get('botFiltering', None) + self.public_key_for_odp: Optional[str] = None + self.host_for_odp: Optional[str] = None + self.all_segments: list[str] = [] # Utility maps for quick lookup - 
self.group_id_map = self._generate_key_map(self.groups, 'id', entities.Group) - self.experiment_id_map = self._generate_key_map(self.experiments, 'id', entities.Experiment) - self.event_key_map = self._generate_key_map(self.events, 'key', entities.Event) - self.attribute_key_map = self._generate_key_map(self.attributes, 'key', entities.Attribute) - - self.audience_id_map = self._generate_key_map(self.audiences, 'id', entities.Audience) + self.group_id_map: dict[str, entities.Group] = self._generate_key_map(self.groups, 'id', entities.Group) + self.experiment_id_map: dict[str, entities.Experiment] = self._generate_key_map( + self.experiments, 'id', entities.Experiment + ) + self.event_key_map: dict[str, entities.Event] = self._generate_key_map(self.events, 'key', entities.Event) + self.attribute_key_map: dict[str, entities.Attribute] = self._generate_key_map( + self.attributes, 'key', entities.Attribute + ) + self.attribute_id_to_key_map: dict[str, str] = {} + for attribute in self.attributes: + self.attribute_id_to_key_map[attribute['id']] = attribute['key'] + self.attribute_id_map: dict[str, entities.Attribute] = self._generate_key_map( + self.attributes, 'id', entities.Attribute + ) + self.audience_id_map: dict[str, entities.Audience] = self._generate_key_map( + self.audiences, 'id', entities.Audience + ) # Conditions of audiences in typedAudiences are not expected # to be string-encoded as they are in audiences. @@ -84,8 +113,17 @@ def __init__(self, datafile, logger, error_handler): self.rollout_id_map = self._generate_key_map(self.rollouts, 'id', entities.Layer) for layer in self.rollout_id_map.values(): - for experiment in layer.experiments: - self.experiment_id_map[experiment['id']] = entities.Experiment(**experiment) + for experiment_dict in layer.experiments: + self.experiment_id_map[experiment_dict['id']] = entities.Experiment(**experiment_dict) + + if self.integrations: + self.integration_key_map = self._generate_key_map( + self.integrations, 'key', entities.Integration, first_value=True + ) + odp_integration = self.integration_key_map.get('odp') + if odp_integration: + self.public_key_for_odp = odp_integration.publicKey + self.host_for_odp = odp_integration.host self.audience_id_map = self._deserialize_audience(self.audience_id_map) for group in self.group_id_map.values(): @@ -94,13 +132,16 @@ def __init__(self, datafile, logger, error_handler): experiment.__dict__.update({'groupId': group.id, 'groupPolicy': group.policy}) self.experiment_id_map.update(experiments_in_group_id_map) - self.experiment_key_map = {} - self.variation_key_map = {} - self.variation_id_map = {} - self.variation_variable_usage_map = {} - self.variation_id_map_by_experiment_id = {} - self.variation_key_map_by_experiment_id = {} - self.flag_variations_map = {} + for audience in self.audience_id_map.values(): + self.all_segments += audience.get_segments() + + self.experiment_key_map: dict[str, entities.Experiment] = {} + self.variation_key_map: dict[str, dict[str, entities.Variation]] = {} + self.variation_id_map: dict[str, dict[str, entities.Variation]] = {} + self.variation_variable_usage_map: dict[str, dict[str, entities.Variation.VariableUsage]] = {} + self.variation_id_map_by_experiment_id: dict[str, dict[str, entities.Variation]] = {} + self.variation_key_map_by_experiment_id: dict[str, dict[str, entities.Variation]] = {} + self.flag_variations_map: dict[str, list[entities.Variation]] = {} for experiment in self.experiment_id_map.values(): self.experiment_key_map[experiment.key] = experiment @@ -112,7 
+153,7 @@ def __init__(self, datafile, logger, error_handler): self.variation_id_map_by_experiment_id[experiment.id] = {} self.variation_key_map_by_experiment_id[experiment.id] = {} - for variation in self.variation_key_map.get(experiment.key).values(): + for variation in self.variation_key_map[experiment.key].values(): self.variation_id_map[experiment.key][variation.id] = variation self.variation_id_map_by_experiment_id[experiment.id][variation.id] = variation self.variation_key_map_by_experiment_id[experiment.id][variation.key] = variation @@ -124,20 +165,20 @@ def __init__(self, datafile, logger, error_handler): # Dictionary containing dictionary of experiment ID to feature ID. # for checking that experiment is a feature experiment or not. - self.experiment_feature_map = {} + self.experiment_feature_map: dict[str, list[str]] = {} for feature in self.feature_key_map.values(): # As we cannot create json variables in datafile directly, here we convert # the variables of string type and json subType to json type # This is needed to fully support json variables - for variable in self.feature_key_map[feature.key].variables: + for variable in cast(List[types.VariableDict], self.feature_key_map[feature.key].variables): sub_type = variable.get('subType', '') if variable['type'] == entities.Variable.Type.STRING and sub_type == entities.Variable.Type.JSON: variable['type'] = entities.Variable.Type.JSON feature.variables = self._generate_key_map(feature.variables, 'key', entities.Variable) - rules = [] - variations = [] + rules: list[entities.Experiment] = [] + variations: list[entities.Variation] = [] for exp_id in feature.experimentIds: # Add this experiment in experiment-feature map. self.experiment_feature_map[exp_id] = [feature.id] @@ -150,35 +191,37 @@ def __init__(self, datafile, logger, error_handler): for rule in rules: # variation_id_map_by_experiment_id gives variation entity object while # experiment_id_map will give us dictionary - for rule_variation in self.variation_id_map_by_experiment_id.get(rule.id).values(): + for rule_variation in self.variation_id_map_by_experiment_id[rule.id].values(): if len(list(filter(lambda variation: variation.id == rule_variation.id, variations))) == 0: variations.append(rule_variation) self.flag_variations_map[feature.key] = variations @staticmethod - def _generate_key_map(entity_list, key, entity_class): + def _generate_key_map( + entity_list: Iterable[Any], key: str, entity_class: Type[EntityClass], first_value: bool = False + ) -> dict[str, EntityClass]: """ Helper method to generate map from key to entity object for given list of dicts. Args: entity_list: List consisting of dict. key: Key in each dict which will be key in the map. entity_class: Class representing the entity. + first_value: If True, only save the first value found for each key. Returns: Map mapping key to entity object. """ - # using ordered dict here to preserve insertion order of entities - # OrderedDict() is needed for Py versions 3.5 and less to work. - # Insertion order has been made default in dicts since Py 3.6 - key_map = OrderedDict() + key_map: dict[str, EntityClass] = {} for obj in entity_list: + if first_value and key_map.get(obj[key]): + continue key_map[obj[key]] = entity_class(**obj) return key_map @staticmethod - def _deserialize_audience(audience_map): + def _deserialize_audience(audience_map: dict[str, entities.Audience]) -> dict[str, entities.Audience]: """ Helper method to de-serialize and populate audience map with the condition list and structure. 
Args: @@ -194,7 +237,7 @@ def _deserialize_audience(audience_map): return audience_map - def get_rollout_experiments(self, rollout): + def get_rollout_experiments(self, rollout: entities.Layer) -> list[entities.Experiment]: """ Helper method to get rollout experiments. Args: @@ -209,7 +252,7 @@ def get_rollout_experiments(self, rollout): return rollout_experiments - def get_typecast_value(self, value, type): + def get_typecast_value(self, value: str, type: str) -> Any: """ Helper method to determine actual value based on type of feature variable. Args: @@ -231,7 +274,7 @@ def get_typecast_value(self, value, type): else: return value - def to_datafile(self): + def to_datafile(self) -> str: """ Get the datafile corresponding to ProjectConfig. Returns: @@ -240,7 +283,7 @@ def to_datafile(self): return self._datafile - def get_version(self): + def get_version(self) -> str: """ Get version of the datafile. Returns: @@ -249,7 +292,7 @@ def get_version(self): return self.version - def get_revision(self): + def get_revision(self) -> str: """ Get revision of the datafile. Returns: @@ -258,7 +301,7 @@ def get_revision(self): return self.revision - def get_sdk_key(self): + def get_sdk_key(self) -> Optional[str]: """ Get sdk key from the datafile. Returns: @@ -267,7 +310,7 @@ def get_sdk_key(self): return self.sdk_key - def get_environment_key(self): + def get_environment_key(self) -> Optional[str]: """ Get environment key from the datafile. Returns: @@ -276,7 +319,7 @@ def get_environment_key(self): return self.environment_key - def get_account_id(self): + def get_account_id(self) -> str: """ Get account ID from the config. Returns: @@ -285,7 +328,7 @@ def get_account_id(self): return self.account_id - def get_project_id(self): + def get_project_id(self) -> str: """ Get project ID from the config. Returns: @@ -294,7 +337,7 @@ def get_project_id(self): return self.project_id - def get_experiment_from_key(self, experiment_key): + def get_experiment_from_key(self, experiment_key: str) -> Optional[entities.Experiment]: """ Get experiment for the provided experiment key. Args: @@ -309,11 +352,11 @@ def get_experiment_from_key(self, experiment_key): if experiment: return experiment - self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key) + self.logger.error(f'Experiment key "{experiment_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_experiment_from_id(self, experiment_id): + def get_experiment_from_id(self, experiment_id: str) -> Optional[entities.Experiment]: """ Get experiment for the provided experiment ID. Args: @@ -328,11 +371,11 @@ def get_experiment_from_id(self, experiment_id): if experiment: return experiment - self.logger.error('Experiment ID "%s" is not in datafile.' % experiment_id) + self.logger.error(f'Experiment ID "{experiment_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_group(self, group_id): + def get_group(self, group_id: Optional[str]) -> Optional[entities.Group]: """ Get group for the provided group ID. Args: @@ -342,16 +385,16 @@ def get_group(self, group_id): Group corresponding to the provided group ID. """ - group = self.group_id_map.get(group_id) + group = self.group_id_map.get(group_id) # type: ignore[arg-type] if group: return group - self.logger.error('Group ID "%s" is not in datafile.' 
% group_id) + self.logger.error(f'Group ID "{group_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID)) return None - def get_audience(self, audience_id): + def get_audience(self, audience_id: str) -> Optional[entities.Audience]: """ Get audience object for the provided audience ID. Args: @@ -365,10 +408,11 @@ def get_audience(self, audience_id): if audience: return audience - self.logger.error('Audience ID "%s" is not in datafile.' % audience_id) + self.logger.error(f'Audience ID "{audience_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE))) + return None - def get_variation_from_key(self, experiment_key, variation_key): + def get_variation_from_key(self, experiment_key: str, variation_key: str) -> Optional[entities.Variation]: """ Get variation given experiment and variation key. Args: @@ -387,15 +431,15 @@ def get_variation_from_key(self, experiment_key, variation_key): if variation: return variation else: - self.logger.error('Variation key "%s" is not in datafile.' % variation_key) + self.logger.error(f'Variation key "{variation_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION)) return None - self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key) + self.logger.error(f'Experiment key "{experiment_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_variation_from_id(self, experiment_key, variation_id): + def get_variation_from_id(self, experiment_key: str, variation_id: str) -> Optional[entities.Variation]: """ Get variation given experiment and variation ID. Args: @@ -413,15 +457,15 @@ def get_variation_from_id(self, experiment_key, variation_id): if variation: return variation else: - self.logger.error('Variation ID "%s" is not in datafile.' % variation_id) + self.logger.error(f'Variation ID "{variation_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION)) return None - self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key) + self.logger.error(f'Experiment key "{experiment_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_event(self, event_key): + def get_event(self, event_key: str) -> Optional[entities.Event]: """ Get event for the provided event key. Args: @@ -436,11 +480,11 @@ def get_event(self, event_key): if event: return event - self.logger.error('Event "%s" is not in datafile.' % event_key) + self.logger.error(f'Event "{event_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY)) return None - def get_attribute_id(self, attribute_key): + def get_attribute_id(self, attribute_key: str) -> Optional[str]: """ Get attribute ID for the provided attribute key. Args: @@ -457,8 +501,8 @@ def get_attribute_id(self, attribute_key): if has_reserved_prefix: self.logger.warning( ( - 'Attribute %s unexpectedly has reserved prefix %s; using attribute ID ' - 'instead of reserved attribute name.' 
-            % (attribute_key, RESERVED_ATTRIBUTE_PREFIX)
+            f'Attribute {attribute_key} unexpectedly has reserved prefix {RESERVED_ATTRIBUTE_PREFIX};'
+            f' using attribute ID instead of reserved attribute name.'
        )
    )
@@ -467,11 +511,39 @@ def get_attribute_id(self, attribute_key):
        if has_reserved_prefix:
            return attribute_key

-        self.logger.error('Attribute "%s" is not in datafile.' % attribute_key)
+        self.logger.error(f'Attribute "{attribute_key}" is not in datafile.')
        self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE))
        return None

-    def get_feature_from_key(self, feature_key):
+    def get_attribute_by_key(self, key: str) -> Optional[entities.Attribute]:
+        """ Get attribute for the provided attribute key.
+
+        Args:
+            key: Attribute key for which attribute is to be fetched.
+
+        Returns:
+            Attribute corresponding to the provided attribute key.
+        """
+        if key in self.attribute_key_map:
+            return self.attribute_key_map[key]
+        self.logger.error(f'Attribute with key:"{key}" is not in datafile.')
+        return None
+
+    def get_attribute_key_by_id(self, id: str) -> Optional[str]:
+        """ Get attribute key for the provided attribute id.
+
+        Args:
+            id: Attribute id for which attribute is to be fetched.
+
+        Returns:
+            Attribute key corresponding to the provided attribute id.
+        """
+        if id in self.attribute_id_to_key_map:
+            return self.attribute_id_to_key_map[id]
+        self.logger.error(f'Attribute with id:"{id}" is not in datafile.')
+        return None
+
+    def get_feature_from_key(self, feature_key: str) -> Optional[entities.FeatureFlag]:
        """ Get feature for the provided feature key.

        Args:
@@ -486,10 +558,10 @@ def get_feature_from_key(self, feature_key):
        if feature:
            return feature

-        self.logger.error('Feature "%s" is not in datafile.' % feature_key)
+        self.logger.error(f'Feature "{feature_key}" is not in datafile.')
        return None

-    def get_rollout_from_id(self, rollout_id):
+    def get_rollout_from_id(self, rollout_id: str) -> Optional[entities.Layer]:
        """ Get rollout for the provided ID.

        Args:
@@ -504,10 +576,12 @@ def get_rollout_from_id(self, rollout_id):
        if layer:
            return layer

-        self.logger.error('Rollout with ID "%s" is not in datafile.' % rollout_id)
+        self.logger.error(f'Rollout with ID "{rollout_id}" is not in datafile.')
        return None

-    def get_variable_value_for_variation(self, variable, variation):
+    def get_variable_value_for_variation(
+        self, variable: Optional[entities.Variable], variation: Optional[entities.Variation]
+    ) -> Optional[str]:
        """ Get the variable value for the given variation.

        Args:
@@ -521,7 +595,7 @@ def get_variable_value_for_variation(self, variable, variation):
        if not variable or not variation:
            return None
        if variation.id not in self.variation_variable_usage_map:
-            self.logger.error('Variation with ID "%s" is not in the datafile.' % variation.id)
+            self.logger.error(f'Variation with ID "{variation.id}" is not in the datafile.')
            return None

        # Get all variable usages for the given variation
@@ -540,7 +614,7 @@ def get_variable_value_for_variation(self, variable, variation):

        return variable_value

-    def get_variable_for_feature(self, feature_key, variable_key):
+    def get_variable_for_feature(self, feature_key: str, variable_key: str) -> Optional[entities.Variable]:
        """ Get the variable with the given variable key for the given feature.

        Args:
@@ -553,16 +627,16 @@ def get_variable_for_feature(self, feature_key, variable_key):
        feature = self.feature_key_map.get(feature_key)
        if not feature:
-            self.logger.error('Feature with key "%s" not found in the datafile.'
-                              % feature_key)
+            self.logger.error(f'Feature with key "{feature_key}" not found in the datafile.')
            return None

        if variable_key not in feature.variables:
-            self.logger.error('Variable with key "%s" not found in the datafile.' % variable_key)
+            self.logger.error(f'Variable with key "{variable_key}" not found in the datafile.')
            return None

        return feature.variables.get(variable_key)

-    def get_anonymize_ip_value(self):
+    def get_anonymize_ip_value(self) -> bool:
        """ Gets the anonymize IP value.

        Returns:
@@ -571,7 +645,7 @@ def get_anonymize_ip_value(self):

        return self.anonymize_ip

-    def get_send_flag_decisions_value(self):
+    def get_send_flag_decisions_value(self) -> bool:
        """ Gets the Send Flag Decisions value.

        Returns:
@@ -580,7 +654,7 @@ def get_send_flag_decisions_value(self):

        return self.send_flag_decisions

-    def get_bot_filtering_value(self):
+    def get_bot_filtering_value(self) -> Optional[bool]:
        """ Gets the bot filtering value.

        Returns:
@@ -589,7 +663,7 @@ def get_bot_filtering_value(self):

        return self.bot_filtering

-    def is_feature_experiment(self, experiment_id):
+    def is_feature_experiment(self, experiment_id: str) -> bool:
        """ Determines if given experiment is a feature test.

        Args:
@@ -601,39 +675,47 @@ def is_feature_experiment(self, experiment_id):

        return experiment_id in self.experiment_feature_map

-    def get_variation_from_id_by_experiment_id(self, experiment_id, variation_id):
+    def get_variation_from_id_by_experiment_id(
+        self, experiment_id: str, variation_id: str
+    ) -> Optional[entities.Variation]:
        """ Gets variation from variation id and specific experiment id

        Returns:
            The variation for the experiment id and variation id
-            or empty dict if not found
+            or None if not found
        """
        if (experiment_id in self.variation_id_map_by_experiment_id
                and variation_id in self.variation_id_map_by_experiment_id[experiment_id]):
            return self.variation_id_map_by_experiment_id[experiment_id][variation_id]

-        self.logger.error('Variation with id "%s" not defined in the datafile for experiment "%s".' %
-                          (variation_id, experiment_id))
+        self.logger.error(
+            f'Variation with id "{variation_id}" not defined in the datafile for experiment "{experiment_id}".'
+        )

-        return {}
+        return None

-    def get_variation_from_key_by_experiment_id(self, experiment_id, variation_key):
+    def get_variation_from_key_by_experiment_id(
+        self, experiment_id: str, variation_key: str
+    ) -> Optional[entities.Variation]:
        """ Gets variation from variation key and specific experiment id

        Returns:
            The variation for the experiment id and variation key
-            or empty dict if not found
+            or None if not found
        """
        if (experiment_id in self.variation_key_map_by_experiment_id
                and variation_key in self.variation_key_map_by_experiment_id[experiment_id]):
            return self.variation_key_map_by_experiment_id[experiment_id][variation_key]

-        self.logger.error('Variation with key "%s" not defined in the datafile for experiment "%s".' %
-                          (variation_key, experiment_id))
+        self.logger.error(
+            f'Variation with key "{variation_key}" not defined in the datafile for experiment "{experiment_id}".'
+        )

-        return {}
+        return None

-    def get_flag_variation(self, flag_key, variation_attribute, target_value):
+    def get_flag_variation(
+        self, flag_key: str, variation_attribute: str, target_value: str
+    ) -> Optional[entities.Variation]:
        """ Gets variation by specified variation attribute.

        For example if variation_attribute is id, the function gets variation by using variation_id.
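Reviewer note: the `get_variation_from_*_by_experiment_id` helpers above change their miss value from `{}` to `None`. Truthiness checks keep working because both values are falsy, but any caller that compared against the empty dict must now compare against `None`. A minimal calling sketch under that assumption (the `config` object and the IDs below are hypothetical placeholders, not taken from this diff):

```python
# Hypothetical caller of the retyped ProjectConfig getter. `config` is assumed
# to be an already-built ProjectConfig instance; the IDs are made up.
variation = config.get_variation_from_id_by_experiment_id('111127', '111129')
if variation is None:
    # Before this change a miss returned {} (also falsy); after it, a plain
    # None check is the canonical "not found" test.
    print('variation not found in datafile')
else:
    print(f'user would see variation {variation.key}')
```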
diff --git a/optimizely/py.typed b/optimizely/py.typed
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/optimizely/py.typed
@@ -0,0 +1 @@
+
diff --git a/optimizely/user_profile.py b/optimizely/user_profile.py
index 177bfc7c..f5ded013 100644
--- a/optimizely/user_profile.py
+++ b/optimizely/user_profile.py
@@ -1,4 +1,4 @@
-# Copyright 2017, Optimizely
+# Copyright 2017, 2022, Optimizely
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -11,8 +11,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+from __future__ import annotations
+from typing import Any, Optional
+from sys import version_info
+from . import logger as _logging

-class UserProfile(object):
+if version_info < (3, 8):
+    from typing_extensions import Final
+else:
+    from typing import Final, TYPE_CHECKING  # type: ignore
+
+    if TYPE_CHECKING:
+        # prevent circular dependency by skipping import at runtime
+        from .entities import Experiment, Variation
+        from optimizely.error_handler import BaseErrorHandler
+
+
+class UserProfile:
    """ Class encapsulating information representing a user's profile.

    user_id: User's identifier.
@@ -20,18 +35,23 @@ class UserProfile(object):
      variation ID identifying the variation for the user.
    """

-    USER_ID_KEY = 'user_id'
-    EXPERIMENT_BUCKET_MAP_KEY = 'experiment_bucket_map'
-    VARIATION_ID_KEY = 'variation_id'
+    USER_ID_KEY: Final = 'user_id'
+    EXPERIMENT_BUCKET_MAP_KEY: Final = 'experiment_bucket_map'
+    VARIATION_ID_KEY: Final = 'variation_id'

-    def __init__(self, user_id, experiment_bucket_map=None, **kwargs):
+    def __init__(
+        self,
+        user_id: str,
+        experiment_bucket_map: Optional[dict[str, dict[str, Optional[str]]]] = None,
+        **kwargs: Any
+    ):
        self.user_id = user_id
        self.experiment_bucket_map = experiment_bucket_map or {}

-    def __eq__(self, other):
+    def __eq__(self, other: object) -> bool:
        return self.__dict__ == other.__dict__

-    def get_variation_for_experiment(self, experiment_id):
+    def get_variation_for_experiment(self, experiment_id: str) -> Optional[str]:
        """ Helper method to retrieve variation ID for given experiment.

        Args:
@@ -40,25 +60,23 @@ def get_variation_for_experiment(self, experiment_id):
        Returns:
            Variation ID corresponding to the experiment. None if no decision available.
        """
-
        return self.experiment_bucket_map.get(experiment_id, {self.VARIATION_ID_KEY: None}).get(self.VARIATION_ID_KEY)

-    def save_variation_for_experiment(self, experiment_id, variation_id):
+    def save_variation_for_experiment(self, experiment_id: str, variation_id: str) -> None:
        """ Helper method to save new experiment/variation as part of the user's profile.

        Args:
            experiment_id: ID for experiment for which the decision is to be stored.
            variation_id: ID for variation that the user saw.
        """
-
        self.experiment_bucket_map.update({experiment_id: {self.VARIATION_ID_KEY: variation_id}})


-class UserProfileService(object):
+class UserProfileService:
    """ Class encapsulating user profile service functionality.
    Override with your own implementation for storing and retrieving the user profile. """

-    def lookup(self, user_id):
+    def lookup(self, user_id: str) -> dict[str, Any]:
        """ Fetch the user profile dict corresponding to the user ID.

        Args:
@@ -69,10 +87,71 @@ def lookup(self, user_id):
        """
        return UserProfile(user_id).__dict__

-    def save(self, user_profile):
+    def save(self, user_profile: dict[str, Any]) -> None:
        """ Save the user profile dict sent to this method.

        Args:
            user_profile: Dict representing the user's profile.
        """
        pass
+
+
+class UserProfileTracker:
+    def __init__(self,
+                 user_id: str,
+                 user_profile_service: Optional[UserProfileService],
+                 logger: Optional[_logging.Logger] = None):
+        self.user_id = user_id
+        self.user_profile_service = user_profile_service
+        self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger())
+        self.profile_updated = False
+        self.user_profile = UserProfile(user_id, {})
+
+    def get_user_profile(self) -> UserProfile:
+        return self.user_profile
+
+    def load_user_profile(self, reasons: Optional[list[str]] = [],
+                          error_handler: Optional[BaseErrorHandler] = None) -> None:
+        if reasons is None:
+            reasons = []
+        try:
+            user_profile = self.user_profile_service.lookup(self.user_id) if self.user_profile_service else None
+            if user_profile is None:
+                message = "Unable to get a user profile from the UserProfileService."
+                reasons.append(message)
+            else:
+                if 'user_id' in user_profile and 'experiment_bucket_map' in user_profile:
+                    self.user_profile = UserProfile(
+                        user_profile['user_id'],
+                        user_profile['experiment_bucket_map']
+                    )
+                    self.logger.info("User profile loaded successfully.")
+                else:
+                    missing_keys = [key for key in ['user_id', 'experiment_bucket_map'] if key not in user_profile]
+                    message = f"User profile is missing keys: {', '.join(missing_keys)}"
+                    reasons.append(message)
+        except Exception as exception:
+            message = str(exception)
+            reasons.append(message)
+            self.logger.exception(f'Unable to retrieve user profile for user "{self.user_id}" as lookup failed.')
+            if error_handler:
+                error_handler.handle_error(exception)
+
+    def update_user_profile(self, experiment: Experiment, variation: Variation) -> None:
+        variation_id = variation.id
+        experiment_id = experiment.id
+        self.user_profile.save_variation_for_experiment(experiment_id, variation_id)
+        self.profile_updated = True
+
+    def save_user_profile(self, error_handler: Optional[BaseErrorHandler] = None) -> None:
+        if not self.profile_updated:
+            return
+        try:
+            if self.user_profile_service:
+                self.user_profile_service.save(self.user_profile.__dict__)
+                self.logger.info(f'Saved user profile of user "{self.user_profile.user_id}".')
+        except Exception as exception:
+            self.logger.warning(f'Failed to save user profile of user "{self.user_profile.user_id}" '
+                                f'for exception: {exception}.')
+            if error_handler:
+                error_handler.handle_error(exception)
diff --git a/optimizely/version.py b/optimizely/version.py
index d6504ce4..4f0f20c6 100644
--- a/optimizely/version.py
+++ b/optimizely/version.py
@@ -1,4 +1,4 @@
-# Copyright 2016-2020, Optimizely
+# Copyright 2016-2020, 2022-2023, Optimizely
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -11,5 +11,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-version_info = (4, 0, 0)
+version_info = (5, 2, 0)
__version__ = '.'.join(str(v) for v in version_info)
diff --git a/requirements/core.txt b/requirements/core.txt
index f5362041..7cbfe29f 100644
--- a/requirements/core.txt
+++ b/requirements/core.txt
@@ -1,7 +1,4 @@
jsonschema>=3.2.0
pyrsistent>=0.16.0
requests>=2.21
-pyOpenSSL>=19.1.0
-cryptography>=2.8.0
idna>=2.10
-six>=1.12.0
diff --git a/requirements/docs.txt b/requirements/docs.txt
index 51d4bf0e..91542e7a 100644
--- a/requirements/docs.txt
+++ b/requirements/docs.txt
@@ -1,3 +1,3 @@
-sphinx==2.4.4
-sphinx-rtd-theme==0.4.3
-m2r==0.2.1
+sphinx==4.4.0
+sphinx-rtd-theme==1.2.2
+m2r==0.3.1
diff --git a/requirements/test.txt b/requirements/test.txt
index 069b65b7..c2e086c8 100644
--- a/requirements/test.txt
+++ b/requirements/test.txt
@@ -1,7 +1,6 @@
coverage
flake8 >= 4.0.1
funcsigs >= 0.4
-mock >= 4.0.0
pytest >= 6.2.0
pytest-cov
python-coveralls
\ No newline at end of file
diff --git a/requirements/typing.txt b/requirements/typing.txt
new file mode 100644
index 00000000..ba65f536
--- /dev/null
+++ b/requirements/typing.txt
@@ -0,0 +1,4 @@
+mypy
+types-jsonschema
+types-requests
+types-Flask
\ No newline at end of file
diff --git a/setup.py b/setup.py
index e66ce1fe..1954aa48 100644
--- a/setup.py
+++ b/setup.py
@@ -24,16 +24,17 @@
    CHANGELOG = _file.read()

about_text = (
-    'Optimizely X Full Stack is A/B testing and feature management for product development teams. '
+    'Optimizely Feature Experimentation is A/B testing and feature management for product development teams. '
    'Experiment in any application. Make every feature on your roadmap an opportunity to learn. '
-    'Learn more at https://www.optimizely.com/products/full-stack/ or see our documentation at '
-    'https://docs.developers.optimizely.com/full-stack/docs. '
+    'Learn more at https://www.optimizely.com/products/experiment/feature-experimentation/ or see our documentation at '
+    'https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/welcome. '
)

setup(
    name='optimizely-sdk',
    version=__version__,
-    description='Python SDK for Optimizely X Full Stack.',
+    description='Python SDK for Optimizely Feature Experimentation, Optimizely Full Stack (legacy), '
+                'and Optimizely Rollouts.',
    long_description=about_text + README + CHANGELOG,
    long_description_content_type='text/markdown',
    author='Optimizely',
@@ -46,12 +47,11 @@
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
-        'Programming Language :: Python :: 2.7',
-        'Programming Language :: Python :: 3.4',
-        'Programming Language :: Python :: 3.5',
-        'Programming Language :: Python :: 3.6',
-        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: 3.9',
+        'Programming Language :: Python :: 3.10',
+        'Programming Language :: Python :: 3.11',
+        'Programming Language :: Python :: 3.12',
    ],
    packages=find_packages(exclude=['docs', 'tests']),
    extras_require={'test': TEST_REQUIREMENTS},
diff --git a/tests/base.py b/tests/base.py
index 05127caf..875a26e6 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -1,4 +1,4 @@
-# Copyright 2016-2021, Optimizely
+# Copyright 2016-2023 Optimizely
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -13,20 +13,24 @@
import json
import unittest
-from six import PY3
+from typing import Optional
+from copy import deepcopy
+from unittest import mock

-from optimizely import optimizely
+from requests import Response

-if PY3:
+from optimizely import optimizely

-    def long(a):
-        raise NotImplementedError('Tests should only call `long` if running in PY2')

-# Check to verify if TestCase has the attribute assertRasesRegex or assertRaisesRegexp
-# This check depends on the version of python with assertRaisesRegexp being used by
-# python2.7. Later versions of python are using the non-deprecated assertRaisesRegex.
-if not hasattr(unittest.TestCase, 'assertRaisesRegex'):
-    unittest.TestCase.assertRaisesRegex = getattr(unittest.TestCase, 'assertRaisesRegexp')
+class CopyingMock(mock.MagicMock):
+    """
+    Forces mock to make a copy of the args instead of keeping a reference.
+    Otherwise mutable args (lists, dicts) can change after they're captured.
+    """
+    def __call__(self, *args, **kwargs):
+        args = deepcopy(args)
+        kwargs = deepcopy(kwargs)
+        return super().__call__(*args, **kwargs)


class BaseTest(unittest.TestCase):
@@ -36,9 +40,25 @@ def assertStrictTrue(self, to_assert):

    def assertStrictFalse(self, to_assert):
        self.assertIs(to_assert, False)

+    def fake_server_response(self, status_code: Optional[int] = None,
+                             content: Optional[str] = None,
+                             url: Optional[str] = None) -> Response:
+        """Mock the server response."""
+        response = Response()
+
+        if status_code:
+            response.status_code = status_code
+        if content:
+            response._content = content.encode('utf-8')
+        if url:
+            response.url = url
+
+        return response
+
    def setUp(self, config_dict='config_dict'):
        self.config_dict = {
            'revision': '42',
+            'sdkKey': 'basic-test',
            'version': '2',
            'events': [
                {'key': 'test_event', 'experimentIds': ['111127'], 'id': '111095'},
@@ -131,6 +151,7 @@ def setUp(self, config_dict='config_dict'):
        # datafile version 4
        self.config_dict_with_features = {
            'revision': '1',
+            'sdkKey': 'features-test',
            'accountId': '12001',
            'projectId': '111111',
            'version': '4',
@@ -533,6 +554,7 @@ def setUp(self, config_dict='config_dict'):

        self.config_dict_with_multiple_experiments = {
            'revision': '42',
+            'sdkKey': 'multiple-experiments',
            'version': '2',
            'events': [
                {'key': 'test_event', 'experimentIds': ['111127', '111130'], 'id': '111095'},
@@ -638,6 +660,7 @@ def setUp(self, config_dict='config_dict'):

        self.config_dict_with_unsupported_version = {
            'version': '5',
+            'sdkKey': 'unsupported-version',
            'rollouts': [],
            'projectId': '10431130345',
            'variables': [],
@@ -1054,6 +1077,204 @@ def setUp(self, config_dict='config_dict'):
                {'key': 'user_signed_up', 'id': '594090', 'experimentIds': ['1323241598', '1323241599']},
            ],
            'revision': '3',
+            'sdkKey': 'typed-audiences',
        }
+
+        self.config_dict_with_audience_segments = {
+            'version': '4',
+            'sendFlagDecisions': True,
+            'rollouts': [
+                {
+                    'experiments': [
+                        {
+                            'audienceIds': ['13389130056'],
+                            'forcedVariations': {},
+                            'id': '3332020515',
+                            'key': 'rollout-rule-1',
+                            'layerId': '3319450668',
+                            'status': 'Running',
+                            'trafficAllocation': [
+                                {
+                                    'endOfRange': 10000,
+                                    'entityId': '3324490633'
+                                }
+                            ],
+                            'variations': [
+                                {
+                                    'featureEnabled': True,
+                                    'id': '3324490633',
+                                    'key': 'rollout-variation-on',
+                                    'variables': []
+                                }
+                            ]
+                        },
+                        {
+                            'audienceIds': [],
+                            'forcedVariations': {},
+                            'id': '3332020556',
+                            'key': 'rollout-rule-2',
+                            'layerId': '3319450668',
+                            'status': 'Running',
+                            'trafficAllocation': [
+                                {
+                                    'endOfRange': 10000,
+                                    'entityId': '3324490644'
+                                }
+                            ],
+                            'variations': [
+                                {
+                                    'featureEnabled': False,
+                                    'id': '3324490644',
+                                    'key': 'rollout-variation-off',
+                                    'variables': []
+                                }
+                            ]
+                        }
+                    ],
+                    'id': '3319450668'
+                }
+            ],
+            'anonymizeIP': True,
+            'botFiltering': True,
+            'projectId': '10431130345',
+            'variables': [],
+            'featureFlags': [
+                {
+                    'experimentIds': ['10390977673'],
+                    'id': '4482920077',
+                    'key': 'flag-segment',
+                    'rolloutId': '3319450668',
+                    'variables': [
+                        {
+                            'defaultValue': '42',
+                            'id': '2687470095',
+                            'key': 'i_42',
+                            'type': 'integer'
+                        }
+                    ]
+                }
+            ],
+            'experiments': [
+                {
+                    'status': 'Running',
+                    'key': 'experiment-segment',
+                    'layerId': '10420273888',
+                    'trafficAllocation': [
+                        {
+                            'entityId': '10389729780',
+                            'endOfRange': 10000
+                        }
+                    ],
+                    'audienceIds': ['$opt_dummy_audience'],
+                    'audienceConditions': ['or', '13389142234', '13389141123'],
+                    'variations': [
+                        {
+                            'variables': [],
+                            'featureEnabled': True,
+                            'id': '10389729780',
+                            'key': 'variation-a'
+                        },
+                        {
+                            'variables': [],
+                            'id': '10416523121',
+                            'key': 'variation-b'
+                        }
+                    ],
+                    'forcedVariations': {},
+                    'id': '10390977673'
+                }
+            ],
+            'groups': [],
+            'integrations': [
+                {
+                    'key': 'odp',
+                    'host': 'https://api.zaius.com',
+                    'publicKey': 'W4WzcEs-ABgXorzY7h1LCQ'
+                }
+            ],
+            'typedAudiences': [
+                {
+                    'id': '13389142234',
+                    'conditions': [
+                        'and',
+                        [
+                            'or',
+                            [
+                                'or',
+                                {
+                                    'value': 'odp-segment-1',
+                                    'type': 'third_party_dimension',
+                                    'name': 'odp.audiences',
+                                    'match': 'qualified'
+                                }
+                            ]
+                        ]
+                    ],
+                    'name': 'odp-segment-1'
+                },
+                {
+                    'id': '13389130056',
+                    'conditions': [
+                        'and',
+                        [
+                            'or',
+                            [
+                                'or',
+                                {
+                                    'value': 'odp-segment-2',
+                                    'type': 'third_party_dimension',
+                                    'name': 'odp.audiences',
+                                    'match': 'qualified'
+                                },
+                                {
+                                    'value': 'us',
+                                    'type': 'custom_attribute',
+                                    'name': 'country',
+                                    'match': 'exact'
+                                }
+                            ],
+                            [
+                                'or',
+                                {
+                                    'value': 'odp-segment-3',
+                                    'type': 'third_party_dimension',
+                                    'name': 'odp.audiences',
+                                    'match': 'qualified'
+                                }
+                            ]
+                        ]
+                    ],
+                    'name': 'odp-segment-2'
+                }
+            ],
+            'audiences': [
+                {
+                    'id': '13389141123',
+                    'name': 'adult',
+                    'conditions': '["and", ["or", ["or", '
+                                  '{"match": "gt", "name": "age", "type": "custom_attribute", "value": 20}]]]'
+                }
+            ],
+            'attributes': [
+                {
+                    'id': '10401066117',
+                    'key': 'gender'
+                },
+                {
+                    'id': '10401066170',
+                    'key': 'testvar'
+                }
+            ],
+            'accountId': '10367498574',
+            'events': [
+                {
+                    "experimentIds": ["10420810910"],
+                    "id": "10404198134",
+                    "key": "event1"
+                }
+            ],
+            'revision': '101',
+            'sdkKey': 'segments-test'
+        }

        config = getattr(self, config_dict)
diff --git a/tests/helpers_tests/test_audience.py b/tests/helpers_tests/test_audience.py
index 719705d6..bab80380 100644
--- a/tests/helpers_tests/test_audience.py
+++ b/tests/helpers_tests/test_audience.py
@@ -12,9 +12,10 @@
# limitations under the License.

import json
-import mock
+from unittest import mock

from optimizely import optimizely
+from optimizely.entities import Audience
from optimizely.helpers import audience
from optimizely.helpers import enums
from tests import base
@@ -24,12 +25,11 @@ class AudienceTest(base.BaseTest):
    def setUp(self):
        base.BaseTest.setUp(self)
        self.mock_client_logger = mock.MagicMock()
+        self.user_context = self.optimizely.create_user_context('any-user')

    def test_does_user_meet_audience_conditions__no_audience(self):
        """ Test that does_user_meet_audience_conditions returns True
        when experiment is using no audience.
""" - user_attributes = {} - # Both Audience Ids and Conditions are Empty experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = [] @@ -39,7 +39,7 @@ def test_does_user_meet_audience_conditions__no_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -55,7 +55,7 @@ def test_does_user_meet_audience_conditions__no_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -71,7 +71,7 @@ def test_does_user_meet_audience_conditions__no_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -84,7 +84,7 @@ def test_does_user_meet_audience_conditions__with_audience(self): Test that does_user_meet_audience_conditions uses audienceIds when audienceConditions is None. """ - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = ['11154'] @@ -101,7 +101,7 @@ def test_does_user_meet_audience_conditions__with_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) @@ -116,7 +116,7 @@ def test_does_user_meet_audience_conditions__with_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) @@ -124,41 +124,23 @@ def test_does_user_meet_audience_conditions__with_audience(self): def test_does_user_meet_audience_conditions__no_attributes(self): """ Test that does_user_meet_audience_conditions evaluates audience when attributes are empty. - Test that does_user_meet_audience_conditions defaults attributes to empty dict when attributes is None. 
""" experiment = self.project_config.get_experiment_from_key('test_experiment') - # attributes set to empty dict - with mock.patch('optimizely.helpers.condition.CustomAttributeConditionEvaluator') as custom_attr_eval: - audience.does_user_meet_audience_conditions( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - 'test_experiment', - {}, - self.mock_client_logger - ) - - self.assertEqual({}, custom_attr_eval.call_args[0][1]) - - # attributes set to None - with mock.patch('optimizely.helpers.condition.CustomAttributeConditionEvaluator') as custom_attr_eval: - audience.does_user_meet_audience_conditions( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - 'test_experiment', - None, - self.mock_client_logger - ) - - self.assertEqual({}, custom_attr_eval.call_args[0][1]) + audience.does_user_meet_audience_conditions( + self.project_config, + experiment.get_audience_conditions_or_ids(), + enums.ExperimentAudienceEvaluationLogs, + 'test_experiment', + self.user_context, + self.mock_client_logger + ) def test_does_user_meet_audience_conditions__returns_true__when_condition_tree_evaluator_returns_true(self): """ Test that does_user_meet_audience_conditions returns True when call to condition_tree_evaluator returns True. """ - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') with mock.patch('optimizely.helpers.condition_tree_evaluator.evaluate', return_value=True): user_meets_audience_conditions, _ = audience.does_user_meet_audience_conditions( @@ -166,7 +148,7 @@ def test_does_user_meet_audience_conditions__returns_true__when_condition_tree_e experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -177,7 +159,7 @@ def test_does_user_meet_audience_conditions_returns_false_when_condition_tree_ev """ Test that does_user_meet_audience_conditions returns False when call to condition_tree_evaluator returns None or False. 
""" - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') with mock.patch('optimizely.helpers.condition_tree_evaluator.evaluate', return_value=None): user_meets_audience_conditions, _ = audience.does_user_meet_audience_conditions( @@ -185,7 +167,7 @@ def test_does_user_meet_audience_conditions_returns_false_when_condition_tree_ev experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictFalse( @@ -198,7 +180,7 @@ def test_does_user_meet_audience_conditions_returns_false_when_condition_tree_ev experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictFalse( @@ -219,7 +201,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -227,8 +209,8 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): audience_11159 = self.project_config.get_audience('11159') custom_attr_eval.assert_has_calls( [ - mock.call(audience_11154.conditionList, {}, self.mock_client_logger), - mock.call(audience_11159.conditionList, {}, self.mock_client_logger), + mock.call(audience_11154.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_11159.conditionList, self.user_context, self.mock_client_logger), mock.call().evaluate(0), mock.call().evaluate(0), ], @@ -255,7 +237,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'audience_combinations_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -266,10 +248,10 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) custom_attr_eval.assert_has_calls( [ - mock.call(audience_3468206642.conditionList, {}, self.mock_client_logger), - mock.call(audience_3988293898.conditionList, {}, self.mock_client_logger), - mock.call(audience_3988293899.conditionList, {}, self.mock_client_logger), - mock.call(audience_3468206646.conditionList, {}, self.mock_client_logger), + mock.call(audience_3468206642.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_3988293898.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_3988293899.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_3468206646.conditionList, self.user_context, self.mock_client_logger), mock.call().evaluate(0), mock.call().evaluate(0), mock.call().evaluate(0), @@ -292,7 +274,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions_leaf_ experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'audience_combinations_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -300,18 +282,41 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions_leaf_ custom_attr_eval.assert_has_calls( [ - mock.call(audience_3468206645.conditionList, {}, self.mock_client_logger), + mock.call(audience_3468206645.conditionList, self.user_context, self.mock_client_logger), 
mock.call().evaluate(0), mock.call().evaluate(1), ], any_order=True, ) + def test_get_segments(self): + seg1 = ['odp.audiences', 'seg1', 'third_party_dimension', 'qualified'] + seg2 = ['odp.audiences', 'seg2', 'third_party_dimension', 'qualified'] + seg3 = ['odp.audiences', 'seg3', 'third_party_dimension', 'qualified'] + other = ['other', 'a', 'custom_attribute', 'eq'] + + def make_audience(conditions): + return Audience('12345', 'group-a', '', conditionList=conditions) + + audience = make_audience([seg1]) + self.assertEqual(['seg1'], audience.get_segments()) + + audience = make_audience([seg1, seg2, other]) + self.assertEqual(['seg1', 'seg2'], sorted(audience.get_segments())) + + audience = make_audience([seg1, other, seg2]) + self.assertEqual(['seg1', 'seg2'], sorted(audience.get_segments())) + + audience = make_audience([seg1, other, seg2, seg1, seg2, seg3]) + self.assertEqual(3, len(audience.get_segments())) + self.assertEqual(['seg1', 'seg2', 'seg3'], sorted(audience.get_segments())) + class ExperimentAudienceLoggingTest(base.BaseTest): def setUp(self): base.BaseTest.setUp(self) self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_does_user_meet_audience_conditions__with_no_audience(self): experiment = self.project_config.get_experiment_from_key('test_experiment') @@ -335,7 +340,7 @@ def test_does_user_meet_audience_conditions__with_no_audience(self): ) def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = ['11154', '11159'] experiment.audienceConditions = None @@ -350,7 +355,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) @@ -361,11 +366,11 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): [ mock.call.debug('Evaluating audiences for experiment "test_experiment": ["11154", "11159"].'), mock.call.debug( - 'Starting to evaluate audience "11154" with conditions: ' + audience_11154.conditions + '.' + f'Starting to evaluate audience "11154" with conditions: {audience_11154.conditions}.' ), mock.call.debug('Audience "11154" evaluated to UNKNOWN.'), mock.call.debug( - 'Starting to evaluate audience "11159" with conditions: ' + audience_11159.conditions + '.' + f'Starting to evaluate audience "11159" with conditions: {audience_11159.conditions}.' ), mock.call.debug('Audience "11159" evaluated to UNKNOWN.'), mock.call.info('Audiences for experiment "test_experiment" collectively evaluated to FALSE.'), @@ -393,7 +398,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'audience_combinations_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -409,17 +414,17 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) ), mock.call.debug( 'Starting to evaluate audience "3468206642" with ' - 'conditions: ' + audience_3468206642.conditions + '.' + f'conditions: {audience_3468206642.conditions}.' 
), mock.call.debug('Audience "3468206642" evaluated to FALSE.'), mock.call.debug( 'Starting to evaluate audience "3988293898" with ' - 'conditions: ' + audience_3988293898.conditions + '.' + f'conditions: {audience_3988293898.conditions}.' ), mock.call.debug('Audience "3988293898" evaluated to UNKNOWN.'), mock.call.debug( 'Starting to evaluate audience "3988293899" with ' - 'conditions: ' + audience_3988293899.conditions + '.' + f'conditions: {audience_3988293899.conditions}.' ), mock.call.debug('Audience "3988293899" evaluated to TRUE.'), mock.call.info( @@ -433,6 +438,7 @@ class RolloutRuleAudienceLoggingTest(base.BaseTest): def setUp(self): base.BaseTest.setUp(self) self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_does_user_meet_audience_conditions__with_no_audience(self): # Using experiment as rule for testing log messages @@ -458,7 +464,7 @@ def test_does_user_meet_audience_conditions__with_no_audience(self): def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): # Using experiment as rule for testing log messages - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = ['11154', '11159'] experiment.audienceConditions = None @@ -473,7 +479,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): experiment.get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, 'test_rule', - user_attributes, + self.user_context, self.mock_client_logger ) @@ -484,11 +490,11 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): [ mock.call.debug('Evaluating audiences for rule test_rule: ["11154", "11159"].'), mock.call.debug( - 'Starting to evaluate audience "11154" with conditions: ' + audience_11154.conditions + '.' + f'Starting to evaluate audience "11154" with conditions: {audience_11154.conditions}.' ), mock.call.debug('Audience "11154" evaluated to UNKNOWN.'), mock.call.debug( - 'Starting to evaluate audience "11159" with conditions: ' + audience_11159.conditions + '.' + f'Starting to evaluate audience "11159" with conditions: {audience_11159.conditions}.' ), mock.call.debug('Audience "11159" evaluated to UNKNOWN.'), mock.call.info('Audiences for rule test_rule collectively evaluated to FALSE.'), @@ -517,7 +523,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) experiment.get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, 'test_rule', - {}, + self.user_context, self.mock_client_logger ) @@ -533,17 +539,17 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) ), mock.call.debug( 'Starting to evaluate audience "3468206642" with ' - 'conditions: ' + audience_3468206642.conditions + '.' + f'conditions: {audience_3468206642.conditions}.' ), mock.call.debug('Audience "3468206642" evaluated to FALSE.'), mock.call.debug( 'Starting to evaluate audience "3988293898" with ' - 'conditions: ' + audience_3988293898.conditions + '.' + f'conditions: {audience_3988293898.conditions}.' ), mock.call.debug('Audience "3988293898" evaluated to UNKNOWN.'), mock.call.debug( 'Starting to evaluate audience "3988293899" with ' - 'conditions: ' + audience_3988293899.conditions + '.' + f'conditions: {audience_3988293899.conditions}.' 
), mock.call.debug('Audience "3988293899" evaluated to TRUE.'), mock.call.info( diff --git a/tests/helpers_tests/test_condition.py b/tests/helpers_tests/test_condition.py index 78dfe38c..9d7ae52f 100644 --- a/tests/helpers_tests/test_condition.py +++ b/tests/helpers_tests/test_condition.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock from optimizely.helpers import condition as condition_helper @@ -37,6 +37,7 @@ lt_float_condition_list = [['meters_travelled', 48.2, 'custom_attribute', 'lt']] le_int_condition_list = [['meters_travelled', 48, 'custom_attribute', 'le']] le_float_condition_list = [['meters_travelled', 48.2, 'custom_attribute', 'le']] +qualified_condition_list = [['odp.audiences', 'odp-segment-2', 'third_party_dimension', 'qualified']] class CustomAttributeConditionEvaluatorTest(base.BaseTest): @@ -49,23 +50,26 @@ def setUp(self): doubleCondition, ] self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_evaluate__returns_true__when_attributes_pass_audience_condition(self): + self.user_context._user_attributes = {'browser_type': 'safari'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - self.condition_list, {'browser_type': 'safari'}, self.mock_client_logger + self.condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_evaluate__returns_false__when_attributes_fail_audience_condition(self): + self.user_context._user_attributes = {'browser_type': 'chrome'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - self.condition_list, {'browser_type': 'chrome'}, self.mock_client_logger + self.condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_evaluate__evaluates__different_typed_attributes(self): - userAttributes = { + self.user_context._user_attributes = { 'browser_type': 'safari', 'is_firefox': True, 'num_users': 10, @@ -73,7 +77,7 @@ def test_evaluate__evaluates__different_typed_attributes(self): } evaluator = condition_helper.CustomAttributeConditionEvaluator( - self.condition_list, userAttributes, self.mock_client_logger + self.condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) @@ -84,9 +88,9 @@ def test_evaluate__evaluates__different_typed_attributes(self): def test_evaluate__returns_null__when_condition_has_an_invalid_match_property(self): condition_list = [['weird_condition', 'hi', 'custom_attribute', 'weird_match']] - + self.user_context._user_attributes = {'weird_condition': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, {'weird_condition': 'hi'}, self.mock_client_logger + condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -94,9 +98,9 @@ def test_evaluate__returns_null__when_condition_has_an_invalid_match_property(se def test_evaluate__assumes_exact__when_condition_match_property_is_none(self): condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', None]] - + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, {'favorite_constellation': 'Lacerta'}, self.mock_client_logger, + condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) @@ -104,9 +108,9 @@ def 
test_evaluate__assumes_exact__when_condition_match_property_is_none(self): def test_evaluate__returns_null__when_condition_has_an_invalid_type_property(self): condition_list = [['weird_condition', 'hi', 'weird_type', 'exact']] - + self.user_context._user_attributes = {'weird_condition': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, {'weird_condition': 'hi'}, self.mock_client_logger + condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -115,120 +119,132 @@ def test_semver_eq__returns_true(self): semver_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_eq']] user_versions = ['2.0.0', '2.0'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_eq__returns_false(self): semver_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_eq']] user_versions = ['2.9', '1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_le__returns_true(self): semver_less_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_le']] user_versions = ['2.0.0', '1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_le__returns_false(self): semver_less_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_le']] user_versions = ['2.5.1'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_ge__returns_true(self): semver_greater_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_ge']] user_versions = ['2.0.0', '2.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_ge__returns_false(self): semver_greater_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_ge']] user_versions = ['1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_lt__returns_true(self): semver_less_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_lt']] user_versions = ['1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_lt__returns_false(self): semver_less_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_lt']] user_versions = ['2.0.0', '2.5.1'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_gt__returns_true(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = ['2.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_gt__returns_false(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = ['2.0.0', '1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_evaluate__returns_None__when_user_version_is_not_string(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = [True, 37] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) def test_evaluate__returns_None__when_user_version_with_invalid_semantic(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = ['3.7.2.2', '+'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) def test_compare_user_version_with_target_version_equal_to_0(self): @@ -242,14 +258,12 @@ def test_compare_user_version_with_target_version_equal_to_0(self): ('2.9.1', '2.9.1+beta') ] for target_version, user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) - custom_err_msg = "Got {} in result. Failed for user version:" \ - " {} and target version: {}".format(result, - user_version, - target_version - ) + custom_err_msg = f"Got {result} in result. Failed for user version:" \ + f" {user_version} and target version: {target_version}" self.assertEqual(result, 0, custom_err_msg) def test_compare_user_version_with_target_version_greater_than_0(self): @@ -267,13 +281,12 @@ def test_compare_user_version_with_target_version_greater_than_0(self): ('2.2.3+beta2-beta1', '2.2.3+beta3-beta2') ] for target_version, user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) - custom_err_msg = "Got {} in result. Failed for user version:" \ - " {} and target version: {}".format(result, - user_version, - target_version) + custom_err_msg = f"Got {result} in result. Failed for user version:" \ + f" {user_version} and target version: {target_version}" self.assertEqual(result, 1, custom_err_msg) def test_compare_user_version_with_target_version_less_than_0(self): @@ -291,13 +304,12 @@ def test_compare_user_version_with_target_version_less_than_0(self): ('2.1.3-beta1+beta3', '2.1.3-beta1+beta2') ] for target_version, user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) - custom_err_msg = "Got {} in result. Failed for user version: {} " \ - "and target version: {}".format(result, - user_version, - target_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version:" \ + f" {user_version} and target version: {target_version}" self.assertEqual(result, -1, custom_err_msg) def test_compare_invalid_user_version_with(self): @@ -307,78 +319,81 @@ def test_compare_invalid_user_version_with(self): target_version = '2.1.0' for user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(user_version, target_version) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) def test_exists__returns_false__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exists__returns_false__when_user_provided_value_is_null(self): - + self.user_context._user_attributes = {'input_value': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': None}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exists__returns_true__when_user_provided_value_is_string(self): + self.user_context._user_attributes = {'input_value': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': 'hi'}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exists__returns_true__when_user_provided_value_is_number(self): - + self.user_context._user_attributes = {'input_value': 10} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': 10}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'input_value': 10.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': 10.0}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exists__returns_true__when_user_provided_value_is_boolean(self): - + self.user_context._user_attributes = {'input_value': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': False}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_string__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {'favorite_constellation': 'Lacerta'}, self.mock_client_logger, + exact_string_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def 
test_exact_string__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'favorite_constellation': 'The Big Dipper'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {'favorite_constellation': 'The Big Dipper'}, self.mock_client_logger, + exact_string_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_string__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'favorite_constellation': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {'favorite_constellation': False}, self.mock_client_logger, + exact_string_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -386,79 +401,83 @@ def test_exact_string__returns_null__when_user_provided_value_is_different_type_ def test_exact_string__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {}, self.mock_client_logger + exact_string_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_exact_int__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 9000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 9000}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': 9000.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 9000.0}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_float__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 9000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 9000}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': 9000.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 9000.0}, self.mock_client_logger, + exact_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_int__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 8000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 8000}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_float__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 8000.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 
8000.0}, self.mock_client_logger, + exact_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_int__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 'hi'}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': True}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_exact_float__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 'hi'}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': True}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -466,7 +485,7 @@ def test_exact_float__returns_null__when_user_provided_value_is_different_type_f def test_exact_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -474,7 +493,7 @@ def test_exact_int__returns_null__when_no_user_provided_value(self): def test_exact_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -482,9 +501,9 @@ def test_exact_float__returns_null__when_no_user_provided_value(self): def test_exact__given_number_values__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'lasers_count': 9000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 9000}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) # assert that isFiniteNumber only needs to reject condition value to stop evaluation. 
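The recurring change across these hunks: CustomAttributeConditionEvaluator is now constructed with an OptimizelyUserContext instead of a raw attributes dict, so each test seeds attributes (and, for the new ODP tests further below, qualified segments) on the context before evaluating. A minimal sketch of the new pattern, not part of the diff and hedged accordingly: optimizely_client stands in for the client instance that base.BaseTest provides, and the qualified condition list is a hypothetical stand-in whose segment value mirrors what the passing test sets.

from unittest import mock

from optimizely.helpers import condition as condition_helper

mock_client_logger = mock.MagicMock()
user_context = optimizely_client.create_user_context('any-user')

# Attribute-based conditions read from the context; the tests assign
# _user_attributes directly rather than rebuilding the context each time.
exists_condition_list = [['input_value', None, 'custom_attribute', 'exists']]
user_context._user_attributes = {'input_value': 'hi'}
evaluator = condition_helper.CustomAttributeConditionEvaluator(
    exists_condition_list, user_context, mock_client_logger
)
assert evaluator.evaluate(0) is True

# 'qualified' conditions ignore attributes and match against the context's
# qualified segments instead (condition list assumed for illustration).
qualified_condition_list = [['odp.audiences', 'odp-segment-2', 'third_party_dimension', 'qualified']]
user_context.set_qualified_segments(['odp-segment-2'])
evaluator = condition_helper.CustomAttributeConditionEvaluator(
    qualified_condition_list, user_context, mock_client_logger
)
assert evaluator.evaluate(0) is True

The same construction repeats verbatim through the remaining hunks of this file; only the condition list and the seeded attribute or segment value vary.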
@@ -507,57 +526,56 @@ def test_exact__given_number_values__calls_is_finite_number(self): mock_is_finite.assert_has_calls([mock.call(9000), mock.call(9000)]) def test_exact_bool__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'did_register_user': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {'did_register_user': False}, self.mock_client_logger, + exact_bool_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_bool__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'did_register_user': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {'did_register_user': True}, self.mock_client_logger, + exact_bool_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_bool__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'did_register_user': 0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {'did_register_user': 0}, self.mock_client_logger + exact_bool_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_exact_bool__returns_null__when_no_user_provided_value(self): - evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {}, self.mock_client_logger + exact_bool_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_substring__returns_true__when_condition_value_is_substring_of_user_value(self, ): - + self.user_context._user_attributes = {'headline_text': 'Limited time, buy now!'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {'headline_text': 'Limited time, buy now!'}, self.mock_client_logger, + substring_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def test_substring__returns_false__when_condition_value_is_not_a_substring_of_user_value(self, ): - + self.user_context._user_attributes = {'headline_text': 'Breaking news!'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {'headline_text': 'Breaking news!'}, self.mock_client_logger, + substring_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_substring__returns_null__when_user_provided_value_not_a_string(self): - + self.user_context._user_attributes = {'headline_text': 10} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {'headline_text': 10}, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -565,91 +583,96 @@ def test_substring__returns_null__when_user_provided_value_not_a_string(self): def test_substring__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {}, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def 
test_greater_than_int__returns_true__when_user_value_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_greater_than_float__returns_true__when_user_value_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.3} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 48.3}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 49}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_greater_than_int__returns_false__when_user_value_not_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) - + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_float__returns_false__when_user_value_not_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + gt_int_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, 
{'meters_travelled': False}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_greater_than_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + gt_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': False}, self.mock_client_logger, + gt_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -657,7 +680,7 @@ def test_greater_than_float__returns_null__when_user_value_is_not_a_number(self) def test_greater_than_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -665,105 +688,113 @@ def test_greater_than_int__returns_null__when_no_user_provided_value(self): def test_greater_than_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_greater_than_or_equal_int__returns_true__when_user_value_greater_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 48}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_greater_than_or_equal_float__returns_true__when_user_value_greater_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.3} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48.3}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) 
self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 49}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_greater_than_or_equal_int__returns_false__when_user_value_not_greater_than_or_equal_condition_value( self): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_or_equal_float__returns_false__when_user_value_not_greater_than_or_equal_condition_value( self): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_or_equal_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + ge_int_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': False}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_greater_than_or_equal_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + ge_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': False}, self.mock_client_logger, + ge_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -771,7 +802,7 @@ def test_greater_than_or_equal_float__returns_null__when_user_value_is_not_a_num def 
test_greater_than_or_equal_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -779,79 +810,84 @@ def test_greater_than_or_equal_int__returns_null__when_no_user_provided_value(se def test_greater_than_or_equal_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_int__returns_true__when_user_value_less_than_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_float__returns_true__when_user_value_less_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_int__returns_false__when_user_value_not_less_than_condition_value(self, ): + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_less_than_float__returns_false__when_user_value_not_less_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': 49}, 
self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_less_than_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': False}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': False}, self.mock_client_logger, + lt_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -859,7 +895,7 @@ def test_less_than_float__returns_null__when_user_value_is_not_a_number(self): def test_less_than_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -867,91 +903,97 @@ def test_less_than_int__returns_null__when_no_user_provided_value(self): def test_less_than_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_or_equal_int__returns_true__when_user_value_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 48}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_or_equal_float__returns_true__when_user_value_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 41} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) 
self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_or_equal_int__returns_false__when_user_value_not_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_less_than_or_equal_float__returns_false__when_user_value_not_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.3} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48.3}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 49}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_less_than_or_equal_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': False}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_or_equal_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': False}, self.mock_client_logger, + le_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -959,7 +1001,7 @@ def test_less_than_or_equal_float__returns_null__when_user_value_is_not_a_number def test_less_than_or_equal_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -967,7 +1009,7 @@ def test_less_than_or_equal_int__returns_null__when_no_user_provided_value(self) def test_less_than_or_equal_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -975,9 
+1017,9 @@ def test_less_than_or_equal_float__returns_null__when_no_user_provided_value(sel def test_greater_than__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1019,9 +1061,9 @@ def is_finite_number__accepting_both_values(value): def test_less_than__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1063,9 +1105,9 @@ def is_finite_number__accepting_both_values(value): def test_greater_than_or_equal__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1107,9 +1149,9 @@ def is_finite_number__accepting_both_values(value): def test_less_than_or_equal__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1155,13 +1197,55 @@ def test_invalid_semver__returns_None__when_semver_is_invalid(self): "+build-prerelease", "2..0"] for user_version in invalid_test_cases: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_or_equal_2_0_1_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_or_equal_2_0_1_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) + def test_qualified__returns_true__when_user_is_qualified(self, ): + self.user_context.set_qualified_segments(['odp-segment-2']) + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger, + ) + + self.assertStrictTrue(evaluator.evaluate(0)) + + def test_qualified__returns_false__when_user_is_not_qualified(self, ): + self.user_context.set_qualified_segments(['odp-segment-1']) + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger, + ) + + self.assertStrictFalse(evaluator.evaluate(0)) + + def test_qualified__returns_false__with_no_qualified_segments(self): + + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + self.assertStrictFalse(evaluator.evaluate(0)) + + def test_qualified__returns_null__when_condition_value_is_not_string(self): + qualified_condition_list = [['odp.audiences', 5, 'third_party_dimension', 'qualified']] + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + self.assertIsNone(evaluator.evaluate(0)) + + def test_qualified__returns_true__when_name_is_different(self): + self.user_context.set_qualified_segments(['odp-segment-2']) + qualified_condition_list = [['other-name', 'odp-segment-2', 'third_party_dimension', 'qualified']] + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + self.assertStrictTrue(evaluator.evaluate(0)) + class ConditionDecoderTests(base.BaseTest): def test_loads(self): @@ -1190,14 +1274,14 @@ class CustomAttributeConditionEvaluatorLogging(base.BaseTest): def setUp(self): base.BaseTest.setUp(self) self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_evaluate__match_type__invalid(self): log_level = 'warning' condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'regex']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, user_attributes, self.mock_client_logger + condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1211,19 +1295,16 @@ def test_evaluate__match_type__invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" uses an unknown match ' - 'type. You may need to upgrade to a newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" uses an unknown match ' + 'type. You may need to upgrade to a newer release of the Optimizely SDK.' 
) def test_evaluate__condition_type__invalid(self): log_level = 'warning' condition_list = [['favorite_constellation', 'Lacerta', 'sdk_version', 'exact']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, user_attributes, self.mock_client_logger + condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1237,19 +1318,16 @@ def test_evaluate__condition_type__invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" uses an unknown condition type. ' - 'You may need to upgrade to a newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" uses an unknown condition type. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_exact__user_value__missing(self): log_level = 'debug' exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1263,19 +1341,16 @@ def test_exact__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because ' - 'no value was passed for user attribute "favorite_constellation".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN because ' + 'no value was passed for user attribute "favorite_constellation".' ) def test_greater_than__user_value__missing(self): log_level = 'debug' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1289,19 +1364,16 @@ def test_greater_than__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because no value was passed for user ' - 'attribute "meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN ' + 'because no value was passed for user attribute "meters_travelled".' ) def test_less_than__user_value__missing(self): log_level = 'debug' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1315,19 +1387,16 @@ def test_less_than__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because no value was passed for user attribute ' - '"meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN ' + 'because no value was passed for user attribute "meters_travelled".' 
) def test_substring__user_value__missing(self): log_level = 'debug' substring_condition_list = [['headline_text', 'buy now', 'custom_attribute', 'substring']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1341,18 +1410,15 @@ def test_substring__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because no value was passed for ' - 'user attribute "headline_text".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN ' + 'because no value was passed for user attribute "headline_text".' ) def test_exists__user_value__missing(self): exists_condition_list = [['input_value', None, 'custom_attribute', 'exists']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, user_attributes, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) @@ -1364,10 +1430,10 @@ def test_exists__user_value__missing(self): def test_exact__user_value__None(self): log_level = 'debug' exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': None} + self.user_context._user_attributes = {'favorite_constellation': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1381,19 +1447,17 @@ def test_exact__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed for user attribute ' - '"favorite_constellation".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "favorite_constellation".' ) def test_greater_than__user_value__None(self): log_level = 'debug' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': None} + self.user_context._user_attributes = {'meters_travelled': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1407,19 +1471,17 @@ def test_greater_than__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed for ' - 'user attribute "meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "meters_travelled".' 
) def test_less_than__user_value__None(self): log_level = 'debug' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': None} + self.user_context._user_attributes = {'meters_travelled': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1433,19 +1495,17 @@ def test_less_than__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed ' - 'for user attribute "meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "meters_travelled".' ) def test_substring__user_value__None(self): log_level = 'debug' substring_condition_list = [['headline_text', '12', 'custom_attribute', 'substring']] - user_attributes = {'headline_text': None} + self.user_context._user_attributes = {'headline_text': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1459,18 +1519,16 @@ def test_substring__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was ' - 'passed for user attribute "headline_text".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "headline_text".' ) def test_exists__user_value__None(self): exists_condition_list = [['input_value', None, 'custom_attribute', 'exists']] - user_attributes = {'input_value': None} + self.user_context._user_attributes = {'input_value': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, user_attributes, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) @@ -1482,10 +1540,10 @@ def test_exists__user_value__None(self): def test_exact__user_value__unexpected_type(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': {}} + self.user_context._user_attributes = {'favorite_constellation': {}} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1499,19 +1557,17 @@ def test_exact__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed for ' - 'user attribute "favorite_constellation".' - ).format(json.dumps(expected_condition_log), type({})) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{dict}" was passed for user attribute "favorite_constellation".' 
) def test_greater_than__user_value__unexpected_type(self): log_level = 'warning' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': '48'} + self.user_context._user_attributes = {'meters_travelled': '48'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1525,20 +1581,17 @@ def test_greater_than__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}"' - ' evaluated to UNKNOWN because a value of type "{}" was passed for user attribute ' - '"meters_travelled".' - ).format(json.dumps(expected_condition_log), type('48')) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{str}" was passed for user attribute "meters_travelled".' ) def test_less_than__user_value__unexpected_type(self): log_level = 'warning' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': True} + self.user_context._user_attributes = {'meters_travelled': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1552,20 +1605,17 @@ def test_less_than__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}"' - ' evaluated to UNKNOWN because a value of type "{}" was passed for user attribute ' - '"meters_travelled".' - ).format(json.dumps(expected_condition_log), type(True)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{bool}" was passed for user attribute "meters_travelled".' ) def test_substring__user_value__unexpected_type(self): log_level = 'warning' substring_condition_list = [['headline_text', '12', 'custom_attribute', 'substring']] - user_attributes = {'headline_text': 1234} + self.user_context._user_attributes = {'headline_text': 1234} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1579,19 +1629,17 @@ def test_substring__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed for ' - 'user attribute "headline_text".' - ).format(json.dumps(expected_condition_log), type(1234)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{int}" was passed for user attribute "headline_text".' 
) def test_exact__user_value__infinite(self): log_level = 'warning' exact_condition_list = [['meters_travelled', 48, 'custom_attribute', 'exact']] - user_attributes = {'meters_travelled': float("inf")} + self.user_context._user_attributes = {'meters_travelled': float("inf")} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -1605,19 +1653,17 @@ def test_exact__user_value__infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because the number value for ' - 'user attribute "meters_travelled" is not in the range [-2^53, +2^53].' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + 'the number value for user attribute "meters_travelled" is not in the range [-2^53, +2^53].' ) def test_greater_than__user_value__infinite(self): log_level = 'warning' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': float("nan")} + self.user_context._user_attributes = {'meters_travelled': float("nan")} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1631,20 +1677,18 @@ def test_greater_than__user_value__infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" ' - 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not' - ' in the range [-2^53, +2^53].' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" ' + 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not' + ' in the range [-2^53, +2^53].' ) def test_less_than__user_value__infinite(self): log_level = 'warning' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': float('-inf')} + self.user_context._user_attributes = {'meters_travelled': float('-inf')} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1658,20 +1702,18 @@ def test_less_than__user_value__infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" ' - 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not in ' - 'the range [-2^53, +2^53].' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" ' + 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not in ' + 'the range [-2^53, +2^53].' 
) def test_exact__user_value_type_mismatch(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': 5} + self.user_context._user_attributes = {'favorite_constellation': 5} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1685,19 +1727,17 @@ def test_exact__user_value_type_mismatch(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed for ' - 'user attribute "favorite_constellation".' - ).format(json.dumps(expected_condition_log), type(5)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{int}" was passed for user attribute "favorite_constellation".' ) def test_exact__condition_value_invalid(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', {}, 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': 'Lacerta'} + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1711,19 +1751,17 @@ def test_exact__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_exact__condition_value_infinite(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', float('inf'), 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': 'Lacerta'} + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1737,19 +1775,17 @@ def test_exact__condition_value_infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' 
) def test_greater_than__condition_value_invalid(self): log_level = 'warning' gt_condition_list = [['meters_travelled', True, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': 48} + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1763,19 +1799,17 @@ def test_greater_than__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_less_than__condition_value_invalid(self): log_level = 'warning' gt_condition_list = [['meters_travelled', float('nan'), 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': 48} + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1789,19 +1823,17 @@ def test_less_than__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_substring__condition_value_invalid(self): log_level = 'warning' substring_condition_list = [['headline_text', False, 'custom_attribute', 'substring']] - user_attributes = {'headline_text': 'breaking news'} + self.user_context._user_attributes = {'headline_text': 'breaking news'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1815,8 +1847,30 @@ def test_substring__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' 
+ ) + + def test_qualified__condition_value_invalid(self): + log_level = 'warning' + qualified_condition_list = [['odp.audiences', False, 'third_party_dimension', 'qualified']] + self.user_context.qualified_segments = ['segment1'] + + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + expected_condition_log = { + "name": 'odp.audiences', + "value": False, + "type": 'third_party_dimension', + "match": 'qualified', + } + + self.assertIsNone(evaluator.evaluate(0)) + + mock_log = getattr(self.mock_client_logger, log_level) + mock_log.assert_called_once_with( + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) diff --git a/tests/helpers_tests/test_condition_tree_evaluator.py b/tests/helpers_tests/test_condition_tree_evaluator.py index 63405b90..233a895e 100644 --- a/tests/helpers_tests/test_condition_tree_evaluator.py +++ b/tests/helpers_tests/test_condition_tree_evaluator.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from optimizely.helpers.condition_tree_evaluator import evaluate from tests import base diff --git a/tests/helpers_tests/test_event_tag_utils.py b/tests/helpers_tests/test_event_tag_utils.py index 9b081629..011e11f5 100644 --- a/tests/helpers_tests/test_event_tag_utils.py +++ b/tests/helpers_tests/test_event_tag_utils.py @@ -115,39 +115,39 @@ def test_get_numeric_metric__value_tag(self): self.assertIsNone(event_tag_utils.get_numeric_value({'value': None}, self.logger)) numeric_value_nan = event_tag_utils.get_numeric_value({'value': float('nan')}, self.logger) - self.assertIsNone(numeric_value_nan, 'nan numeric value is {}'.format(numeric_value_nan)) + self.assertIsNone(numeric_value_nan, f'nan numeric value is {numeric_value_nan}') numeric_value_array = event_tag_utils.get_numeric_value({'value': []}, self.logger) - self.assertIsNone(numeric_value_array, 'Array numeric value is {}'.format(numeric_value_array)) + self.assertIsNone(numeric_value_array, f'Array numeric value is {numeric_value_array}') numeric_value_dict = event_tag_utils.get_numeric_value({'value': []}, self.logger) - self.assertIsNone(numeric_value_dict, 'Dict numeric value is {}'.format(numeric_value_dict)) + self.assertIsNone(numeric_value_dict, f'Dict numeric value is {numeric_value_dict}') numeric_value_none = event_tag_utils.get_numeric_value({'value': None}, self.logger) - self.assertIsNone(numeric_value_none, 'None numeric value is {}'.format(numeric_value_none)) + self.assertIsNone(numeric_value_none, f'None numeric value is {numeric_value_none}') numeric_value_invalid_literal = event_tag_utils.get_numeric_value( {'value': '1,234'}, self.logger ) self.assertIsNone( - numeric_value_invalid_literal, 'Invalid string literal value is {}'.format(numeric_value_invalid_literal), + numeric_value_invalid_literal, f'Invalid string literal value is {numeric_value_invalid_literal}', ) numeric_value_overflow = event_tag_utils.get_numeric_value( {'value': sys.float_info.max * 10}, self.logger ) self.assertIsNone( - numeric_value_overflow, 'Max numeric value is {}'.format(numeric_value_overflow), + numeric_value_overflow, f'Max numeric value is {numeric_value_overflow}', ) numeric_value_inf = event_tag_utils.get_numeric_value({'value': float('inf')}, self.logger) - 
self.assertIsNone(numeric_value_inf, 'Infinity numeric value is {}'.format(numeric_value_inf)) + self.assertIsNone(numeric_value_inf, f'Infinity numeric value is {numeric_value_inf}') numeric_value_neg_inf = event_tag_utils.get_numeric_value( {'value': float('-inf')}, self.logger ) self.assertIsNone( - numeric_value_neg_inf, 'Negative infinity numeric value is {}'.format(numeric_value_neg_inf), + numeric_value_neg_inf, f'Negative infinity numeric value is {numeric_value_neg_inf}', ) self.assertEqual( diff --git a/tests/helpers_tests/test_experiment.py b/tests/helpers_tests/test_experiment.py index 58f9b6d8..ae6a5047 100644 --- a/tests/helpers_tests/test_experiment.py +++ b/tests/helpers_tests/test_experiment.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from tests import base from optimizely import entities diff --git a/tests/helpers_tests/test_validator.py b/tests/helpers_tests/test_validator.py index 2a97a538..6d9e3f20 100644 --- a/tests/helpers_tests/test_validator.py +++ b/tests/helpers_tests/test_validator.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock from optimizely import config_manager from optimizely import error_handler @@ -34,7 +34,7 @@ def test_is_config_manager_valid__returns_true(self): def test_is_config_manager_valid__returns_false(self): """ Test that invalid config_manager returns False for invalid config manager implementation. """ - class CustomConfigManager(object): + class CustomConfigManager: def some_other_method(self): pass @@ -48,7 +48,7 @@ def test_is_event_processor_valid__returns_true(self): def test_is_event_processor_valid__returns_false(self): """ Test that invalid event_processor returns False. """ - class CustomEventProcessor(object): + class CustomEventProcessor: def some_other_method(self): pass @@ -59,6 +59,11 @@ def test_is_datafile_valid__returns_true(self): self.assertTrue(validator.is_datafile_valid(json.dumps(self.config_dict))) + def test_is_datafile_valid__returns_true_with_audience_segments(self): + """ Test that valid datafile with audience segments returns True. """ + + self.assertTrue(validator.is_datafile_valid(json.dumps(self.config_dict_with_audience_segments))) + def test_is_datafile_valid__returns_false(self): """ Test that invalid datafile returns False. """ @@ -72,7 +77,7 @@ def test_is_event_dispatcher_valid__returns_true(self): def test_is_event_dispatcher_valid__returns_false(self): """ Test that invalid event_dispatcher returns False. """ - class CustomEventDispatcher(object): + class CustomEventDispatcher: def some_other_method(self): pass @@ -86,7 +91,7 @@ def test_is_logger_valid__returns_true(self): def test_is_logger_valid__returns_false(self): """ Test that invalid logger returns False. """ - class CustomLogger(object): + class CustomLogger: def some_other_method(self): pass @@ -100,7 +105,7 @@ def test_is_error_handler_valid__returns_true(self): def test_is_error_handler_valid__returns_false(self): """ Test that invalid error_handler returns False. """ - class CustomErrorHandler(object): + class CustomErrorHandler: def some_other_method(self): pass diff --git a/tests/test_bucketing.py b/tests/test_bucketing.py index e71ae8af..36adce75 100644 --- a/tests/test_bucketing.py +++ b/tests/test_bucketing.py @@ -12,7 +12,7 @@ # limitations under the License. 
import json -import mock +from unittest import mock import random from optimizely import bucketer diff --git a/tests/test_cmab_client.py b/tests/test_cmab_client.py new file mode 100644 index 00000000..3aac5fd9 --- /dev/null +++ b/tests/test_cmab_client.py @@ -0,0 +1,247 @@ +# Copyright 2025, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +import json +from unittest.mock import MagicMock, patch, call +from optimizely.cmab.cmab_client import DefaultCmabClient, CmabRetryConfig +from requests.exceptions import RequestException +from optimizely.helpers.enums import Errors +from optimizely.exceptions import CmabFetchError, CmabInvalidResponseError + + +class TestDefaultCmabClient(unittest.TestCase): + def setUp(self): + self.mock_http_client = MagicMock() + self.mock_logger = MagicMock() + self.retry_config = CmabRetryConfig(max_retries=3, initial_backoff=0.01, max_backoff=1, backoff_multiplier=2) + self.client = DefaultCmabClient( + http_client=self.mock_http_client, + logger=self.mock_logger, + retry_config=None + ) + self.rule_id = 'test_rule' + self.user_id = 'user123' + self.attributes = {'attr1': 'value1', 'attr2': 'value2'} + self.cmab_uuid = 'uuid-1234' + self.expected_url = f"https://prediction.cmab.optimizely.com/predict/{self.rule_id}" + self.expected_body = { + "instances": [{ + "visitorId": self.user_id, + "experimentId": self.rule_id, + "attributes": [ + {"id": "attr1", "value": "value1", "type": "custom_attribute"}, + {"id": "attr2", "value": "value2", "type": "custom_attribute"} + ], + "cmabUUID": self.cmab_uuid, + }] + } + self.expected_headers = {'Content-Type': 'application/json'} + + def test_fetch_decision_returns_success_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + 'predictions': [{'variation_id': 'abc123'}] + } + self.mock_http_client.post.return_value = mock_response + result = self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + self.assertEqual(result, 'abc123') + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + + def test_fetch_decision_returns_http_exception_no_retry(self): + self.mock_http_client.post.side_effect = RequestException('Connection error') + + with self.assertRaises(CmabFetchError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.mock_http_client.post.assert_called_once() + self.mock_logger.error.assert_called_with(Errors.CMAB_FETCH_FAILED.format('Connection error')) + self.assertIn('Connection error', str(context.exception)) + + def test_fetch_decision_returns_non_2xx_status_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 500 + self.mock_http_client.post.return_value = mock_response + + with self.assertRaises(CmabFetchError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, 
self.cmab_uuid) + + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + self.mock_logger.error.assert_called_with(Errors.CMAB_FETCH_FAILED.format(str(mock_response.status_code))) + self.assertIn(str(mock_response.status_code), str(context.exception)) + + def test_fetch_decision_returns_invalid_json_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = json.JSONDecodeError("Expecting value", "", 0) + self.mock_http_client.post.return_value = mock_response + + with self.assertRaises(CmabInvalidResponseError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + self.mock_logger.error.assert_called_with(Errors.INVALID_CMAB_FETCH_RESPONSE) + self.assertIn(Errors.INVALID_CMAB_FETCH_RESPONSE, str(context.exception)) + + def test_fetch_decision_returns_invalid_response_structure_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = {'no_predictions': []} + self.mock_http_client.post.return_value = mock_response + + with self.assertRaises(CmabInvalidResponseError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + self.mock_logger.error.assert_called_with(Errors.INVALID_CMAB_FETCH_RESPONSE) + self.assertIn(Errors.INVALID_CMAB_FETCH_RESPONSE, str(context.exception)) + + @patch('time.sleep', return_value=None) + def test_fetch_decision_returns_success_with_retry_on_first_try(self, mock_sleep): + # Create client with retry + client_with_retry = DefaultCmabClient( + http_client=self.mock_http_client, + logger=self.mock_logger, + retry_config=self.retry_config + ) + + # Mock successful response + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + 'predictions': [{'variation_id': 'abc123'}] + } + self.mock_http_client.post.return_value = mock_response + + result = client_with_retry.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + # Verify result and request parameters + self.assertEqual(result, 'abc123') + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + self.assertEqual(self.mock_http_client.post.call_count, 1) + mock_sleep.assert_not_called() + + @patch('time.sleep', return_value=None) + def test_fetch_decision_returns_success_with_retry_on_third_try(self, mock_sleep): + client_with_retry = DefaultCmabClient( + http_client=self.mock_http_client, + logger=self.mock_logger, + retry_config=self.retry_config + ) + + # Create failure and success responses + failure_response = MagicMock() + failure_response.status_code = 500 + + success_response = MagicMock() + success_response.status_code = 200 + success_response.json.return_value = { + 'predictions': [{'variation_id': 'xyz456'}] + } + + # First two calls fail, third succeeds + self.mock_http_client.post.side_effect = [ + failure_response, + failure_response, + success_response + ] + + result = 
client_with_retry.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.assertEqual(result, 'xyz456') + self.assertEqual(self.mock_http_client.post.call_count, 3) + + # Verify all HTTP calls used correct parameters + self.mock_http_client.post.assert_called_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + + # Verify retry logging + self.mock_logger.info.assert_has_calls([ + call("Retrying CMAB request (attempt: 1) after 0.01 seconds..."), + call("Retrying CMAB request (attempt: 2) after 0.02 seconds...") + ]) + + # Verify sleep was called with correct backoff times + mock_sleep.assert_has_calls([ + call(0.01), + call(0.02) + ]) + + @patch('time.sleep', return_value=None) + def test_fetch_decision_exhausts_all_retry_attempts(self, mock_sleep): + client_with_retry = DefaultCmabClient( + http_client=self.mock_http_client, + logger=self.mock_logger, + retry_config=self.retry_config + ) + + # Create failure response + failure_response = MagicMock() + failure_response.status_code = 500 + + # All attempts fail + self.mock_http_client.post.return_value = failure_response + + with self.assertRaises(CmabFetchError): + client_with_retry.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + # Verify all attempts were made (1 initial + 3 retries) + self.assertEqual(self.mock_http_client.post.call_count, 4) + + # Verify retry logging + self.mock_logger.info.assert_has_calls([ + call("Retrying CMAB request (attempt: 1) after 0.01 seconds..."), + call("Retrying CMAB request (attempt: 2) after 0.02 seconds..."), + call("Retrying CMAB request (attempt: 3) after 0.08 seconds...") + ]) + + # Verify sleep was called for each retry + mock_sleep.assert_has_calls([ + call(0.01), + call(0.02), + call(0.08) + ]) + + # Verify final error + self.mock_logger.error.assert_called_with( + Errors.CMAB_FETCH_FAILED.format('Exhausted all retries for CMAB request.') + ) diff --git a/tests/test_cmab_service.py b/tests/test_cmab_service.py new file mode 100644 index 00000000..0b3c593a --- /dev/null +++ b/tests/test_cmab_service.py @@ -0,0 +1,187 @@ +# Copyright 2025, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import unittest +from unittest.mock import MagicMock +from optimizely.cmab.cmab_service import DefaultCmabService +from optimizely.optimizely_user_context import OptimizelyUserContext +from optimizely.decision.optimizely_decide_option import OptimizelyDecideOption +from optimizely.odp.lru_cache import LRUCache +from optimizely.cmab.cmab_client import DefaultCmabClient +from optimizely.project_config import ProjectConfig +from optimizely.entities import Attribute + + +class TestDefaultCmabService(unittest.TestCase): + def setUp(self): + self.mock_cmab_cache = MagicMock(spec=LRUCache) + self.mock_cmab_client = MagicMock(spec=DefaultCmabClient) + self.mock_logger = MagicMock() + + self.cmab_service = DefaultCmabService( + cmab_cache=self.mock_cmab_cache, + cmab_client=self.mock_cmab_client, + logger=self.mock_logger + ) + + self.mock_project_config = MagicMock(spec=ProjectConfig) + self.mock_user_context = MagicMock(spec=OptimizelyUserContext) + self.mock_user_context.user_id = 'user123' + self.mock_user_context.get_user_attributes.return_value = {'age': 25, 'location': 'USA'} + + # Setup mock experiment and attribute mapping + self.mock_project_config.experiment_id_map = { + 'exp1': MagicMock(cmab={'attributeIds': ['66', '77']}) + } + attr1 = Attribute(id="66", key="age") + attr2 = Attribute(id="77", key="location") + self.mock_project_config.attribute_id_map = { + "66": attr1, + "77": attr2 + } + + def test_returns_decision_from_cache_when_valid(self): + expected_key = self.cmab_service._get_cache_key("user123", "exp1") + expected_attributes = {"age": 25, "location": "USA"} + expected_hash = self.cmab_service._hash_attributes(expected_attributes) + + self.mock_cmab_cache.lookup.return_value = { + "attributes_hash": expected_hash, + "variation_id": "varA", + "cmab_uuid": "uuid-123" + } + + decision = self.cmab_service.get_decision( + self.mock_project_config, self.mock_user_context, "exp1", [] + ) + + self.mock_cmab_cache.lookup.assert_called_once_with(expected_key) + self.assertEqual(decision["variation_id"], "varA") + self.assertEqual(decision["cmab_uuid"], "uuid-123") + + def test_ignores_cache_when_option_given(self): + self.mock_cmab_client.fetch_decision.return_value = "varB" + expected_attributes = {"age": 25, "location": "USA"} + + decision = self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.IGNORE_CMAB_CACHE] + ) + + self.assertEqual(decision["variation_id"], "varB") + self.assertIn('cmab_uuid', decision) + self.mock_cmab_client.fetch_decision.assert_called_once_with( + "exp1", + self.mock_user_context.user_id, + expected_attributes, + decision["cmab_uuid"] + ) + + def test_invalidates_user_cache_when_option_given(self): + self.mock_cmab_client.fetch_decision.return_value = "varC" + self.mock_cmab_cache.lookup.return_value = None + self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.INVALIDATE_USER_CMAB_CACHE] + ) + + key = self.cmab_service._get_cache_key("user123", "exp1") + self.mock_cmab_cache.remove.assert_called_with(key) + self.mock_cmab_cache.remove.assert_called_once() + + def test_resets_cache_when_option_given(self): + self.mock_cmab_client.fetch_decision.return_value = "varD" + + decision = self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.RESET_CMAB_CACHE] + ) + + self.mock_cmab_cache.reset.assert_called_once() + self.assertEqual(decision["variation_id"], 
"varD") + self.assertIn('cmab_uuid', decision) + + def test_new_decision_when_hash_changes(self): + self.mock_cmab_cache.lookup.return_value = { + "attributes_hash": "old_hash", + "variation_id": "varA", + "cmab_uuid": "uuid-123" + } + self.mock_cmab_client.fetch_decision.return_value = "varE" + + expected_attribute = {"age": 25, "location": "USA"} + expected_hash = self.cmab_service._hash_attributes(expected_attribute) + expected_key = self.cmab_service._get_cache_key("user123", "exp1") + + decision = self.cmab_service.get_decision(self.mock_project_config, self.mock_user_context, "exp1", []) + self.mock_cmab_cache.remove.assert_called_once_with(expected_key) + self.mock_cmab_cache.save.assert_called_once_with( + expected_key, + { + "cmab_uuid": decision["cmab_uuid"], + "variation_id": decision["variation_id"], + "attributes_hash": expected_hash + } + ) + self.assertEqual(decision["variation_id"], "varE") + self.mock_cmab_client.fetch_decision.assert_called_once_with( + "exp1", + self.mock_user_context.user_id, + expected_attribute, + decision["cmab_uuid"] + ) + + def test_filter_attributes_returns_correct_subset(self): + filtered = self.cmab_service._filter_attributes(self.mock_project_config, self.mock_user_context, "exp1") + self.assertEqual(filtered["age"], 25) + self.assertEqual(filtered["location"], "USA") + + def test_filter_attributes_empty_when_no_cmab(self): + self.mock_project_config.experiment_id_map["exp1"].cmab = None + filtered = self.cmab_service._filter_attributes(self.mock_project_config, self.mock_user_context, "exp1") + self.assertEqual(filtered, {}) + + def test_hash_attributes_produces_stable_output(self): + attrs = {"b": 2, "a": 1} + hash1 = self.cmab_service._hash_attributes(attrs) + hash2 = self.cmab_service._hash_attributes({"a": 1, "b": 2}) + self.assertEqual(hash1, hash2) + + def test_only_cmab_attributes_passed_to_client(self): + self.mock_user_context.get_user_attributes.return_value = { + 'age': 25, + 'location': 'USA', + 'extra_attr': 'value', # This shouldn't be passed to CMAB + 'another_extra': 123 # This shouldn't be passed to CMAB + } + self.mock_cmab_client.fetch_decision.return_value = "varF" + + decision = self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.IGNORE_CMAB_CACHE] + ) + + # Verify only age and location are passed (attributes configured in setUp) + self.mock_cmab_client.fetch_decision.assert_called_once_with( + "exp1", + self.mock_user_context.user_id, + {"age": 25, "location": "USA"}, + decision["cmab_uuid"] + ) diff --git a/tests/test_config.py b/tests/test_config.py index 96450368..9ec5c761 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -12,7 +12,8 @@ # limitations under the License. import json -import mock +from unittest import mock +import copy from optimizely import entities from optimizely import error_handler @@ -20,6 +21,7 @@ from optimizely import logger from optimizely import optimizely from optimizely.helpers import enums +from optimizely.project_config import ProjectConfig from . 
import base @@ -152,12 +154,30 @@ def test_init(self): self.assertEqual(expected_variation_key_map, self.project_config.variation_key_map) self.assertEqual(expected_variation_id_map, self.project_config.variation_id_map) + def test_cmab_field_population(self): + """ Test that the cmab field is populated correctly in experiments.""" + + # Deep copy existing datafile and add cmab config to the first experiment + config_dict = copy.deepcopy(self.config_dict_with_multiple_experiments) + config_dict['experiments'][0]['cmab'] = {'attributeIds': ['808797688', '808797689'], 'trafficAllocation': 4000} + config_dict['experiments'][0]['trafficAllocation'] = [] + + opt_obj = optimizely.Optimizely(json.dumps(config_dict)) + project_config = opt_obj.config_manager.get_config() + + experiment = project_config.get_experiment_from_key('test_experiment') + self.assertEqual(experiment.cmab, {'attributeIds': ['808797688', '808797689'], 'trafficAllocation': 4000}) + + experiment_2 = project_config.get_experiment_from_key('test_experiment_2') + self.assertIsNone(experiment_2.cmab) + def test_init__with_v4_datafile(self): """ Test that on creating object, properties are initiated correctly for version 4 datafile. """ # Adding some additional fields like live variables and IP anonymization config_dict = { 'revision': '42', + 'sdkKey': 'test', 'version': '4', 'anonymizeIP': False, 'botFiltering': True, @@ -1011,6 +1031,78 @@ def test_to_datafile(self): self.assertEqual(expected_datafile, actual_datafile) + def test_to_datafile_from_bytes(self): + """ Test that to_datafile returns the expected datafile when given bytes. """ + + expected_datafile = json.dumps(self.config_dict_with_features) + bytes_datafile = bytes(expected_datafile, 'utf-8') + + opt_obj = optimizely.Optimizely(bytes_datafile) + project_config = opt_obj.config_manager.get_config() + + actual_datafile = project_config.to_datafile() + + self.assertEqual(expected_datafile, actual_datafile) + + def test_datafile_with_integrations(self): + """ Test to confirm that integration conversion works and has expected output """ + opt_obj = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments) + ) + project_config = opt_obj.config_manager.get_config() + self.assertIsInstance(project_config, ProjectConfig) + + for integration in project_config.integration_key_map.values(): + self.assertIsInstance(integration, entities.Integration) + + integrations = self.config_dict_with_audience_segments['integrations'] + self.assertGreater(len(integrations), 0) + self.assertEqual(len(project_config.integrations), len(integrations)) + + integration = integrations[0] + self.assertEqual(project_config.host_for_odp, integration['host']) + self.assertEqual(project_config.public_key_for_odp, integration['publicKey']) + + self.assertEqual(sorted(project_config.all_segments), ['odp-segment-1', 'odp-segment-2', 'odp-segment-3']) + + def test_datafile_with_no_integrations(self): + """ Test to confirm that datafile with empty integrations still works """ + config_dict_with_audience_segments = copy.deepcopy(self.config_dict_with_audience_segments) + config_dict_with_audience_segments['integrations'] = [] + opt_obj = optimizely.Optimizely( + json.dumps(config_dict_with_audience_segments) + ) + + project_config = opt_obj.config_manager.get_config() + + self.assertIsInstance(project_config, ProjectConfig) + self.assertEqual(len(project_config.integrations), 0) + + def test_datafile_with_integrations_missing_key(self): + """ Test to confirm that datafile without key 
fails""" + config_dict_with_audience_segments = copy.deepcopy(self.config_dict_with_audience_segments) + del config_dict_with_audience_segments['integrations'][0]['key'] + opt_obj = optimizely.Optimizely( + json.dumps(config_dict_with_audience_segments) + ) + + project_config = opt_obj.config_manager.get_config() + + self.assertIsNone(project_config) + + def test_datafile_with_integrations_only_key(self): + """ Test to confirm that datafile with integrations and only key field still work """ + config_dict_with_audience_segments = copy.deepcopy(self.config_dict_with_audience_segments) + config_dict_with_audience_segments['integrations'].clear() + config_dict_with_audience_segments['integrations'].append({'key': '123'}) + opt_obj = optimizely.Optimizely( + json.dumps(config_dict_with_audience_segments) + ) + + project_config = opt_obj.config_manager.get_config() + + self.assertIsInstance(project_config, ProjectConfig) + class ConfigLoggingTest(base.BaseTest): def setUp(self): @@ -1226,6 +1318,18 @@ def test_get_variation_from_id_by_experiment_id(self): self.assertIsInstance(variation, entities.Variation) + def test_get_variation_from_id_by_experiment_id_missing(self): + + opt_obj = optimizely.Optimizely(json.dumps(self.config_dict)) + project_config = opt_obj.config_manager.get_config() + + experiment_id = '111127' + variation_id = 'missing' + + variation = project_config.get_variation_from_id_by_experiment_id(experiment_id, variation_id) + + self.assertIsNone(variation) + def test_get_variation_from_key_by_experiment_id(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict)) @@ -1237,3 +1341,15 @@ def test_get_variation_from_key_by_experiment_id(self): variation = project_config.get_variation_from_key_by_experiment_id(experiment_id, variation_key) self.assertIsInstance(variation, entities.Variation) + + def test_get_variation_from_key_by_experiment_id_missing(self): + + opt_obj = optimizely.Optimizely(json.dumps(self.config_dict)) + project_config = opt_obj.config_manager.get_config() + + experiment_id = '111127' + variation_key = 'missing' + + variation = project_config.get_variation_from_key_by_experiment_id(experiment_id, variation_key) + + self.assertIsNone(variation) diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py index 272e2f92..56674381 100644 --- a/tests/test_config_manager.py +++ b/tests/test_config_manager.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021, Optimizely +# Copyright 2019-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock import requests import time @@ -29,7 +29,7 @@ class StaticConfigManagerTest(base.BaseTest): def test_init__invalid_logger_fails(self): """ Test that initialization fails if logger is invalid. """ - class InvalidLogger(object): + class InvalidLogger: pass with self.assertRaisesRegex( @@ -40,7 +40,7 @@ class InvalidLogger(object): def test_init__invalid_error_handler_fails(self): """ Test that initialization fails if error_handler is invalid. """ - class InvalidErrorHandler(object): + class InvalidErrorHandler: pass with self.assertRaisesRegex( @@ -51,7 +51,7 @@ class InvalidErrorHandler(object): def test_init__invalid_notification_center_fails(self): """ Test that initialization fails if notification_center is invalid. 
""" - class InvalidNotificationCenter(object): + class InvalidNotificationCenter: pass with self.assertRaisesRegex( @@ -218,16 +218,16 @@ def test_get_config_blocks(self): self.assertEqual(1, round(end_time - start_time)) -@mock.patch('requests.get') +@mock.patch('requests.Session.get') class PollingConfigManagerTest(base.BaseTest): - def test_init__no_sdk_key_no_url__fails(self, _): - """ Test that initialization fails if there is no sdk_key or url provided. """ + def test_init__no_sdk_key_no_datafile__fails(self, _): + """ Test that initialization fails if there is no sdk_key or datafile provided. """ self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, - 'Must provide at least one of sdk_key or url.', + enums.Errors.MISSING_SDK_KEY, config_manager.PollingConfigManager, sdk_key=None, - url=None, + datafile=None, ) def test_get_datafile_url__no_sdk_key_no_url_raises(self, _): @@ -257,7 +257,7 @@ def test_get_datafile_url__invalid_url_template_raises(self, _): test_url_template = 'invalid_url_template_without_sdk_key_field_{key}' self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, - 'Invalid url_template {} provided'.format(test_url_template), + f'Invalid url_template {test_url_template} provided', config_manager.PollingConfigManager.get_datafile_url, 'optly_datafile_key', None, @@ -294,8 +294,8 @@ def test_get_datafile_url__sdk_key_and_url_and_template_provided(self, _): def test_set_update_interval(self, _): """ Test set_update_interval with different inputs. """ - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + + project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') # Assert that if invalid update_interval is set, then exception is raised. with self.assertRaisesRegex( @@ -319,10 +319,12 @@ def test_set_update_interval(self, _): project_config_manager.set_update_interval(42) self.assertEqual(42, project_config_manager.update_interval) + project_config_manager.stop() + def test_set_blocking_timeout(self, _): """ Test set_blocking_timeout with different inputs. """ - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + + project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') # Assert that if invalid blocking_timeout is set, then exception is raised. with self.assertRaisesRegex( @@ -350,10 +352,12 @@ def test_set_blocking_timeout(self, _): project_config_manager.set_blocking_timeout(5) self.assertEqual(5, project_config_manager.blocking_timeout) + project_config_manager.stop() + def test_set_last_modified(self, _): """ Test that set_last_modified sets last_modified field based on header. """ - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + + project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') last_modified_time = 'Test Last Modified Time' test_response_headers = { @@ -362,12 +366,12 @@ def test_set_last_modified(self, _): } project_config_manager.set_last_modified(test_response_headers) self.assertEqual(last_modified_time, project_config_manager.last_modified) + project_config_manager.stop() def test_fetch_datafile(self, _): """ Test that fetch_datafile sets config and last_modified based on response. 
""" sdk_key = 'some_key' - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key) + expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -375,15 +379,23 @@ def test_fetch_datafile(self, _): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response): - project_config_manager.fetch_datafile() + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key) + project_config_manager.stop() + mock_request.assert_called_once_with( + expected_datafile_url, + headers={}, + timeout=enums.ConfigManager.REQUEST_TIMEOUT + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Call fetch_datafile again and assert that request to URL is with If-Modified-Since header. - with mock.patch('requests.get', return_value=test_response) as mock_requests: - project_config_manager.fetch_datafile() + with mock.patch('requests.Session.get', return_value=test_response) as mock_requests: + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, @@ -392,18 +404,15 @@ def test_fetch_datafile(self, _): ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - self.assertTrue(project_config_manager.is_running) def test_fetch_datafile__status_exception_raised(self, _): """ Test that config_manager keeps running if status code exception is raised when fetching datafile. 
""" - class MockExceptionResponse(object): + class MockExceptionResponse: def raise_for_status(self): raise requests.exceptions.RequestException('Error Error !!') sdk_key = 'some_key' mock_logger = mock.Mock() - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -411,35 +420,41 @@ def raise_for_status(self): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response): - project_config_manager.fetch_datafile() + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) + project_config_manager.stop() + + mock_request.assert_called_once_with( + expected_datafile_url, + headers={}, + timeout=enums.ConfigManager.REQUEST_TIMEOUT + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Call fetch_datafile again, but raise exception this time - with mock.patch('requests.get', return_value=MockExceptionResponse()) as mock_requests: - project_config_manager.fetch_datafile() + with mock.patch('requests.Session.get', return_value=MockExceptionResponse()) as mock_requests: + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, headers={'If-Modified-Since': test_headers['Last-Modified']}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) - mock_logger.error.assert_called_once_with('Fetching datafile from {} failed. Error: Error Error !!'.format( - expected_datafile_url - )) + mock_logger.error.assert_called_once_with( + f'Fetching datafile from {expected_datafile_url} failed. Error: Error Error !!' + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - # Confirm that config manager keeps running - self.assertTrue(project_config_manager.is_running) def test_fetch_datafile__request_exception_raised(self, _): """ Test that config_manager keeps running if a request exception is raised when fetching datafile. 
""" sdk_key = 'some_key' mock_logger = mock.Mock() - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) + expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -447,31 +462,64 @@ def test_fetch_datafile__request_exception_raised(self, _): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response): - project_config_manager.fetch_datafile() + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) + project_config_manager.stop() + mock_request.assert_called_once_with( + expected_datafile_url, + headers={}, + timeout=enums.ConfigManager.REQUEST_TIMEOUT + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Call fetch_datafile again, but raise exception this time with mock.patch( - 'requests.get', + 'requests.Session.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: - project_config_manager.fetch_datafile() + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, headers={'If-Modified-Since': test_headers['Last-Modified']}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) - mock_logger.error.assert_called_once_with('Fetching datafile from {} failed. Error: Error Error !!'.format( - expected_datafile_url - )) + mock_logger.error.assert_called_once_with( + f'Fetching datafile from {expected_datafile_url} failed. Error: Error Error !!' + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - # Confirm that config manager keeps running - self.assertTrue(project_config_manager.is_running) + + def test_fetch_datafile__exception_polling_thread_failed(self, _): + """ Test that exception is raised when polling thread stops. """ + sdk_key = 'some_key' + mock_logger = mock.Mock() + + test_headers = {'Last-Modified': 'New Time'} + test_datafile = json.dumps(self.config_dict_with_features) + test_response = requests.Response() + test_response.status_code = 200 + test_response.headers = test_headers + test_response._content = test_datafile + + with mock.patch('requests.Session.get', return_value=test_response): + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, + logger=mock_logger, + update_interval=12345678912345) + + project_config_manager.stop() + + # verify the error log message + log_messages = [args[0] for args, _ in mock_logger.error.call_args_list] + for message in log_messages: + print(message) + if "Thread for background datafile polling failed. " \ + "Error: timestamp too large to convert to C PyTime_t" not in message: + assert False def test_is_running(self, _): """ Test that polling thread is running after instance of PollingConfigManager is created. 
""" @@ -479,8 +527,10 @@ def test_is_running(self, _): project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') self.assertTrue(project_config_manager.is_running) + project_config_manager.stop() -@mock.patch('requests.get') + +@mock.patch('requests.Session.get') class AuthDatafilePollingConfigManagerTest(base.BaseTest): def test_init__datafile_access_token_none__fails(self, _): """ Test that initialization fails if datafile_access_token is None. """ @@ -495,11 +545,12 @@ def test_set_datafile_access_token(self, _): """ Test that datafile_access_token is properly set as instance variable. """ datafile_access_token = 'some_token' sdk_key = 'some_key' - with mock.patch('optimizely.config_manager.AuthDatafilePollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.AuthDatafilePollingConfigManager( - datafile_access_token=datafile_access_token, sdk_key=sdk_key) + + project_config_manager = config_manager.AuthDatafilePollingConfigManager( + datafile_access_token=datafile_access_token, sdk_key=sdk_key) self.assertEqual(datafile_access_token, project_config_manager.datafile_access_token) + project_config_manager.stop() def test_fetch_datafile(self, _): """ Test that fetch_datafile sets authorization header in request header and sets config based on response. """ @@ -519,14 +570,13 @@ def test_fetch_datafile(self, _): test_response._content = test_datafile # Call fetch_datafile and assert that request was sent with correct authorization header - with mock.patch('requests.get', + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: project_config_manager.fetch_datafile() mock_request.assert_called_once_with( expected_datafile_url, - headers={'Authorization': 'Bearer {datafile_access_token}'.format( - datafile_access_token=datafile_access_token)}, + headers={'Authorization': f'Bearer {datafile_access_token}'}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) @@ -538,9 +588,6 @@ def test_fetch_datafile__request_exception_raised(self, _): sdk_key = 'some_key' mock_logger = mock.Mock() - with mock.patch('optimizely.config_manager.AuthDatafilePollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.AuthDatafilePollingConfigManager( - datafile_access_token=datafile_access_token, sdk_key=sdk_key, logger=mock_logger) expected_datafile_url = enums.ConfigManager.AUTHENTICATED_DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -550,14 +597,17 @@ def test_fetch_datafile__request_exception_raised(self, _): test_response._content = test_datafile # Call fetch_datafile and assert that request was sent with correct authorization header - with mock.patch('requests.get', - return_value=test_response) as mock_request: - project_config_manager.fetch_datafile() + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: + project_config_manager = config_manager.AuthDatafilePollingConfigManager( + datafile_access_token=datafile_access_token, + sdk_key=sdk_key, + logger=mock_logger + ) + project_config_manager.stop() mock_request.assert_called_once_with( expected_datafile_url, - headers={'Authorization': 'Bearer {datafile_access_token}'.format( - datafile_access_token=datafile_access_token)}, + headers={'Authorization': f'Bearer {datafile_access_token}'}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) @@ -565,24 +615,23 @@ def test_fetch_datafile__request_exception_raised(self, _): # Call 
fetch_datafile again, but raise exception this time with mock.patch( - 'requests.get', + 'requests.Session.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: - project_config_manager.fetch_datafile() + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, headers={ 'If-Modified-Since': test_headers['Last-Modified'], - 'Authorization': 'Bearer {datafile_access_token}'.format( - datafile_access_token=datafile_access_token), + 'Authorization': f'Bearer {datafile_access_token}', }, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) - mock_logger.error.assert_called_once_with('Fetching datafile from {} failed. Error: Error Error !!'.format( - expected_datafile_url - )) + mock_logger.error.assert_called_once_with( + f'Fetching datafile from {expected_datafile_url} failed. Error: Error Error !!' + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - # Confirm that config manager keeps running - self.assertTrue(project_config_manager.is_running) diff --git a/tests/test_decision_service.py b/tests/test_decision_service.py index dc5bbfe7..6c5862a5 100644 --- a/tests/test_decision_service.py +++ b/tests/test_decision_service.py @@ -13,7 +13,7 @@ import json -import mock +from unittest import mock from optimizely import decision_service from optimizely import entities @@ -485,6 +485,8 @@ def test_get_variation__bucketing_id_provided(self): "random_key": "random_value", "$opt_bucketing_id": "user_bucket_value", }) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch( "optimizely.decision_service.DecisionService.get_forced_variation", @@ -501,7 +503,8 @@ def test_get_variation__bucketing_id_provided(self): variation, _ = self.decision_service.get_variation( self.project_config, experiment, - user + user, + user_profile_tracker ) # Assert that bucket is called with appropriate bucketing ID @@ -515,6 +518,8 @@ def test_get_variation__user_whitelisted_for_variation(self): user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch( "optimizely.decision_service.DecisionService.get_whitelisted_variation", @@ -531,7 +536,7 @@ def test_get_variation__user_whitelisted_for_variation(self): "optimizely.user_profile.UserProfileService.save" ) as mock_save: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user + self.project_config, experiment, user, user_profile_tracker ) self.assertEqual( entities.Variation("111128", "control"), @@ -554,6 +559,8 @@ def test_get_variation__user_has_stored_decision(self): user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = 
self.project_config.get_experiment_from_key("test_experiment") with mock.patch( "optimizely.decision_service.DecisionService.get_whitelisted_variation", @@ -565,49 +572,38 @@ def test_get_variation__user_has_stored_decision(self): "optimizely.helpers.audience.does_user_meet_audience_conditions" ) as mock_audience_check, mock.patch( "optimizely.bucketer.Bucketer.bucket" - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value={ - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111128"}}, - }, - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: + ) as mock_bucket: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None + self.project_config, experiment, user, user_profile_tracker ) self.assertEqual( entities.Variation("111128", "control"), variation, ) - # Assert that stored variation is returned and bucketing service is not involved mock_get_whitelisted_variation.assert_called_once_with( self.project_config, experiment, "test_user" ) - mock_lookup.assert_called_once_with("test_user") mock_get_stored_variation.assert_called_once_with( self.project_config, experiment, - user_profile.UserProfile( - "test_user", {"111127": {"variation_id": "111128"}} - ), + user_profile_tracker.user_profile ) self.assertEqual(0, mock_audience_check.call_count) self.assertEqual(0, mock_bucket.call_count) - self.assertEqual(0, mock_save.call_count) - def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_available( + def test_get_variation__user_bucketed_for_new_experiment__user_profile_tracker_available( self, ): """ Test that get_variation buckets and returns variation if no forced variation or decision available. - Also, stores decision if user profile service is available. 
""" + """ user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch.object( self.decision_service, "logger" @@ -622,14 +618,9 @@ def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_a ) as mock_audience_check, mock.patch( "optimizely.bucketer.Bucketer.bucket", return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value={"user_id": "test_user", "experiment_bucket_map": {}}, - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: + ) as mock_bucket: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None + self.project_config, experiment, user, user_profile_tracker ) self.assertEqual( entities.Variation("111129", "variation"), @@ -640,83 +631,19 @@ def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_a mock_get_whitelisted_variation.assert_called_once_with( self.project_config, experiment, user.user_id ) - mock_lookup.assert_called_once_with("test_user") - self.assertEqual(1, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user.get_user_attributes(), - mock_decision_service_logging - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - - def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_not_available( - self, - ): - """ Test that get_variation buckets and returns variation if - no forced variation and no user profile service available. 
""" - - # Unset user profile service - self.decision_service.user_profile_service = None - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup" - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - # Assert that user is bucketed and new decision is not stored as user profile service is not available - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, "test_user" - ) - self.assertEqual(0, mock_lookup.call_count) - self.assertEqual(0, mock_get_stored_variation.call_count) + self.assertEqual(1, mock_get_stored_variation.call_count) mock_audience_check.assert_called_once_with( self.project_config, experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) mock_bucket.assert_called_once_with( self.project_config, experiment, "test_user", "test_user" ) - self.assertEqual(0, mock_save.call_count) def test_get_variation__user_does_not_meet_audience_conditions(self): """ Test that get_variation returns None if user is not in experiment. 
""" @@ -725,6 +652,7 @@ def test_get_variation__user_does_not_meet_audience_conditions(self): logger=None, user_id="test_user", user_attributes={}) + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, self.decision_service.user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch.object( self.decision_service, "logger" @@ -739,13 +667,10 @@ def test_get_variation__user_does_not_meet_audience_conditions(self): ) as mock_audience_check, mock.patch( "optimizely.bucketer.Bucketer.bucket" ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value={"user_id": "test_user", "experiment_bucket_map": {}}, - ) as mock_lookup, mock.patch( "optimizely.user_profile.UserProfileService.save" ) as mock_save: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None + self.project_config, experiment, user, user_profile_tracker ) self.assertIsNone( variation @@ -755,207 +680,20 @@ def test_get_variation__user_does_not_meet_audience_conditions(self): mock_get_whitelisted_variation.assert_called_once_with( self.project_config, experiment, "test_user" ) - mock_lookup.assert_called_once_with("test_user") mock_get_stored_variation.assert_called_once_with( - self.project_config, experiment, user_profile.UserProfile("test_user") + self.project_config, experiment, user_profile_tracker.get_user_profile() ) mock_audience_check.assert_called_once_with( self.project_config, experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) self.assertEqual(0, mock_bucket.call_count) self.assertEqual(0, mock_save.call_count) - def test_get_variation__user_profile_in_invalid_format(self): - """ Test that get_variation handles invalid user profile gracefully. 
""" - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value="invalid_profile", - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - - # Assert that user is bucketed and new decision is stored - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, "test_user" - ) - mock_lookup.assert_called_once_with("test_user") - # Stored decision is not consulted as user profile is invalid - self.assertEqual(0, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user.get_user_attributes(), - mock_decision_service_logging - ) - mock_decision_service_logging.warning.assert_called_once_with( - "User profile has invalid format." - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - - def test_get_variation__user_profile_lookup_fails(self): - """ Test that get_variation acts gracefully when lookup fails. 
""" - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - side_effect=Exception("major problem"), - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - - # Assert that user is bucketed and new decision is stored - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, "test_user" - ) - mock_lookup.assert_called_once_with("test_user") - # Stored decision is not consulted as lookup failed - self.assertEqual(0, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user.get_user_attributes(), - mock_decision_service_logging - ) - mock_decision_service_logging.exception.assert_called_once_with( - 'Unable to retrieve user profile for user "test_user" as lookup failed.' - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - - def test_get_variation__user_profile_save_fails(self): - """ Test that get_variation acts gracefully when save fails. 
""" - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", return_value=None - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save", - side_effect=Exception("major problem"), - ) as mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - - # Assert that user is bucketed and new decision is stored - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, "test_user" - ) - mock_lookup.assert_called_once_with("test_user") - self.assertEqual(0, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user.get_user_attributes(), - mock_decision_service_logging - ) - - mock_decision_service_logging.exception.assert_called_once_with( - 'Unable to save user profile for user "test_user".' - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - def test_get_variation__ignore_user_profile_when_specified(self): """ Test that we ignore the user profile service if specified. 
""" @@ -963,6 +701,8 @@ def test_get_variation__ignore_user_profile_when_specified(self): logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch.object( self.decision_service, "logger" @@ -983,6 +723,8 @@ def test_get_variation__ignore_user_profile_when_specified(self): self.project_config, experiment, user, + user_profile_tracker, + [], options=['IGNORE_USER_PROFILE_SERVICE'], ) self.assertEqual( @@ -999,7 +741,7 @@ def test_get_variation__ignore_user_profile_when_specified(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) mock_bucket.assert_called_once_with( @@ -1163,7 +905,7 @@ def test_get_variation_for_rollout__skips_to_everyone_else_rule(self): self.project_config.get_experiment_from_key("211127").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, '1', - user.get_user_attributes(), + user, mock_decision_service_logging, ), mock.call( @@ -1171,7 +913,7 @@ def test_get_variation_for_rollout__skips_to_everyone_else_rule(self): self.project_config.get_experiment_from_key("211147").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, 'Everyone Else', - user.get_user_attributes(), + user, mock_decision_service_logging, ), ], @@ -1216,7 +958,7 @@ def test_get_variation_for_rollout__returns_none_for_user_not_in_rollout(self): self.project_config.get_experiment_from_key("211127").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "1", - user.get_user_attributes(), + user, mock_decision_service_logging, ), mock.call( @@ -1224,7 +966,7 @@ def test_get_variation_for_rollout__returns_none_for_user_not_in_rollout(self): self.project_config.get_experiment_from_key("211137").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "2", - user.get_user_attributes(), + user, mock_decision_service_logging, ), mock.call( @@ -1232,7 +974,7 @@ def test_get_variation_for_rollout__returns_none_for_user_not_in_rollout(self): self.project_config.get_experiment_from_key("211147").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "Everyone Else", - user.get_user_attributes(), + user, mock_decision_service_logging, ), ], @@ -1290,6 +1032,8 @@ def test_get_variation_for_feature__returns_variation_for_feature_in_experiment( self.project_config, self.project_config.get_experiment_from_key("test_experiment"), user, + None, + [], None ) @@ -1370,7 +1114,7 @@ def test_get_variation_for_feature__returns_variation_if_user_not_in_experiment_ self.project_config.get_experiment_from_key("group_exp_2").get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "group_exp_2", - {}, + user, mock_decision_service_logging, ) @@ -1379,7 +1123,7 @@ def test_get_variation_for_feature__returns_variation_if_user_not_in_experiment_ self.project_config.get_experiment_from_key("211127").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "1", - user.get_user_attributes(), + user, mock_decision_service_logging, ) @@ -1417,6 +1161,8 @@ def test_get_variation_for_feature__returns_variation_for_feature_in_group(self) self.project_config, self.project_config.get_experiment_from_key("group_exp_1"), user, + None, + [], None ) 
@@ -1445,6 +1191,8 @@ def test_get_variation_for_feature__returns_none_for_user_not_in_experiment(self
             self.project_config,
             self.project_config.get_experiment_from_key("test_experiment"),
             user,
+            None,
+            [],
             None
         )
 
@@ -1472,7 +1220,7 @@ def test_get_variation_for_feature__returns_none_for_user_in_group_experiment_no
         )
 
         mock_decision.assert_called_once_with(
-            self.project_config, self.project_config.get_experiment_from_id("32222"), user, False
+            self.project_config, self.project_config.get_experiment_from_id("32222"), user, None, [], False
         )
 
     def test_get_variation_for_feature__returns_variation_for_feature_in_mutex_group_bucket_less_than_2500(
@@ -1560,6 +1308,7 @@ def test_get_variation_for_feature__returns_variation_for_feature_in_mutex_group
         with mock.patch(
                 'optimizely.bucketer.Bucketer._generate_bucket_value', return_value=6500) as mock_generate_bucket_value, \
                 mock.patch.object(self.project_config, 'logger') as mock_config_logging:
+
             variation_received, _ = self.decision_service.get_variation_for_feature(
                 self.project_config, feature, user
             )
@@ -1797,6 +1546,7 @@ def test_get_variation_for_feature_returns_rollout_in_experiment_bucket_range_25
             ),
             variation_received,
         )
+
         mock_config_logging.debug.assert_called_with(
             'Assigned bucket 4000 to user with bucketing ID "test_user".')
         mock_generate_bucket_value.assert_called_with("test_user211147")
diff --git a/tests/test_event_builder.py b/tests/test_event_builder.py
index 6147c9db..fb4d7a0d 100644
--- a/tests/test_event_builder.py
+++ b/tests/test_event_builder.py
@@ -11,7 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import mock
+from unittest import mock
 import unittest
 from operator import itemgetter
 
diff --git a/tests/test_event_dispatcher.py b/tests/test_event_dispatcher.py
index 15e89180..30311e35 100644
--- a/tests/test_event_dispatcher.py
+++ b/tests/test_event_dispatcher.py
@@ -11,13 +11,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import mock
+from unittest import mock
 import json
 import unittest
 
 from requests import exceptions as request_exception
 
 from optimizely import event_builder
 from optimizely import event_dispatcher
+from optimizely.helpers.enums import EventDispatchConfig
 
 
 class EventDispatcherTest(unittest.TestCase):
@@ -28,10 +29,10 @@ def test_dispatch_event__get_request(self):
         params = {'a': '111001', 'n': 'test_event', 'g': '111028', 'u': 'oeutest_user'}
 
         event = event_builder.Event(url, params)
-        with mock.patch('requests.get') as mock_request_get:
+        with mock.patch('requests.Session.get') as mock_request_get:
             event_dispatcher.EventDispatcher.dispatch_event(event)
 
-        mock_request_get.assert_called_once_with(url, params=params, timeout=event_dispatcher.REQUEST_TIMEOUT)
+        mock_request_get.assert_called_once_with(url, params=params, timeout=EventDispatchConfig.REQUEST_TIMEOUT)
 
     def test_dispatch_event__post_request(self):
         """ Test that dispatch event fires off requests call with provided URL, params, HTTP verb and headers.
""" @@ -45,14 +46,14 @@ def test_dispatch_event__post_request(self): } event = event_builder.Event(url, params, http_verb='POST', headers={'Content-Type': 'application/json'}) - with mock.patch('requests.post') as mock_request_post: + with mock.patch('requests.Session.post') as mock_request_post: event_dispatcher.EventDispatcher.dispatch_event(event) mock_request_post.assert_called_once_with( url, data=json.dumps(params), headers={'Content-Type': 'application/json'}, - timeout=event_dispatcher.REQUEST_TIMEOUT, + timeout=EventDispatchConfig.REQUEST_TIMEOUT, ) def test_dispatch_event__handle_request_exception(self): @@ -68,7 +69,7 @@ def test_dispatch_event__handle_request_exception(self): event = event_builder.Event(url, params, http_verb='POST', headers={'Content-Type': 'application/json'}) with mock.patch( - 'requests.post', side_effect=request_exception.RequestException('Failed Request'), + 'requests.Session.post', side_effect=request_exception.RequestException('Failed Request'), ) as mock_request_post, mock.patch('logging.error') as mock_log_error: event_dispatcher.EventDispatcher.dispatch_event(event) @@ -76,6 +77,6 @@ def test_dispatch_event__handle_request_exception(self): url, data=json.dumps(params), headers={'Content-Type': 'application/json'}, - timeout=event_dispatcher.REQUEST_TIMEOUT, + timeout=EventDispatchConfig.REQUEST_TIMEOUT, ) mock_log_error.assert_called_once_with('Dispatch event failed. Error: Failed Request') diff --git a/tests/test_event_factory.py b/tests/test_event_factory.py index ec92a3dd..adbebd35 100644 --- a/tests/test_event_factory.py +++ b/tests/test_event_factory.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock import time import unittest import uuid diff --git a/tests/test_event_processor.py b/tests/test_event_processor.py index 707ac00f..4e45e6fc 100644 --- a/tests/test_event_processor.py +++ b/tests/test_event_processor.py @@ -12,9 +12,9 @@ # limitations under the License. import datetime -import mock +from unittest import mock import time -from six.moves import queue +import queue from optimizely.event.payload import Decision, Visitor from optimizely.event.event_processor import ( @@ -30,7 +30,7 @@ from . 
import base -class CanonicalEvent(object): +class CanonicalEvent: def __init__(self, experiment_id, variation_id, event_name, visitor_id, attributes, tags): self._experiment_id = experiment_id self._variation_id = variation_id @@ -46,7 +46,7 @@ def __eq__(self, other): return self.__dict__ == other.__dict__ -class CustomEventDispatcher(object): +class CustomEventDispatcher: IMPRESSION_EVENT_NAME = 'campaign_activated' @@ -116,7 +116,7 @@ class BatchEventProcessorTest(base.BaseTest): MAX_BATCH_SIZE = 10 MAX_DURATION_SEC = 0.2 MAX_TIMEOUT_INTERVAL_SEC = 0.1 - TEST_TIMEOUT = 0.3 + TEST_TIMEOUT = 15 def setUp(self, *args, **kwargs): base.BaseTest.setUp(self, 'config_dict_with_multiple_experiments') @@ -155,7 +155,11 @@ def test_drain_on_stop(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events() or not self.event_processor.event_queue.empty(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -170,7 +174,11 @@ def test_flush_on_max_timeout(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -187,7 +195,11 @@ def test_flush_once_max_timeout(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events() or mock_config_logging.debug.call_count < 3: + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -209,7 +221,11 @@ def test_flush_max_batch_size(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -229,7 +245,11 @@ def test_flush(self): self.event_processor.flush() event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -254,7 +274,11 @@ def test_flush_on_mismatch_revision(self): self.event_processor.process(user_event_2) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) 
+ # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -279,7 +303,11 @@ def test_flush_on_mismatch_project_id(self): self.event_processor.process(user_event_2) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -294,7 +322,11 @@ def test_stop_and_start(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.event_processor.stop() @@ -517,15 +549,29 @@ def test_warning_log_level_on_queue_overflow(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing and queue to clear, up to TEST_TIMEOUT + start_time = time.time() + while not self.event_processor.event_queue.empty(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break # queue is flushed, even though events overflow self.assertEqual(0, self.event_processor.event_queue.qsize()) - mock_config_logging.warning.assert_called_with('Payload not accepted by the queue. Current size: {}' - .format(str(test_max_queue_size))) + class AnyStringWith(str): + '''allows a partial match on the log message''' + def __eq__(self, other): + return self in other -class CustomForwardingEventDispatcher(object): + # the qsize method is approximate and since no lock is taken on the queue + # it can return an indeterminate count + # thus we can't rely on this error message to always report the max_queue_size + mock_config_logging.warning.assert_called_with( + AnyStringWith('Payload not accepted by the queue. Current size: ') + ) + + +class CustomForwardingEventDispatcher: def __init__(self, is_updated=False): self.is_updated = is_updated @@ -568,7 +614,7 @@ def test_event_processor__dispatch_raises_exception(self): event_processor.process(user_event) mock_client_logging.exception.assert_called_once_with( - 'Error dispatching event: ' + str(log_event) + ' Failed to send.' + f'Error dispatching event: {log_event} Failed to send.' 
) def test_event_processor__with_test_event_dispatcher(self): diff --git a/tests/test_logger.py b/tests/test_logger.py index 64cd1378..ee432735 100644 --- a/tests/test_logger.py +++ b/tests/test_logger.py @@ -14,7 +14,7 @@ import unittest import uuid -import mock +from unittest import mock from optimizely import logger as _logger @@ -105,7 +105,7 @@ def test_reset_logger(self): def test_reset_logger__replaces_handlers(self): """Test that reset_logger replaces existing handlers with a StreamHandler.""" - logger_name = 'test-logger-{}'.format(uuid.uuid4()) + logger_name = f'test-logger-{uuid.uuid4()}' logger = logging.getLogger(logger_name) logger.handlers = [logging.StreamHandler() for _ in range(10)] @@ -121,7 +121,7 @@ def test_reset_logger__replaces_handlers(self): def test_reset_logger__with_handler__existing(self): """Test that reset_logger deals with provided handlers correctly.""" existing_handler = logging.NullHandler() - logger_name = 'test-logger-{}'.format(uuid.uuid4()) + logger_name = f'test-logger-{uuid.uuid4()}' reset_logger = _logger.reset_logger(logger_name, handler=existing_handler) self.assertEqual(1, len(reset_logger.handlers)) @@ -133,6 +133,6 @@ def test_reset_logger__with_handler__existing(self): def test_reset_logger__with_level(self): """Test that reset_logger sets log levels correctly.""" - logger_name = 'test-logger-{}'.format(uuid.uuid4()) + logger_name = f'test-logger-{uuid.uuid4()}' reset_logger = _logger.reset_logger(logger_name, level=logging.DEBUG) self.assertEqual(logging.DEBUG, reset_logger.level) diff --git a/tests/test_lru_cache.py b/tests/test_lru_cache.py new file mode 100644 index 00000000..b30617b3 --- /dev/null +++ b/tests/test_lru_cache.py @@ -0,0 +1,211 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
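The new test file below exercises optimizely.odp.lru_cache.LRUCache, a capacity- and age-bounded cache used for ODP segments. Throughout the tests, lookup() refreshes an entry's recency and enforces the timeout, while peek() reads without reordering. A short usage sketch consistent with the tests that follow (the capacity of 2 is illustrative):

    from optimizely.odp.lru_cache import LRUCache

    cache = LRUCache(2, 1000)       # capacity of 2 entries, timeout of 1000 seconds
    cache.save('a', 1)
    cache.save('b', 2)
    cache.lookup('a')               # refreshes 'a': order is now [b, a]
    cache.save('c', 3)              # evicts 'b', the least recently used entry
    assert cache.peek('b') is None  # peek inspects without touching recency
    assert cache.peek('a') == 1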
+ +from __future__ import annotations +import time +from unittest import TestCase +from optimizely.odp.lru_cache import LRUCache, OptimizelySegmentsCache + + +class LRUCacheTest(TestCase): + def test_min_config(self): + cache = LRUCache(1000, 2000) + self.assertEqual(1000, cache.capacity) + self.assertEqual(2000, cache.timeout) + + cache = LRUCache(0, 0) + self.assertEqual(0, cache.capacity) + self.assertEqual(0, cache.timeout) + + def test_save_and_lookup(self): + max_size = 2 + cache = LRUCache(max_size, 1000) + + self.assertIsNone(cache.peek(1)) + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + cache.save(3, 300) # [2, 3] + self.assertIsNone(cache.peek(1)) + self.assertEqual(200, cache.peek(2)) + self.assertEqual(300, cache.peek(3)) + + cache.save(2, 201) # [3, 2] + cache.save(1, 101) # [2, 1] + self.assertEqual(101, cache.peek(1)) + self.assertEqual(201, cache.peek(2)) + self.assertIsNone(cache.peek(3)) + + self.assertIsNone(cache.lookup(3)) # [2, 1] + self.assertEqual(201, cache.lookup(2)) # [1, 2] + cache.save(3, 302) # [2, 3] + self.assertIsNone(cache.peek(1)) + self.assertEqual(201, cache.peek(2)) + self.assertEqual(302, cache.peek(3)) + + self.assertEqual(302, cache.lookup(3)) # [2, 3] + cache.save(1, 103) # [3, 1] + self.assertEqual(103, cache.peek(1)) + self.assertIsNone(cache.peek(2)) + self.assertEqual(302, cache.peek(3)) + + self.assertEqual(len(cache.map), max_size) + self.assertEqual(len(cache.map), cache.capacity) + + def test_size_zero(self): + cache = LRUCache(0, 1000) + + self.assertIsNone(cache.lookup(1)) + cache.save(1, 100) # [1] + self.assertIsNone(cache.lookup(1)) + + def test_size_less_than_zero(self): + cache = LRUCache(-2, 1000) + + self.assertIsNone(cache.lookup(1)) + cache.save(1, 100) # [1] + self.assertIsNone(cache.lookup(1)) + + def test_timeout(self): + max_timeout = .5 + + cache = LRUCache(1000, max_timeout) + + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + cache.save(3, 300) # [1, 2, 3] + time.sleep(1.1) # wait to expire + cache.save(4, 400) # [1, 2, 3, 4] + cache.save(1, 101) # [2, 3, 4, 1] + + self.assertEqual(101, cache.lookup(1)) # [4, 1] + self.assertIsNone(cache.lookup(2)) + self.assertIsNone(cache.lookup(3)) + self.assertEqual(400, cache.lookup(4)) + + def test_timeout_zero(self): + max_timeout = 0 + cache = LRUCache(1000, max_timeout) + + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + time.sleep(1) # wait to expire + + self.assertEqual(100, cache.lookup(1), "should not expire when timeout is 0") + self.assertEqual(200, cache.lookup(2)) + + def test_timeout_less_than_zero(self): + max_timeout = -2 + cache = LRUCache(1000, max_timeout) + + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + time.sleep(1) # wait to expire + + self.assertEqual(100, cache.lookup(1), "should not expire when timeout is less than 0") + self.assertEqual(200, cache.lookup(2)) + + def test_reset(self): + cache = LRUCache(1000, 600) + cache.save('wow', 'great') + cache.save('tow', 'freight') + + self.assertEqual(cache.lookup('wow'), 'great') + self.assertEqual(len(cache.map), 2) + + cache.reset() + + self.assertEqual(cache.lookup('wow'), None) + self.assertEqual(len(cache.map), 0) + + cache.save('cow', 'crate') + self.assertEqual(cache.lookup('cow'), 'crate') + + def test_remove_non_existent_key(self): + cache = LRUCache(3, 1000) + cache.save("1", 100) + cache.save("2", 200) + + cache.remove("3") # Doesn't exist + + self.assertEqual(cache.lookup("1"), 100) + self.assertEqual(cache.lookup("2"), 200) + + def test_remove_existing_key(self): + 
cache = LRUCache(3, 1000) + + cache.save("1", 100) + cache.save("2", 200) + cache.save("3", 300) + + self.assertEqual(cache.lookup("1"), 100) + self.assertEqual(cache.lookup("2"), 200) + self.assertEqual(cache.lookup("3"), 300) + + cache.remove("2") + + self.assertEqual(cache.lookup("1"), 100) + self.assertIsNone(cache.lookup("2")) + self.assertEqual(cache.lookup("3"), 300) + + def test_remove_from_zero_sized_cache(self): + cache = LRUCache(0, 1000) + cache.save("1", 100) + cache.remove("1") + + self.assertIsNone(cache.lookup("1")) + + def test_remove_and_add_back(self): + cache = LRUCache(3, 1000) + cache.save("1", 100) + cache.save("2", 200) + cache.save("3", 300) + + cache.remove("2") + cache.save("2", 201) + + self.assertEqual(cache.lookup("1"), 100) + self.assertEqual(cache.lookup("2"), 201) + self.assertEqual(cache.lookup("3"), 300) + + def test_thread_safety(self): + import threading + + max_size = 100 + cache = LRUCache(max_size, 1000) + + for i in range(1, max_size + 1): + cache.save(str(i), i * 100) + + def remove_key(k): + cache.remove(str(k)) + + threads = [] + for i in range(1, (max_size // 2) + 1): + thread = threading.Thread(target=remove_key, args=(i,)) + threads.append(thread) + thread.start() + + for thread in threads: + thread.join() + + for i in range(1, max_size + 1): + if i <= max_size // 2: + self.assertIsNone(cache.lookup(str(i))) + else: + self.assertEqual(cache.lookup(str(i)), i * 100) + + self.assertEqual(len(cache.map), max_size // 2) + + # type checker test + # confirm that LRUCache matches OptimizelySegmentsCache protocol + _: OptimizelySegmentsCache = LRUCache(0, 0) diff --git a/tests/test_notification_center.py b/tests/test_notification_center.py index 2ac30903..02ef5951 100644 --- a/tests/test_notification_center.py +++ b/tests/test_notification_center.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock import unittest from optimizely import notification_center @@ -309,5 +309,5 @@ def some_listener(arg_1, arg_2): # Not providing any of the 2 expected arguments during send. test_notification_center.send_notifications(enums.NotificationTypes.ACTIVATE) mock_logger.exception.assert_called_once_with( - 'Unknown problem when sending "{}" type notification.'.format(enums.NotificationTypes.ACTIVATE) + f'Unknown problem when sending "{enums.NotificationTypes.ACTIVATE}" type notification.' ) diff --git a/tests/test_notification_center_registry.py b/tests/test_notification_center_registry.py new file mode 100644 index 00000000..81984059 --- /dev/null +++ b/tests/test_notification_center_registry.py @@ -0,0 +1,85 @@ +# Copyright 2023, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
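The tests that follow pin down the contract of the new _NotificationCenterRegistry: one cached NotificationCenter per SDK key, an error log when no SDK key is supplied, and removal detaching the center from future datafile-update notifications. A compact sketch of that contract; logger stands for any SDK logger instance, and the behavior after removal is inferred from the caching semantics the tests assert:

    from optimizely.notification_center_registry import _NotificationCenterRegistry

    nc_a = _NotificationCenterRegistry.get_notification_center('sdk-key', logger)
    nc_b = _NotificationCenterRegistry.get_notification_center('sdk-key', logger)
    assert nc_a is nc_b  # one center is cached per SDK key

    _NotificationCenterRegistry.get_notification_center(None, logger)  # logs a MISSING_SDK_KEY error

    _NotificationCenterRegistry.remove_notification_center('sdk-key')
    # assumed: a later get_notification_center('sdk-key', ...) builds a fresh center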
+ +import json +from unittest import mock +import copy + +from optimizely.notification_center_registry import _NotificationCenterRegistry +from optimizely.notification_center import NotificationCenter +from optimizely.optimizely import Optimizely +from optimizely.helpers.enums import NotificationTypes, Errors +from .base import BaseTest + + +class NotificationCenterRegistryTest(BaseTest): + def test_get_notification_center(self): + logger = mock.MagicMock() + sdk_key = 'test' + client = Optimizely(sdk_key=sdk_key, logger=logger) + notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, logger) + self.assertIsInstance(notification_center, NotificationCenter) + config_notifications = notification_center.notification_listeners[NotificationTypes.OPTIMIZELY_CONFIG_UPDATE] + + self.assertIn((mock.ANY, client._update_odp_config_on_datafile_update), config_notifications) + + logger.error.assert_not_called() + + _NotificationCenterRegistry.get_notification_center(None, logger) + + logger.error.assert_called_once_with(f'{Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + + client.close() + + def test_only_one_notification_center_created(self): + logger = mock.MagicMock() + sdk_key = 'single' + notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, logger) + client = Optimizely(sdk_key=sdk_key, logger=logger) + + self.assertIs(notification_center, _NotificationCenterRegistry.get_notification_center(sdk_key, logger)) + + logger.error.assert_not_called() + + client.close() + + def test_remove_notification_center(self): + logger = mock.MagicMock() + sdk_key = 'segments-test' + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, logger) + + with mock.patch('requests.Session.get', return_value=test_response), \ + mock.patch.object(notification_center, 'send_notifications') as mock_send: + + client = Optimizely(sdk_key=sdk_key, logger=logger) + client.config_manager.get_config() + + mock_send.assert_called_once() + mock_send.reset_mock() + + self.assertIn(notification_center, _NotificationCenterRegistry._notification_centers.values()) + _NotificationCenterRegistry.remove_notification_center(sdk_key) + self.assertNotIn(notification_center, _NotificationCenterRegistry._notification_centers.values()) + + revised_datafile = copy.deepcopy(self.config_dict_with_audience_segments) + revised_datafile['revision'] = str(int(revised_datafile['revision']) + 1) + + # trigger notification + client.config_manager._set_config(json.dumps(revised_datafile)) + mock_send.assert_not_called() + + logger.error.assert_not_called() + + client.close() diff --git a/tests/test_odp_config.py b/tests/test_odp_config.py new file mode 100644 index 00000000..b7a48e84 --- /dev/null +++ b/tests/test_odp_config.py @@ -0,0 +1,41 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +from tests import base +from optimizely.odp.odp_config import OdpConfig + + +class OdpConfigTest(base.BaseTest): + api_host = 'test-host' + api_key = 'test-key' + segments_to_check = ['test-segment'] + + def test_init_config(self): + config = OdpConfig(self.api_key, self.api_host, self.segments_to_check) + + self.assertEqual(config.get_api_key(), self.api_key) + self.assertEqual(config.get_api_host(), self.api_host) + self.assertEqual(config.get_segments_to_check(), self.segments_to_check) + + def test_update_config(self): + config = OdpConfig() + updated = config.update(self.api_key, self.api_host, self.segments_to_check) + + self.assertStrictTrue(updated) + self.assertEqual(config.get_api_key(), self.api_key) + self.assertEqual(config.get_api_host(), self.api_host) + self.assertEqual(config.get_segments_to_check(), self.segments_to_check) + + updated = config.update(self.api_key, self.api_host, self.segments_to_check) + self.assertStrictFalse(updated) diff --git a/tests/test_odp_event_api_manager.py b/tests/test_odp_event_api_manager.py new file mode 100644 index 00000000..0e7c50d8 --- /dev/null +++ b/tests/test_odp_event_api_manager.py @@ -0,0 +1,153 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from unittest import mock + +from requests import exceptions as request_exception + +from optimizely.helpers.enums import OdpEventApiConfig +from optimizely.odp.odp_event import OdpEvent, OdpEventEncoder +from optimizely.odp.odp_event_api_manager import OdpEventApiManager +from . 
import base
+
+
+class OdpEventApiManagerTest(base.BaseTest):
+    user_key = "vuid"
+    user_value = "test-user-value"
+    api_key = "test-api-key"
+    api_host = "test-host"
+    events = [
+        OdpEvent('t1', 'a1', {"id-key-1": "id-value-1"}, {"key-1": "value1"}),
+        OdpEvent('t2', 'a2', {"id-key-2": "id-value-2"}, {"key-2": "value2"})
+    ]
+
+    def test_send_odp_events__valid_request(self):
+        with mock.patch('requests.post') as mock_request_post:
+            api = OdpEventApiManager()
+            api.send_odp_events(api_key=self.api_key,
+                                api_host=self.api_host,
+                                events=self.events)
+
+        request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key}
+        mock_request_post.assert_called_once_with(url=self.api_host + "/v3/events",
+                                                  headers=request_headers,
+                                                  data=json.dumps(self.events, cls=OdpEventEncoder),
+                                                  timeout=OdpEventApiConfig.REQUEST_TIMEOUT)
+
+    def test_send_odp_events__custom_timeout(self):
+        with mock.patch('requests.post') as mock_request_post:
+            api = OdpEventApiManager(timeout=14)
+            api.send_odp_events(api_key=self.api_key,
+                                api_host=self.api_host,
+                                events=self.events)
+
+        request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key}
+        mock_request_post.assert_called_once_with(url=self.api_host + "/v3/events",
+                                                  headers=request_headers,
+                                                  data=json.dumps(self.events, cls=OdpEventEncoder),
+                                                  timeout=14)
+
+    def test_send_odp_events_success(self):
+        with mock.patch('requests.post') as mock_request_post:
+            # no need to mock url and content because we're not returning the response
+            mock_request_post.return_value = self.fake_server_response(status_code=200)
+
+            api = OdpEventApiManager()
+            should_retry = api.send_odp_events(api_key=self.api_key,
+                                               api_host=self.api_host,
+                                               events=self.events)  # content of events doesn't matter for the test
+
+        self.assertFalse(should_retry)
+
+    def test_send_odp_events_invalid_json_no_retry(self):
+        """Using a set to trigger JSON-not-serializable error."""
+        events = {1, 2, 3}
+
+        with mock.patch('requests.post') as mock_request_post, \
+                mock.patch('optimizely.logger') as mock_logger:
+            api = OdpEventApiManager(logger=mock_logger)
+            should_retry = api.send_odp_events(api_key=self.api_key,
+                                               api_host=self.api_host,
+                                               events=events)
+
+        self.assertFalse(should_retry)
+        mock_request_post.assert_not_called()
+        mock_logger.error.assert_called_once_with(
+            'ODP event send failed (Object of type set is not JSON serializable).')
+
+    def test_send_odp_events_invalid_url_no_retry(self):
+        invalid_url = 'https://*api.zaius.com'
+
+        with mock.patch('requests.post',
+                        side_effect=request_exception.InvalidURL('Invalid URL')) as mock_request_post, \
+                mock.patch('optimizely.logger') as mock_logger:
+            api = OdpEventApiManager(logger=mock_logger)
+            should_retry = api.send_odp_events(api_key=self.api_key,
+                                               api_host=invalid_url,
+                                               events=self.events)
+
+        self.assertFalse(should_retry)
+        mock_request_post.assert_called_once()
+        mock_logger.error.assert_called_once_with('ODP event send failed (Invalid URL).')
+
+    def test_send_odp_events_network_error_retry(self):
+        with mock.patch('requests.post',
+                        side_effect=request_exception.ConnectionError('Connection error')) as mock_request_post, \
+                mock.patch('optimizely.logger') as mock_logger:
+            api = OdpEventApiManager(logger=mock_logger)
+            should_retry = api.send_odp_events(api_key=self.api_key,
+                                               api_host=self.api_host,
+                                               events=self.events)
+
+        self.assertTrue(should_retry)
+        mock_request_post.assert_called_once()
+        mock_logger.error.assert_called_once_with('ODP event send failed (network error).')
+
+    def test_send_odp_events_400_no_retry(self):
+        with mock.patch('requests.post') as mock_request_post, \
+                mock.patch('optimizely.logger') as mock_logger:
+            mock_request_post.return_value = self.fake_server_response(status_code=400,
+                                                                       url=self.api_host,
+                                                                       content=self.failure_response_data)
+
+            api = OdpEventApiManager(logger=mock_logger)
+            should_retry = api.send_odp_events(api_key=self.api_key,
+                                               api_host=self.api_host,
+                                               events=self.events)
+
+        self.assertFalse(should_retry)
+        mock_request_post.assert_called_once()
+        mock_logger.error.assert_called_once_with('ODP event send failed ({"title":"Bad Request","status":400,'
+                                                  '"timestamp":"2022-07-01T20:44:00.945Z","detail":{"invalids":'
+                                                  '[{"event":0,"message":"missing \'type\' field"}]}}).')
+
+    def test_send_odp_events_500_retry(self):
+        with mock.patch('requests.post') as mock_request_post, \
+                mock.patch('optimizely.logger') as mock_logger:
+            mock_request_post.return_value = self.fake_server_response(status_code=500, url=self.api_host)
+
+            api = OdpEventApiManager(logger=mock_logger)
+            should_retry = api.send_odp_events(api_key=self.api_key,
+                                               api_host=self.api_host,
+                                               events=self.events)
+
+        self.assertTrue(should_retry)
+        mock_request_post.assert_called_once()
+        mock_logger.error.assert_called_once_with('ODP event send failed (500 Server Error: None for url: test-host).')
+
+    # test json responses
+    success_response_data = '{"title":"Accepted","status":202,"timestamp":"2022-07-01T16:04:06.786Z"}'
+
+    failure_response_data = '{"title":"Bad Request","status":400,"timestamp":"2022-07-01T20:44:00.945Z",' \
+                            '"detail":{"invalids":[{"event":0,"message":"missing \'type\' field"}]}}'
diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py
new file mode 100644
index 00000000..d9d29eab
--- /dev/null
+++ b/tests/test_odp_event_manager.py
@@ -0,0 +1,569 @@
+# Copyright 2022, Optimizely
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
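The OdpEventApiManager tests above establish a simple retry contract: send_odp_events returns a should_retry flag that is False on 2xx responses, unserializable payloads, invalid URLs, and 4xx responses, and True on network errors and 5xx responses. A sketch of how a caller might honor that flag; the retry loop is illustrative, not the SDK's implementation:

    # Sketch (assumed caller): retry only when the failure is retryable.
    for attempt in range(retry_count + 1):
        should_retry = api.send_odp_events(api_key, api_host, events)
        if not should_retry:
            break  # delivered, or the failure was non-retryable and already logged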
+ +import time +from unittest import mock +from copy import deepcopy +import uuid + +from optimizely.odp.odp_event import OdpEvent +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_config import OdpConfig +from .base import BaseTest, CopyingMock +from optimizely.version import __version__ +from optimizely.helpers import validator +from optimizely.helpers.enums import Errors + + +class MockOdpEventManager(OdpEventManager): + def _add_to_batch(self, *args): + raise Exception("Unexpected error") + + +TEST_UUID = str(uuid.uuid4()) + + +@mock.patch('uuid.uuid4', return_value=TEST_UUID, new=mock.DEFAULT) +class OdpEventManagerTest(BaseTest): + user_key = "vuid" + user_value = "test-user-value" + api_key = "test-api-key" + api_host = "https://test-host.com" + odp_config = OdpConfig(api_key, api_host) + + events = [ + { + "type": "t1", + "action": "a1", + "identifiers": {"id-key-1": "id-value-1"}, + "data": {"key-1": "value1", "key-2": 2, "key-3": 3.0, "key-4": None, 'key-5': True} + }, + { + "type": "t2", + "action": "a2", + "identifiers": {"id-key-2": "id-value-2"}, + "data": {"key-2": "value2"} + } + ] + + processed_events = [ + { + "type": "t1", + "action": "a1", + "identifiers": {"id-key-1": "id-value-1"}, + "data": { + "idempotence_id": TEST_UUID, + "data_source_type": "sdk", + "data_source": "python-sdk", + "data_source_version": __version__, + "key-1": "value1", + "key-2": 2, + "key-3": 3.0, + "key-4": None, + "key-5": True + }, + }, + { + "type": "t2", + "action": "a2", + "identifiers": {"id-key-2": "id-value-2"}, + "data": { + "idempotence_id": TEST_UUID, + "data_source_type": "sdk", + "data_source": "python-sdk", + "data_source_version": __version__, + "key-2": "value2" + } + } + ] + + def test_odp_event_init(self, *args): + event = self.events[0] + self.assertStrictTrue(validator.are_odp_data_types_valid(event['data'])) + odp_event = OdpEvent(**event) + self.assertEqual(odp_event, self.processed_events[0]) + + def test_invalid_odp_event(self, *args): + event = deepcopy(self.events[0]) + event['data']['invalid-item'] = {} + self.assertStrictFalse(validator.are_odp_data_types_valid(event['data'])) + + def test_odp_event_identifier_conversion(self, *args): + event = OdpEvent('type', 'action', {'fs-user-id': 'great'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great'}) + + event = OdpEvent('type', 'action', {'FS-user-ID': 'great'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great'}) + + event = OdpEvent('type', 'action', {'FS_USER_ID': 'great', 'fs.user.id': 'wow'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great', 'fs.user.id': 'wow'}) + + event = OdpEvent('type', 'action', {'fs_user_id': 'great', 'fsuserid': 'wow'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great', 'fsuserid': 'wow'}) + + def test_odp_event_manager_success(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.stop() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('ODP event queue: flushing batch size 2.') + mock_logger.debug.assert_any_call('ODP event queue: received shutdown signal.') + self.assertStrictFalse(event_manager.is_running) + + def 
test_odp_event_manager_batch(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + event_manager.batch_size = 2 + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('ODP event queue: flushing on batch size.') + event_manager.stop() + + def test_odp_event_manager_multiple_batches(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + event_manager.batch_size = 2 + batch_count = 4 + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + for _ in range(batch_count): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + self.assertEqual(mock_send.call_count, batch_count) + mock_send.assert_has_calls( + [mock.call(self.api_key, self.api_host, self.processed_events)] * batch_count + ) + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: flushing on batch size.'), + mock.call('ODP event queue: flushing batch size 2.') + ] * batch_count, any_order=True) + event_manager.stop() + + def test_odp_event_manager_backlog(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.odp_config = self.odp_config + + event_manager.batch_size = 2 + batch_count = 4 + + # create events before starting processing to simulate backlog + with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): + for _ in range(batch_count - 1): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.start(self.odp_config) + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.stop() + event_manager.event_queue.join() + + self.assertEqual(mock_send.call_count, batch_count) + mock_send.assert_has_calls( + [mock.call(self.api_key, self.api_host, self.processed_events)] * batch_count + ) + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: flushing on batch size.'), + mock.call('ODP event queue: flushing batch size 2.') + ] * batch_count, any_order=True) + + def test_odp_event_manager_flush(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_called_once_with(self.api_key, self.api_host, 
self.processed_events) + mock_logger.error.assert_not_called() + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call('ODP event queue: received flush signal.') + event_manager.stop() + + def test_odp_event_manager_multiple_flushes(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + flush_count = 4 + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + for _ in range(flush_count): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + self.assertEqual(mock_send.call_count, flush_count) + for call in mock_send.call_args_list: + self.assertEqual(call, mock.call(self.api_key, self.api_host, self.processed_events)) + mock_logger.error.assert_not_called() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: received flush signal.'), + mock.call('ODP event queue: flushing batch size 2.') + ] * flush_count, any_order=True) + event_manager.stop() + + def test_odp_event_manager_retry_failure(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + number_of_tries = event_manager.retry_count + 1 + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=True + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_has_calls( + [mock.call(self.api_key, self.api_host, self.processed_events)] * number_of_tries + ) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call('Error dispatching ODP events, scheduled to retry.') + mock_logger.error.assert_called_once_with( + f'ODP event send failed (Failed after 3 retries: {self.processed_events}).' 
+ ) + event_manager.stop() + + def test_odp_event_manager_retry_success(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, side_effect=[True, True, False] + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_has_calls([mock.call(self.api_key, self.api_host, self.processed_events)] * 3) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call('Error dispatching ODP events, scheduled to retry.') + mock_logger.error.assert_not_called() + self.assertStrictTrue(event_manager.is_running) + event_manager.stop() + + def test_odp_event_manager_send_failure(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + with mock.patch.object( + event_manager.api_manager, + 'send_odp_events', + new_callable=CopyingMock, + side_effect=Exception('Unexpected error') + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_any_call(f"ODP event send failed (Error: Unexpected error {self.processed_events}).") + self.assertStrictTrue(event_manager.is_running) + event_manager.stop() + + def test_odp_event_manager_disabled(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig() + odp_config.update(None, None, None) + event_manager = OdpEventManager(mock_logger) + event_manager.start(odp_config) + + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call(Errors.ODP_NOT_INTEGRATED) + self.assertStrictTrue(event_manager.is_running) + event_manager.stop() + + def test_odp_event_manager_queue_full(self, *args): + mock_logger = mock.Mock() + + with mock.patch('optimizely.helpers.enums.OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY', 1): + event_manager = OdpEventManager(mock_logger) + + event_manager.odp_config = self.odp_config + + with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + + # warning when adding event to full queue + mock_logger.warning.assert_called_once_with('ODP event send failed (Queue is full).') + # error when trying to flush with full queue + mock_logger.error.assert_called_once_with('Error flushing ODP event queue') + + def test_odp_event_manager_thread_exception(self, *args): + mock_logger = mock.Mock() + event_manager = MockOdpEventManager(mock_logger) + event_manager.start(self.odp_config) + + event_manager.send_event(**self.events[0]) + time.sleep(.1) + event_manager.send_event(**self.events[0]) + + event_manager.thread.join() + mock_logger.error.assert_has_calls([ + mock.call('Uncaught exception processing ODP events. 
Error: Unexpected error'),
+            mock.call('ODP event send failed (Queue is down).')
+        ])
+        event_manager.stop()
+
+    def test_odp_event_manager_override_default_data(self, *args):
+        mock_logger = mock.Mock()
+        event_manager = OdpEventManager(mock_logger)
+        event_manager.start(self.odp_config)
+
+        event = deepcopy(self.events[0])
+        event['data']['data_source'] = 'my-app'
+
+        processed_event = deepcopy(self.processed_events[0])
+        processed_event['data']['data_source'] = 'my-app'
+
+        with mock.patch.object(
+            event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False
+        ) as mock_send:
+            event_manager.send_event(**event)
+            event_manager.flush()
+            event_manager.event_queue.join()
+
+        mock_send.assert_called_once_with(self.api_key, self.api_host, [processed_event])
+        event_manager.stop()
+
+    def test_odp_event_manager_flush_interval(self, *args):
+        """Verify that both events are sent together once the flush interval has elapsed."""
+        mock_logger = mock.Mock()
+        event_manager = OdpEventManager(mock_logger, flush_interval=.5)
+        event_manager.start(self.odp_config)
+
+        with mock.patch.object(
+            event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False
+        ) as mock_send:
+            event_manager.send_event(**self.events[0])
+            event_manager.send_event(**self.events[1])
+            event_manager.event_queue.join()
+            time.sleep(1)  # ensures that the flush interval time has passed
+
+        mock_logger.error.assert_not_called()
+        mock_logger.debug.assert_any_call('ODP event queue: flushing on interval.')
+        mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events)
+        event_manager.stop()
+
+    def test_odp_event_manager_flush_interval_is_zero(self, *args):
+        """Verify that each event is sent immediately if the flush interval is zero."""
+        mock_logger = mock.Mock()
+        event_manager = OdpEventManager(mock_logger, flush_interval=0)
+        event_manager.start(self.odp_config)
+
+        with mock.patch.object(
+            event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False
+        ) as mock_send:
+            event_manager.send_event(**self.events[0])
+            event_manager.send_event(**self.events[1])
+            event_manager.event_queue.join()
+
+        mock_send.assert_has_calls(
+            [mock.call(self.api_key, self.api_host, [self.processed_events[0]]),
+             mock.call(self.api_key, self.api_host, [self.processed_events[1]])]
+        )
+        mock_logger.error.assert_not_called()
+        mock_logger.debug.assert_any_call('ODP event queue: flushing batch size 1.')
+        event_manager.stop()
+
+    def test_odp_event_manager_events_before_odp_ready(self, *args):
+        mock_logger = mock.Mock()
+        odp_config = OdpConfig()
+        event_manager = OdpEventManager(mock_logger)
+        event_manager.start(odp_config)
+
+        with mock.patch.object(
+            event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False
+        ) as mock_send:
+            event_manager.send_event(**self.events[0])
+            event_manager.send_event(**self.events[1])
+
+            odp_config.update(self.api_key, self.api_host, [])
+            event_manager.update_config()
+            event_manager.event_queue.join()
+
+            event_manager.send_event(**self.events[0])
+            event_manager.send_event(**self.events[1])
+            event_manager.flush()
+
+            event_manager.event_queue.join()
+
+        mock_logger.error.assert_not_called()
+        mock_logger.debug.assert_has_calls([
+            mock.call('ODP event queue: cannot send before the datafile has loaded.'),
+            mock.call('ODP event queue: cannot send before the datafile has loaded.'),
+            mock.call('ODP event queue: received update config signal.'),
+            mock.call('ODP event queue:
adding event.'), + mock.call('ODP event queue: adding event.'), + mock.call('ODP event queue: received flush signal.'), + mock.call('ODP event queue: flushing batch size 2.') + ]) + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + event_manager.stop() + + def test_odp_event_manager_events_before_odp_disabled(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig() + event_manager = OdpEventManager(mock_logger) + event_manager.start(odp_config) + + with mock.patch.object(event_manager.api_manager, 'send_odp_events') as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + odp_config.update(None, None, []) + event_manager.update_config() + event_manager.event_queue.join() + + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + event_manager.event_queue.join() + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call('ODP event queue: received update config signal.'), + mock.call(Errors.ODP_NOT_INTEGRATED), + mock.call(Errors.ODP_NOT_INTEGRATED) + ]) + self.assertEqual(len(event_manager._current_batch), 0) + mock_send.assert_not_called() + event_manager.stop() + + def test_odp_event_manager_disabled_after_init(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig(self.api_key, self.api_host) + event_manager = OdpEventManager(mock_logger) + event_manager.start(odp_config) + event_manager.batch_size = 2 + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + odp_config.update(None, None, []) + + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + event_manager.event_queue.join() + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: flushing batch size 2.'), + mock.call(Errors.ODP_NOT_INTEGRATED), + mock.call(Errors.ODP_NOT_INTEGRATED) + ]) + self.assertEqual(len(event_manager._current_batch), 0) + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + event_manager.stop() + + def test_odp_event_manager_disabled_after_events_in_queue(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig(self.api_key, self.api_host) + + event_manager = OdpEventManager(mock_logger) + event_manager.odp_config = odp_config + event_manager.batch_size = 3 + + with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + with mock.patch.object( + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.start(odp_config) + odp_config.update(None, None, []) + event_manager.update_config() + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.send_event(**self.events[0]) + event_manager.event_queue.join() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call(Errors.ODP_NOT_INTEGRATED) + mock_logger.error.assert_not_called() + 
mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + event_manager.stop() + + def test_send_event_before_config_set(self, *args): + mock_logger = mock.Mock() + + event_manager = OdpEventManager(mock_logger) + event_manager.send_event(**self.events[0]) + mock_logger.debug.assert_called_with('ODP event queue: cannot send before config has been set.') diff --git a/tests/test_odp_manager.py b/tests/test_odp_manager.py new file mode 100644 index 00000000..ae0e4a1a --- /dev/null +++ b/tests/test_odp_manager.py @@ -0,0 +1,402 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from unittest import mock + +from optimizely import version +from optimizely.helpers.enums import Errors +from optimizely.odp.lru_cache import OptimizelySegmentsCache, LRUCache +from optimizely.odp.odp_config import OdpConfig +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_manager import OdpManager +from optimizely.odp.odp_segment_manager import OdpSegmentManager +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager +from optimizely.odp.odp_event_api_manager import OdpEventApiManager +from tests import base + + +class CustomCache: + def reset(self) -> None: + pass + + +class OdpManagerTest(base.BaseTest): + + def test_configurations_disable_odp(self): + mock_logger = mock.MagicMock() + manager = OdpManager(True, OptimizelySegmentsCache, logger=mock_logger) + + mock_logger.info.assert_called_once_with('ODP is disabled.') + manager.update_odp_config('valid', 'host', []) + self.assertIsNone(manager.odp_config.get_api_key()) + self.assertIsNone(manager.odp_config.get_api_host()) + + manager.fetch_qualified_segments('user1', []) + mock_logger.error.assert_called_once_with(Errors.ODP_NOT_ENABLED) + mock_logger.reset_mock() + + # these calls should be dropped gracefully and return None + manager.identify_user('user1') + + manager.send_event('t1', 'a1', {}, {}) + mock_logger.error.assert_called_once_with('ODP is not enabled.') + + self.assertIsNone(manager.event_manager) + self.assertIsNone(manager.segment_manager) + + def test_fetch_qualified_segments(self): + mock_logger = mock.MagicMock() + segment_manager = OdpSegmentManager(OptimizelySegmentsCache, + OdpSegmentApiManager(mock_logger), mock_logger) + + manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, logger=mock_logger) + + with mock.patch.object(segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', []) + + with mock.patch.object(segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', ['IGNORE_CACHE']) + + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + 
mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', ['IGNORE_CACHE']) + + def test_fetch_qualified_segments__disabled(self): + mock_logger = mock.MagicMock() + segment_manager = OdpSegmentManager(OptimizelySegmentsCache, + OdpSegmentApiManager(mock_logger), mock_logger) + + manager = OdpManager(True, OptimizelySegmentsCache, segment_manager, logger=mock_logger) + + with mock.patch.object(segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + mock_logger.error.assert_called_once_with(Errors.ODP_NOT_ENABLED) + mock_fetch_qualif_segments.assert_not_called() + + def test_fetch_qualified_segments__segment_mgr_is_none(self): + """ + When the segment manager is None, fetching segments + should fall back to the default segment manager. + """ + mock_logger = mock.MagicMock() + manager = OdpManager(False, LRUCache(10, 20), logger=mock_logger) + manager.update_odp_config('api_key', 'api_host', []) + + with mock.patch.object(manager.segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + + mock_logger.error.assert_not_called() + mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', []) + + def test_fetch_qualified_segments__seg_cache_and_seg_mgr_are_none(self): + """ + When both the segment cache and the segment manager are None, fetching segments + should fall back to the default managers. + """ + mock_logger = mock.MagicMock() + manager = OdpManager(False, mock_logger) + manager.update_odp_config('api_key', 'api_host', []) + + with mock.patch.object(manager.segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', []) + + def test_identify_user_datafile_not_ready(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + + with mock.patch.object(event_manager, 'identify_user') as mock_identify_user: + manager.identify_user('user1') + + mock_identify_user.assert_called_once_with('user1') + mock_logger.error.assert_not_called() + + def test_identify_user_odp_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('key1', 'host1', []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.identify_user('user1') + + mock_dispatch_event.assert_called_once_with({ + 'type': 'fullstack', + 'action': 'identified', + 'identifiers': {'fs_user_id': 'user1'}, + 'data': { + 'idempotence_id': mock.ANY, + 'data_source_type': 'sdk', + 'data_source': 'python-sdk', + 'data_source_version': version.__version__ + }}) + mock_logger.error.assert_not_called() + + def test_identify_user_odp_not_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, CustomCache(), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config(None, None, []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.identify_user('user1') + 
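+ # ODP is not integrated here (api key/host were set to None above), so the identify event + # should be dropped with a debug log rather than dispatched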
+ mock_dispatch_event.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('ODP identify event is not dispatched (ODP not integrated).') + + def test_identify_user_odp_disabled(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.enabled = False + + with mock.patch.object(event_manager, 'identify_user') as mock_identify_user: + manager.identify_user('user1') + + mock_identify_user.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP identify event is not dispatched (ODP disabled).') + + def test_send_event_datafile_not_ready(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP event queue: cannot send before config has been set.') + + def test_send_event_odp_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('key1', 'host1', []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_called_once_with({ + 'type': 't1', + 'action': 'a1', + 'identifiers': {'id-key1': 'id-val-1'}, + 'data': { + 'idempotence_id': mock.ANY, + 'data_source_type': 'sdk', + 'data_source': 'python-sdk', + 'data_source_version': version.__version__, + 'key1': 'val1' + }}) + + def test_send_event_odp_not_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, CustomCache(), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('api_key', 'api_host', []) + manager.update_odp_config(None, None, []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.error.assert_called_once_with('ODP is not integrated.') + + def test_send_event_odp_disabled(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(True, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.error.assert_called_once_with('ODP is not enabled.') + + def test_send_event_odp_disabled__event_manager_not_available(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.event_manager = False + + with 
mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.error.assert_called_once_with('ODP is not enabled.') + + def test_config_not_changed(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, CustomCache(), event_manager=event_manager, logger=mock_logger) + # finish initialization + manager.update_odp_config(None, None, []) + # update without change + manager.update_odp_config(None, None, []) + mock_logger.debug.assert_any_call('Odp config was not changed.') + mock_logger.error.assert_not_called() + + def test_update_odp_config__reset_called(self): + # build segment manager + mock_logger = mock.MagicMock() + segment_manager = OdpSegmentManager(OptimizelySegmentsCache, + OdpSegmentApiManager(mock_logger), mock_logger) + # build event manager + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, event_manager, mock_logger) + + with mock.patch.object(segment_manager, 'reset') as mock_reset: + manager.update_odp_config('key1', 'host1', []) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key1', 'host1', []) + mock_reset.assert_not_called() + + manager.update_odp_config('key2', 'host1', []) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', []) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['a']) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['a', 'b']) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['c']) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['c']) + mock_reset.assert_not_called() + + manager.update_odp_config(None, None, []) + mock_reset.assert_called_once() + mock_logger.error.assert_not_called() + + def test_update_odp_config__update_config_called(self): + """ + Test that event_manager.update_config is called only when + a change to odp_config is actually made in OdpManager. 
+ """ + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + event_manager.start(manager.odp_config) + + with mock.patch.object(event_manager, 'update_config') as mock_update: + first_api_key = manager.odp_config.get_api_key() + manager.update_odp_config('key1', 'host1', []) + second_api_key = manager.odp_config.get_api_key() + + mock_update.assert_called_once() + mock_logger.debug.assert_not_called() + self.assertEqual(first_api_key, None) + self.assertEqual(second_api_key, 'key1') + + with mock.patch.object(event_manager, 'update_config') as mock_update: + first_api_key = manager.odp_config.get_api_key() + manager.update_odp_config('key2', 'host1', []) + second_api_key = manager.odp_config.get_api_key() + + mock_update.assert_called_once() + mock_logger.debug.assert_not_called() + self.assertEqual(first_api_key, 'key1') + self.assertEqual(second_api_key, 'key2') + + with mock.patch.object(event_manager, 'update_config') as mock_update: + first_api_key = manager.odp_config.get_api_key() + manager.update_odp_config('key2', 'host1', []) + second_api_key = manager.odp_config.get_api_key() + + # event_manager.update_config not called when no change to odp_config + mock_update.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('Odp config was not changed.') + self.assertEqual(first_api_key, 'key2') + self.assertEqual(second_api_key, 'key2') + + def test_update_odp_config__odp_config_propagated_properly(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('key1', 'host1', ['a', 'b']) + + self.assertEqual(manager.segment_manager.odp_config.get_api_key(), 'key1') + self.assertEqual(manager.segment_manager.odp_config.get_api_host(), 'host1') + self.assertEqual(manager.segment_manager.odp_config.get_segments_to_check(), ['a', 'b']) + self.assertEqual(manager.event_manager.odp_config.get_api_key(), 'key1') + self.assertEqual(manager.event_manager.odp_config.get_api_host(), 'host1') + self.assertEqual(manager.event_manager.odp_config.get_segments_to_check(), ['a', 'b']) + + # odp disabled with invalid apiKey (apiKey/apiHost propagated into submanagers) + manager.update_odp_config(None, None, []) + + self.assertEqual(manager.segment_manager.odp_config.get_api_key(), None) + self.assertEqual(manager.segment_manager.odp_config.get_api_host(), None) + self.assertEqual(manager.segment_manager.odp_config.get_segments_to_check(), []) + self.assertEqual(manager.event_manager.odp_config.get_api_key(), None) + self.assertEqual(manager.event_manager.odp_config.get_api_host(), None) + self.assertEqual(manager.event_manager.odp_config.get_segments_to_check(), []) + + manager.update_odp_config(None, None, ['a', 'b']) + self.assertEqual(manager.segment_manager.odp_config.get_segments_to_check(), ['a', 'b']) + self.assertEqual(manager.event_manager.odp_config.get_segments_to_check(), ['a', 'b']) + mock_logger.error.assert_not_called() + + def test_update_odp_config__odp_config_starts_event_manager(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger) + manager = OdpManager(False, event_manager=event_manager, logger=mock_logger) + self.assertFalse(event_manager.is_running) + + manager.update_odp_config('key1', 
'host1', ['a', 'b']) + self.assertTrue(event_manager.is_running) + + mock_logger.error.assert_not_called() + manager.close() + + def test_segments_cache_default_settings(self): + manager = OdpManager(False) + segments_cache = manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 10_000) + self.assertEqual(segments_cache.timeout, 600) diff --git a/tests/test_odp_segment_api_manager.py b/tests/test_odp_segment_api_manager.py new file mode 100644 index 00000000..f45af4d2 --- /dev/null +++ b/tests/test_odp_segment_api_manager.py @@ -0,0 +1,487 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from unittest import mock + +from requests import exceptions as request_exception + +from optimizely.helpers.enums import OdpSegmentApiConfig +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager +from . import base + + +class OdpSegmentApiManagerTest(base.BaseTest): + user_key = "vuid" + user_value = "test-user-value" + api_key = "test-api-key" + api_host = "test-host" + + def test_fetch_qualified_segments__valid_request(self): + with mock.patch('requests.post') as mock_request_post: + api = OdpSegmentApiManager() + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + test_payload = { + 'query': 'query($userId: String, $audiences: [String]) {' + 'customer(vuid: $userId) ' + '{audiences(subset: $audiences) {edges {node {name state}}}}}', + 'variables': {'userId': self.user_value, 'audiences': ["a", "b", "c"]} + } + request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} + mock_request_post.assert_called_once_with(url=self.api_host + "/v3/graphql", + headers=request_headers, + data=json.dumps(test_payload), + timeout=OdpSegmentApiConfig.REQUEST_TIMEOUT) + + def test_fetch_qualified_segments__custom_timeout(self): + with mock.patch('requests.post') as mock_request_post: + api = OdpSegmentApiManager(timeout=12) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + test_payload = { + 'query': 'query($userId: String, $audiences: [String]) {' + 'customer(vuid: $userId) ' + '{audiences(subset: $audiences) {edges {node {name state}}}}}', + 'variables': {'userId': self.user_value, 'audiences': ["a", "b", "c"]} + } + request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} + mock_request_post.assert_called_once_with(url=self.api_host + "/v3/graphql", + headers=request_headers, + data=json.dumps(test_payload), + timeout=12) + + def test_fetch_qualified_segments__success(self): + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.good_response_data) + + api = OdpSegmentApiManager() + response = api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + 
user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy1', 'dummy2', 'dummy3']) + + self.assertEqual(response, ['a', 'b']) + + def test_fetch_qualified_segments__node_missing(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.node_missing_response_data) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy1', 'dummy2', 'dummy3']) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__mixed_missing_keys(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, + content=self.mixed_missing_keys_response_data) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy1', 'dummy2', 'dummy3']) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__success_with_empty_segments(self): + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.good_empty_response_data) + + api = OdpSegmentApiManager() + response = api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy']) + + self.assertEqual(response, []) + + def test_fetch_qualified_segments__invalid_identifier(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, + content=self.invalid_identifier_response_data) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.warning.assert_called_once_with('Audience segments fetch failed (invalid identifier).') + + def test_fetch_qualified_segments__other_exception(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.other_exception_response_data) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (TestExceptionClass).') + + def test_fetch_qualified_segments__bad_response(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.bad_response_data) + + api = 
OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__name_invalid(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.name_invalid_response_data) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (JSON decode error).') + + def test_fetch_qualified_segments__invalid_key(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.invalid_edges_key_response_data) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__invalid_key_in_error_body(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.invalid_key_for_error_response_data) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__network_error(self): + with mock.patch('requests.post', + side_effect=request_exception.ConnectionError('Connection error')) as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (network error).') + mock_logger.debug.assert_called_once_with('GraphQL download failed: Connection error') + + def test_fetch_qualified_segments__400(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=403, url=self.api_host) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + # make sure that fetch_segments() is called (once). 
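+ # (note: the test name says 400, but a 403 response is used here, presumably as a representative 4xx client error)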
+ # could use assert_called_once_with() but it's not needed, + # we already use assert_called_once_with() in test_fetch_qualified_segments__valid_request() + mock_request_post.assert_called_once() + # assert 403 error log + mock_logger.error.assert_called_once_with('Audience segments fetch failed ' + f'(403 Client Error: None for url: {self.api_host}).') + + def test_fetch_qualified_segments__500(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=500, url=self.api_host) + + api = OdpSegmentApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + # make sure that fetch_segments() is called (once). + mock_request_post.assert_called_once() + # assert 500 error log + mock_logger.error.assert_called_once_with('Audience segments fetch failed ' + f'(500 Server Error: None for url: {self.api_host}).') + + # test json responses + + good_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualified sample 1" + } + }, + { + "node": { + "name": "b", + "state": "qualified", + "description": "qualified sample 2" + } + }, + { + "node": { + "name": "c", + "state": "not_qualified", + "description": "not-qualified sample" + } + } + ] + } + } + } + } + """ + + good_empty_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [] + } + } + } + } + """ + + invalid_identifier_response_data = """ + { + "errors": [ + { + "message": "Exception while fetching data (/customer) :\ + java.lang.RuntimeException: could not resolve _fs_user_id = asdsdaddddd", + "locations": [ + { + "line": 2, + "column": 3 + } + ], + "path": [ + "customer" + ], + "extensions": { + "classification": "DataFetchingException", + "code": "INVALID_IDENTIFIER_EXCEPTION" + } + } + ], + "data": { + "customer": null + } + } + """ + + other_exception_response_data = """ + { + "errors": [ + { + "message": "Exception while fetching data (/customer) :\ + java.lang.RuntimeException: could not resolve _fs_user_id = asdsdaddddd", + "extensions": { + "classification": "TestExceptionClass" + } + } + ], + "data": { + "customer": null + } + } + """ + + bad_response_data = """ + { + "data": {} + } + """ + + invalid_edges_key_response_data = """ + { + "data": { + "customer": { + "audiences": { + "invalid_test_key": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualified sample 1" + } + } + ] + } + } + } + } + """ + + invalid_key_for_error_response_data = """ + { + "errors": [ + { + "message": "Exception while fetching data (/customer) :\ + java.lang.RuntimeException: could not resolve _fs_user_id = asdsdaddddd", + "locations": [ + { + "line": 2, + "column": 3 + } + ], + "path": [ + "customer" + ], + "invalid_test_key": { + "classification": "InvalidIdentifierException" + } + } + ], + "data": { + "customer": null + } + } + """ + name_invalid_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a":::invalid-part-here:::, + "state": "qualified", + "description": "qualified sample 1" + } + } + ] + } + } + } + } + """ + + node_missing_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + {} + ] + } + } + } + } + """ + + mixed_missing_keys_response_data = """ + 
{ + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "state": "qualified" + } + }, + { + "node": { + "name": "a" + } + }, + { + "other-name": { + "name": "a", + "state": "qualified" + } + } + ] + } + } + } + } + """ diff --git a/tests/test_odp_segment_manager.py b/tests/test_odp_segment_manager.py new file mode 100644 index 00000000..50794746 --- /dev/null +++ b/tests/test_odp_segment_manager.py @@ -0,0 +1,213 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from unittest import mock +from unittest.mock import call + +from requests import exceptions as request_exception + +from optimizely.odp.lru_cache import LRUCache +from optimizely.odp.odp_config import OdpConfig +from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption +from optimizely.odp.odp_segment_manager import OdpSegmentManager +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager +from tests import base + + +class OdpSegmentManagerTest(base.BaseTest): + api_host = 'host' + api_key = 'valid' + user_key = 'fs_user_id' + user_value = 'test-user-value' + + def test_empty_list_with_no_segments_to_check(self): + odp_config = OdpConfig(self.api_key, self.api_host, []) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + api = OdpSegmentApiManager(mock_logger) + segment_manager = OdpSegmentManager(segments_cache, api, mock_logger) + segment_manager.odp_config = odp_config + + with mock.patch.object(api, 'fetch_segments') as mock_fetch_segments: + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, []) + mock_logger.debug.assert_called_once_with('No segments are used in the project. 
Returning empty list.') + mock_logger.error.assert_not_called() + mock_fetch_segments.assert_not_called() + + def test_fetch_segments_success_cache_miss(self): + """ + We fetch segments for user key/value 'fs_user_id'/'test-user-value', + which differs from the entry seeded into the cache (fs_user_id-$-123/['d']), + hence we trigger a cache miss. + """ + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config + cache_key = segment_manager.make_cache_key(self.user_key, '123') + segment_manager.segments_cache.save(cache_key, ["d"]) + + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.good_response_data) + + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, ["a", "b"]) + actual_cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + self.assertEqual(segment_manager.segments_cache.lookup(actual_cache_key), ["a", "b"]) + + self.assertEqual(mock_logger.debug.call_count, 2) + mock_logger.debug.assert_has_calls([call('ODP cache miss.'), call('Making a call to ODP server.')]) + mock_logger.error.assert_not_called() + + def test_fetch_segments_success_cache_hit(self): + odp_config = OdpConfig() + odp_config.update(self.api_key, self.api_host, ['c']) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + segment_manager.segments_cache.save(cache_key, ['c']) + + with mock.patch.object(segment_manager.api_manager, 'fetch_segments') as mock_fetch_segments: + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, ['c']) + mock_logger.debug.assert_called_once_with('ODP cache hit. Returning segments from cache.') + mock_logger.error.assert_not_called() + mock_fetch_segments.assert_not_called() + + def test_fetch_segments_missing_api_host_api_key(self): + with mock.patch('optimizely.logger') as mock_logger: + segment_manager = OdpSegmentManager(LRUCache(1000, 1000), logger=mock_logger) + segment_manager.odp_config = OdpConfig() + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, None) + mock_logger.error.assert_called_once_with('Audience segments fetch failed (api_key/api_host not defined).') + + def test_fetch_segments_network_error(self): + """ + Trigger a connection error with mock side_effect. Note that the requests library does not + return a status code for a connection error, which is why we trigger the exception + instead of returning a fake server response with status code 500. + The error log should come from the GraphQL API manager, not from the ODP Segment Manager. + The active mock logger should be passed as a parameter to the OdpSegmentApiManager object. 
+ """ + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config + + with mock.patch('requests.post', + side_effect=request_exception.ConnectionError('Connection error')): + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, None) + mock_logger.error.assert_called_once_with('Audience segments fetch failed (network error).') + + def test_options_ignore_cache(self): + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + segment_manager.segments_cache.save(cache_key, ['d']) + + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.good_response_data) + + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, + [OptimizelyOdpOption.IGNORE_CACHE]) + + self.assertEqual(segments, ["a", "b"]) + self.assertEqual(segment_manager.segments_cache.lookup(cache_key), ['d']) + mock_logger.debug.assert_called_once_with('Making a call to ODP server.') + mock_logger.error.assert_not_called() + + def test_options_reset_cache(self): + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + segment_manager.segments_cache.save(cache_key, ['d']) + segment_manager.segments_cache.save('123', ['c', 'd']) + + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.good_response_data) + + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, + [OptimizelyOdpOption.RESET_CACHE]) + + self.assertEqual(segments, ["a", "b"]) + self.assertEqual(segment_manager.segments_cache.lookup(cache_key), ['a', 'b']) + self.assertTrue(len(segment_manager.segments_cache.map) == 1) + mock_logger.debug.assert_called_once_with('Making a call to ODP server.') + mock_logger.error.assert_not_called() + + def test_make_correct_cache_key(self): + segment_manager = OdpSegmentManager(None) + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + self.assertEqual(cache_key, 'fs_user_id-$-test-user-value') + + # test json response + good_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualifed sample 1" + } + }, + { + "node": { + "name": "b", + "state": "qualified", + "description": "qualifed sample 2" + } + }, + { + "node": { + "name": "c", + "state": "not_qualified", + "description": "not-qualified sample" + } + } + ] + } + } + } + } + """ diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index f1956cf1..1f4293cd 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -12,10 +12,10 @@ # limitations under the License. 
import json +import time from operator import itemgetter -import mock -import six +from unittest import mock from optimizely import config_manager from optimizely import decision_service @@ -26,10 +26,12 @@ from optimizely import logger from optimizely import optimizely from optimizely import optimizely_config +from optimizely.odp.odp_config import OdpConfigState from optimizely import project_config from optimizely import version from optimizely.event.event_factory import EventFactory from optimizely.helpers import enums +from optimizely.helpers.sdk_settings import OptimizelySdkSettings from . import base @@ -37,12 +39,12 @@ class OptimizelyTest(base.BaseTest): strTest = None try: - isinstance("test", six.string_types) # attempt to evaluate string + isinstance("test", str) # attempt to evaluate string _expected_notification_failure = 'Problem calling notify callback.' def isstr(self, s): - return isinstance(s, six.string_types) + return isinstance(s, str) strTest = isstr @@ -92,7 +94,10 @@ def test_init__invalid_datafile__logs_error(self): with mock.patch('optimizely.logger.reset_logger', return_value=mock_client_logger): opt_obj = optimizely.Optimizely('invalid_datafile') - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) self.assertIsNone(opt_obj.config_manager.get_config()) def test_init__null_datafile__logs_error(self): @@ -102,7 +107,10 @@ def test_init__null_datafile__logs_error(self): with mock.patch('optimizely.logger.reset_logger', return_value=mock_client_logger): opt_obj = optimizely.Optimizely(None) - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) self.assertIsNone(opt_obj.config_manager.get_config()) def test_init__empty_datafile__logs_error(self): @@ -112,13 +120,16 @@ def test_init__empty_datafile__logs_error(self): with mock.patch('optimizely.logger.reset_logger', return_value=mock_client_logger): opt_obj = optimizely.Optimizely("") - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) self.assertIsNone(opt_obj.config_manager.get_config()) def test_init__invalid_config_manager__logs_error(self): """ Test that invalid config_manager logs error on init. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass mock_client_logger = mock.MagicMock() @@ -131,7 +142,7 @@ class InvalidConfigManager(object): def test_init__invalid_event_dispatcher__logs_error(self): """ Test that invalid event_dispatcher logs error on init. """ - class InvalidDispatcher(object): + class InvalidDispatcher: pass mock_client_logger = mock.MagicMock() @@ -144,7 +155,7 @@ class InvalidDispatcher(object): def test_init__invalid_event_processor__logs_error(self): """ Test that invalid event_processor logs error on init. 
""" - class InvalidProcessor(object): + class InvalidProcessor: pass mock_client_logger = mock.MagicMock() @@ -157,7 +168,7 @@ class InvalidProcessor(object): def test_init__invalid_logger__logs_error(self): """ Test that invalid logger logs error on init. """ - class InvalidLogger(object): + class InvalidLogger: pass mock_client_logger = mock.MagicMock() @@ -170,7 +181,7 @@ class InvalidLogger(object): def test_init__invalid_error_handler__logs_error(self): """ Test that invalid error_handler logs error on init. """ - class InvalidErrorHandler(object): + class InvalidErrorHandler: pass mock_client_logger = mock.MagicMock() @@ -183,7 +194,7 @@ class InvalidErrorHandler(object): def test_init__invalid_notification_center__logs_error(self): """ Test that invalid notification_center logs error on init. """ - class InvalidNotificationCenter(object): + class InvalidNotificationCenter: pass mock_client_logger = mock.MagicMock() @@ -204,9 +215,10 @@ def test_init__unsupported_datafile_version__logs_error(self): ) as mock_error_handler: opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_unsupported_version)) - mock_client_logger.error.assert_called_once_with( - 'This version of the Python SDK does not support the given datafile version: "5".' - ) + mock_client_logger.error.assert_has_calls([ + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.'), + mock.call('This version of the Python SDK does not support the given datafile version: "5".') + ], any_order=True) args, kwargs = mock_error_handler.call_args self.assertIsInstance(args[0], exceptions.UnsupportedDatafileVersionException) @@ -276,7 +288,10 @@ def test_invalid_json_raises_schema_validation_off(self): ) as mock_error_handler: opt_obj = optimizely.Optimizely('invalid_json', skip_json_validation=True) - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) args, kwargs = mock_error_handler.call_args self.assertIsInstance(args[0], exceptions.InvalidInputException) self.assertEqual(args[0].args[0], 'Provided "datafile" is in an invalid format.') @@ -293,7 +308,10 @@ def test_invalid_json_raises_schema_validation_off(self): {'version': '2', 'events': 'invalid_value', 'experiments': 'invalid_value'}, skip_json_validation=True, ) - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) args, kwargs = mock_error_handler.call_args self.assertIsInstance(args[0], exceptions.InvalidInputException) self.assertEqual(args[0].args[0], 'Provided "datafile" is in an invalid format.') @@ -308,7 +326,7 @@ def test_activate(self): ) as mock_decision, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.assertEqual('variation', self.optimizely.activate('test_experiment', 'test_user')) @@ -351,9 +369,11 @@ def test_activate(self): log_event = 
EventFactory.create_log_event(mock_process.call_args[0][0], self.optimizely.logger) user_context = mock_decision.call_args[0][2] + user_profile_tracker = mock_decision.call_args[0][3] mock_decision.assert_called_once_with( - self.project_config, self.project_config.get_experiment_from_key('test_experiment'), user_context + self.project_config, self.project_config.get_experiment_from_key('test_experiment'), + user_context, user_profile_tracker ) self.assertEqual(1, mock_process.call_count) @@ -376,7 +396,7 @@ def on_activate(experiment, user_id, attributes, variation, event): self.assertTrue(isinstance(attributes, dict)) self.assertTrue(isinstance(variation, entities.Variation)) # self.assertTrue(isinstance(event, event_builder.Event)) - print("Activated experiment {0}".format(experiment.key)) + print(f"Activated experiment {experiment.key}") callbackhit[0] = True notification_id = self.optimizely.notification_center.add_notification_listener( @@ -447,7 +467,7 @@ def on_activate(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=variation, - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast: self.assertEqual('variation', self.optimizely.activate('test_experiment', 'test_user')) @@ -488,7 +508,7 @@ def on_activate(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=variation, - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast: self.assertEqual( @@ -541,7 +561,7 @@ def test_decision_listener__user_not_in_experiment(self): ) as mock_broadcast_decision: self.assertEqual(None, self.optimizely.activate('test_experiment', 'test_user')) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'ab-test', 'test_user', @@ -560,7 +580,7 @@ def on_track(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=(self.project_config.get_variation_from_id('test_experiment', '111128'), []), - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_event_tracked: self.optimizely.track('test_event', 'test_user') @@ -582,7 +602,7 @@ def on_track(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=(self.project_config.get_variation_from_id('test_experiment', '111128'), []), - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 
'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_event_tracked: self.optimizely.track('test_event', 'test_user', attributes={'test_attribute': 'test_value'}) @@ -609,7 +629,7 @@ def on_track(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=(self.project_config.get_variation_from_id('test_experiment', '111128'), []), - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_event_tracked: self.optimizely.track( @@ -681,7 +701,7 @@ def on_activate(experiment, user_id, attributes, variation, event): return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.assertTrue(opt_obj.is_feature_enabled('test_feature_in_experiment', 'test_user')) @@ -702,7 +722,7 @@ def test_activate__with_attributes__audience_match(self): ) as mock_get_variation, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.assertEqual( 'variation', self.optimizely.activate('test_experiment', 'test_user', {'test_attribute': 'test_value'}), @@ -748,11 +768,13 @@ def test_activate__with_attributes__audience_match(self): log_event = EventFactory.create_log_event(mock_process.call_args[0][0], self.optimizely.logger) user_context = mock_get_variation.call_args[0][2] + user_profile_tracker = mock_get_variation.call_args[0][3] mock_get_variation.assert_called_once_with( self.project_config, self.project_config.get_experiment_from_key('test_experiment'), - user_context + user_context, + user_profile_tracker ) self.assertEqual(1, mock_process.call_count) self._validate_event_object( @@ -773,7 +795,7 @@ def test_activate__with_attributes_of_different_types(self): ) as mock_bucket, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: attributes = { 'test_attribute': 'test_value_1', @@ -850,7 +872,7 @@ def test_activate__with_attributes__typed_audience_match(self): variation when attributes are provided and typed audience conditions are met. 
""" opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match string audience with id '3468206642' self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'house': 'Gryffindor'}), @@ -866,7 +888,7 @@ def test_activate__with_attributes__typed_audience_match(self): mock_process.reset() - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match number audience with id '3468206646' self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'lasers': 45.5}), @@ -885,7 +907,7 @@ def test_activate__with_attributes__typed_audience_with_semver_match(self): variation when attributes are provided and typed audience conditions are met. """ opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match string audience with id '18278344267' self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'android-release': '1.0.1'}), @@ -901,7 +923,7 @@ def test_activate__with_attributes__typed_audience_with_semver_match(self): mock_process.reset() - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'android-release': "1.2.2"}), ) @@ -936,7 +958,7 @@ def test_activate__with_attributes__complex_audience_match(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via substring match string audience with id '3988293898', and # exact match number audience with id '3468206646' user_attr = {'house': 'Welcome to Slytherin!', 'lasers': 45.5} @@ -979,7 +1001,7 @@ def test_activate__with_attributes__audience_match__forced_bucketing(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.assertTrue(self.optimizely.set_forced_variation('test_experiment', 'test_user', 'control')) self.assertEqual( 'control', self.optimizely.activate('test_experiment', 'test_user', {'test_attribute': 'test_value'}), @@ -1045,7 +1067,7 @@ def test_activate__with_attributes__audience_match__bucketing_id_provided(self): ) as mock_get_variation, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 
'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.assertEqual( 'variation', @@ -1102,11 +1124,12 @@ def test_activate__with_attributes__audience_match__bucketing_id_provided(self): log_event = EventFactory.create_log_event(mock_process.call_args[0][0], self.optimizely.logger) user_context = mock_get_variation.call_args[0][2] - + user_profile_tracker = mock_get_variation.call_args[0][3] mock_get_variation.assert_called_once_with( self.project_config, self.project_config.get_experiment_from_key('test_experiment'), - user_context + user_context, + user_profile_tracker ) self.assertEqual(1, mock_process.call_count) self._validate_event_object( @@ -1131,7 +1154,7 @@ def test_activate__with_attributes__no_audience_match(self): expected_experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - {'test_attribute': 'test_value'}, + mock.ANY, self.optimizely.logger, ) @@ -1207,7 +1230,7 @@ def test_activate__bucketer_returns_none(self): def test_activate__invalid_object(self): """ Test that activate logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -1234,7 +1257,7 @@ def test_track__with_attributes(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track('test_event', 'test_user', attributes={'test_attribute': 'test_value'}) expected_params = { @@ -1284,7 +1307,7 @@ def test_track__with_attributes__typed_audience_match(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via substring match string audience with id '3988293898' opt_obj.track('item_bought', 'test_user', {'house': 'Welcome to Slytherin!'}) @@ -1304,7 +1327,7 @@ def test_track__with_attributes__typed_audience_mismatch(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: opt_obj.track('item_bought', 'test_user', {'house': 'Welcome to Hufflepuff!'}) self.assertEqual(1, mock_process.call_count) @@ -1315,7 +1338,7 @@ def test_track__with_attributes__complex_audience_match(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match string audience with id '3468206642', and # exact match boolean audience with id '3468206643' user_attr = {'house': 'Gryffindor', 'should_do_it': True} @@ -1346,7 +1369,7 @@ def test_track__with_attributes__complex_audience_mismatch(self): 
opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be excluded - exact match boolean audience with id '3468206643' does not match, # so the overall conditions fail user_attr = {'house': 'Gryffindor', 'should_do_it': False} @@ -1360,7 +1383,7 @@ def test_track__with_attributes__bucketing_id_provided(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1418,7 +1441,7 @@ def test_track__with_attributes__no_audience_match(self): """ Test that track calls process even if audience conditions do not match. """ with mock.patch('time.time', return_value=42), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.optimizely.track( 'test_event', 'test_user', attributes={'test_attribute': 'wrong_test_value'}, @@ -1442,7 +1465,7 @@ def test_track__with_event_tags(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1499,7 +1522,7 @@ def test_track__with_event_tags_revenue(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1554,7 +1577,7 @@ def test_track__with_event_tags_numeric_metric(self): """ Test that track calls process with right params when only numeric metric event tags are provided. 
""" - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1585,7 +1608,7 @@ def test_track__with_event_tags__forced_bucketing(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.assertTrue(self.optimizely.set_forced_variation('test_experiment', 'test_user', 'variation')) self.optimizely.track( 'test_event', @@ -1643,7 +1666,7 @@ def test_track__with_invalid_event_tags(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1699,7 +1722,7 @@ def test_track__experiment_not_running(self): with mock.patch( 'optimizely.helpers.experiment.is_experiment_running', return_value=False ) as mock_is_experiment_running, mock.patch('time.time', return_value=42), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.optimizely.track('test_event', 'test_user') @@ -1723,7 +1746,7 @@ def test_track__whitelisted_user_overrides_audience_check(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track('test_event', 'user_1') self.assertEqual(1, mock_process.call_count) @@ -1731,7 +1754,7 @@ def test_track__whitelisted_user_overrides_audience_check(self): def test_track__invalid_object(self): """ Test that track logs error if Optimizely instance is invalid. 
""" - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -1788,7 +1811,36 @@ def test_get_variation(self): self.assertEqual(mock_broadcast.call_count, 1) - mock_broadcast.assert_called_once_with( + mock_broadcast.assert_any_call( + enums.NotificationTypes.DECISION, + 'ab-test', + 'test_user', + {}, + {'experiment_key': 'test_experiment', 'variation_key': variation}, + ) + + def test_get_variation_lookup_and_save_is_called(self): + """ Test that lookup is called, get_variation returns valid variation and then save is called""" + + with mock.patch( + 'optimizely.decision_service.DecisionService.get_variation', + return_value=(self.project_config.get_variation_from_id('test_experiment', '111129'), []), + ), mock.patch( + 'optimizely.notification_center.NotificationCenter.send_notifications' + ) as mock_broadcast, mock.patch( + 'optimizely.user_profile.UserProfileTracker.load_user_profile' + ) as mock_load_user_profile, mock.patch( + 'optimizely.user_profile.UserProfileTracker.save_user_profile' + ) as mock_save_user_profile: + variation = self.optimizely.get_variation('test_experiment', 'test_user') + self.assertEqual( + 'variation', variation, + ) + self.assertEqual(mock_load_user_profile.call_count, 1) + self.assertEqual(mock_save_user_profile.call_count, 1) + self.assertEqual(mock_broadcast.call_count, 1) + + mock_broadcast.assert_any_call( enums.NotificationTypes.DECISION, 'ab-test', 'test_user', @@ -1847,7 +1899,7 @@ def test_get_variation__returns_none(self): def test_get_variation__invalid_object(self): """ Test that get_variation logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -1985,7 +2037,7 @@ def test_is_feature_enabled__returns_true_for_feature_experiment_if_feature_enab return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.FEATURE_TEST), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2085,7 +2137,7 @@ def test_is_feature_enabled__returns_false_for_feature_experiment_if_feature_dis return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.FEATURE_TEST), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2185,7 +2237,7 @@ def test_is_feature_enabled__returns_true_for_feature_rollout_if_feature_enabled return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2235,7 +2287,7 @@ def 
test_is_feature_enabled__returns_true_for_feature_rollout_if_feature_enabled return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2337,7 +2389,7 @@ def test_is_feature_enabled__returns_false_for_feature_rollout_if_feature_disabl return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2379,7 +2431,7 @@ def test_is_feature_enabled__returns_false_when_user_is_not_bucketed_into_any_va 'optimizely.decision_service.DecisionService.get_variation_for_feature', return_value=(decision_service.Decision(None, None, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2423,7 +2475,7 @@ def test_is_feature_enabled__returns_false_when_variation_is_nil(self, ): 'optimizely.decision_service.DecisionService.get_variation_for_feature', return_value=(decision_service.Decision(None, None, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2458,7 +2510,7 @@ def test_is_feature_enabled__returns_false_when_variation_is_nil(self, ): def test_is_feature_enabled__invalid_object(self): """ Test that is_feature_enabled returns False and logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -2628,7 +2680,7 @@ def test_get_enabled_features__invalid_attributes(self): def test_get_enabled_features__invalid_object(self): """ Test that get_enabled_features returns empty list if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -2674,7 +2726,7 @@ def test_get_feature_variable_boolean(self): 'Got variable value "true" for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2712,7 +2764,7 @@ def test_get_feature_variable_double(self): 'Got variable value "10.02" for variable "cost" of feature flag "test_feature_in_experiment".' 
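# mock.ANY, which an earlier hunk substitutes for the literal attributes dict
# in the audience-evaluation logs assertion, compares equal to any value, so
# one argument can float while the rest of the call stays pinned. A minimal
# sketch with a bare MagicMock:
from unittest import mock

audience_logger = mock.MagicMock()
audience_logger('test_experiment', {'test_attribute': 'test_value'})

# passes: the experiment key is checked exactly, the attributes are not
audience_logger.assert_called_once_with('test_experiment', mock.ANY)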
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2750,7 +2802,7 @@ def test_get_feature_variable_integer(self): 'Got variable value "4243" for variable "count" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2789,7 +2841,7 @@ def test_get_feature_variable_string(self): 'Got variable value "staging" for variable "environment" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2828,7 +2880,7 @@ def test_get_feature_variable_json(self): 'Got variable value "{"test": 123}" for variable "object" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2892,7 +2944,7 @@ def test_get_all_feature_variables(self): ], any_order=True ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'all-feature-variables', 'test_user', @@ -2929,7 +2981,7 @@ def test_get_feature_variable(self): 'Got variable value "true" for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2960,7 +3012,7 @@ def test_get_feature_variable(self): 'Got variable value "10.02" for variable "cost" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2991,7 +3043,7 @@ def test_get_feature_variable(self): 'Got variable value "4243" for variable "count" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3022,7 +3074,8 @@ def test_get_feature_variable(self): 'Got variable value "staging" for variable "environment" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + # sometimes event processor flushes before this check, so can't assert called once + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3053,7 +3106,7 @@ def test_get_feature_variable(self): 'Got variable value "{"test": 123}" for variable "object" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3095,7 +3148,7 @@ def test_get_feature_variable_boolean_for_feature_in_rollout(self): 'Got variable value "true" for variable "is_running" of feature flag "test_feature_in_rollout".' 
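# As the comment added in the hunk above notes, the batch event processor can
# flush on its own schedule and broadcast an extra notification before the
# assertion runs, so assert_called_once_with is too strict here.
# assert_any_call only requires that the expected call appear somewhere in
# the recorded call list; a minimal sketch:
from unittest import mock

broadcast = mock.MagicMock()
broadcast('decision', 'feature-variable', 'test_user')  # the call under test
broadcast('decision', 'flag', 'test_user')              # stray flush-time call

broadcast.assert_any_call('decision', 'feature-variable', 'test_user')  # passes
# broadcast.assert_called_once_with('decision', 'feature-variable', 'test_user')
# would raise AssertionError, since two calls were recorded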
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3137,7 +3190,7 @@ def test_get_feature_variable_double_for_feature_in_rollout(self): 'Got variable value "39.99" for variable "price" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3179,7 +3232,7 @@ def test_get_feature_variable_integer_for_feature_in_rollout(self): 'Got variable value "399" for variable "count" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3221,7 +3274,7 @@ def test_get_feature_variable_string_for_feature_in_rollout(self): 'Got variable value "Hello audience" for variable "message" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3263,7 +3316,7 @@ def test_get_feature_variable_json_for_feature_in_rollout(self): 'Got variable value "{"field": 12}" for variable "object" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3318,7 +3371,7 @@ def test_get_all_feature_variables_for_feature_in_rollout(self): ], any_order=True ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'all-feature-variables', 'test_user', @@ -3336,7 +3389,11 @@ def test_get_all_feature_variables_for_feature_in_rollout(self): def test_get_feature_variable_for_feature_in_rollout(self): """ Test that get_feature_variable returns value as expected and broadcasts decision with proper parameters. """ - opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) + opt_obj = optimizely.Optimizely( + json.dumps(self.config_dict_with_features), + # prevent event processor from injecting notification calls + event_processor_options={'start_on_init': False} + ) mock_experiment = opt_obj.config_manager.get_config().get_experiment_from_key('211127') mock_variation = opt_obj.config_manager.get_config().get_variation_from_id('211127', '211129') user_attributes = {'test_attribute': 'test_value'} @@ -3359,7 +3416,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "true" for variable "is_running" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3392,7 +3449,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "39.99" for variable "price" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3425,7 +3482,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "399" for variable "count" of feature flag "test_feature_in_rollout".' 
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3458,7 +3515,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "Hello audience" for variable "message" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3492,7 +3549,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "{"field": 12}" for variable "object" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3625,7 +3682,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3659,7 +3716,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "cost" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3693,7 +3750,8 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "count" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + # sometimes event processor flushes before this check, so can't assert called once + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3727,7 +3785,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "environment" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3761,7 +3819,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "object" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3793,7 +3851,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3826,7 +3884,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "cost" of feature flag "test_feature_in_experiment".' 
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3859,7 +3917,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "count" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3892,7 +3950,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "environment" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -4572,7 +4630,7 @@ def test_get_feature_variable_returns__default_value__complex_audience_match(sel def test_get_optimizely_config__invalid_object(self): """ Test that get_optimizely_config logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -4609,7 +4667,10 @@ def test_get_optimizely_config_with_custom_config_manager(self): some_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) return_config = some_obj.config_manager.get_config() - class SomeConfigManager(object): + class SomeConfigManager: + def get_sdk_key(self): + return return_config.sdk_key + def get_config(self): return return_config @@ -4625,6 +4686,57 @@ def get_config(self): self.assertEqual(1, mock_opt_service.call_count) + def test_odp_updated_with_custom_polling_config(self): + logger = mock.MagicMock() + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + def delay(*args, **kwargs): + time.sleep(.5) + return mock.DEFAULT + + with mock.patch('requests.Session.get', return_value=test_response, side_effect=delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + custom_config_manager = config_manager.PollingConfigManager(sdk_key='segments-test', logger=logger) + client = optimizely.Optimizely(config_manager=custom_config_manager) + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + custom_config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() + + def test_odp_events_not_sent_with_legacy_apis(self): + logger = mock.MagicMock() + experiment_key = 'experiment-segment' + feature_key = 'flag-segment' + user_id = 'test_user' + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + client = optimizely.Optimizely(test_datafile, logger=logger) + + with mock.patch.object(client.odp_manager.event_manager, 'send_event') as send_event_mock: + client.activate(experiment_key, user_id) + client.track('event1', user_id) + client.get_variation(experiment_key, user_id) + client.get_all_feature_variables(feature_key, user_id) + client.is_feature_enabled(feature_key, user_id) + + 
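# The delay() helper used above leans on documented unittest.mock behavior:
# when side_effect is a callable that returns mock.DEFAULT, the mock falls
# through to its configured return_value. That lets these tests keep the fake
# HTTP response while making it arrive late; a condensed sketch (the URL is
# illustrative only):
from unittest import mock
import time

def delay(*args, **kwargs):
    time.sleep(0.01)     # shortened; the tests above sleep 0.5s
    return mock.DEFAULT  # fall through to return_value

session_get = mock.MagicMock(return_value='fake-datafile', side_effect=delay)
assert session_get('https://cdn.example/datafile.json') == 'fake-datafile'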
send_event_mock.assert_not_called() + + client.close() + class OptimizelyWithExceptionTest(base.BaseTest): def setUp(self): @@ -4721,7 +4833,7 @@ def test_track(self): self.optimizely.track(event_key, user_id) mock_client_logging.info.assert_has_calls( - [mock.call('Tracking event "%s" for user "%s".' % (event_key, user_id))] + [mock.call(f'Tracking event "{event_key}" for user "{user_id}".')] ) def test_activate__experiment_not_running(self): @@ -4960,7 +5072,7 @@ def test_get_variation__invalid_attributes__forced_bucketing(self): def test_set_forced_variation__invalid_object(self): """ Test that set_forced_variation logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -5008,7 +5120,7 @@ def test_set_forced_variation__invalid_user_id(self): def test_get_forced_variation__invalid_object(self): """ Test that get_forced_variation logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -5055,18 +5167,393 @@ def test_get_forced_variation__invalid_user_id(self): mock_client_logging.error.assert_called_once_with('Provided "user_id" is in an invalid format.') def test_user_context_invalid_user_id(self): - """ - Tests user context. - """ + """Tests user context.""" user_ids = [5, 5.5, None, True, [], {}] for u in user_ids: uc = self.optimizely.create_user_context(u) self.assertIsNone(uc, "invalid user id should return none") - def test_invalid_flag_key(self): - """ - Tests invalid flag key in function get_flag_variation_by_key(). 
- """ - # TODO mock function get_flag_variation_by_key - pass + def test_send_identify_event__when_called_with_odp_enabled(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client, '_identify_user') as identify: + client.create_user_context('user-id') + + identify.assert_called_once_with('user-id') + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_zero_for_flush_interval(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_flush_interval=0) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + flush_interval = client.odp_manager.event_manager.flush_interval + + self.assertEqual(flush_interval, 0) + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__should_use_default_when_odp_flush_interval_none(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_flush_interval=None) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + flush_interval = client.odp_manager.event_manager.flush_interval + self.assertEqual(flush_interval, enums.OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_info_when_disabled(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_disabled=True) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + + self.assertIsNone(client.odp_manager.event_manager) + self.assertIsNone(client.odp_manager.segment_manager) + mock_logger.info.assert_called_once_with('ODP is disabled.') + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_cache_size(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_size=5) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 5) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_cache_timeout(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_timeout_in_secs=5) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.timeout, 5) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_cache_size_and_cache_timeout(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_size=10, segments_cache_timeout_in_secs=5) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 10) + self.assertEqual(segments_cache.timeout, 5) + + mock_logger.error.assert_not_called() + client.close() + + def 
test_sdk_settings__use_default_cache_size_and_timeout_when_odp_flush_interval_none(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings() + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.timeout, enums.OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS) + self.assertEqual(segments_cache.capacity, enums.OdpSegmentsCacheConfig.DEFAULT_CAPACITY) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_zero_cache_size_timeout_and_cache_size(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_size=0, segments_cache_timeout_in_secs=0) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 0) + self.assertEqual(segments_cache.timeout, 0) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_valid_custom_cache(self): + class CustomCache: + def reset(self): + pass + + def lookup(self): + pass + + def save(self): + pass + + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segments_cache=CustomCache()) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertIsInstance(segments_cache, CustomCache) + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_error_when_custom_cache_is_invalid(self): + class InvalidCache: + pass + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segments_cache=InvalidCache()) + with mock.patch('optimizely.logger.reset_logger', return_value=mock_logger): + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + settings=sdk_settings + ) + mock_logger.exception.assert_called_once_with('Provided "segments_cache" is in an invalid format.') + + def test_sdk_settings__accept_custom_segment_manager(self): + class CustomSegmentManager: + def reset(self): + pass + + def fetch_qualified_segments(self): + pass + + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segment_manager=CustomSegmentManager()) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segment_manager = client.odp_manager.segment_manager + self.assertIsInstance(segment_manager, CustomSegmentManager) + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_error_when_custom_segment_manager_is_invalid(self): + class InvalidSegmentManager: + pass + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segment_manager=InvalidSegmentManager()) + with mock.patch('optimizely.logger.reset_logger', return_value=mock_logger): + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + settings=sdk_settings + ) + mock_logger.exception.assert_called_once_with('Provided "segment_manager" is in an invalid format.') + + def test_sdk_settings__accept_valid_custom_event_manager(self): + class CustomEventManager: + is_running = True + + def send_event(self): + pass + + def update_config(self): + pass 
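# These sdk_settings tests validate custom components by duck typing alone: a
# segments cache passes as long as it exposes reset/lookup/save, whatever the
# backing store. A dict-backed sketch (the key/value signatures are an
# assumption here; the stub CustomCache above declares the methods without
# arguments):
class DictSegmentsCache:
    def __init__(self):
        self._store = {}

    def reset(self):
        self._store.clear()

    def lookup(self, key):
        return self._store.get(key)

    def save(self, key, value):
        self._store[key] = value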
+ + def stop(self): + pass + + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_manager=CustomEventManager()) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + event_manager = client.odp_manager.event_manager + self.assertIsInstance(event_manager, CustomEventManager) + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_error_when_custom_event_manager_is_invalid(self): + class InvalidEventManager: + pass + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_manager=InvalidEventManager()) + with mock.patch('optimizely.logger.reset_logger', return_value=mock_logger): + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + settings=sdk_settings + ) + mock_logger.exception.assert_called_once_with('Provided "event_manager" is in an invalid format.') + + def test_sdk_settings__log_error_when_sdk_settings_isnt_correct(self): + mock_logger = mock.Mock() + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings={} + ) + mock_logger.debug.assert_any_call('Provided sdk_settings is not an OptimizelySdkSettings instance.') + + def test_send_odp_event__send_event_with_static_config_manager(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + ) + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) + client.close() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP event queue: flushing batch size 1.') + + def test_send_odp_event__send_event_with_polling_config_manager(self): + mock_logger = mock.Mock() + with mock.patch( + 'requests.Session.get', + return_value=self.fake_server_response( + status_code=200, + content=json.dumps(self.config_dict_with_audience_segments) + ) + ), mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client = optimizely.Optimizely(sdk_key='test', logger=mock_logger) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP event queue: flushing batch size 1.') + + def test_send_odp_event__log_error_when_odp_disabled(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=OptimizelySdkSettings(odp_disabled=True) + ) + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) + client.close() + mock_logger.error.assert_called_with('ODP is not enabled.') + + def test_send_odp_event__log_debug_if_datafile_not_ready(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(sdk_key='test', logger=mock_logger) + client.config_manager.set_blocking_timeout(0) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) + + mock_logger.error.assert_called_with( + 'Invalid config. Optimizely instance is not valid. Failing "send_odp_event".' 
+ ) + client.close() + + def test_send_odp_event__log_error_if_odp_not_enabled_with_polling_config_manager(self): + mock_logger = mock.Mock() + with mock.patch( + 'requests.Session.get', + return_value=self.fake_server_response( + status_code=200, + content=json.dumps(self.config_dict_with_audience_segments) + ) + ), mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client = optimizely.Optimizely( + sdk_key='test', + logger=mock_logger, + settings=OptimizelySdkSettings(odp_disabled=True) + ) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_logger.error.assert_called_with('ODP is not enabled.') + + def test_send_odp_event__log_error_with_invalid_data(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={'test': {}}) + client.close() + + mock_logger.error.assert_called_with('ODP data is not valid.') + + def test_send_odp_event__log_error_with_empty_identifiers(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.close() + + mock_logger.error.assert_called_with('ODP events must have at least one key-value pair in identifiers.') + + def test_send_odp_event__log_error_with_no_identifiers(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action='great', identifiers=None, data={}) + client.close() + + mock_logger.error.assert_called_with('ODP events must have at least one key-value pair in identifiers.') + + def test_send_odp_event__log_error_with_missing_integrations_data(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences), logger=mock_logger) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) + + mock_logger.error.assert_called_with('ODP is not integrated.') + client.close() + + def test_send_odp_event__log_error_with_action_none(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action=None, identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_logger.error.assert_called_once_with('ODP action is not valid (cannot be empty).') + + def test_send_odp_event__log_error_with_action_empty_string(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action="", identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_logger.error.assert_called_once_with('ODP action is not valid (cannot be empty).') + + def test_send_odp_event__default_type_when_none(self): + mock_logger = mock.Mock() + + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client.odp_manager, 'send_event') as mock_send_event: + client.send_odp_event(type=None, action="great", identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + 
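# Read together, the error-path tests above and the default-type assertions
# that follow pin down the argument rules send_odp_event enforces. A condensed
# reading of those tests, not the SDK's actual implementation (data-value type
# checking, e.g. rejecting the nested dict in the invalid-data test, is
# omitted):
def validate_odp_event(type, action, identifiers, data):
    if action is None or action == '':
        raise ValueError('ODP action is not valid (cannot be empty).')
    if not identifiers:
        raise ValueError('ODP events must have at least one key-value pair in identifiers.')
    return (type or 'fullstack'), action, identifiers, data  # type defaults to 'fullstack'

assert validate_odp_event(None, 'great', {'amazing': 'fantastic'}, {})[0] == 'fullstack'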
mock_send_event.assert_called_with('fullstack', 'great', {'amazing': 'fantastic'}, {}) + mock_logger.error.assert_not_called() + + def test_send_odp_event__default_type_when_empty_string(self): + mock_logger = mock.Mock() + + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client.odp_manager, 'send_event') as mock_send_event: + client.send_odp_event(type="", action="great", identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_send_event.assert_called_with('fullstack', 'great', {'amazing': 'fantastic'}, {}) + mock_logger.error.assert_not_called() diff --git a/tests/test_optimizely_config.py b/tests/test_optimizely_config.py index c37a8434..b6b60adf 100644 --- a/tests/test_optimizely_config.py +++ b/tests/test_optimizely_config.py @@ -4,7 +4,6 @@ # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 - # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -12,9 +11,11 @@ # limitations under the License. import json +from unittest.mock import patch from optimizely import optimizely, project_config from optimizely import optimizely_config +from optimizely import logger from . import base @@ -23,10 +24,11 @@ def setUp(self): base.BaseTest.setUp(self) opt_instance = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) self.project_config = opt_instance.config_manager.get_config() - self.opt_config_service = optimizely_config.OptimizelyConfigService(self.project_config) + self.opt_config_service = optimizely_config.OptimizelyConfigService(self.project_config, + logger=logger.SimpleLogger()) self.expected_config = { - 'sdk_key': '', + 'sdk_key': 'features-test', 'environment_key': '', 'attributes': [{'key': 'test_attribute', 'id': '111094'}], 'events': [{'key': 'test_event', 'experiment_ids': ['111127'], 'id': '111095'}], @@ -1452,7 +1454,7 @@ def test__get_config(self): def test__get_config__invalid_project_config(self): """ Test that get_config returns None when invalid project config supplied. """ - opt_service = optimizely_config.OptimizelyConfigService({"key": "invalid"}) + opt_service = optimizely_config.OptimizelyConfigService({"key": "invalid"}, None) self.assertIsNone(opt_service.get_config()) def test__get_experiments_maps(self): @@ -1473,6 +1475,81 @@ def test__get_experiments_maps(self): self.assertEqual(expected_id_map, self.to_dict(actual_id_map)) + def test__duplicate_experiment_keys(self): + """ Test that multiple features don't have the same experiment key. 
""" + + # update the test datafile with an additional feature flag with the same experiment rule key + new_experiment = { + 'key': 'test_experiment', # added duplicate "test_experiment" + 'status': 'Running', + 'layerId': '8', + "audienceConditions": [ + "or", + "11160" + ], + 'audienceIds': ['11160'], + 'id': '111137', + 'forcedVariations': {}, + 'trafficAllocation': [ + {'entityId': '222242', 'endOfRange': 8000}, + {'entityId': '', 'endOfRange': 10000} + ], + 'variations': [ + { + 'id': '222242', + 'key': 'control', + 'variables': [], + } + ], + } + + new_feature = { + 'id': '91117', + 'key': 'new_feature', + 'experimentIds': ['111137'], + 'rolloutId': '', + 'variables': [ + {'id': '127', 'key': 'is_working', 'defaultValue': 'true', 'type': 'boolean'}, + {'id': '128', 'key': 'environment', 'defaultValue': 'devel', 'type': 'string'}, + {'id': '129', 'key': 'cost', 'defaultValue': '10.99', 'type': 'double'}, + {'id': '130', 'key': 'count', 'defaultValue': '999', 'type': 'integer'}, + {'id': '131', 'key': 'variable_without_usage', 'defaultValue': '45', 'type': 'integer'}, + {'id': '132', 'key': 'object', 'defaultValue': '{"test": 12}', 'type': 'string', + 'subType': 'json'}, + {'id': '133', 'key': 'true_object', 'defaultValue': '{"true_test": 23.54}', 'type': 'json'}, + ], + } + + # add new experiment rule with the same key and a new feature with the same rule key + self.config_dict_with_features['experiments'].append(new_experiment) + self.config_dict_with_features['featureFlags'].append(new_feature) + + config_with_duplicate_key = self.config_dict_with_features + opt_instance = optimizely.Optimizely(json.dumps(config_with_duplicate_key)) + self.project_config = opt_instance.config_manager.get_config() + + with patch('optimizely.logger.SimpleLogger.warning') as mock_logger: + self.opt_config_service = optimizely_config.OptimizelyConfigService(self.project_config, + logger=logger.SimpleLogger()) + + actual_key_map, actual_id_map = self.opt_config_service._get_experiments_maps() + + self.assertIsInstance(actual_key_map, dict) + for exp in actual_key_map.values(): + self.assertIsInstance(exp, optimizely_config.OptimizelyExperiment) + + # Assert that the warning method of the mock logger was called with the expected message + expected_warning_message = f"Duplicate experiment keys found in datafile: {new_experiment['key']}" + mock_logger.assert_called_with(expected_warning_message) + + # assert we get ID of the duplicated experiment + assert actual_key_map.get('test_experiment').id == "111137" + + # assert we get one duplicated experiment + keys_list = list(actual_key_map.keys()) + assert "test_experiment" in keys_list, "Key 'test_experiment' not found in actual key map" + assert keys_list.count("test_experiment") == 1, "Key 'test_experiment' found more than once in actual key map" + def test__get_features_map(self): """ Test that get_features_map returns expected features map. """ @@ -1525,6 +1602,18 @@ def test__get_datafile(self): self.assertEqual(expected_datafile, actual_datafile) + def test__get_datafile_from_bytes(self): + """ Test that get_datafile returns the expected datafile when provided as bytes. 
""" + + expected_datafile = json.dumps(self.config_dict_with_features) + bytes_datafile = bytes(expected_datafile, 'utf-8') + + opt_instance = optimizely.Optimizely(bytes_datafile) + opt_config = opt_instance.config_manager.optimizely_config + actual_datafile = opt_config.get_datafile() + + self.assertEqual(expected_datafile, actual_datafile) + def test__get_sdk_key(self): """ Test that get_sdk_key returns the expected value. """ @@ -1662,7 +1751,7 @@ def test_get_audiences(self): error_handler=None ) - config_service = optimizely_config.OptimizelyConfigService(proj_conf) + config_service = optimizely_config.OptimizelyConfigService(proj_conf, logger=logger.SimpleLogger()) for audience in config_service.audiences: self.assertIsInstance(audience, optimizely_config.OptimizelyAudience) @@ -1730,7 +1819,7 @@ def test_stringify_audience_conditions_all_cases(self): '("us" OR ("female" AND "adult")) AND ("fr" AND ("male" OR "adult"))' ] - config_service = optimizely_config.OptimizelyConfigService(config) + config_service = optimizely_config.OptimizelyConfigService(config, None) for i in range(len(audiences_input)): result = config_service.stringify_conditions(audiences_input[i], audiences_map) @@ -1748,7 +1837,7 @@ def test_optimizely_audience_conversion(self): error_handler=None ) - config_service = optimizely_config.OptimizelyConfigService(proj_conf) + config_service = optimizely_config.OptimizelyConfigService(proj_conf, None) for audience in config_service.audiences: self.assertIsInstance(audience, optimizely_config.OptimizelyAudience) @@ -1764,7 +1853,7 @@ def test_get_variations_from_experiments_map(self): error_handler=None ) - config_service = optimizely_config.OptimizelyConfigService(proj_conf) + config_service = optimizely_config.OptimizelyConfigService(proj_conf, None) experiments_key_map, experiments_id_map = config_service._get_experiments_maps() diff --git a/tests/test_optimizely_factory.py b/tests/test_optimizely_factory.py index 5db45680..989d960c 100644 --- a/tests/test_optimizely_factory.py +++ b/tests/test_optimizely_factory.py @@ -11,20 +11,29 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +import json +import time +from unittest import mock from optimizely.config_manager import PollingConfigManager +from optimizely.odp.odp_config import OdpConfigState from optimizely.error_handler import NoOpErrorHandler from optimizely.event_dispatcher import EventDispatcher from optimizely.notification_center import NotificationCenter from optimizely.optimizely_factory import OptimizelyFactory from optimizely.user_profile import UserProfileService + from . 
import base -@mock.patch('requests.get') +@mock.patch('requests.Session.get') class OptimizelyFactoryTest(base.BaseTest): + def delay(*args, **kwargs): + time.sleep(.5) + return mock.DEFAULT + def setUp(self): + super().setUp() self.datafile = '{ revision: "42" }' self.error_handler = NoOpErrorHandler() self.mock_client_logger = mock.MagicMock() @@ -160,3 +169,100 @@ def test_set_batch_size_and_set_flush_interval___should_set_values_valid_or_inva optimizely_instance = OptimizelyFactory.custom_instance('sdk_key') self.assertEqual(optimizely_instance.event_processor.flush_interval.seconds, 30) self.assertEqual(optimizely_instance.event_processor.batch_size, 10) + + def test_update_odp_config_correctly(self, _): + with mock.patch('requests.Session.get') as mock_request_post: + mock_request_post.return_value = self.fake_server_response( + status_code=200, + content=json.dumps(self.config_dict_with_audience_segments) + ) + client = OptimizelyFactory.custom_instance('instance-test') + + # wait for config to be ready + client.config_manager.get_config() + + odp_config = client.odp_manager.odp_config + odp_settings = self.config_dict_with_audience_segments['integrations'][0] + self.assertEqual(odp_config.get_api_key(), odp_settings['publicKey']) + self.assertEqual(odp_config.get_api_host(), odp_settings['host']) + + client.close() + + def test_update_odp_config_correctly_with_custom_config_manager_and_delay(self, _): + logger = mock.MagicMock() + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + with mock.patch('requests.Session.get', return_value=test_response, side_effect=self.delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + config_manager = PollingConfigManager(sdk_key='test', logger=logger) + client = OptimizelyFactory.default_instance_with_config_manager(config_manager=config_manager) + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + client.config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() + + def test_update_odp_config_correctly_with_delay(self, _): + logger = mock.MagicMock() + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + with mock.patch('requests.Session.get', return_value=test_response, side_effect=self.delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + client = OptimizelyFactory.default_instance(sdk_key='test') + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + client.config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() + + def test_odp_updated_with_custom_instance(self, _): + logger = mock.MagicMock() + + test_datafile = 
json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + with mock.patch('requests.Session.get', return_value=test_response, side_effect=self.delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + client = OptimizelyFactory.custom_instance(sdk_key='test') + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + client.config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() diff --git a/tests/test_user_context.py b/tests/test_user_context.py index dc52c648..6705e414 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock import threading from optimizely import optimizely, decision_service @@ -27,6 +27,37 @@ class UserContextTest(base.BaseTest): def setUp(self): base.BaseTest.setUp(self, 'config_dict_with_multiple_experiments') + self.good_response_data = { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualifed sample 1" + } + }, + { + "node": { + "name": "b", + "state": "qualified", + "description": "qualifed sample 2" + } + }, + { + "node": { + "name": "c", + "state": "not_qualified", + "description": "not-qualified sample" + } + } + ] + } + } + } + } def compare_opt_decisions(self, expected, actual): self.assertEqual(expected.variation_key, actual.variation_key) @@ -197,9 +228,17 @@ def test_decide__feature_test(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -244,6 +283,8 @@ def test_decide__feature_test(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id }, ) @@ -272,9 +313,17 @@ def test_decide__feature_test__send_flag_decision_false(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + 
decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -344,6 +393,24 @@ def test_decide_feature_rollout(self): self.compare_opt_decisions(expected, actual) + # assert event count + self.assertEqual(1, mock_send_event.call_count) + + # assert event payload + expected_experiment = project_config.get_experiment_from_key(expected.rule_key) + expected_var = project_config.get_variation_from_key(expected.rule_key, expected.variation_key) + mock_send_event.assert_called_with( + project_config, + expected_experiment, + expected_var, + expected.flag_key, + expected.rule_key, + 'rollout', + expected.enabled, + 'test_user', + user_attributes + ) + # assert notification count self.assertEqual(1, mock_broadcast_decision.call_count) @@ -361,27 +428,11 @@ def test_decide_feature_rollout(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id }, ) - # assert event count - self.assertEqual(1, mock_send_event.call_count) - - # assert event payload - expected_experiment = project_config.get_experiment_from_key(expected.rule_key) - expected_var = project_config.get_variation_from_key(expected.rule_key, expected.variation_key) - mock_send_event.assert_called_with( - project_config, - expected_experiment, - expected_var, - expected.flag_key, - expected.rule_key, - 'rollout', - expected.enabled, - 'test_user', - user_attributes - ) - def test_decide_feature_rollout__send_flag_decision_false(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) project_config = opt_obj.config_manager.get_config() @@ -420,6 +471,8 @@ def test_decide_feature_rollout__send_flag_decision_false(self): self.assertEqual(1, mock_broadcast_decision.call_count) # assert notification + expected_experiment = project_config.get_experiment_from_key(expected.rule_key) + expected_var = project_config.get_variation_from_key(expected.rule_key, expected.variation_key) mock_broadcast_decision.assert_called_with( enums.NotificationTypes.DECISION, 'flag', @@ -433,6 +486,8 @@ def test_decide_feature_rollout__send_flag_decision_false(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id }, ) @@ -447,9 +502,17 @@ def test_decide_feature_null_variation(self): mock_variation = None with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.ROLLOUT), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.ROLLOUT + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -494,7 +557,9 @@ def test_decide_feature_null_variation(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, - }, + 'experiment_id': None, + 'variation_id': None + } ) # assert event count @@ -522,9 +587,17 @@ def test_decide_feature_null_variation__send_flag_decision_false(self): mock_variation = None with 
mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.ROLLOUT), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.ROLLOUT + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -569,6 +642,8 @@ def test_decide_feature_null_variation__send_flag_decision_false(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': None, + 'variation_id': None }, ) @@ -583,9 +658,17 @@ def test_decide__option__disable_decision_event(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -630,6 +713,8 @@ def test_decide__option__disable_decision_event(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id, }, ) @@ -647,9 +732,17 @@ def test_decide__default_option__disable_decision_event(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -694,6 +787,8 @@ def test_decide__default_option__disable_decision_event(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id }, ) @@ -708,9 +803,17 @@ def test_decide__option__exclude_variables(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -747,6 +850,8 @@ def test_decide__option__exclude_variables(self): 'reasons': 
expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id, }, ) @@ -804,9 +909,17 @@ def test_decide__option__enabled_flags_only(self): expected_var = project_config.get_variation_from_key('211127', '211229') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(expected_experiment, expected_var, - enums.DecisionSources.ROLLOUT), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + expected_experiment, + expected_var, + enums.DecisionSources.ROLLOUT + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -853,6 +966,8 @@ def test_decide__option__enabled_flags_only(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id, }, ) @@ -883,9 +998,17 @@ def test_decide__default_options__with__options(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -903,7 +1026,7 @@ def test_decide__default_options__with__options(self): enabled=True, variables=expected_variables, flag_key='test_feature_in_experiment', - user_context=user_context + user_context=user_context, ) self.compare_opt_decisions(expected, actual) @@ -922,6 +1045,8 @@ def test_decide__default_options__with__options(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id }, ) @@ -937,14 +1062,17 @@ def test_decide_for_keys(self): mocked_decision_2 = OptimizelyDecision(flag_key='test_feature_in_rollout', enabled=False) def side_effect(*args, **kwargs): - flag = args[1] - if flag == 'test_feature_in_experiment': - return mocked_decision_1 - else: - return mocked_decision_2 + flags = args[1] + res = {} + for flag in flags: + if flag == 'test_feature_in_experiment': + res[flag] = mocked_decision_1 + else: + res[flag] = mocked_decision_2 + return res with mock.patch( - 'optimizely.optimizely.Optimizely._decide', side_effect=side_effect + 'optimizely.optimizely.Optimizely._decide_for_keys', side_effect=side_effect ) as mock_decide, mock.patch( 'optimizely.optimizely_user_context.OptimizelyUserContext._clone', return_value=user_context @@ -953,18 +1081,10 @@ def side_effect(*args, **kwargs): flags = ['test_feature_in_rollout', 'test_feature_in_experiment'] options = [] decisions = user_context.decide_for_keys(flags, options) - self.assertEqual(2, len(decisions)) - mock_decide.assert_any_call( user_context, - 'test_feature_in_experiment', - options - ) - - mock_decide.assert_any_call( - user_context, - 'test_feature_in_rollout', + ['test_feature_in_rollout', 
'test_feature_in_experiment'], options ) @@ -980,14 +1100,17 @@ def test_decide_for_keys__option__enabled_flags_only(self): mocked_decision_2 = OptimizelyDecision(flag_key='test_feature_in_rollout', enabled=False) def side_effect(*args, **kwargs): - flag = args[1] - if flag == 'test_feature_in_experiment': - return mocked_decision_1 - else: - return mocked_decision_2 + flags = args[1] + res = {} + for flag in flags: + if flag == 'test_feature_in_experiment': + res[flag] = mocked_decision_1 + else: + res[flag] = mocked_decision_2 + return res with mock.patch( - 'optimizely.optimizely.Optimizely._decide', side_effect=side_effect + 'optimizely.optimizely.Optimizely._decide_for_keys', side_effect=side_effect ) as mock_decide, mock.patch( 'optimizely.optimizely_user_context.OptimizelyUserContext._clone', return_value=user_context @@ -997,20 +1120,13 @@ def side_effect(*args, **kwargs): options = ['ENABLED_FLAGS_ONLY'] decisions = user_context.decide_for_keys(flags, options) - self.assertEqual(1, len(decisions)) - - mock_decide.assert_any_call( - user_context, - 'test_feature_in_experiment', - options - ) + self.assertEqual(2, len(decisions)) mock_decide.assert_any_call( user_context, - 'test_feature_in_rollout', + ['test_feature_in_rollout', 'test_feature_in_experiment'], options ) - self.assertEqual(mocked_decision_1, decisions['test_feature_in_experiment']) def test_decide_for_keys__default_options__with__options(self): @@ -1022,20 +1138,29 @@ def test_decide_for_keys__default_options__with__options(self): user_context = opt_obj.create_user_context('test_user') with mock.patch( - 'optimizely.optimizely.Optimizely._decide' - ) as mock_decide, mock.patch( + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list' + ) as mock_get_variations, mock.patch( 'optimizely.optimizely_user_context.OptimizelyUserContext._clone', return_value=user_context ): flags = ['test_feature_in_experiment'] options = ['EXCLUDE_VARIABLES'] + + mock_decision = mock.MagicMock() + mock_decision.experiment = mock.MagicMock(key='test_experiment') + mock_decision.variation = mock.MagicMock(key='variation') + mock_decision.source = enums.DecisionSources.FEATURE_TEST + + mock_get_variations.return_value = [(mock_decision, [])] + user_context.decide_for_keys(flags, options) - mock_decide.assert_called_with( - user_context, - 'test_feature_in_experiment', - ['EXCLUDE_VARIABLES'] + mock_get_variations.assert_called_with( + mock.ANY, # ProjectConfig + mock.ANY, # FeatureFlag list + user_context, # UserContext object + ['EXCLUDE_VARIABLES', 'ENABLED_FLAGS_ONLY'] ) def test_decide_for_all(self): @@ -1292,9 +1417,17 @@ def test_decide_experiment(self): mock_experiment = project_config.get_experiment_from_key('test_experiment') mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, - mock_variation, enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ), + ] ): user_context = opt_obj.create_user_context('test_user') decision = user_context.decide('test_feature_in_experiment', [DecideOption.DISABLE_DECISION_EVENT]) @@ -1379,6 +1512,9 @@ def test_should_return_valid_decision_after_setting_and_removing_forced_decision 'User "test_user" is 
in variation "control" of experiment test_experiment.'] ) + expected_experiment = project_config.get_experiment_from_key(expected.rule_key) + expected_var = project_config.get_variation_from_key('test_experiment', expected.variation_key) + # assert notification count self.assertEqual(1, mock_broadcast_decision.call_count) @@ -1396,12 +1532,11 @@ def test_should_return_valid_decision_after_setting_and_removing_forced_decision 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id }, ) - expected_experiment = project_config.get_experiment_from_key(expected.rule_key) - expected_var = project_config.get_variation_from_key('test_experiment', expected.variation_key) - mock_send_event.assert_called_with( project_config, expected_experiment, @@ -1600,6 +1735,8 @@ def test_should_return_valid_decision_after_setting_invalid_experiment_rule_vari self.assertEqual(decide_decision.user_context.get_user_attributes(), {}) expected_reasons = [ + 'Invalid variation is mapped to flag (test_feature_in_experiment), rule (test_experiment) ' + 'and user (test_user) in the forced decision map.', 'Invalid variation is mapped to flag (test_feature_in_experiment), rule (test_experiment) ' 'and user (test_user) in the forced decision map.', 'Evaluating audiences for experiment "test_experiment": [].', @@ -1784,12 +1921,14 @@ def test_forced_decision_return_status(self): status = user_context.remove_all_forced_decisions() self.assertTrue(status) - def test_forced_decision_clone_return_valid_forced_decision(self): + def test_user_context__clone_return_valid(self): """ - Should return valid forced decision on cloning. + Should return valid objects. """ opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) user_context = opt_obj.create_user_context("test_user", {}) + qualified_segments = ['seg1', 'seg2'] + user_context.set_qualified_segments(qualified_segments) context_with_flag = OptimizelyUserContext.OptimizelyDecisionContext('f1', None) decision_for_flag = OptimizelyUserContext.OptimizelyForcedDecision('v1') @@ -1806,6 +1945,11 @@ def test_forced_decision_clone_return_valid_forced_decision(self): self.assertEqual(user_context_2.user_id, 'test_user') self.assertEqual(user_context_2.get_user_attributes(), {}) self.assertIsNotNone(user_context_2.forced_decisions_map) + self.assertIsNot(user_context.forced_decisions_map, user_context_2.forced_decisions_map) + + self.assertTrue(user_context_2.get_qualified_segments()) + self.assertEqual(user_context_2.get_qualified_segments(), qualified_segments) + self.assertIsNot(user_context.get_qualified_segments(), user_context_2.get_qualified_segments()) self.assertEqual(user_context_2.get_forced_decision(context_with_flag).variation_key, 'v1') self.assertEqual(user_context_2.get_forced_decision(context_with_rule).variation_key, 'v2') @@ -1859,6 +2003,28 @@ def clone_loop(user_context): for x in range(100): user_context._clone() + # custom call counter because the mock call_count is not thread safe + class MockCounter: + def __init__(self): + self.lock = threading.Lock() + self.call_count = 0 + + def increment(self, *args): + with self.lock: + self.call_count += 1 + + set_forced_decision_counter = MockCounter() + get_forced_decision_counter = MockCounter() + remove_forced_decision_counter = MockCounter() + remove_all_forced_decisions_counter = MockCounter() + clone_counter = MockCounter() + + set_forced_decision_mock.side_effect = 
set_forced_decision_counter.increment + get_forced_decision_mock.side_effect = get_forced_decision_counter.increment + remove_forced_decision_mock.side_effect = remove_forced_decision_counter.increment + remove_all_forced_decisions_mock.side_effect = remove_all_forced_decisions_counter.increment + clone_mock.side_effect = clone_counter.increment + set_thread_1 = threading.Thread(target=set_forced_decision_loop, args=(user_context, context_1, decision_1)) set_thread_2 = threading.Thread(target=set_forced_decision_loop, args=(user_context, context_2, decision_2)) set_thread_3 = threading.Thread(target=get_forced_decision_loop, args=(user_context, context_1)) @@ -1888,8 +2054,319 @@ def clone_loop(user_context): set_thread_7.join() set_thread_8.join() - self.assertEqual(200, set_forced_decision_mock.call_count) - self.assertEqual(200, get_forced_decision_mock.call_count) - self.assertEqual(200, remove_forced_decision_mock.call_count) - self.assertEqual(100, remove_all_forced_decisions_mock.call_count) - self.assertEqual(100, clone_mock.call_count) + self.assertEqual(200, set_forced_decision_counter.call_count) + self.assertEqual(200, get_forced_decision_counter.call_count) + self.assertEqual(200, remove_forced_decision_counter.call_count) + self.assertEqual(100, remove_all_forced_decisions_counter.call_count) + self.assertEqual(100, clone_counter.call_count) + + def test_decide_with_qualified_segments__segment_hit_in_ab_test(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.set_qualified_segments(["odp-segment-1", "odp-segment-none"]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "variation-a") + + def test_decide_with_qualified_segments__other_audience_hit_in_ab_test(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id', {"age": 30}) + user.set_qualified_segments(["odp-segment-none"]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "variation-a") + + def test_decide_with_qualified_segments__segment_hit_in_rollout(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.set_qualified_segments(["odp-segment-2"]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-on") + + def test_decide_with_qualified_segments__segment_miss_in_rollout(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.qualified_segments = ["odp-segment-none"] + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-off") + + def test_decide_with_qualified_segments__empty_segments(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.set_qualified_segments([]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-off") + + def test_decide_with_qualified_segments__default(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = 
client.create_user_context('user-id') + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-off") + + def test_none_client_should_not_fail(self): + uc = OptimizelyUserContext(None, None, 'test-user', None) + self.assertIsInstance(uc, OptimizelyUserContext) + + def test_send_identify_event_when_user_context_created(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client, '_identify_user') as identify: + OptimizelyUserContext(client, mock_logger, 'user-id') + + identify.assert_called_once_with('user-id') + mock_logger.error.assert_not_called() + client.close() + + def test_identify_is_skipped_with_decisions(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_features), logger=mock_logger) + with mock.patch.object(client, '_identify_user') as identify: + user_context = OptimizelyUserContext(client, mock_logger, 'user-id') + + identify.assert_called_once_with('user-id') + mock_logger.error.assert_not_called() + + with mock.patch.object(client, '_identify_user') as identify: + user_context.decide('test_feature_in_rollout') + user_context.decide_all() + user_context.decide_for_keys(['test_feature_in_rollout']) + + identify.assert_not_called() + mock_logger.error.assert_not_called() + client.close() + + # fetch qualified segments + def test_fetch_segments(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() + + def test_return_empty_array_when_not_qualified_for_any_segments(self): + for edge in self.good_response_data['data']['customer']['audiences']['edges']: + edge['node']['state'] = 'unqualified' + + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), []) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_and_reset_cache(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segments_cache = client.odp_manager.segment_manager.segments_cache + segments_cache.save('wow', 'great') + self.assertEqual(segments_cache.lookup('wow'), 'great') + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments(options=['RESET_CACHE']) + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + self.assertIsNone(segments_cache.lookup('wow')) 
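+ # RESET_CACHE wipes the entire segments cache before fetching, which is why the unrelated 'wow' entry saved above is gone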
+ mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_from_cache(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segment_manager = client.odp_manager.segment_manager + cache_key = segment_manager.make_cache_key(enums.OdpManagerConfig.KEY_FOR_USER_ID, 'user-id') + segments_cache = segment_manager.segments_cache + segments_cache.save(cache_key, ['great']) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['great']) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_and_ignore_cache(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segment_manager = client.odp_manager.segment_manager + cache_key = segment_manager.make_cache_key(enums.OdpManagerConfig.KEY_FOR_USER_ID, 'user-id') + segments_cache = segment_manager.segments_cache + segments_cache.save(cache_key, ['great']) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments(options=['IGNORE_CACHE']) + + self.assertTrue(success) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() + + def test_return_false_on_error(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=500)): + success = user.fetch_qualified_segments() + + self.assertFalse(success) + self.assertIsNone(user.get_qualified_segments()) + mock_logger.error.assert_called_once_with( + 'Audience segments fetch failed (500 Server Error: None for url: None).' 
+ ) + client.close() + + def test_no_error_when_client_is_none(self): + mock_logger = mock.Mock() + user = OptimizelyUserContext(None, mock_logger, 'user-id') + success = user.fetch_qualified_segments() + + self.assertFalse(success) + self.assertIsNone(user.get_qualified_segments()) + mock_logger.error.assert_not_called() + + def test_fetch_segments_when_non_blocking(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=True) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_with_callback(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + result = [] + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=lambda x: result.append(x)) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + self.assertTrue(result.pop()) + mock_logger.error.assert_not_called() + client.close() + + def test_pass_false_to_callback_when_failed_and_non_blocking(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + result = [] + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=500)): + thread = user.fetch_qualified_segments(callback=lambda x: result.append(x)) + thread.join() + + self.assertIsNone(user.get_qualified_segments()) + self.assertFalse(result.pop()) + mock_logger.error.assert_called_once_with( + 'Audience segments fetch failed (500 Server Error: None for url: None).' 
+ ) + client.close() + + def test_fetch_segments_from_cache_with_non_blocking(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segment_manager = client.odp_manager.segment_manager + cache_key = segment_manager.make_cache_key(enums.OdpManagerConfig.KEY_FOR_USER_ID, 'user-id') + segments_cache = segment_manager.segments_cache + segments_cache.save(cache_key, ['great']) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=True) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['great']) + mock_logger.error.assert_not_called() + client.close() + + def test_decide_correctly_with_non_blocking(self): + self.good_response_data['data']['customer']['audiences']['edges'][0]['node']['name'] = 'odp-segment-2' + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + results = [] + + def callback(success): + results.append(success) + decision = user.decide('flag-segment') + results.append(decision.variation_key) + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=callback) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['odp-segment-2', 'b']) + self.assertEqual(results.pop(), 'rollout-variation-on') + self.assertStrictTrue(results.pop()) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_error(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user"id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() diff --git a/tests/test_user_profile.py b/tests/test_user_profile.py index ffeb3e34..84aacd05 100644 --- a/tests/test_user_profile.py +++ b/tests/test_user_profile.py @@ -14,6 +14,7 @@ import unittest from optimizely import user_profile +from unittest import mock class UserProfileTest(unittest.TestCase): @@ -63,3 +64,76 @@ def test_save(self): user_profile_service = user_profile.UserProfileService() self.assertIsNone(user_profile_service.save({'user_id': 'test_user', 'experiment_bucket_map': {}})) + + +class UserProfileTrackerTest(unittest.TestCase): + def test_load_user_profile_failure(self): + """Test that load_user_profile handles exceptions gracefully.""" + mock_user_profile_service = mock.MagicMock() + mock_logger = mock.MagicMock() + + user_profile_tracker = user_profile.UserProfileTracker( + user_id="test_user", + user_profile_service=mock_user_profile_service, + logger=mock_logger + ) + mock_user_profile_service.lookup.side_effect = Exception("Lookup failure") + + user_profile_tracker.load_user_profile() + + # Verify that the logger recorded the exception + 
mock_logger.exception.assert_called_once_with( + 'Unable to retrieve user profile for user "test_user" as lookup failed.' + ) + + # Verify that the user profile is reset to an empty profile + self.assertEqual(user_profile_tracker.user_profile.user_id, "test_user") + self.assertEqual(user_profile_tracker.user_profile.experiment_bucket_map, {}) + + def test_load_user_profile__user_profile_invalid(self): + """Test that load_user_profile handles an invalid user profile format.""" + mock_user_profile_service = mock.MagicMock() + mock_logger = mock.MagicMock() + + user_profile_tracker = user_profile.UserProfileTracker( + user_id="test_user", + user_profile_service=mock_user_profile_service, + logger=mock_logger + ) + + mock_user_profile_service.lookup.return_value = {"invalid_key": "value"} + + reasons = [] + user_profile_tracker.load_user_profile(reasons=reasons) + + # Verify the missing-keys warning was recorded in the reasons list + missing_keys_message = "User profile is missing keys: user_id, experiment_bucket_map" + self.assertIn(missing_keys_message, reasons) + + # The invalid profile is discarded without an info-level log; the tracker falls back to an empty profile + mock_logger.info.assert_not_called() + self.assertEqual(user_profile_tracker.user_profile.user_id, "test_user") + self.assertEqual(user_profile_tracker.user_profile.experiment_bucket_map, {}) + + def test_save_user_profile_failure(self): + """Test that save_user_profile handles exceptions gracefully.""" + mock_user_profile_service = mock.MagicMock() + mock_logger = mock.MagicMock() + + user_profile_tracker = user_profile.UserProfileTracker( + user_id="test_user", + user_profile_service=mock_user_profile_service, + logger=mock_logger + ) + + user_profile_tracker.profile_updated = True + mock_user_profile_service.save.side_effect = Exception("Save failure") + + user_profile_tracker.save_user_profile() + + mock_logger.warning.assert_called_once_with( + 'Failed to save user profile of user "test_user" for exception:Save failure".' 
+ ) diff --git a/tests/testapp/Dockerfile b/tests/testapp/Dockerfile index 3a146d7b..1042c462 100644 --- a/tests/testapp/Dockerfile +++ b/tests/testapp/Dockerfile @@ -1,4 +1,4 @@ -FROM python:2.7.10 +FROM python:3.11 LABEL maintainer="developers@optimizely.com" diff --git a/tests/testapp/application.py b/tests/testapp/application.py index 7b2a81ee..5848cfd1 100644 --- a/tests/testapp/application.py +++ b/tests/testapp/application.py @@ -16,16 +16,21 @@ import types from os import environ -from flask import Flask -from flask import request - import user_profile_service -from optimizely import logger -from optimizely import optimizely +from flask import Flask, request +from flask_wtf.csrf import CSRFProtect + +from optimizely import logger, optimizely from optimizely.helpers import enums +# Create the flask app app = Flask(__name__) +# Set up CSRF protection +app.config["SECRET_KEY"] = environ.get("CSRF_SECRET_KEY", "default_csrf_secret_key") +csrf = CSRFProtect(app) + +# Read in the datafile datafile = open('datafile.json', 'r') datafile_content = datafile.read() datafile.close() @@ -118,7 +123,7 @@ def before_request(): @app.after_request def after_request(response): - global optimizely_instance + global optimizely_instance # noqa: F824 global listener_return_maps optimizely_instance.notification_center.clear_all_notifications() diff --git a/tests/testapp/requirements.txt b/tests/testapp/requirements.txt index 46a48dd9..dae26c1f 100644 --- a/tests/testapp/requirements.txt +++ b/tests/testapp/requirements.txt @@ -1 +1,2 @@ -Flask==1.1.2 +Flask==3.1.0 +flask-wtf==1.2.2 \ No newline at end of file diff --git a/tests/testapp/user_profile_service.py b/tests/testapp/user_profile_service.py index 144697e5..381993dc 100644 --- a/tests/testapp/user_profile_service.py +++ b/tests/testapp/user_profile_service.py @@ -12,7 +12,7 @@ # limitations under the License. -class BaseUserProfileService(object): +class BaseUserProfileService: def __init__(self, user_profiles): self.user_profiles = {profile['user_id']: profile for profile in user_profiles} if user_profiles else {}