From b6c2c63589c3bf7c52d863cab7f7858f919eff3d Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Wed, 12 Jan 2022 16:48:58 -0800 Subject: [PATCH 01/68] Update README.md (#375) --- README.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/README.md b/README.md index b2cae17b..70dd0771 100644 --- a/README.md +++ b/README.md @@ -32,11 +32,6 @@ To install: pip install optimizely-sdk -Note: -If you are running the SDK with PyPy or PyPy3 and you are experiencing issues, install this cryptography package **first** and then optimizely-sdk package: - - pip install "cryptography>=1.3.4,<=3.1.1" - ### Feature Management Access To access the Feature Management configuration in the Optimizely From 53171be715f1887c3176c9e2d43626748a2999ea Mon Sep 17 00:00:00 2001 From: Muhammad Noman Date: Wed, 27 Apr 2022 04:00:43 +0500 Subject: [PATCH 02/68] chore: removed travis yml and added git action support (#380) - git actions integrated. --- .github/workflows/integration_test.yml | 55 ++++++++++++++++ .github/workflows/python.yml | 80 +++++++++++++++++++++++ .github/workflows/source_clear_cron.yml | 16 +++++ .travis.yml | 84 ------------------------- 4 files changed, 151 insertions(+), 84 deletions(-) create mode 100644 .github/workflows/integration_test.yml create mode 100644 .github/workflows/python.yml create mode 100644 .github/workflows/source_clear_cron.yml delete mode 100644 .travis.yml diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml new file mode 100644 index 00000000..c0bc8908 --- /dev/null +++ b/.github/workflows/integration_test.yml @@ -0,0 +1,55 @@ +name: Reusable action of running integration of production suite + +on: + workflow_call: + inputs: + FULLSTACK_TEST_REPO: + required: false + type: string + secrets: + CI_USER_TOKEN: + required: true + TRAVIS_COM_TOKEN: + required: true +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + # You should create a personal access token and store 
it in your repository + token: ${{ secrets.CI_USER_TOKEN }} + repository: 'optimizely/travisci-tools' + path: 'home/runner/travisci-tools' + ref: 'master' + - name: set SDK Branch if PR + if: ${{ github.event_name == 'pull_request' }} + run: | + echo "SDK_BRANCH=${{ github.head_ref }}" >> $GITHUB_ENV + - name: set SDK Branch if not pull request + if: ${{ github.event_name != 'pull_request' }} + run: | + echo "SDK_BRANCH=${{ github.ref_name }}" >> $GITHUB_ENV + echo "TRAVIS_BRANCH=${{ github.ref_name }}" >> $GITHUB_ENV + - name: Trigger build + env: + SDK: python + FULLSTACK_TEST_REPO: ${{ inputs.FULLSTACK_TEST_REPO }} + BUILD_NUMBER: ${{ github.run_id }} + TESTAPP_BRANCH: master + GITHUB_TOKEN: ${{ secrets.CI_USER_TOKEN }} + TRAVIS_EVENT_TYPE: ${{ github.event_name }} + GITHUB_CONTEXT: ${{ toJson(github) }} + TRAVIS_REPO_SLUG: ${{ github.repository }} + TRAVIS_PULL_REQUEST_SLUG: ${{ github.repository }} + UPSTREAM_REPO: ${{ github.repository }} + TRAVIS_COMMIT: ${{ github.sha }} + TRAVIS_PULL_REQUEST_SHA: ${{ github.event.pull_request.head.sha }} + TRAVIS_PULL_REQUEST: ${{ github.event.pull_request.number }} + UPSTREAM_SHA: ${{ github.sha }} + TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} + EVENT_MESSAGE: ${{ github.event.message }} + HOME: 'home/runner' + run: | + echo "$GITHUB_CONTEXT" + home/runner/travisci-tools/trigger-script-with-status-update.sh diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml new file mode 100644 index 00000000..574472de --- /dev/null +++ b/.github/workflows/python.yml @@ -0,0 +1,80 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Python package + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + lint_markdown_files: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: 
Set up Ruby + uses: ruby/setup-ruby@v1 + with: + ruby-version: '2.6' + bundler-cache: true # runs 'bundle install' and caches installed gems automatically + - name: Install gem + run: | + gem install awesome_bot + - name: Run tests + run: find . -type f -name '*.md' -exec awesome_bot {} \; + + linting: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.9 + uses: actions/setup-python@v3 + with: + python-version: 3.9 + # flake8 version should be same as the version in requirements/test.txt + # to avoid lint errors on CI + - name: pip install flak8 + run: pip install flake8>=4.1.0 + - name: Lint with flake8 + run: | + flake8 + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics + + integration_tests: + uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master + secrets: + CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} + TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} + + fullstack_production_suite: + uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master + with: + FULLSTACK_TEST_REPO: ProdTesting + secrets: + CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} + TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} + + test: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["pypy-3.7-v7.3.5", "3.7", "3.8", "3.9", "3.10.0"] + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements/core.txt;pip install -r requirements/test.txt + - name: Test with pytest + run: | + pytest --cov=optimizely diff --git 
a/.github/workflows/source_clear_cron.yml b/.github/workflows/source_clear_cron.yml new file mode 100644 index 00000000..862b4a3f --- /dev/null +++ b/.github/workflows/source_clear_cron.yml @@ -0,0 +1,16 @@ +name: Source clear + +on: + schedule: + # Runs "weekly" + - cron: '0 0 * * 0' + +jobs: + source_clear: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Source clear scan + env: + SRCCLR_API_TOKEN: ${{ secrets.SRCCLR_API_TOKEN }} + run: curl -sSL https://download.sourceclear.com/ci.sh | bash -s – scan diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index dc008188..00000000 --- a/.travis.yml +++ /dev/null @@ -1,84 +0,0 @@ -dist: focal -language: python -python: - - "pypy3.7-7.3.5" - - "3.7" - - "3.8" - - "3.9" - - "3.10.0" -before_install: "python -m pip install --upgrade pip" -install: "pip install -r requirements/core.txt;pip install -r requirements/test.txt" -script: "pytest --cov=optimizely" -after_success: - - coveralls - -# Linting and Integration tests need to run first to reset the PR build status to pending. -stages: - - 'Source Clear' - - 'Lint markdown files' - - 'Linting' - - 'Integration tests' - - 'Full stack production tests' - - 'Test' - -jobs: - include: - - stage: 'Lint markdown files' - os: linux - language: generic - install: gem install awesome_bot - script: - - find . 
-type f -name '*.md' -exec awesome_bot {} \; - notifications: - email: false - - - stage: 'Linting' - language: python - python: "3.9" - # flake8 version should be same as the version in requirements/test.txt - # to avoid lint errors on CI - install: "pip install flake8>=4.1.0" - script: "flake8" - after_success: travis_terminate 0 - - - &integrationtest - stage: 'Integration tests' - merge_mode: replace - env: SDK=python SDK_BRANCH=$TRAVIS_PULL_REQUEST_BRANCH - cache: false - language: minimal - install: skip - before_script: - - mkdir $HOME/travisci-tools && pushd $HOME/travisci-tools && git init && git pull https://$CI_USER_TOKEN@github.com/optimizely/travisci-tools.git && popd - script: - - $HOME/travisci-tools/trigger-script-with-status-update.sh - after_success: travis_terminate 0 - - - <<: *integrationtest - stage: 'Full stack production tests' - env: - SDK=python - SDK_BRANCH=$TRAVIS_PULL_REQUEST_BRANCH - FULLSTACK_TEST_REPO=ProdTesting - - stage: 'Test' - python: "pypy3.7-7.3.5" -# before_install: -# - pip install "cryptography>=1.3.4" - - stage: 'Test' - python: "3.7" - - stage: 'Test' - python: "3.8" - - stage: 'Test' - python: "3.9" - - stage: 'Test' - python: "3.10.0" - - - stage: 'Source Clear' - if: type = cron - addons: - srcclr: true - before_install: skip - install: skip - before_script: skip - script: skip - after_success: skip From 1545fb8d5c1f7733be2db28a7e22c6974595c07a Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Thu, 19 May 2022 11:49:18 -0700 Subject: [PATCH 03/68] fx formatting of the error log for flag key (#381) --- optimizely/optimizely.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 10464a72..98fd9d89 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1009,7 +1009,7 @@ def _decide(self, user_context, key, decide_options=None): feature_flag = config.get_feature_from_key(key) if feature_flag is None: - self.logger.error("No 
feature flag was found for key '#{key}'.") + self.logger.error(f"No feature flag was found for key '{key}'.") reasons.append(OptimizelyDecisionMessage.FLAG_KEY_INVALID.format(key)) return OptimizelyDecision(flag_key=key, user_context=user_context, reasons=reasons) From eee3aa0b7d6398d69948332a126f158259278d51 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Wed, 1 Jun 2022 15:39:07 -0400 Subject: [PATCH 04/68] fix: tests that utilize threading from failing with pypy (#383) * fix pypy error with thread safe call counter * fix polling thread running after test completion * fix thread interfering with fetch_datafile --- tests/test_config_manager.py | 171 ++++++++++++++++++++++++++++++----- tests/test_user_context.py | 34 +++++-- 2 files changed, 174 insertions(+), 31 deletions(-) diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py index 272e2f92..13f22019 100644 --- a/tests/test_config_manager.py +++ b/tests/test_config_manager.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021, Optimizely +# Copyright 2019-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -218,6 +218,38 @@ def test_get_config_blocks(self): self.assertEqual(1, round(end_time - start_time)) +class MockPollingConfigManager(config_manager.PollingConfigManager): + ''' Wrapper class to allow manual call of fetch_datafile in the polling thread by + overriding the _run method.''' + def __init__(self, *args, **kwargs): + self.run = False + self.stop = False + super().__init__(*args, **kwargs) + + def _run(self): + '''Parent thread can use self.run to start fetch_datafile in polling thread and wait for it to complete.''' + while self.is_running and not self.stop: + if self.run: + self.fetch_datafile() + self.run = False + + +class MockAuthDatafilePollingConfigManager(config_manager.AuthDatafilePollingConfigManager): + ''' Wrapper class to allow manual call of fetch_datafile in the polling thread by + overriding the _run method.''' + def __init__(self, *args, **kwargs): + self.run = False + self.stop = False + super().__init__(*args, **kwargs) + + def _run(self): + '''Parent thread can use self.run to start fetch_datafile and wait for it to complete.''' + while self.is_running and not self.stop: + if self.run: + self.fetch_datafile() + self.run = False + + @mock.patch('requests.get') class PollingConfigManagerTest(base.BaseTest): def test_init__no_sdk_key_no_url__fails(self, _): @@ -294,9 +326,13 @@ def test_get_datafile_url__sdk_key_and_url_and_template_provided(self, _): def test_set_update_interval(self, _): """ Test set_update_interval with different inputs. 
""" - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): + + # prevent polling thread from starting in PollingConfigManager.__init__ + # otherwise it can outlive this test and get out of sync with pytest + with mock.patch('threading.Thread.start') as mock_thread: project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + mock_thread.assert_called_once() # Assert that if invalid update_interval is set, then exception is raised. with self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, 'Invalid update_interval "invalid interval" provided.', @@ -321,9 +357,13 @@ def test_set_update_interval(self, _): def test_set_blocking_timeout(self, _): """ Test set_blocking_timeout with different inputs. """ - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): + + # prevent polling thread from starting in PollingConfigManager.__init__ + # otherwise it can outlive this test and get out of sync with pytest + with mock.patch('threading.Thread.start') as mock_thread: project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + mock_thread.assert_called_once() # Assert that if invalid blocking_timeout is set, then exception is raised. with self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, 'Invalid blocking timeout "invalid timeout" provided.', @@ -352,9 +392,13 @@ def test_set_blocking_timeout(self, _): def test_set_last_modified(self, _): """ Test that set_last_modified sets last_modified field based on header. 
""" - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): + + # prevent polling thread from starting in PollingConfigManager.__init__ + # otherwise it can outlive this test and get out of sync with pytest + with mock.patch('threading.Thread.start') as mock_thread: project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + mock_thread.assert_called_once() last_modified_time = 'Test Last Modified Time' test_response_headers = { 'Last-Modified': last_modified_time, @@ -366,8 +410,11 @@ def test_set_last_modified(self, _): def test_fetch_datafile(self, _): """ Test that fetch_datafile sets config and last_modified based on response. """ sdk_key = 'some_key' - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key) + + # use wrapper class to control start and stop of fetch_datafile + # this prevents the polling thread from outliving the test + # and getting out of sync with pytest + project_config_manager = MockPollingConfigManager(sdk_key=sdk_key) expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -375,15 +422,28 @@ def test_fetch_datafile(self, _): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response): - project_config_manager.fetch_datafile() + with mock.patch('requests.get', return_value=test_response) as mock_request: + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while project_config_manager.run: + pass + mock_request.assert_called_once_with( + expected_datafile_url, + headers={}, + timeout=enums.ConfigManager.REQUEST_TIMEOUT + ) 
self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Call fetch_datafile again and assert that request to URL is with If-Modified-Since header. with mock.patch('requests.get', return_value=test_response) as mock_requests: - project_config_manager.fetch_datafile() + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while project_config_manager.run: + pass mock_requests.assert_called_once_with( expected_datafile_url, @@ -394,6 +454,9 @@ def test_fetch_datafile(self, _): self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) self.assertTrue(project_config_manager.is_running) + # Shut down the polling thread + project_config_manager.stop = True + def test_fetch_datafile__status_exception_raised(self, _): """ Test that config_manager keeps running if status code exception is raised when fetching datafile. 
""" class MockExceptionResponse(object): @@ -402,8 +465,6 @@ def raise_for_status(self): sdk_key = 'some_key' mock_logger = mock.Mock() - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -411,15 +472,33 @@ def raise_for_status(self): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response): - project_config_manager.fetch_datafile() + # use wrapper class to control start and stop of fetch_datafile + # this prevents the polling thread from outliving the test + # and getting out of sync with pytest + project_config_manager = MockPollingConfigManager(sdk_key=sdk_key, logger=mock_logger) + with mock.patch('requests.get', return_value=test_response) as mock_request: + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while project_config_manager.run: + pass + + mock_request.assert_called_once_with( + expected_datafile_url, + headers={}, + timeout=enums.ConfigManager.REQUEST_TIMEOUT + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Call fetch_datafile again, but raise exception this time with mock.patch('requests.get', return_value=MockExceptionResponse()) as mock_requests: - project_config_manager.fetch_datafile() + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while project_config_manager.run: + pass mock_requests.assert_called_once_with( 
expected_datafile_url, @@ -434,12 +513,18 @@ def raise_for_status(self): # Confirm that config manager keeps running self.assertTrue(project_config_manager.is_running) + # Shut down the polling thread + project_config_manager.stop = True + def test_fetch_datafile__request_exception_raised(self, _): """ Test that config_manager keeps running if a request exception is raised when fetching datafile. """ sdk_key = 'some_key' mock_logger = mock.Mock() - with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) + + # use wrapper class to control start and stop of fetch_datafile + # this prevents the polling thread from outliving the test + # and getting out of sync with pytest + project_config_manager = MockPollingConfigManager(sdk_key=sdk_key, logger=mock_logger) expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -447,9 +532,18 @@ def test_fetch_datafile__request_exception_raised(self, _): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response): - project_config_manager.fetch_datafile() + with mock.patch('requests.get', return_value=test_response) as mock_request: + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while project_config_manager.run: + pass + mock_request.assert_called_once_with( + expected_datafile_url, + headers={}, + timeout=enums.ConfigManager.REQUEST_TIMEOUT + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) @@ -458,7 +552,11 @@ def 
test_fetch_datafile__request_exception_raised(self, _): 'requests.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: - project_config_manager.fetch_datafile() + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while project_config_manager.run: + pass mock_requests.assert_called_once_with( expected_datafile_url, @@ -473,12 +571,18 @@ def test_fetch_datafile__request_exception_raised(self, _): # Confirm that config manager keeps running self.assertTrue(project_config_manager.is_running) + # Shut down the polling thread + project_config_manager.stop = True + def test_is_running(self, _): """ Test that polling thread is running after instance of PollingConfigManager is created. """ with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') self.assertTrue(project_config_manager.is_running) + # Prevent the polling thread from running fetch_datafile if it hasn't already + project_config_manager._polling_thread._is_stopped = True + @mock.patch('requests.get') class AuthDatafilePollingConfigManagerTest(base.BaseTest): @@ -495,10 +599,14 @@ def test_set_datafile_access_token(self, _): """ Test that datafile_access_token is properly set as instance variable. 
""" datafile_access_token = 'some_token' sdk_key = 'some_key' - with mock.patch('optimizely.config_manager.AuthDatafilePollingConfigManager.fetch_datafile'): + + # prevent polling thread from starting in PollingConfigManager.__init__ + # otherwise it can outlive this test and get out of sync with pytest + with mock.patch('threading.Thread.start') as mock_thread: project_config_manager = config_manager.AuthDatafilePollingConfigManager( datafile_access_token=datafile_access_token, sdk_key=sdk_key) + mock_thread.assert_called_once() self.assertEqual(datafile_access_token, project_config_manager.datafile_access_token) def test_fetch_datafile(self, _): @@ -538,9 +646,11 @@ def test_fetch_datafile__request_exception_raised(self, _): sdk_key = 'some_key' mock_logger = mock.Mock() - with mock.patch('optimizely.config_manager.AuthDatafilePollingConfigManager.fetch_datafile'): - project_config_manager = config_manager.AuthDatafilePollingConfigManager( - datafile_access_token=datafile_access_token, sdk_key=sdk_key, logger=mock_logger) + # use wrapper class to control start and stop of fetch_datafile + # this prevents the polling thread from outliving the test + # and getting out of sync with pytest + project_config_manager = MockAuthDatafilePollingConfigManager(datafile_access_token=datafile_access_token, + sdk_key=sdk_key, logger=mock_logger) expected_datafile_url = enums.ConfigManager.AUTHENTICATED_DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -552,7 +662,11 @@ def test_fetch_datafile__request_exception_raised(self, _): # Call fetch_datafile and assert that request was sent with correct authorization header with mock.patch('requests.get', return_value=test_response) as mock_request: - project_config_manager.fetch_datafile() + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while 
project_config_manager.run: + pass mock_request.assert_called_once_with( expected_datafile_url, @@ -568,7 +682,11 @@ def test_fetch_datafile__request_exception_raised(self, _): 'requests.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: - project_config_manager.fetch_datafile() + # manually trigger fetch_datafile in the polling thread + project_config_manager.run = True + # Wait for polling thread to finish + while project_config_manager.run: + pass mock_requests.assert_called_once_with( expected_datafile_url, @@ -586,3 +704,6 @@ def test_fetch_datafile__request_exception_raised(self, _): self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Confirm that config manager keeps running self.assertTrue(project_config_manager.is_running) + + # Shut down the polling thread + project_config_manager.stop = True diff --git a/tests/test_user_context.py b/tests/test_user_context.py index dc52c648..382ac999 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -1859,6 +1859,28 @@ def clone_loop(user_context): for x in range(100): user_context._clone() + # custom call counter because the mock call_count is not thread safe + class MockCounter: + def __init__(self): + self.lock = threading.Lock() + self.call_count = 0 + + def increment(self, *args): + with self.lock: + self.call_count += 1 + + set_forced_decision_counter = MockCounter() + get_forced_decision_counter = MockCounter() + remove_forced_decision_counter = MockCounter() + remove_all_forced_decisions_counter = MockCounter() + clone_counter = MockCounter() + + set_forced_decision_mock.side_effect = set_forced_decision_counter.increment + get_forced_decision_mock.side_effect = get_forced_decision_counter.increment + remove_forced_decision_mock.side_effect = remove_forced_decision_counter.increment + remove_all_forced_decisions_mock.side_effect = remove_all_forced_decisions_counter.increment + clone_mock.side_effect = clone_counter.increment + set_thread_1 = threading.Thread(target=set_forced_decision_loop, args=(user_context, context_1, decision_1)) set_thread_2 = threading.Thread(target=set_forced_decision_loop, args=(user_context, context_2, decision_2)) set_thread_3 = threading.Thread(target=get_forced_decision_loop, args=(user_context, context_1)) @@ -1888,8 +1910,8 @@ def clone_loop(user_context): set_thread_7.join() set_thread_8.join() - self.assertEqual(200, set_forced_decision_mock.call_count) - self.assertEqual(200, get_forced_decision_mock.call_count) - self.assertEqual(200, remove_forced_decision_mock.call_count) - self.assertEqual(100, remove_all_forced_decisions_mock.call_count) - self.assertEqual(100, clone_mock.call_count) + self.assertEqual(200, set_forced_decision_counter.call_count) + self.assertEqual(200, get_forced_decision_counter.call_count) + self.assertEqual(200, remove_forced_decision_counter.call_count) + self.assertEqual(100, remove_all_forced_decisions_counter.call_count) + self.assertEqual(100, 
clone_counter.call_count) From 42f66635b590064453de5ddafbc2e938856c7032 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Thu, 2 Jun 2022 09:30:46 -0400 Subject: [PATCH 05/68] handle datafile provided as bytes (#384) --- optimizely/project_config.py | 2 +- tests/test_config.py | 13 +++++++++++++ tests/test_optimizely_config.py | 12 ++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/optimizely/project_config.py b/optimizely/project_config.py index 82da17c9..12fd1086 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -41,7 +41,7 @@ def __init__(self, datafile, logger, error_handler): """ config = json.loads(datafile) - self._datafile = u'{}'.format(datafile) + self._datafile = datafile.decode('utf-8') if isinstance(datafile, bytes) else datafile self.logger = logger self.error_handler = error_handler self.version = config.get('version') diff --git a/tests/test_config.py b/tests/test_config.py index 96450368..83ebb18c 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1011,6 +1011,19 @@ def test_to_datafile(self): self.assertEqual(expected_datafile, actual_datafile) + def test_to_datafile_from_bytes(self): + """ Test that to_datafile returns the expected datafile when given bytes. 
""" + + expected_datafile = json.dumps(self.config_dict_with_features) + bytes_datafile = bytes(expected_datafile, 'utf-8') + + opt_obj = optimizely.Optimizely(bytes_datafile) + project_config = opt_obj.config_manager.get_config() + + actual_datafile = project_config.to_datafile() + + self.assertEqual(expected_datafile, actual_datafile) + class ConfigLoggingTest(base.BaseTest): def setUp(self): diff --git a/tests/test_optimizely_config.py b/tests/test_optimizely_config.py index c37a8434..640100d7 100644 --- a/tests/test_optimizely_config.py +++ b/tests/test_optimizely_config.py @@ -1525,6 +1525,18 @@ def test__get_datafile(self): self.assertEqual(expected_datafile, actual_datafile) + def test__get_datafile_from_bytes(self): + """ Test that get_datafile returns the expected datafile when provided as bytes. """ + + expected_datafile = json.dumps(self.config_dict_with_features) + bytes_datafile = bytes(expected_datafile, 'utf-8') + + opt_instance = optimizely.Optimizely(bytes_datafile) + opt_config = opt_instance.config_manager.optimizely_config + actual_datafile = opt_config.get_datafile() + + self.assertEqual(expected_datafile, actual_datafile) + def test__get_sdk_key(self): """ Test that get_sdk_key returns the expected value. 
""" From 3b1a62210f929960118f3dba3a730ecc11ca95ac Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Fri, 3 Jun 2022 10:21:53 -0400 Subject: [PATCH 06/68] refactor: remove unnecessary python 2 deps/syntax (#385) * python version update * convert to f-strings * remove redundant super params * swap deprecated threading method * remove unnecessary py2 deps * fix abstract class * remove py2 wrapper func/format * remove py2 unittest patch * remove redundant inherit object * fix event queue test timing issue --- optimizely/bucketer.py | 20 +- optimizely/config_manager.py | 48 ++--- .../decision/optimizely_decide_option.py | 2 +- optimizely/decision/optimizely_decision.py | 2 +- .../decision/optimizely_decision_message.py | 2 +- optimizely/decision_service.py | 94 ++++----- optimizely/entities.py | 6 +- optimizely/error_handler.py | 2 +- optimizely/event/event_factory.py | 2 +- optimizely/event/event_processor.py | 28 ++- optimizely/event/log_event.py | 4 +- optimizely/event/payload.py | 14 +- optimizely/event/user_event.py | 8 +- optimizely/event/user_event_factory.py | 2 +- optimizely/event_builder.py | 6 +- optimizely/event_dispatcher.py | 4 +- optimizely/helpers/condition.py | 36 ++-- optimizely/helpers/enums.py | 26 +-- optimizely/helpers/event_tag_utils.py | 11 +- optimizely/helpers/validator.py | 9 +- optimizely/lib/pymmh3.py | 34 +-- optimizely/logger.py | 4 +- optimizely/notification_center.py | 10 +- optimizely/optimizely.py | 72 ++++--- optimizely/optimizely_config.py | 18 +- optimizely/optimizely_factory.py | 2 +- optimizely/optimizely_user_context.py | 6 +- optimizely/project_config.py | 46 +++-- optimizely/user_profile.py | 4 +- requirements/core.txt | 1 - requirements/test.txt | 1 - setup.py | 6 +- tests/base.py | 12 +- tests/helpers_tests/test_audience.py | 22 +- tests/helpers_tests/test_condition.py | 193 +++++++----------- .../test_condition_tree_evaluator.py | 2 +- 
tests/helpers_tests/test_event_tag_utils.py | 16 +- tests/helpers_tests/test_experiment.py | 2 +- tests/helpers_tests/test_validator.py | 12 +- tests/test_bucketing.py | 2 +- tests/test_config.py | 2 +- tests/test_config_manager.py | 39 ++-- tests/test_decision_service.py | 2 +- tests/test_event_builder.py | 2 +- tests/test_event_dispatcher.py | 2 +- tests/test_event_factory.py | 2 +- tests/test_event_processor.py | 82 ++++++-- tests/test_logger.py | 8 +- tests/test_notification_center.py | 4 +- tests/test_optimizely.py | 41 ++-- tests/test_optimizely_factory.py | 2 +- tests/test_user_context.py | 2 +- tests/testapp/user_profile_service.py | 2 +- 53 files changed, 455 insertions(+), 526 deletions(-) diff --git a/optimizely/bucketer.py b/optimizely/bucketer.py index dcfec3ea..24ecf266 100644 --- a/optimizely/bucketer.py +++ b/optimizely/bucketer.py @@ -24,7 +24,7 @@ GROUP_POLICIES = ['random'] -class Bucketer(object): +class Bucketer: """ Optimizely bucketing algorithm that evenly distributes visitors. """ def __init__(self): @@ -72,9 +72,8 @@ def find_bucket(self, project_config, bucketing_id, parent_id, traffic_allocatio """ bucketing_key = BUCKETING_ID_TEMPLATE.format(bucketing_id=bucketing_id, parent_id=parent_id) bucketing_number = self._generate_bucket_value(bucketing_key) - message = 'Assigned bucket %s to user with bucketing ID "%s".' % (bucketing_number, bucketing_id) project_config.logger.debug( - message + f'Assigned bucket {bucketing_number} to user with bucketing ID "{bucketing_id}".' ) for traffic_allocation in traffic_allocations: @@ -115,24 +114,19 @@ def bucket(self, project_config, experiment, user_id, bucketing_id): ) if not user_experiment_id: - message = 'User "%s" is in no experiment.' % user_id + message = f'User "{user_id}" is in no experiment.' project_config.logger.info(message) decide_reasons.append(message) return None, decide_reasons if user_experiment_id != experiment.id: - message = 'User "%s" is not in experiment "%s" of group %s.' 
\ - % (user_id, experiment.key, experiment.groupId) - project_config.logger.info( - message - ) + message = f'User "{user_id}" is not in experiment "{experiment.key}" of group {experiment.groupId}.' + project_config.logger.info(message) decide_reasons.append(message) return None, decide_reasons - message = 'User "%s" is in experiment %s of group %s.' % (user_id, experiment.key, experiment.groupId) - project_config.logger.info( - message - ) + message = f'User "{user_id}" is in experiment {experiment.key} of group {experiment.groupId}.' + project_config.logger.info(message) decide_reasons.append(message) # Bucket user if not in white-list and in group (if any) diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index b0f959bf..5ef8a530 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abc +from abc import ABC, abstractmethod import numbers import requests import threading @@ -28,8 +28,6 @@ from .helpers import validator from .optimizely_config import OptimizelyConfigService -ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) - class BaseConfigManager(ABC): """ Base class for Optimizely's config manager. """ @@ -62,7 +60,7 @@ def _validate_instantiation_options(self): if not validator.is_notification_center_valid(self.notification_center): raise optimizely_exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('notification_center')) - @abc.abstractmethod + @abstractmethod def get_config(self): """ Get config for use by optimizely.Optimizely. The config should be an instance of project_config.ProjectConfig.""" @@ -86,7 +84,7 @@ def __init__( validation upon object invocation. By default JSON schema validation will be performed. 
""" - super(StaticConfigManager, self).__init__( + super().__init__( logger=logger, error_handler=error_handler, notification_center=notification_center, ) self._config = None @@ -134,7 +132,7 @@ def _set_config(self, datafile): self.notification_center.send_notifications(enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE) self.logger.debug( 'Received new datafile and updated config. ' - 'Old revision number: {}. New revision number: {}.'.format(previous_revision, config.get_revision()) + f'Old revision number: {previous_revision}. New revision number: {config.get_revision()}.' ) def get_config(self): @@ -186,7 +184,7 @@ def __init__( """ self._config_ready_event = threading.Event() - super(PollingConfigManager, self).__init__( + super().__init__( datafile=datafile, logger=logger, error_handler=error_handler, @@ -200,7 +198,7 @@ def __init__( self.set_blocking_timeout(blocking_timeout) self.last_modified = None self._polling_thread = threading.Thread(target=self._run) - self._polling_thread.setDaemon(True) + self._polling_thread.daemon = True self._polling_thread.start() @staticmethod @@ -231,7 +229,7 @@ def get_datafile_url(sdk_key, url, url_template): return url_template.format(sdk_key=sdk_key) except (AttributeError, KeyError): raise optimizely_exceptions.InvalidInputException( - 'Invalid url_template {} provided.'.format(url_template) + f'Invalid url_template {url_template} provided.' ) return url @@ -243,7 +241,7 @@ def _set_config(self, datafile): datafile: JSON string representing the Optimizely project. 
""" if datafile or self._config_ready_event.is_set(): - super(PollingConfigManager, self)._set_config(datafile=datafile) + super()._set_config(datafile=datafile) self._config_ready_event.set() def get_config(self): @@ -265,19 +263,18 @@ def set_update_interval(self, update_interval): """ if update_interval is None: update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL - self.logger.debug('Setting config update interval to default value {}.'.format(update_interval)) + self.logger.debug(f'Setting config update interval to default value {update_interval}.') if not isinstance(update_interval, (int, float)): raise optimizely_exceptions.InvalidInputException( - 'Invalid update_interval "{}" provided.'.format(update_interval) + f'Invalid update_interval "{update_interval}" provided.' ) # If polling interval is less than or equal to 0 then set it to default update interval. if update_interval <= 0: self.logger.debug( - 'update_interval value {} too small. Defaulting to {}'.format( - update_interval, enums.ConfigManager.DEFAULT_UPDATE_INTERVAL - ) + f'update_interval value {update_interval} too small. ' + f'Defaulting to {enums.ConfigManager.DEFAULT_UPDATE_INTERVAL}' ) update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL @@ -291,19 +288,18 @@ def set_blocking_timeout(self, blocking_timeout): """ if blocking_timeout is None: blocking_timeout = enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT - self.logger.debug('Setting config blocking timeout to default value {}.'.format(blocking_timeout)) + self.logger.debug(f'Setting config blocking timeout to default value {blocking_timeout}.') if not isinstance(blocking_timeout, (numbers.Integral, float)): raise optimizely_exceptions.InvalidInputException( - 'Invalid blocking timeout "{}" provided.'.format(blocking_timeout) + f'Invalid blocking timeout "{blocking_timeout}" provided.' ) # If blocking timeout is less than 0 then set it to default blocking timeout. 
if blocking_timeout < 0: self.logger.debug( - 'blocking timeout value {} too small. Defaulting to {}'.format( - blocking_timeout, enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT - ) + f'blocking timeout value {blocking_timeout} too small. ' + f'Defaulting to {enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT}' ) blocking_timeout = enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT @@ -326,12 +322,12 @@ def _handle_response(self, response): try: response.raise_for_status() except requests_exceptions.RequestException as err: - self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err))) + self.logger.error(f'Fetching datafile from {self.datafile_url} failed. Error: {err}') return # Leave datafile and config unchanged if it has not been modified. if response.status_code == http_status_codes.not_modified: - self.logger.debug('Not updating config as datafile has not updated since {}.'.format(self.last_modified)) + self.logger.debug(f'Not updating config as datafile has not updated since {self.last_modified}.') return self.set_last_modified(response.headers) @@ -349,7 +345,7 @@ def fetch_datafile(self): self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) except requests_exceptions.RequestException as err: - self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err))) + self.logger.error(f'Fetching datafile from {self.datafile_url} failed. Error: {err}') return self._handle_response(response) @@ -367,7 +363,7 @@ def _run(self): time.sleep(self.update_interval) except (OSError, OverflowError) as err: self.logger.error( - 'Error in time.sleep. ' 'Provided update_interval value may be too big. Error: {}'.format(str(err)) + f'Error in time.sleep. Provided update_interval value may be too big. Error: {err}' ) raise @@ -396,7 +392,7 @@ def __init__( **kwargs: Refer to keyword arguments descriptions in PollingConfigManager. 
""" self._set_datafile_access_token(datafile_access_token) - super(AuthDatafilePollingConfigManager, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def _set_datafile_access_token(self, datafile_access_token): """ Checks for valid access token input and sets it. """ @@ -421,7 +417,7 @@ def fetch_datafile(self): self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) except requests_exceptions.RequestException as err: - self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err))) + self.logger.error(f'Fetching datafile from {self.datafile_url} failed. Error: {err}') return self._handle_response(response) diff --git a/optimizely/decision/optimizely_decide_option.py b/optimizely/decision/optimizely_decide_option.py index 4eb8e7e5..e409befa 100644 --- a/optimizely/decision/optimizely_decide_option.py +++ b/optimizely/decision/optimizely_decide_option.py @@ -12,7 +12,7 @@ # limitations under the License. -class OptimizelyDecideOption(object): +class OptimizelyDecideOption: DISABLE_DECISION_EVENT = 'DISABLE_DECISION_EVENT' ENABLED_FLAGS_ONLY = 'ENABLED_FLAGS_ONLY' IGNORE_USER_PROFILE_SERVICE = 'IGNORE_USER_PROFILE_SERVICE' diff --git a/optimizely/decision/optimizely_decision.py b/optimizely/decision/optimizely_decision.py index 781ab2bb..cbca9558 100644 --- a/optimizely/decision/optimizely_decision.py +++ b/optimizely/decision/optimizely_decision.py @@ -12,7 +12,7 @@ # limitations under the License. 
-class OptimizelyDecision(object): +class OptimizelyDecision: def __init__(self, variation_key=None, enabled=None, variables=None, rule_key=None, flag_key=None, user_context=None, reasons=None): self.variation_key = variation_key diff --git a/optimizely/decision/optimizely_decision_message.py b/optimizely/decision/optimizely_decision_message.py index 5b1ab417..0c038196 100644 --- a/optimizely/decision/optimizely_decision_message.py +++ b/optimizely/decision/optimizely_decision_message.py @@ -12,7 +12,7 @@ # limitations under the License. -class OptimizelyDecisionMessage(object): +class OptimizelyDecisionMessage: SDK_NOT_READY = 'Optimizely SDK not configured properly yet.' FLAG_KEY_INVALID = 'No flag was found for key "{}".' VARIABLE_VALUE_INVALID = 'Variable value for key "{}" is invalid or wrong type.' diff --git a/optimizely/decision_service.py b/optimizely/decision_service.py index 3aff4719..f7e07cae 100644 --- a/optimizely/decision_service.py +++ b/optimizely/decision_service.py @@ -13,8 +13,6 @@ from collections import namedtuple -from six import string_types - from . import bucketer from .decision.optimizely_decide_option import OptimizelyDecideOption from .helpers import audience as audience_helper @@ -27,7 +25,7 @@ Decision = namedtuple('Decision', 'experiment variation source') -class DecisionService(object): +class DecisionService: """ Class encapsulating all decision related capabilities. """ def __init__(self, logger, user_profile_service): @@ -57,7 +55,7 @@ def _get_bucketing_id(self, user_id, attributes): bucketing_id = attributes.get(enums.ControlAttributes.BUCKETING_ID) if bucketing_id is not None: - if isinstance(bucketing_id, string_types): + if isinstance(bucketing_id, str): return bucketing_id, decide_reasons message = 'Bucketing ID attribute is not a string. Defaulted to user_id.' 
self.logger.warning(message) @@ -89,16 +87,15 @@ def set_forced_variation(self, project_config, experiment_key, user_id, variatio if experiment_id in experiment_to_variation_map: del self.forced_variation_map[user_id][experiment_id] self.logger.debug( - 'Variation mapped to experiment "%s" has been removed for user "%s".' - % (experiment_key, user_id) + f'Variation mapped to experiment "{experiment_key}" has been removed for user "{user_id}".' ) else: self.logger.debug( - 'Nothing to remove. Variation mapped to experiment "%s" for user "%s" does not exist.' - % (experiment_key, user_id) + f'Nothing to remove. Variation mapped to experiment "{experiment_key}" for ' + f'user "{user_id}" does not exist.' ) else: - self.logger.debug('Nothing to remove. User "%s" does not exist in the forced variation map.' % user_id) + self.logger.debug(f'Nothing to remove. User "{user_id}" does not exist in the forced variation map.') return True if not validator.is_non_empty_string(variation_key): @@ -118,8 +115,8 @@ def set_forced_variation(self, project_config, experiment_key, user_id, variatio self.forced_variation_map[user_id][experiment_id] = variation_id self.logger.debug( - 'Set variation "%s" for experiment "%s" and user "%s" in the forced variation map.' - % (variation_id, experiment_id, user_id) + f'Set variation "{variation_id}" for experiment "{experiment_id}" and ' + f'user "{user_id}" in the forced variation map.' ) return True @@ -137,7 +134,7 @@ def get_forced_variation(self, project_config, experiment_key, user_id): """ decide_reasons = [] if user_id not in self.forced_variation_map: - message = 'User "%s" is not in the forced variation map.' % user_id + message = f'User "{user_id}" is not in the forced variation map.' 
self.logger.debug(message) return None, decide_reasons @@ -149,24 +146,20 @@ def get_forced_variation(self, project_config, experiment_key, user_id): experiment_to_variation_map = self.forced_variation_map.get(user_id) if not experiment_to_variation_map: - message = 'No experiment "%s" mapped to user "%s" in the forced variation map.' % (experiment_key, user_id) - self.logger.debug( - message - ) + message = f'No experiment "{experiment_key}" mapped to user "{user_id}" in the forced variation map.' + self.logger.debug(message) return None, decide_reasons variation_id = experiment_to_variation_map.get(experiment.id) if variation_id is None: - message = 'No variation mapped to experiment "%s" in the forced variation map.' % experiment_key + message = f'No variation mapped to experiment "{experiment_key}" in the forced variation map.' self.logger.debug(message) return None, decide_reasons variation = project_config.get_variation_from_id(experiment_key, variation_id) - message = 'Variation "%s" is mapped to experiment "%s" and user "%s" in the forced variation map' \ - % (variation.key, experiment_key, user_id) - self.logger.debug( - message - ) + message = f'Variation "{variation.key}" is mapped to experiment "{experiment_key}" and ' \ + f'user "{user_id}" in the forced variation map' + self.logger.debug(message) decide_reasons.append(message) return variation, decide_reasons @@ -191,7 +184,7 @@ def get_whitelisted_variation(self, project_config, experiment, user_id): forced_variation = project_config.get_variation_from_key(experiment.key, forced_variation_key) if forced_variation: - message = 'User "%s" is forced in variation "%s".' % (user_id, forced_variation_key) + message = f'User "{user_id}" is forced in variation "{forced_variation_key}".' 
self.logger.info(message) decide_reasons.append(message) @@ -216,11 +209,9 @@ def get_stored_variation(self, project_config, experiment, user_profile): if variation_id: variation = project_config.get_variation_from_id(experiment.key, variation_id) if variation: - message = 'Found a stored decision. User "%s" is in variation "%s" of experiment "%s".' \ - % (user_id, variation.key, experiment.key) - self.logger.info( - message - ) + message = f'Found a stored decision. User "{user_id}" is in ' \ + f'variation "{variation.key}" of experiment "{experiment.key}".' + self.logger.info(message) return variation return None @@ -255,7 +246,7 @@ def get_variation(self, project_config, experiment, user_context, options=None): decide_reasons = [] # Check if experiment is running if not experiment_helper.is_experiment_running(experiment): - message = 'Experiment "%s" is not running.' % experiment.key + message = f'Experiment "{experiment.key}" is not running.' self.logger.info(message) decide_reasons.append(message) return None, decide_reasons @@ -278,15 +269,15 @@ def get_variation(self, project_config, experiment, user_context, options=None): try: retrieved_profile = self.user_profile_service.lookup(user_id) except: - self.logger.exception('Unable to retrieve user profile for user "{}" as lookup failed.'.format(user_id)) + self.logger.exception(f'Unable to retrieve user profile for user "{user_id}" as lookup failed.') retrieved_profile = None if validator.is_user_profile_valid(retrieved_profile): user_profile = UserProfile(**retrieved_profile) variation = self.get_stored_variation(project_config, experiment, user_profile) if variation: - message = 'Returning previously activated variation ID "{}" of experiment ' \ - '"{}" for user "{}" from user profile.'.format(variation, experiment, user_id) + message = f'Returning previously activated variation ID "{variation}" of experiment ' \ + f'"{experiment}" for user "{user_id}" from user profile.' 
self.logger.info(message) decide_reasons.append(message) return variation, decide_reasons @@ -302,10 +293,8 @@ def get_variation(self, project_config, experiment, user_context, options=None): attributes, self.logger) decide_reasons += reasons_received if not user_meets_audience_conditions: - message = 'User "{}" does not meet conditions to be in experiment "{}".'.format(user_id, experiment.key) - self.logger.info( - message - ) + message = f'User "{user_id}" does not meet conditions to be in experiment "{experiment.key}".' + self.logger.info(message) decide_reasons.append(message) return None, decide_reasons @@ -315,10 +304,8 @@ def get_variation(self, project_config, experiment, user_context, options=None): variation, bucket_reasons = self.bucketer.bucket(project_config, experiment, user_id, bucketing_id) decide_reasons += bucket_reasons if variation: - message = 'User "%s" is in variation "%s" of experiment %s.' % (user_id, variation.key, experiment.key) - self.logger.info( - message - ) + message = f'User "{user_id}" is in variation "{variation.key}" of experiment {experiment.key}.' + self.logger.info(message) decide_reasons.append(message) # Store this new decision and return the variation for the user if not ignore_user_profile and self.user_profile_service: @@ -326,9 +313,9 @@ def get_variation(self, project_config, experiment, user_context, options=None): user_profile.save_variation_for_experiment(experiment.id, variation.id) self.user_profile_service.save(user_profile.__dict__) except: - self.logger.exception('Unable to save user profile for user "{}".'.format(user_id)) + self.logger.exception(f'Unable to save user profile for user "{user_id}".') return variation, decide_reasons - message = 'User "%s" is in no variation.' % user_id + message = f'User "{user_id}" is in no variation.' 
self.logger.info(message) decide_reasons.append(message) return None, decide_reasons @@ -358,7 +345,7 @@ def get_variation_for_rollout(self, project_config, feature, user): rollout = project_config.get_rollout_from_id(feature.rolloutId) if not rollout: - message = 'There is no rollout of feature {}.'.format(feature.key) + message = f'There is no rollout of feature {feature.key}.' self.logger.debug(message) decide_reasons.append(message) return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons @@ -366,7 +353,7 @@ def get_variation_for_rollout(self, project_config, feature, user): rollout_rules = project_config.get_rollout_experiments(rollout) if not rollout_rules: - message = 'Rollout {} has no experiments.'.format(rollout.id) + message = f'Rollout {rollout.id} has no experiments.' self.logger.debug(message) decide_reasons.append(message) return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons @@ -402,7 +389,7 @@ def get_variation_for_rollout(self, project_config, feature, user): decide_reasons += reasons_received_audience if audience_decision_response: - message = 'User "{}" meets audience conditions for targeting rule {}.'.format(user_id, logging_key) + message = f'User "{user_id}" meets audience conditions for targeting rule {logging_key}.' self.logger.debug(message) decide_reasons.append(message) @@ -411,7 +398,7 @@ def get_variation_for_rollout(self, project_config, feature, user): decide_reasons.extend(bucket_reasons) if bucketed_variation: - message = 'User "{}" bucketed into a targeting rule {}.'.format(user_id, logging_key) + message = f'User "{user_id}" bucketed into a targeting rule {logging_key}.' 
self.logger.debug(message) decide_reasons.append(message) return Decision(experiment=rule, variation=bucketed_variation, @@ -419,8 +406,8 @@ def get_variation_for_rollout(self, project_config, feature, user): elif not everyone_else: # skip this logging for EveryoneElse since this has a message not for everyone_else - message = 'User "{}" not bucketed into a targeting rule {}. ' \ - 'Checking "Everyone Else" rule now.'.format(user_id, logging_key) + message = f'User "{user_id}" not bucketed into a targeting rule {logging_key}. ' \ + 'Checking "Everyone Else" rule now.' self.logger.debug(message) decide_reasons.append(message) @@ -428,8 +415,7 @@ def get_variation_for_rollout(self, project_config, feature, user): skip_to_everyone_else = True else: - message = 'User "{}" does not meet audience conditions for targeting rule {}.'.format( - user_id, logging_key) + message = f'User "{user_id}" does not meet audience conditions for targeting rule {logging_key}.' self.logger.debug(message) decide_reasons.append(message) @@ -476,14 +462,14 @@ def get_variation_for_feature(self, project_config, feature, user_context, optio decide_reasons += variation_reasons if decision_variation: - message = 'User "{}" bucketed into a experiment "{}" of feature "{}".'.format( - user_context.user_id, experiment.key, feature.key) + message = f'User "{user_context.user_id}" bucketed into a ' \ + f'experiment "{experiment.key}" of feature "{feature.key}".' self.logger.debug(message) return Decision(experiment, decision_variation, enums.DecisionSources.FEATURE_TEST), decide_reasons - message = 'User "{}" is not bucketed into any of the experiments on the feature "{}".'.format( - user_context.user_id, feature.key) + message = f'User "{user_context.user_id}" is not bucketed into any of the ' \ + f'experiments on the feature "{feature.key}".' 
self.logger.debug(message) variation, rollout_variation_reasons = self.get_variation_for_rollout(project_config, feature, user_context) if rollout_variation_reasons: diff --git a/optimizely/entities.py b/optimizely/entities.py index 15576568..483610e9 100644 --- a/optimizely/entities.py +++ b/optimizely/entities.py @@ -12,7 +12,7 @@ # limitations under the License. -class BaseEntity(object): +class BaseEntity: def __eq__(self, other): return self.__dict__ == other.__dict__ @@ -118,7 +118,7 @@ def __init__(self, id, experiments, **kwargs): class Variable(BaseEntity): - class Type(object): + class Type: BOOLEAN = 'boolean' DOUBLE = 'double' INTEGER = 'integer' @@ -134,7 +134,7 @@ def __init__(self, id, key, type, defaultValue, **kwargs): class Variation(BaseEntity): class VariableUsage(BaseEntity): - def __init__(self, id, value, **kwards): + def __init__(self, id, value, **kwargs): self.id = id self.value = value diff --git a/optimizely/error_handler.py b/optimizely/error_handler.py index ed88625e..8fe631f3 100644 --- a/optimizely/error_handler.py +++ b/optimizely/error_handler.py @@ -12,7 +12,7 @@ # limitations under the License. -class BaseErrorHandler(object): +class BaseErrorHandler: """ Class encapsulating exception handling functionality. Override with your own exception handler providing handle_error method. """ diff --git a/optimizely/event/event_factory.py b/optimizely/event/event_factory.py index 54155358..237bdbe9 100644 --- a/optimizely/event/event_factory.py +++ b/optimizely/event/event_factory.py @@ -21,7 +21,7 @@ CUSTOM_ATTRIBUTE_FEATURE_TYPE = 'custom' -class EventFactory(object): +class EventFactory: """ EventFactory builds LogEvent object from a given UserEvent. 
This class serves to separate concerns between events in the SDK and the API used to record the events via the Optimizely Events API ("https://developers.optimizely.com/x/events/api/index.html") diff --git a/optimizely/event/event_processor.py b/optimizely/event/event_processor.py index f6dfa312..eb71287d 100644 --- a/optimizely/event/event_processor.py +++ b/optimizely/event/event_processor.py @@ -11,13 +11,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abc +from abc import ABC, abstractmethod import numbers import threading import time from datetime import timedelta -from six.moves import queue +import queue from optimizely import logger as _logging from optimizely import notification_center as _notification_center @@ -27,13 +27,11 @@ from .event_factory import EventFactory from .user_event import UserEvent -ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) - class BaseEventProcessor(ABC): """ Class encapsulating event processing. Override with your own implementation. """ - @abc.abstractmethod + @abstractmethod def process(self, user_event): """ Method to provide intermediary processing stage within event production. Args: @@ -145,7 +143,7 @@ def _validate_instantiation_props(self, prop, prop_name, default_value): is_valid = False if is_valid is False: - self.logger.info('Using default value {} for {}.'.format(default_value, prop_name)) + self.logger.info(f'Using default value {default_value} for {prop_name}.') return is_valid @@ -171,7 +169,7 @@ def start(self): self.flushing_interval_deadline = self._get_time() + self._get_time(self.flush_interval.total_seconds()) self.executor = threading.Thread(target=self._run) - self.executor.setDaemon(True) + self.executor.daemon = True self.executor.start() def _run(self): @@ -211,7 +209,7 @@ def _run(self): self._add_to_batch(item) except Exception as exception: - self.logger.error('Uncaught exception processing buffer. 
Error: ' + str(exception)) + self.logger.error(f'Uncaught exception processing buffer. Error: {exception}') finally: self.logger.info('Exiting processing loop. Attempting to flush pending events.') @@ -229,7 +227,7 @@ def _flush_batch(self): self.logger.debug('Nothing to flush.') return - self.logger.debug('Flushing batch size ' + str(batch_len)) + self.logger.debug(f'Flushing batch size {batch_len}') with self.LOCK: to_process_batch = list(self._current_batch) @@ -242,7 +240,7 @@ def _flush_batch(self): try: self.event_dispatcher.dispatch_event(log_event) except Exception as e: - self.logger.error('Error dispatching event: ' + str(log_event) + ' ' + str(e)) + self.logger.error(f'Error dispatching event: {log_event} {e}') def process(self, user_event): """ Method to process the user_event by putting it in event_queue. @@ -255,14 +253,14 @@ def process(self, user_event): return self.logger.debug( - 'Received event of type {} for user {}.'.format(type(user_event).__name__, user_event.user_id) + f'Received event of type {type(user_event).__name__} for user {user_event.user_id}.' ) try: self.event_queue.put_nowait(user_event) except queue.Full: self.logger.warning( - 'Payload not accepted by the queue. Current size: {}'.format(str(self.event_queue.qsize())) + f'Payload not accepted by the queue. 
Current size: {self.event_queue.qsize()}' ) def _add_to_batch(self, user_event): @@ -319,7 +317,7 @@ def stop(self): self.executor.join(self.timeout_interval.total_seconds()) if self.is_running: - self.logger.error('Timeout exceeded while attempting to close for ' + str(self.timeout_interval) + ' ms.') + self.logger.error(f'Timeout exceeded while attempting to close for {self.timeout_interval} ms.') class ForwardingEventProcessor(BaseEventProcessor): @@ -356,7 +354,7 @@ def process(self, user_event): return self.logger.debug( - 'Received event of type {} for user {}.'.format(type(user_event).__name__, user_event.user_id) + f'Received event of type {type(user_event).__name__} for user {user_event.user_id}.' ) log_event = EventFactory.create_log_event(user_event, self.logger) @@ -366,4 +364,4 @@ def process(self, user_event): try: self.event_dispatcher.dispatch_event(log_event) except Exception as e: - self.logger.exception('Error dispatching event: ' + str(log_event) + ' ' + str(e)) + self.logger.exception(f'Error dispatching event: {log_event} {e}') diff --git a/optimizely/event/log_event.py b/optimizely/event/log_event.py index 1c5ce71d..2a6b8b78 100644 --- a/optimizely/event/log_event.py +++ b/optimizely/event/log_event.py @@ -12,7 +12,7 @@ # limitations under the License. -class LogEvent(object): +class LogEvent: """ Representation of an event which can be sent to Optimizely events API. """ def __init__(self, url, params, http_verb=None, headers=None): @@ -22,4 +22,4 @@ def __init__(self, url, params, http_verb=None, headers=None): self.headers = headers def __str__(self): - return str(self.__class__) + ": " + str(self.__dict__) + return f'{self.__class__}: {self.__dict__}' diff --git a/optimizely/event/payload.py b/optimizely/event/payload.py index b7e51a24..15e23db2 100644 --- a/optimizely/event/payload.py +++ b/optimizely/event/payload.py @@ -14,7 +14,7 @@ import json -class EventBatch(object): +class EventBatch: """ Class respresenting Event Batch. 
""" def __init__( @@ -58,7 +58,7 @@ def get_event_params(self): return json.loads(json.dumps(self.__dict__, default=lambda o: o.__dict__), object_pairs_hook=self._dict_clean,) -class Decision(object): +class Decision: """ Class respresenting Decision. """ def __init__(self, campaign_id, experiment_id, variation_id, metadata): @@ -68,7 +68,7 @@ def __init__(self, campaign_id, experiment_id, variation_id, metadata): self.metadata = metadata -class Metadata(object): +class Metadata: """ Class respresenting Metadata. """ def __init__(self, flag_key, rule_key, rule_type, variation_key, enabled): @@ -79,7 +79,7 @@ def __init__(self, flag_key, rule_key, rule_type, variation_key, enabled): self.enabled = enabled -class Snapshot(object): +class Snapshot: """ Class representing Snapshot. """ def __init__(self, events, decisions=None): @@ -87,7 +87,7 @@ def __init__(self, events, decisions=None): self.decisions = decisions -class SnapshotEvent(object): +class SnapshotEvent: """ Class representing Snapshot Event. """ def __init__(self, entity_id, uuid, key, timestamp, revenue=None, value=None, tags=None): @@ -100,7 +100,7 @@ def __init__(self, entity_id, uuid, key, timestamp, revenue=None, value=None, ta self.tags = tags -class Visitor(object): +class Visitor: """ Class representing Visitor. """ def __init__(self, snapshots, attributes, visitor_id): @@ -109,7 +109,7 @@ def __init__(self, snapshots, attributes, visitor_id): self.visitor_id = visitor_id -class VisitorAttribute(object): +class VisitorAttribute: """ Class representing Visitor Attribute. """ def __init__(self, entity_id, key, attribute_type, value): diff --git a/optimizely/event/user_event.py b/optimizely/event/user_event.py index 0c4e021a..67838410 100644 --- a/optimizely/event/user_event.py +++ b/optimizely/event/user_event.py @@ -19,7 +19,7 @@ CLIENT_NAME = 'python-sdk' -class UserEvent(object): +class UserEvent: """ Class respresenting User Event. 
""" def __init__(self, event_context, user_id, visitor_attributes, bot_filtering=None): @@ -44,7 +44,7 @@ def __init__( self, event_context, user_id, experiment, visitor_attributes, variation, flag_key, rule_key, rule_type, enabled, bot_filtering=None ): - super(ImpressionEvent, self).__init__(event_context, user_id, visitor_attributes, bot_filtering) + super().__init__(event_context, user_id, visitor_attributes, bot_filtering) self.experiment = experiment self.variation = variation self.flag_key = flag_key @@ -59,12 +59,12 @@ class ConversionEvent(UserEvent): def __init__( self, event_context, event, user_id, visitor_attributes, event_tags, bot_filtering=None, ): - super(ConversionEvent, self).__init__(event_context, user_id, visitor_attributes, bot_filtering) + super().__init__(event_context, user_id, visitor_attributes, bot_filtering) self.event = event self.event_tags = event_tags -class EventContext(object): +class EventContext: """ Class respresenting User Event Context. """ def __init__(self, account_id, project_id, revision, anonymize_ip): diff --git a/optimizely/event/user_event_factory.py b/optimizely/event/user_event_factory.py index fb5c70ed..75741aef 100644 --- a/optimizely/event/user_event_factory.py +++ b/optimizely/event/user_event_factory.py @@ -16,7 +16,7 @@ from optimizely.helpers import enums -class UserEventFactory(object): +class UserEventFactory: """ UserEventFactory builds impression and conversion events from a given UserEvent. """ @classmethod diff --git a/optimizely/event_builder.py b/optimizely/event_builder.py index befe2700..882f8518 100644 --- a/optimizely/event_builder.py +++ b/optimizely/event_builder.py @@ -20,7 +20,7 @@ from .helpers import validator -class Event(object): +class Event: """ Representation of an event which can be sent to the Optimizely logging endpoint. 
""" def __init__(self, url, params, http_verb=None, headers=None): @@ -30,7 +30,7 @@ def __init__(self, url, params, http_verb=None, headers=None): self.headers = headers -class EventBuilder(object): +class EventBuilder: """ Class which encapsulates methods to build events for tracking impressions and conversions using the new V3 event API (batch). """ @@ -38,7 +38,7 @@ class EventBuilder(object): HTTP_VERB = 'POST' HTTP_HEADERS = {'Content-Type': 'application/json'} - class EventParams(object): + class EventParams: ACCOUNT_ID = 'account_id' PROJECT_ID = 'project_id' EXPERIMENT_ID = 'experiment_id' diff --git a/optimizely/event_dispatcher.py b/optimizely/event_dispatcher.py index f21b47a1..1f922012 100644 --- a/optimizely/event_dispatcher.py +++ b/optimizely/event_dispatcher.py @@ -22,7 +22,7 @@ REQUEST_TIMEOUT = 10 -class EventDispatcher(object): +class EventDispatcher: @staticmethod def dispatch_event(event): """ Dispatch the event being represented by the Event object. @@ -40,4 +40,4 @@ def dispatch_event(event): ).raise_for_status() except request_exception.RequestException as error: - logging.error('Dispatch event failed. Error: %s' % str(error)) + logging.error(f'Dispatch event failed. Error: {error}') diff --git a/optimizely/helpers/condition.py b/optimizely/helpers/condition.py index 57ec558c..48dc00d9 100644 --- a/optimizely/helpers/condition.py +++ b/optimizely/helpers/condition.py @@ -14,22 +14,20 @@ import json import numbers -from six import string_types - from . 
import validator from .enums import CommonAudienceEvaluationLogs as audience_logs from .enums import Errors from .enums import VersionType -class ConditionOperatorTypes(object): +class ConditionOperatorTypes: AND = 'and' OR = 'or' NOT = 'not' operators = [AND, OR, NOT] -class ConditionMatchTypes(object): +class ConditionMatchTypes: EXACT = 'exact' EXISTS = 'exists' GREATER_THAN = 'gt' @@ -44,7 +42,7 @@ class ConditionMatchTypes(object): SUBSTRING = 'substring' -class CustomAttributeConditionEvaluator(object): +class CustomAttributeConditionEvaluator: """ Class encapsulating methods to be used in audience leaf condition evaluation. """ CUSTOM_ATTRIBUTE_CONDITION_TYPE = 'custom_attribute' @@ -83,7 +81,7 @@ def is_value_type_valid_for_exact_conditions(self, value): Boolean: True if value is a string, boolean, or number. Otherwise False. """ # No need to check for bool since bool is a subclass of int - if isinstance(value, string_types) or isinstance(value, (numbers.Integral, float)): + if isinstance(value, str) or isinstance(value, (numbers.Integral, float)): return True return False @@ -405,11 +403,11 @@ def substring_evaluator(self, index): condition_value = self.condition_data[index][1] user_value = self.attributes.get(condition_name) - if not isinstance(condition_value, string_types): + if not isinstance(condition_value, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index),)) return None - if not isinstance(user_value, string_types): + if not isinstance(user_value, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format(self._get_condition_json(index), type(user_value), condition_name) ) @@ -435,11 +433,11 @@ def semver_equal_evaluator(self, index): target_version = self.condition_data[index][1] user_version = self.attributes.get(condition_name) - if not isinstance(target_version, string_types): + if not isinstance(target_version, str): 
self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), )) return None - if not isinstance(user_version, string_types): + if not isinstance(user_version, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format( self._get_condition_json(index), type(user_version), condition_name @@ -470,11 +468,11 @@ def semver_greater_than_evaluator(self, index): target_version = self.condition_data[index][1] user_version = self.attributes.get(condition_name) - if not isinstance(target_version, string_types): + if not isinstance(target_version, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), )) return None - if not isinstance(user_version, string_types): + if not isinstance(user_version, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format( self._get_condition_json(index), type(user_version), condition_name @@ -505,11 +503,11 @@ def semver_less_than_evaluator(self, index): target_version = self.condition_data[index][1] user_version = self.attributes.get(condition_name) - if not isinstance(target_version, string_types): + if not isinstance(target_version, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), )) return None - if not isinstance(user_version, string_types): + if not isinstance(user_version, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format( self._get_condition_json(index), type(user_version), condition_name @@ -540,11 +538,11 @@ def semver_less_than_or_equal_evaluator(self, index): target_version = self.condition_data[index][1] user_version = self.attributes.get(condition_name) - if not isinstance(target_version, string_types): + if not isinstance(target_version, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), )) return None - if not isinstance(user_version, string_types): + if not isinstance(user_version, str): self.logger.warning( 
audience_logs.UNEXPECTED_TYPE.format( self._get_condition_json(index), type(user_version), condition_name @@ -575,11 +573,11 @@ def semver_greater_than_or_equal_evaluator(self, index): target_version = self.condition_data[index][1] user_version = self.attributes.get(condition_name) - if not isinstance(target_version, string_types): + if not isinstance(target_version, str): self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index), )) return None - if not isinstance(user_version, string_types): + if not isinstance(user_version, str): self.logger.warning( audience_logs.UNEXPECTED_TYPE.format( self._get_condition_json(index), type(user_version), condition_name @@ -705,7 +703,7 @@ def evaluate(self, index): return self.EVALUATORS_BY_MATCH_TYPE[condition_match](self, index) -class ConditionDecoder(object): +class ConditionDecoder: """ Class which provides an object_hook method for decoding dict objects into a list when given a condition_decoder. """ diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index aed202eb..54145f9c 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -14,7 +14,7 @@ import logging -class CommonAudienceEvaluationLogs(object): +class CommonAudienceEvaluationLogs: AUDIENCE_EVALUATION_RESULT = 'Audience "{}" evaluated to {}.' EVALUATING_AUDIENCE = 'Starting to evaluate audience "{}" with conditions: {}.' INFINITE_ATTRIBUTE_VALUE = ( @@ -56,7 +56,7 @@ class RolloutRuleAudienceEvaluationLogs(CommonAudienceEvaluationLogs): EVALUATING_AUDIENCES_COMBINED = 'Evaluating audiences for rule {}: {}.' 
-class ConfigManager(object): +class ConfigManager: AUTHENTICATED_DATAFILE_URL_TEMPLATE = 'https://config.optimizely.com/datafiles/auth/{sdk_key}.json' AUTHORIZATION_HEADER_DATA_TEMPLATE = 'Bearer {datafile_access_token}' DATAFILE_URL_TEMPLATE = 'https://cdn.optimizely.com/datafiles/{sdk_key}.json' @@ -68,19 +68,19 @@ class ConfigManager(object): REQUEST_TIMEOUT = 10 -class ControlAttributes(object): +class ControlAttributes: BOT_FILTERING = '$opt_bot_filtering' BUCKETING_ID = '$opt_bucketing_id' USER_AGENT = '$opt_user_agent' -class DatafileVersions(object): +class DatafileVersions: V2 = '2' V3 = '3' V4 = '4' -class DecisionNotificationTypes(object): +class DecisionNotificationTypes: AB_TEST = 'ab-test' ALL_FEATURE_VARIABLES = 'all-feature-variables' FEATURE = 'feature' @@ -89,13 +89,13 @@ class DecisionNotificationTypes(object): FLAG = 'flag' -class DecisionSources(object): +class DecisionSources: EXPERIMENT = 'experiment' FEATURE_TEST = 'feature-test' ROLLOUT = 'rollout' -class Errors(object): +class Errors: INVALID_ATTRIBUTE = 'Provided attribute is not in datafile.' INVALID_ATTRIBUTE_FORMAT = 'Attributes provided are in an invalid format.' INVALID_AUDIENCE = 'Provided audience is not in datafile.' @@ -115,7 +115,7 @@ class Errors(object): UNSUPPORTED_DATAFILE_VERSION = 'This version of the Python SDK does not support the given datafile version: "{}".' -class ForcedDecisionLogs(object): +class ForcedDecisionLogs: USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED = 'Variation ({}) is mapped to flag ({}), rule ({}) and user ({}) ' \ 'in the forced decision map.' USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED = 'Variation ({}) is mapped to flag ({}) and user ({}) ' \ @@ -126,18 +126,18 @@ class ForcedDecisionLogs(object): 'and user ({}) in the forced decision map.' 
-class HTTPHeaders(object): +class HTTPHeaders: AUTHORIZATION = 'Authorization' IF_MODIFIED_SINCE = 'If-Modified-Since' LAST_MODIFIED = 'Last-Modified' -class HTTPVerbs(object): +class HTTPVerbs: GET = 'GET' POST = 'POST' -class LogLevels(object): +class LogLevels: NOTSET = logging.NOTSET DEBUG = logging.DEBUG INFO = logging.INFO @@ -146,7 +146,7 @@ class LogLevels(object): CRITICAL = logging.CRITICAL -class NotificationTypes(object): +class NotificationTypes: """ NotificationTypes for the notification_center.NotificationCenter format is NOTIFICATION TYPE: list of parameters to callback. @@ -172,6 +172,6 @@ class NotificationTypes(object): LOG_EVENT = 'LOG_EVENT:log_event' -class VersionType(object): +class VersionType: IS_PRE_RELEASE = '-' IS_BUILD = '+' diff --git a/optimizely/helpers/event_tag_utils.py b/optimizely/helpers/event_tag_utils.py index 0a5ae264..cecf1008 100644 --- a/optimizely/helpers/event_tag_utils.py +++ b/optimizely/helpers/event_tag_utils.py @@ -87,9 +87,7 @@ def get_numeric_value(event_tags, logger=None): if not isinstance(cast_numeric_metric_value, float) or \ math.isnan(cast_numeric_metric_value) or \ math.isinf(cast_numeric_metric_value): - logger_message_debug = 'Provided numeric value {} is in an invalid format.'.format( - numeric_metric_value - ) + logger_message_debug = f'Provided numeric value {numeric_metric_value} is in an invalid format.' numeric_metric_value = None else: # Handle booleans as a special case. @@ -116,15 +114,14 @@ def get_numeric_value(event_tags, logger=None): if logger: logger.log( enums.LogLevels.INFO, - 'The numeric metric value {} will be sent to results.'.format(numeric_metric_value), + f'The numeric metric value {numeric_metric_value} will be sent to results.' 
) else: if logger: logger.log( enums.LogLevels.WARNING, - 'The provided numeric metric value {} is in an invalid format and will not be sent to results.'.format( - numeric_metric_value - ), + f'The provided numeric metric value {numeric_metric_value}' + ' is in an invalid format and will not be sent to results.' ) return numeric_metric_value diff --git a/optimizely/helpers/validator.py b/optimizely/helpers/validator.py index 522faccd..7d1e4f00 100644 --- a/optimizely/helpers/validator.py +++ b/optimizely/helpers/validator.py @@ -15,7 +15,6 @@ import jsonschema import math import numbers -from six import string_types from optimizely.notification_center import NotificationCenter from optimizely.user_profile import UserProfile @@ -205,7 +204,7 @@ def is_non_empty_string(input_id_key): Returns: Boolean depending upon whether input is valid or not. """ - if input_id_key and isinstance(input_id_key, string_types): + if input_id_key and isinstance(input_id_key, str): return True return False @@ -224,10 +223,10 @@ def is_attribute_valid(attribute_key, attribute_value): True otherwise """ - if not isinstance(attribute_key, string_types): + if not isinstance(attribute_key, str): return False - if isinstance(attribute_value, (string_types, bool)): + if isinstance(attribute_value, (str, bool)): return True if isinstance(attribute_value, (numbers.Integral, float)): @@ -281,7 +280,7 @@ def are_values_same_type(first_val, second_val): second_val_type = type(second_val) # use isinstance to accomodate Python 2 unicode and str types. - if isinstance(first_val, string_types) and isinstance(second_val, string_types): + if isinstance(first_val, str) and isinstance(second_val, str): return True # Compare types if one of the values is bool because bool is a subclass on Integer. 
diff --git a/optimizely/lib/pymmh3.py b/optimizely/lib/pymmh3.py index 4997de21..1a3de699 100755 --- a/optimizely/lib/pymmh3.py +++ b/optimizely/lib/pymmh3.py @@ -17,27 +17,12 @@ https://pypi.python.org/pypi/mmh3/2.3.1 ''' -import sys as _sys -if _sys.version_info > (3, 0): - - def xrange(a, b, c): - return range(a, b, c) - - def xencode(x): - if isinstance(x, bytes) or isinstance(x, bytearray): - return x - else: - return x.encode() - - -else: - - def xencode(x): +def xencode(x): + if isinstance(x, bytes) or isinstance(x, bytearray): return x - - -del _sys + else: + return x.encode() def hash(key, seed=0x0): @@ -62,7 +47,7 @@ def fmix(h): c2 = 0x1B873593 # body - for block_start in xrange(0, nblocks * 4, 4): + for block_start in range(0, nblocks * 4, 4): # ??? big endian? k1 = key[block_start + 3] << 24 | key[block_start + 2] << 16 | key[block_start + 1] << 8 | key[block_start + 0] @@ -124,7 +109,7 @@ def fmix(k): c2 = 0x4CF5AD432745937F # body - for block_start in xrange(0, nblocks * 8, 8): + for block_start in range(0, nblocks * 8, 8): # ??? big endian? 
k1 = ( key[2 * block_start + 7] << 56 | @@ -256,7 +241,7 @@ def fmix(h): c4 = 0xA1E38B93 # body - for block_start in xrange(0, nblocks * 16, 16): + for block_start in range(0, nblocks * 16, 16): k1 = ( key[block_start + 3] << 24 | key[block_start + 2] << 16 | @@ -449,7 +434,7 @@ def hash_bytes(key, seed=0x0, x64arch=True): bytestring = '' - for i in xrange(0, 16, 1): + for i in range(0, 16, 1): lsbyte = hash_128 & 0xFF bytestring = bytestring + str(chr(lsbyte)) hash_128 = hash_128 >> 8 @@ -459,6 +444,7 @@ def hash_bytes(key, seed=0x0, x64arch=True): if __name__ == "__main__": import argparse + import sys parser = argparse.ArgumentParser('pymurmur3', 'pymurmur [options] "string to hash"') parser.add_argument('--seed', type=int, default=0) @@ -467,4 +453,4 @@ def hash_bytes(key, seed=0x0, x64arch=True): opts = parser.parse_args() for str_to_hash in opts.strings: - sys.stdout.write('"%s" = 0x%08X\n' % (str_to_hash, hash(str_to_hash))) + sys.stdout.write(f'"{str_to_hash}" = 0x{hash(str_to_hash):08X}\n') diff --git a/optimizely/logger.py b/optimizely/logger.py index 4754e347..2220266d 100644 --- a/optimizely/logger.py +++ b/optimizely/logger.py @@ -52,7 +52,7 @@ def reset_logger(name, level=None, handler=None): return logger -class BaseLogger(object): +class BaseLogger: """ Class encapsulating logging functionality. Override with your own logger providing log method. """ @staticmethod @@ -79,7 +79,7 @@ def __init__(self, min_level=enums.LogLevels.INFO): def log(self, log_level, message): # Log a deprecation/runtime warning. # Clients should be using standard loggers instead of this wrapper. - warning = '{} is deprecated. Please use standard python loggers.'.format(self.__class__) + warning = f'{self.__class__} is deprecated. Please use standard python loggers.' warnings.warn(warning, DeprecationWarning) # Log the message. 
diff --git a/optimizely/notification_center.py b/optimizely/notification_center.py index 539088a8..179e39f9 100644 --- a/optimizely/notification_center.py +++ b/optimizely/notification_center.py @@ -20,7 +20,7 @@ ) -class NotificationCenter(object): +class NotificationCenter: """ Class encapsulating methods to manage notifications and their listeners. The enums.NotificationTypes includes predefined notifications.""" @@ -45,7 +45,7 @@ def add_notification_listener(self, notification_type, notification_callback): """ if notification_type not in NOTIFICATION_TYPES: - self.logger.error('Invalid notification_type: {} provided. Not adding listener.'.format(notification_type)) + self.logger.error(f'Invalid notification_type: {notification_type} provided. Not adding listener.') return -1 for _, listener in self.notification_listeners[notification_type]: @@ -86,7 +86,7 @@ def clear_notification_listeners(self, notification_type): if notification_type not in NOTIFICATION_TYPES: self.logger.error( - 'Invalid notification_type: {} provided. Not removing any listener.'.format(notification_type) + f'Invalid notification_type: {notification_type} provided. Not removing any listener.' ) self.notification_listeners[notification_type] = [] @@ -120,7 +120,7 @@ def send_notifications(self, notification_type, *args): if notification_type not in NOTIFICATION_TYPES: self.logger.error( - 'Invalid notification_type: {} provided. ' 'Not triggering any notification.'.format(notification_type) + f'Invalid notification_type: {notification_type} provided. ' 'Not triggering any notification.' ) return @@ -130,5 +130,5 @@ def send_notifications(self, notification_type, *args): callback(*args) except: self.logger.exception( - 'Unknown problem when sending "{}" type notification.'.format(notification_type) + f'Unknown problem when sending "{notification_type}" type notification.' 
) diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 98fd9d89..7299129e 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -11,8 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from six import string_types - from . import decision_service from . import entities from . import event_builder @@ -36,7 +34,7 @@ from .optimizely_user_context import OptimizelyUserContext -class Optimizely(object): +class Optimizely: """ Class encapsulating all SDK functionality. """ def __init__( @@ -244,7 +242,7 @@ def _get_feature_variable_for_type( self.logger.error(enums.Errors.INVALID_INPUT.format('variable_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -263,8 +261,8 @@ def _get_feature_variable_for_type( variable_type = variable_type or variable.type if variable.type != variable_type: self.logger.warning( - 'Requested variable type "%s", but variable is of type "%s". ' - 'Use correct API to retrieve value. Returning None.' % (variable_type, variable.type) + f'Requested variable type "{variable_type}", but variable is of ' + f'type "{variable.type}". Use correct API to retrieve value. Returning None.' ) return None @@ -281,18 +279,18 @@ def _get_feature_variable_for_type( if feature_enabled: variable_value = project_config.get_variable_value_for_variation(variable, decision.variation) self.logger.info( - 'Got variable value "%s" for variable "%s" of feature flag "%s".' - % (variable_value, variable_key, feature_key) + f'Got variable value "{variable_value}" for ' + f'variable "{variable_key}" of feature flag "{feature_key}".' ) else: self.logger.info( - 'Feature "%s" is not enabled for user "%s". ' - 'Returning the default variable value "%s".' % (feature_key, user_id, variable_value) + f'Feature "{feature_key}" is not enabled for user "{user_id}". 
' + f'Returning the default variable value "{variable_value}".' ) else: self.logger.info( - 'User "%s" is not in any variation or rollout rule. ' - 'Returning default value for variable "%s" of feature flag "%s".' % (user_id, variable_key, feature_key) + f'User "{user_id}" is not in any variation or rollout rule. ' + f'Returning default value for variable "{variable_key}" of feature flag "{feature_key}".' ) if decision.source == enums.DecisionSources.FEATURE_TEST: @@ -343,7 +341,7 @@ def _get_all_feature_variables_for_type( self.logger.error(enums.Errors.INVALID_INPUT.format('feature_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -365,16 +363,16 @@ def _get_all_feature_variables_for_type( feature_enabled = decision.variation.featureEnabled if feature_enabled: self.logger.info( - 'Feature "%s" is enabled for user "%s".' % (feature_key, user_id) + f'Feature "{feature_key}" is enabled for user "{user_id}".' ) else: self.logger.info( - 'Feature "%s" is not enabled for user "%s".' % (feature_key, user_id) + f'Feature "{feature_key}" is not enabled for user "{user_id}".' ) else: self.logger.info( - 'User "%s" is not in any variation or rollout rule. ' - 'Returning default value for all variables of feature flag "%s".' % (user_id, feature_key) + f'User "{user_id}" is not in any variation or rollout rule. ' + f'Returning default value for all variables of feature flag "{feature_key}".' ) all_variables = {} @@ -384,8 +382,8 @@ def _get_all_feature_variables_for_type( if feature_enabled: variable_value = project_config.get_variable_value_for_variation(variable, decision.variation) self.logger.debug( - 'Got variable value "%s" for variable "%s" of feature flag "%s".' - % (variable_value, variable_key, feature_key) + f'Got variable value "{variable_value}" for ' + f'variable "{variable_key}" of feature flag "{feature_key}".' 
) try: @@ -438,7 +436,7 @@ def activate(self, experiment_key, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -450,14 +448,14 @@ def activate(self, experiment_key, user_id, attributes=None): variation_key = self.get_variation(experiment_key, user_id, attributes) if not variation_key: - self.logger.info('Not activating user "%s".' % user_id) + self.logger.info(f'Not activating user "{user_id}".') return None experiment = project_config.get_experiment_from_key(experiment_key) variation = project_config.get_variation_from_key(experiment_key, variation_key) # Create and dispatch impression event - self.logger.info('Activating user "%s" in experiment "%s".' % (user_id, experiment.key)) + self.logger.info(f'Activating user "{user_id}" in experiment "{experiment.key}".') self._send_impression_event(project_config, experiment, variation, '', experiment.key, enums.DecisionSources.EXPERIMENT, True, user_id, attributes) @@ -481,7 +479,7 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): self.logger.error(enums.Errors.INVALID_INPUT.format('event_key')) return - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return @@ -495,7 +493,7 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): event = project_config.get_event(event_key) if not event: - self.logger.info('Not tracking user "%s" for event "%s".' 
% (user_id, event_key)) + self.logger.info(f'Not tracking user "{user_id}" for event "{event_key}".') return user_event = user_event_factory.UserEventFactory.create_conversion_event( @@ -503,7 +501,7 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): ) self.event_processor.process(user_event) - self.logger.info('Tracking event "%s" for user "%s".' % (event_key, user_id)) + self.logger.info(f'Tracking event "{event_key}" for user "{user_id}".') if len(self.notification_center.notification_listeners[enums.NotificationTypes.TRACK]) > 0: log_event = event_factory.EventFactory.create_log_event(user_event, self.logger) @@ -532,7 +530,7 @@ def get_variation(self, experiment_key, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -545,7 +543,7 @@ def get_variation(self, experiment_key, user_id, attributes=None): variation_key = None if not experiment: - self.logger.info('Experiment key "%s" is invalid. Not activating user "%s".' % (experiment_key, user_id)) + self.logger.info(f'Experiment key "{experiment_key}" is invalid. Not activating user "{user_id}".') return None if not self._validate_user_inputs(attributes): @@ -592,7 +590,7 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_INPUT.format('feature_key')) return False - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return False @@ -637,9 +635,9 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): ) if feature_enabled: - self.logger.info('Feature "%s" is enabled for user "%s".' 
% (feature_key, user_id)) + self.logger.info(f'Feature "{feature_key}" is enabled for user "{user_id}".') else: - self.logger.info('Feature "%s" is not enabled for user "%s".' % (feature_key, user_id)) + self.logger.info(f'Feature "{feature_key}" is not enabled for user "{user_id}".') self.notification_center.send_notifications( enums.NotificationTypes.DECISION, @@ -672,7 +670,7 @@ def get_enabled_features(self, user_id, attributes=None): self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('get_enabled_features')) return enabled_features - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return enabled_features @@ -884,7 +882,7 @@ def set_forced_variation(self, experiment_key, user_id, variation_key): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return False - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return False @@ -914,7 +912,7 @@ def get_forced_variation(self, experiment_key, user_id): self.logger.error(enums.Errors.INVALID_INPUT.format('experiment_key')) return None - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -960,7 +958,7 @@ def create_user_context(self, user_id, attributes=None): Returns: UserContext instance or None if the user id or attributes are invalid. 
""" - if not isinstance(user_id, string_types): + if not isinstance(user_id, str): self.logger.error(enums.Errors.INVALID_INPUT.format('user_id')) return None @@ -995,7 +993,7 @@ def _decide(self, user_context, key, decide_options=None): return OptimizelyDecision(flag_key=key, user_context=user_context, reasons=reasons) # validate that key is a string - if not isinstance(key, string_types): + if not isinstance(key, str): self.logger.error('Key parameter is invalid') reasons.append(OptimizelyDecisionMessage.FLAG_KEY_INVALID.format(key)) return OptimizelyDecision(flag_key=key, user_context=user_context, reasons=reasons) @@ -1082,8 +1080,8 @@ def _decide(self, user_context, key, decide_options=None): if feature_enabled: variable_value = config.get_variable_value_for_variation(variable, decision.variation) self.logger.debug( - 'Got variable value "%s" for variable "%s" of feature flag "%s".' - % (variable_value, variable_key, flag_key) + f'Got variable value "{variable_value}" for ' + f'variable "{variable_key}" of feature flag "{flag_key}".' 
) try: diff --git a/optimizely/optimizely_config.py b/optimizely/optimizely_config.py index 5e9b58d2..16cf4fce 100644 --- a/optimizely/optimizely_config.py +++ b/optimizely/optimizely_config.py @@ -17,7 +17,7 @@ from .project_config import ProjectConfig -class OptimizelyConfig(object): +class OptimizelyConfig: def __init__(self, revision, experiments_map, features_map, datafile=None, sdk_key=None, environment_key=None, attributes=None, events=None, audiences=None): @@ -46,7 +46,7 @@ def get_datafile(self): return self._datafile -class OptimizelyExperiment(object): +class OptimizelyExperiment: def __init__(self, id, key, variations_map, audiences=''): self.id = id self.key = key @@ -54,7 +54,7 @@ def __init__(self, id, key, variations_map, audiences=''): self.audiences = audiences -class OptimizelyFeature(object): +class OptimizelyFeature: def __init__(self, id, key, experiments_map, variables_map): self.id = id self.key = key @@ -68,7 +68,7 @@ def __init__(self, id, key, experiments_map, variables_map): self.experiment_rules = [] -class OptimizelyVariation(object): +class OptimizelyVariation: def __init__(self, id, key, feature_enabled, variables_map): self.id = id self.key = key @@ -76,7 +76,7 @@ def __init__(self, id, key, feature_enabled, variables_map): self.variables_map = variables_map -class OptimizelyVariable(object): +class OptimizelyVariable: def __init__(self, id, key, variable_type, value): self.id = id self.key = key @@ -84,27 +84,27 @@ def __init__(self, id, key, variable_type, value): self.value = value -class OptimizelyAttribute(object): +class OptimizelyAttribute: def __init__(self, id, key): self.id = id self.key = key -class OptimizelyEvent(object): +class OptimizelyEvent: def __init__(self, id, key, experiment_ids): self.id = id self.key = key self.experiment_ids = experiment_ids -class OptimizelyAudience(object): +class OptimizelyAudience: def __init__(self, id, name, conditions): self.id = id self.name = name self.conditions = conditions 
-class OptimizelyConfigService(object): +class OptimizelyConfigService: """ Class encapsulating methods to be used in creating instance of OptimizelyConfig. """ def __init__(self, project_config): diff --git a/optimizely/optimizely_factory.py b/optimizely/optimizely_factory.py index d9da72ba..a5ff2995 100644 --- a/optimizely/optimizely_factory.py +++ b/optimizely/optimizely_factory.py @@ -19,7 +19,7 @@ from .optimizely import Optimizely -class OptimizelyFactory(object): +class OptimizelyFactory: """ Optimizely factory to provides basic utility to instantiate the Optimizely SDK with a minimal number of configuration options.""" diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index f096ced5..32a06a8e 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -17,7 +17,7 @@ import threading -class OptimizelyUserContext(object): +class OptimizelyUserContext: """ Representation of an Optimizely User Context using which APIs are to be called. """ @@ -47,7 +47,7 @@ def __init__(self, optimizely_client, logger, user_id, user_attributes=None): self.forced_decisions_map = {} # decision context - class OptimizelyDecisionContext(object): + class OptimizelyDecisionContext: """ Using class with attributes here instead of namedtuple because class is extensible, it's easy to add another attribute if we wanted to extend decision context. 
@@ -63,7 +63,7 @@ def __eq__(self, other): return (self.flag_key, self.rule_key) == (other.flag_key, other.rule_key) # forced decision - class OptimizelyForcedDecision(object): + class OptimizelyForcedDecision: def __init__(self, variation_key): self.variation_key = variation_key diff --git a/optimizely/project_config.py b/optimizely/project_config.py index 12fd1086..9c0afe7a 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -28,7 +28,7 @@ RESERVED_ATTRIBUTE_PREFIX = '$opt_' -class ProjectConfig(object): +class ProjectConfig: """ Representation of the Optimizely project config. """ def __init__(self, datafile, logger, error_handler): @@ -309,7 +309,7 @@ def get_experiment_from_key(self, experiment_key): if experiment: return experiment - self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key) + self.logger.error(f'Experiment key "{experiment_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None @@ -328,7 +328,7 @@ def get_experiment_from_id(self, experiment_id): if experiment: return experiment - self.logger.error('Experiment ID "%s" is not in datafile.' % experiment_id) + self.logger.error(f'Experiment ID "{experiment_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None @@ -347,7 +347,7 @@ def get_group(self, group_id): if group: return group - self.logger.error('Group ID "%s" is not in datafile.' % group_id) + self.logger.error(f'Group ID "{group_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID)) return None @@ -365,7 +365,7 @@ def get_audience(self, audience_id): if audience: return audience - self.logger.error('Audience ID "%s" is not in datafile.' 
% audience_id) + self.logger.error(f'Audience ID "{audience_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE))) def get_variation_from_key(self, experiment_key, variation_key): @@ -387,11 +387,11 @@ def get_variation_from_key(self, experiment_key, variation_key): if variation: return variation else: - self.logger.error('Variation key "%s" is not in datafile.' % variation_key) + self.logger.error(f'Variation key "{variation_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION)) return None - self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key) + self.logger.error(f'Experiment key "{experiment_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None @@ -413,11 +413,11 @@ def get_variation_from_id(self, experiment_key, variation_id): if variation: return variation else: - self.logger.error('Variation ID "%s" is not in datafile.' % variation_id) + self.logger.error(f'Variation ID "{variation_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidVariationException(enums.Errors.INVALID_VARIATION)) return None - self.logger.error('Experiment key "%s" is not in datafile.' % experiment_key) + self.logger.error(f'Experiment key "{experiment_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None @@ -436,7 +436,7 @@ def get_event(self, event_key): if event: return event - self.logger.error('Event "%s" is not in datafile.' 
% event_key) + self.logger.error(f'Event "{event_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY)) return None @@ -457,8 +457,8 @@ def get_attribute_id(self, attribute_key): if has_reserved_prefix: self.logger.warning( ( - 'Attribute %s unexpectedly has reserved prefix %s; using attribute ID ' - 'instead of reserved attribute name.' % (attribute_key, RESERVED_ATTRIBUTE_PREFIX) + f'Attribute {attribute_key} unexpectedly has reserved prefix {RESERVED_ATTRIBUTE_PREFIX};' + f' using attribute ID instead of reserved attribute name.' ) ) @@ -467,7 +467,7 @@ def get_attribute_id(self, attribute_key): if has_reserved_prefix: return attribute_key - self.logger.error('Attribute "%s" is not in datafile.' % attribute_key) + self.logger.error(f'Attribute "{attribute_key}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE)) return None @@ -486,7 +486,7 @@ def get_feature_from_key(self, feature_key): if feature: return feature - self.logger.error('Feature "%s" is not in datafile.' % feature_key) + self.logger.error(f'Feature "{feature_key}" is not in datafile.') return None def get_rollout_from_id(self, rollout_id): @@ -504,7 +504,7 @@ def get_rollout_from_id(self, rollout_id): if layer: return layer - self.logger.error('Rollout with ID "%s" is not in datafile.' % rollout_id) + self.logger.error(f'Rollout with ID "{rollout_id}" is not in datafile.') return None def get_variable_value_for_variation(self, variable, variation): @@ -521,7 +521,7 @@ def get_variable_value_for_variation(self, variable, variation): if not variable or not variation: return None if variation.id not in self.variation_variable_usage_map: - self.logger.error('Variation with ID "%s" is not in the datafile.' 
% variation.id) + self.logger.error(f'Variation with ID "{variation.id}" is not in the datafile.') return None # Get all variable usages for the given variation @@ -553,11 +553,11 @@ def get_variable_for_feature(self, feature_key, variable_key): feature = self.feature_key_map.get(feature_key) if not feature: - self.logger.error('Feature with key "%s" not found in the datafile.' % feature_key) + self.logger.error(f'Feature with key "{feature_key}" not found in the datafile.') return None if variable_key not in feature.variables: - self.logger.error('Variable with key "%s" not found in the datafile.' % variable_key) + self.logger.error(f'Variable with key "{variable_key}" not found in the datafile.') return None return feature.variables.get(variable_key) @@ -612,8 +612,9 @@ def get_variation_from_id_by_experiment_id(self, experiment_id, variation_id): variation_id in self.variation_id_map_by_experiment_id[experiment_id]): return self.variation_id_map_by_experiment_id[experiment_id][variation_id] - self.logger.error('Variation with id "%s" not defined in the datafile for experiment "%s".' % - (variation_id, experiment_id)) + self.logger.error( + f'Variation with id "{variation_id}" not defined in the datafile for experiment "{experiment_id}".' + ) return {} @@ -628,8 +629,9 @@ def get_variation_from_key_by_experiment_id(self, experiment_id, variation_key): variation_key in self.variation_key_map_by_experiment_id[experiment_id]): return self.variation_key_map_by_experiment_id[experiment_id][variation_key] - self.logger.error('Variation with key "%s" not defined in the datafile for experiment "%s".' % - (variation_key, experiment_id)) + self.logger.error( + f'Variation with key "{variation_key}" not defined in the datafile for experiment "{experiment_id}".' 
+ ) return {} diff --git a/optimizely/user_profile.py b/optimizely/user_profile.py index 177bfc7c..2ff9e038 100644 --- a/optimizely/user_profile.py +++ b/optimizely/user_profile.py @@ -12,7 +12,7 @@ # limitations under the License. -class UserProfile(object): +class UserProfile: """ Class encapsulating information representing a user's profile. user_id: User's identifier. @@ -54,7 +54,7 @@ def save_variation_for_experiment(self, experiment_id, variation_id): self.experiment_bucket_map.update({experiment_id: {self.VARIATION_ID_KEY: variation_id}}) -class UserProfileService(object): +class UserProfileService: """ Class encapsulating user profile service functionality. Override with your own implementation for storing and retrieving the user profile. """ diff --git a/requirements/core.txt b/requirements/core.txt index f5362041..45db2ece 100644 --- a/requirements/core.txt +++ b/requirements/core.txt @@ -4,4 +4,3 @@ requests>=2.21 pyOpenSSL>=19.1.0 cryptography>=2.8.0 idna>=2.10 -six>=1.12.0 diff --git a/requirements/test.txt b/requirements/test.txt index 069b65b7..c2e086c8 100644 --- a/requirements/test.txt +++ b/requirements/test.txt @@ -1,7 +1,6 @@ coverage flake8 >= 4.0.1 funcsigs >= 0.4 -mock >= 4.0.0 pytest >= 6.2.0 pytest-cov python-coveralls \ No newline at end of file diff --git a/setup.py b/setup.py index e66ce1fe..d40a23b6 100644 --- a/setup.py +++ b/setup.py @@ -46,12 +46,10 @@ 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', ], packages=find_packages(exclude=['docs', 'tests']), extras_require={'test': TEST_REQUIREMENTS}, diff --git a/tests/base.py 
b/tests/base.py index 05127caf..d2bc9692 100644 --- a/tests/base.py +++ b/tests/base.py @@ -13,20 +13,12 @@ import json import unittest -from six import PY3 from optimizely import optimizely -if PY3: - def long(a): - raise NotImplementedError('Tests should only call `long` if running in PY2') - -# Check to verify if TestCase has the attribute assertRasesRegex or assertRaisesRegexp -# This check depends on the version of python with assertRaisesRegexp being used by -# python2.7. Later versions of python are using the non-deprecated assertRaisesRegex. -if not hasattr(unittest.TestCase, 'assertRaisesRegex'): - unittest.TestCase.assertRaisesRegex = getattr(unittest.TestCase, 'assertRaisesRegexp') +def long(a): + raise NotImplementedError('Tests should only call `long` if running in PY2') class BaseTest(unittest.TestCase): diff --git a/tests/helpers_tests/test_audience.py b/tests/helpers_tests/test_audience.py index 719705d6..9c29bb72 100644 --- a/tests/helpers_tests/test_audience.py +++ b/tests/helpers_tests/test_audience.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock from optimizely import optimizely from optimizely.helpers import audience @@ -361,11 +361,11 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): [ mock.call.debug('Evaluating audiences for experiment "test_experiment": ["11154", "11159"].'), mock.call.debug( - 'Starting to evaluate audience "11154" with conditions: ' + audience_11154.conditions + '.' + f'Starting to evaluate audience "11154" with conditions: {audience_11154.conditions}.' ), mock.call.debug('Audience "11154" evaluated to UNKNOWN.'), mock.call.debug( - 'Starting to evaluate audience "11159" with conditions: ' + audience_11159.conditions + '.' + f'Starting to evaluate audience "11159" with conditions: {audience_11159.conditions}.' 
), mock.call.debug('Audience "11159" evaluated to UNKNOWN.'), mock.call.info('Audiences for experiment "test_experiment" collectively evaluated to FALSE.'), @@ -409,17 +409,17 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) ), mock.call.debug( 'Starting to evaluate audience "3468206642" with ' - 'conditions: ' + audience_3468206642.conditions + '.' + f'conditions: {audience_3468206642.conditions}.' ), mock.call.debug('Audience "3468206642" evaluated to FALSE.'), mock.call.debug( 'Starting to evaluate audience "3988293898" with ' - 'conditions: ' + audience_3988293898.conditions + '.' + f'conditions: {audience_3988293898.conditions}.' ), mock.call.debug('Audience "3988293898" evaluated to UNKNOWN.'), mock.call.debug( 'Starting to evaluate audience "3988293899" with ' - 'conditions: ' + audience_3988293899.conditions + '.' + f'conditions: {audience_3988293899.conditions}.' ), mock.call.debug('Audience "3988293899" evaluated to TRUE.'), mock.call.info( @@ -484,11 +484,11 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): [ mock.call.debug('Evaluating audiences for rule test_rule: ["11154", "11159"].'), mock.call.debug( - 'Starting to evaluate audience "11154" with conditions: ' + audience_11154.conditions + '.' + f'Starting to evaluate audience "11154" with conditions: {audience_11154.conditions}.' ), mock.call.debug('Audience "11154" evaluated to UNKNOWN.'), mock.call.debug( - 'Starting to evaluate audience "11159" with conditions: ' + audience_11159.conditions + '.' + f'Starting to evaluate audience "11159" with conditions: {audience_11159.conditions}.' 
), mock.call.debug('Audience "11159" evaluated to UNKNOWN.'), mock.call.info('Audiences for rule test_rule collectively evaluated to FALSE.'), @@ -533,17 +533,17 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) ), mock.call.debug( 'Starting to evaluate audience "3468206642" with ' - 'conditions: ' + audience_3468206642.conditions + '.' + f'conditions: {audience_3468206642.conditions}.' ), mock.call.debug('Audience "3468206642" evaluated to FALSE.'), mock.call.debug( 'Starting to evaluate audience "3988293898" with ' - 'conditions: ' + audience_3988293898.conditions + '.' + f'conditions: {audience_3988293898.conditions}.' ), mock.call.debug('Audience "3988293898" evaluated to UNKNOWN.'), mock.call.debug( 'Starting to evaluate audience "3988293899" with ' - 'conditions: ' + audience_3988293899.conditions + '.' + f'conditions: {audience_3988293899.conditions}.' ), mock.call.debug('Audience "3988293899" evaluated to TRUE.'), mock.call.info( diff --git a/tests/helpers_tests/test_condition.py b/tests/helpers_tests/test_condition.py index 78dfe38c..3f8c6c16 100644 --- a/tests/helpers_tests/test_condition.py +++ b/tests/helpers_tests/test_condition.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock from optimizely.helpers import condition as condition_helper @@ -118,7 +118,7 @@ def test_semver_eq__returns_true(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_eq__returns_false(self): @@ -128,7 +128,7 @@ def test_semver_eq__returns_false(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_le__returns_true(self): @@ -138,7 +138,7 @@ def test_semver_le__returns_true(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_less_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_le__returns_false(self): @@ -148,7 +148,7 @@ def test_semver_le__returns_false(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_less_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_ge__returns_true(self): @@ -158,7 +158,7 @@ def test_semver_ge__returns_true(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. 
Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_ge__returns_false(self): @@ -168,7 +168,7 @@ def test_semver_ge__returns_false(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_lt__returns_true(self): @@ -178,7 +178,7 @@ def test_semver_lt__returns_true(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_less_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_lt__returns_false(self): @@ -188,7 +188,7 @@ def test_semver_lt__returns_false(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_less_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_semver_gt__returns_true(self): @@ -198,7 +198,7 @@ def test_semver_gt__returns_true(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) def test_semver_gt__returns_false(self): @@ -208,7 +208,7 @@ def test_semver_gt__returns_false(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) def test_evaluate__returns_None__when_user_version_is_not_string(self): @@ -218,7 +218,7 @@ def test_evaluate__returns_None__when_user_version_is_not_string(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) def test_evaluate__returns_None__when_user_version_with_invalid_semantic(self): @@ -228,7 +228,7 @@ def test_evaluate__returns_None__when_user_version_with_invalid_semantic(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) def test_compare_user_version_with_target_version_equal_to_0(self): @@ -245,11 +245,8 @@ def test_compare_user_version_with_target_version_equal_to_0(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) - custom_err_msg = "Got {} in result. Failed for user version:" \ - " {} and target version: {}".format(result, - user_version, - target_version - ) + custom_err_msg = f"Got {result} in result. Failed for user version:" \ + f" {user_version} and target version: {target_version}" self.assertEqual(result, 0, custom_err_msg) def test_compare_user_version_with_target_version_greater_than_0(self): @@ -270,10 +267,8 @@ def test_compare_user_version_with_target_version_greater_than_0(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) - custom_err_msg = "Got {} in result. 
Failed for user version:" \ - " {} and target version: {}".format(result, - user_version, - target_version) + custom_err_msg = f"Got {result} in result. Failed for user version:" \ + f" {user_version} and target version: {target_version}" self.assertEqual(result, 1, custom_err_msg) def test_compare_user_version_with_target_version_less_than_0(self): @@ -294,10 +289,8 @@ def test_compare_user_version_with_target_version_less_than_0(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) - custom_err_msg = "Got {} in result. Failed for user version: {} " \ - "and target version: {}".format(result, - user_version, - target_version) + custom_err_msg = f"Got {result} in result. Failed for user version:" \ + f" {user_version} and target version: {target_version}" self.assertEqual(result, -1, custom_err_msg) def test_compare_invalid_user_version_with(self): @@ -310,7 +303,7 @@ def test_compare_invalid_user_version_with(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(user_version, target_version) - custom_err_msg = "Got {} in result. Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) def test_exists__returns_false__when_no_user_provided_value(self): @@ -1159,7 +1152,7 @@ def test_invalid_semver__returns_None__when_semver_is_invalid(self): semver_less_than_or_equal_2_0_1_condition_list, {'Android': user_version}, self.mock_client_logger) result = evaluator.evaluate(0) - custom_err_msg = "Got {} in result. 
Failed for user version: {}".format(result, user_version) + custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) @@ -1211,10 +1204,8 @@ def test_evaluate__match_type__invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" uses an unknown match ' - 'type. You may need to upgrade to a newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" uses an unknown match ' + 'type. You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_evaluate__condition_type__invalid(self): @@ -1237,10 +1228,8 @@ def test_evaluate__condition_type__invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" uses an unknown condition type. ' - 'You may need to upgrade to a newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" uses an unknown condition type. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_exact__user_value__missing(self): @@ -1263,10 +1252,8 @@ def test_exact__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because ' - 'no value was passed for user attribute "favorite_constellation".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN because ' + 'no value was passed for user attribute "favorite_constellation".' 
) def test_greater_than__user_value__missing(self): @@ -1289,10 +1276,8 @@ def test_greater_than__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because no value was passed for user ' - 'attribute "meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN ' + 'because no value was passed for user attribute "meters_travelled".' ) def test_less_than__user_value__missing(self): @@ -1315,10 +1300,8 @@ def test_less_than__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because no value was passed for user attribute ' - '"meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN ' + 'because no value was passed for user attribute "meters_travelled".' ) def test_substring__user_value__missing(self): @@ -1341,10 +1324,8 @@ def test_substring__user_value__missing(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition {} evaluated to UNKNOWN because no value was passed for ' - 'user attribute "headline_text".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition {json.dumps(expected_condition_log)} evaluated to UNKNOWN ' + 'because no value was passed for user attribute "headline_text".' ) def test_exists__user_value__missing(self): @@ -1381,10 +1362,8 @@ def test_exact__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed for user attribute ' - '"favorite_constellation".' 
- ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "favorite_constellation".' ) def test_greater_than__user_value__None(self): @@ -1407,10 +1386,8 @@ def test_greater_than__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed for ' - 'user attribute "meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "meters_travelled".' ) def test_less_than__user_value__None(self): @@ -1433,10 +1410,8 @@ def test_less_than__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed ' - 'for user attribute "meters_travelled".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "meters_travelled".' ) def test_substring__user_value__None(self): @@ -1459,10 +1434,8 @@ def test_substring__user_value__None(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a null value was ' - 'passed for user attribute "headline_text".' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN ' + 'because a null value was passed for user attribute "headline_text".' 
) def test_exists__user_value__None(self): @@ -1499,10 +1472,8 @@ def test_exact__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed for ' - 'user attribute "favorite_constellation".' - ).format(json.dumps(expected_condition_log), type({})) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{dict}" was passed for user attribute "favorite_constellation".' ) def test_greater_than__user_value__unexpected_type(self): @@ -1525,11 +1496,8 @@ def test_greater_than__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}"' - ' evaluated to UNKNOWN because a value of type "{}" was passed for user attribute ' - '"meters_travelled".' - ).format(json.dumps(expected_condition_log), type('48')) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{str}" was passed for user attribute "meters_travelled".' ) def test_less_than__user_value__unexpected_type(self): @@ -1552,11 +1520,8 @@ def test_less_than__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}"' - ' evaluated to UNKNOWN because a value of type "{}" was passed for user attribute ' - '"meters_travelled".' - ).format(json.dumps(expected_condition_log), type(True)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{bool}" was passed for user attribute "meters_travelled".' 
) def test_substring__user_value__unexpected_type(self): @@ -1579,10 +1544,8 @@ def test_substring__user_value__unexpected_type(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed for ' - 'user attribute "headline_text".' - ).format(json.dumps(expected_condition_log), type(1234)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{int}" was passed for user attribute "headline_text".' ) def test_exact__user_value__infinite(self): @@ -1605,10 +1568,8 @@ def test_exact__user_value__infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because the number value for ' - 'user attribute "meters_travelled" is not in the range [-2^53, +2^53].' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + 'the number value for user attribute "meters_travelled" is not in the range [-2^53, +2^53].' ) def test_greater_than__user_value__infinite(self): @@ -1631,11 +1592,9 @@ def test_greater_than__user_value__infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" ' - 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not' - ' in the range [-2^53, +2^53].' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" ' + 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not' + ' in the range [-2^53, +2^53].' 
) def test_less_than__user_value__infinite(self): @@ -1658,11 +1617,9 @@ def test_less_than__user_value__infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" ' - 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not in ' - 'the range [-2^53, +2^53].' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" ' + 'evaluated to UNKNOWN because the number value for user attribute "meters_travelled" is not in ' + 'the range [-2^53, +2^53].' ) def test_exact__user_value_type_mismatch(self): @@ -1685,10 +1642,8 @@ def test_exact__user_value_type_mismatch(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed for ' - 'user attribute "favorite_constellation".' - ).format(json.dumps(expected_condition_log), type(5)) + f'Audience condition "{json.dumps(expected_condition_log)}" evaluated to UNKNOWN because ' + f'a value of type "{int}" was passed for user attribute "favorite_constellation".' ) def test_exact__condition_value_invalid(self): @@ -1711,10 +1666,8 @@ def test_exact__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' 
) def test_exact__condition_value_infinite(self): @@ -1737,10 +1690,8 @@ def test_exact__condition_value_infinite(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_greater_than__condition_value_invalid(self): @@ -1763,10 +1714,8 @@ def test_greater_than__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_less_than__condition_value_invalid(self): @@ -1789,10 +1738,8 @@ def test_less_than__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) def test_substring__condition_value_invalid(self): @@ -1815,8 +1762,6 @@ def test_substring__condition_value_invalid(self): mock_log = getattr(self.mock_client_logger, log_level) mock_log.assert_called_once_with( - ( - 'Audience condition "{}" has an unsupported condition value. 
You may need to upgrade to a ' - 'newer release of the Optimizely SDK.' - ).format(json.dumps(expected_condition_log)) + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' ) diff --git a/tests/helpers_tests/test_condition_tree_evaluator.py b/tests/helpers_tests/test_condition_tree_evaluator.py index 63405b90..233a895e 100644 --- a/tests/helpers_tests/test_condition_tree_evaluator.py +++ b/tests/helpers_tests/test_condition_tree_evaluator.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from optimizely.helpers.condition_tree_evaluator import evaluate from tests import base diff --git a/tests/helpers_tests/test_event_tag_utils.py b/tests/helpers_tests/test_event_tag_utils.py index 9b081629..011e11f5 100644 --- a/tests/helpers_tests/test_event_tag_utils.py +++ b/tests/helpers_tests/test_event_tag_utils.py @@ -115,39 +115,39 @@ def test_get_numeric_metric__value_tag(self): self.assertIsNone(event_tag_utils.get_numeric_value({'value': None}, self.logger)) numeric_value_nan = event_tag_utils.get_numeric_value({'value': float('nan')}, self.logger) - self.assertIsNone(numeric_value_nan, 'nan numeric value is {}'.format(numeric_value_nan)) + self.assertIsNone(numeric_value_nan, f'nan numeric value is {numeric_value_nan}') numeric_value_array = event_tag_utils.get_numeric_value({'value': []}, self.logger) - self.assertIsNone(numeric_value_array, 'Array numeric value is {}'.format(numeric_value_array)) + self.assertIsNone(numeric_value_array, f'Array numeric value is {numeric_value_array}') numeric_value_dict = event_tag_utils.get_numeric_value({'value': []}, self.logger) - self.assertIsNone(numeric_value_dict, 'Dict numeric value is {}'.format(numeric_value_dict)) + self.assertIsNone(numeric_value_dict, f'Dict numeric value is 
{numeric_value_dict}') numeric_value_none = event_tag_utils.get_numeric_value({'value': None}, self.logger) - self.assertIsNone(numeric_value_none, 'None numeric value is {}'.format(numeric_value_none)) + self.assertIsNone(numeric_value_none, f'None numeric value is {numeric_value_none}') numeric_value_invalid_literal = event_tag_utils.get_numeric_value( {'value': '1,234'}, self.logger ) self.assertIsNone( - numeric_value_invalid_literal, 'Invalid string literal value is {}'.format(numeric_value_invalid_literal), + numeric_value_invalid_literal, f'Invalid string literal value is {numeric_value_invalid_literal}', ) numeric_value_overflow = event_tag_utils.get_numeric_value( {'value': sys.float_info.max * 10}, self.logger ) self.assertIsNone( - numeric_value_overflow, 'Max numeric value is {}'.format(numeric_value_overflow), + numeric_value_overflow, f'Max numeric value is {numeric_value_overflow}', ) numeric_value_inf = event_tag_utils.get_numeric_value({'value': float('inf')}, self.logger) - self.assertIsNone(numeric_value_inf, 'Infinity numeric value is {}'.format(numeric_value_inf)) + self.assertIsNone(numeric_value_inf, f'Infinity numeric value is {numeric_value_inf}') numeric_value_neg_inf = event_tag_utils.get_numeric_value( {'value': float('-inf')}, self.logger ) self.assertIsNone( - numeric_value_neg_inf, 'Negative infinity numeric value is {}'.format(numeric_value_neg_inf), + numeric_value_neg_inf, f'Negative infinity numeric value is {numeric_value_neg_inf}', ) self.assertEqual( diff --git a/tests/helpers_tests/test_experiment.py b/tests/helpers_tests/test_experiment.py index 58f9b6d8..ae6a5047 100644 --- a/tests/helpers_tests/test_experiment.py +++ b/tests/helpers_tests/test_experiment.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import mock +from unittest import mock from tests import base from optimizely import entities diff --git a/tests/helpers_tests/test_validator.py b/tests/helpers_tests/test_validator.py index 2a97a538..ecee3b74 100644 --- a/tests/helpers_tests/test_validator.py +++ b/tests/helpers_tests/test_validator.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock from optimizely import config_manager from optimizely import error_handler @@ -34,7 +34,7 @@ def test_is_config_manager_valid__returns_true(self): def test_is_config_manager_valid__returns_false(self): """ Test that invalid config_manager returns False for invalid config manager implementation. """ - class CustomConfigManager(object): + class CustomConfigManager: def some_other_method(self): pass @@ -48,7 +48,7 @@ def test_is_event_processor_valid__returns_true(self): def test_is_event_processor_valid__returns_false(self): """ Test that invalid event_processor returns False. """ - class CustomEventProcessor(object): + class CustomEventProcessor: def some_other_method(self): pass @@ -72,7 +72,7 @@ def test_is_event_dispatcher_valid__returns_true(self): def test_is_event_dispatcher_valid__returns_false(self): """ Test that invalid event_dispatcher returns False. """ - class CustomEventDispatcher(object): + class CustomEventDispatcher: def some_other_method(self): pass @@ -86,7 +86,7 @@ def test_is_logger_valid__returns_true(self): def test_is_logger_valid__returns_false(self): """ Test that invalid logger returns False. """ - class CustomLogger(object): + class CustomLogger: def some_other_method(self): pass @@ -100,7 +100,7 @@ def test_is_error_handler_valid__returns_true(self): def test_is_error_handler_valid__returns_false(self): """ Test that invalid error_handler returns False. 
""" - class CustomErrorHandler(object): + class CustomErrorHandler: def some_other_method(self): pass diff --git a/tests/test_bucketing.py b/tests/test_bucketing.py index e71ae8af..36adce75 100644 --- a/tests/test_bucketing.py +++ b/tests/test_bucketing.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock import random from optimizely import bucketer diff --git a/tests/test_config.py b/tests/test_config.py index 83ebb18c..bf324052 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock from optimizely import entities from optimizely import error_handler diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py index 13f22019..75b5aaf7 100644 --- a/tests/test_config_manager.py +++ b/tests/test_config_manager.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock import requests import time @@ -29,7 +29,7 @@ class StaticConfigManagerTest(base.BaseTest): def test_init__invalid_logger_fails(self): """ Test that initialization fails if logger is invalid. """ - class InvalidLogger(object): + class InvalidLogger: pass with self.assertRaisesRegex( @@ -40,7 +40,7 @@ class InvalidLogger(object): def test_init__invalid_error_handler_fails(self): """ Test that initialization fails if error_handler is invalid. """ - class InvalidErrorHandler(object): + class InvalidErrorHandler: pass with self.assertRaisesRegex( @@ -51,7 +51,7 @@ class InvalidErrorHandler(object): def test_init__invalid_notification_center_fails(self): """ Test that initialization fails if notification_center is invalid. 
""" - class InvalidNotificationCenter(object): + class InvalidNotificationCenter: pass with self.assertRaisesRegex( @@ -289,7 +289,7 @@ def test_get_datafile_url__invalid_url_template_raises(self, _): test_url_template = 'invalid_url_template_without_sdk_key_field_{key}' self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, - 'Invalid url_template {} provided'.format(test_url_template), + f'Invalid url_template {test_url_template} provided', config_manager.PollingConfigManager.get_datafile_url, 'optly_datafile_key', None, @@ -459,7 +459,7 @@ def test_fetch_datafile(self, _): def test_fetch_datafile__status_exception_raised(self, _): """ Test that config_manager keeps running if status code exception is raised when fetching datafile. """ - class MockExceptionResponse(object): + class MockExceptionResponse: def raise_for_status(self): raise requests.exceptions.RequestException('Error Error !!') @@ -505,9 +505,9 @@ def raise_for_status(self): headers={'If-Modified-Since': test_headers['Last-Modified']}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) - mock_logger.error.assert_called_once_with('Fetching datafile from {} failed. Error: Error Error !!'.format( - expected_datafile_url - )) + mock_logger.error.assert_called_once_with( + f'Fetching datafile from {expected_datafile_url} failed. Error: Error Error !!' + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Confirm that config manager keeps running @@ -563,9 +563,9 @@ def test_fetch_datafile__request_exception_raised(self, _): headers={'If-Modified-Since': test_headers['Last-Modified']}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) - mock_logger.error.assert_called_once_with('Fetching datafile from {} failed. Error: Error Error !!'.format( - expected_datafile_url - )) + mock_logger.error.assert_called_once_with( + f'Fetching datafile from {expected_datafile_url} failed. 
Error: Error Error !!' + ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Confirm that config manager keeps running @@ -633,8 +633,7 @@ def test_fetch_datafile(self, _): mock_request.assert_called_once_with( expected_datafile_url, - headers={'Authorization': 'Bearer {datafile_access_token}'.format( - datafile_access_token=datafile_access_token)}, + headers={'Authorization': f'Bearer {datafile_access_token}'}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) @@ -670,8 +669,7 @@ def test_fetch_datafile__request_exception_raised(self, _): mock_request.assert_called_once_with( expected_datafile_url, - headers={'Authorization': 'Bearer {datafile_access_token}'.format( - datafile_access_token=datafile_access_token)}, + headers={'Authorization': f'Bearer {datafile_access_token}'}, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) @@ -692,14 +690,13 @@ def test_fetch_datafile__request_exception_raised(self, _): expected_datafile_url, headers={ 'If-Modified-Since': test_headers['Last-Modified'], - 'Authorization': 'Bearer {datafile_access_token}'.format( - datafile_access_token=datafile_access_token), + 'Authorization': f'Bearer {datafile_access_token}', }, timeout=enums.ConfigManager.REQUEST_TIMEOUT, ) - mock_logger.error.assert_called_once_with('Fetching datafile from {} failed. Error: Error Error !!'.format( - expected_datafile_url - )) + mock_logger.error.assert_called_once_with( + f'Fetching datafile from {expected_datafile_url} failed. Error: Error Error !!' 
+ ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Confirm that config manager keeps running diff --git a/tests/test_decision_service.py b/tests/test_decision_service.py index dc5bbfe7..dd1f7fee 100644 --- a/tests/test_decision_service.py +++ b/tests/test_decision_service.py @@ -13,7 +13,7 @@ import json -import mock +from unittest import mock from optimizely import decision_service from optimizely import entities diff --git a/tests/test_event_builder.py b/tests/test_event_builder.py index 6147c9db..fb4d7a0d 100644 --- a/tests/test_event_builder.py +++ b/tests/test_event_builder.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock import unittest from operator import itemgetter diff --git a/tests/test_event_dispatcher.py b/tests/test_event_dispatcher.py index 15e89180..aa6ddc32 100644 --- a/tests/test_event_dispatcher.py +++ b/tests/test_event_dispatcher.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock import json import unittest from requests import exceptions as request_exception diff --git a/tests/test_event_factory.py b/tests/test_event_factory.py index ec92a3dd..adbebd35 100644 --- a/tests/test_event_factory.py +++ b/tests/test_event_factory.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock import time import unittest import uuid diff --git a/tests/test_event_processor.py b/tests/test_event_processor.py index 707ac00f..0656453c 100644 --- a/tests/test_event_processor.py +++ b/tests/test_event_processor.py @@ -12,9 +12,9 @@ # limitations under the License. 
import datetime -import mock +from unittest import mock import time -from six.moves import queue +import queue from optimizely.event.payload import Decision, Visitor from optimizely.event.event_processor import ( @@ -30,7 +30,7 @@ from . import base -class CanonicalEvent(object): +class CanonicalEvent: def __init__(self, experiment_id, variation_id, event_name, visitor_id, attributes, tags): self._experiment_id = experiment_id self._variation_id = variation_id @@ -46,7 +46,7 @@ def __eq__(self, other): return self.__dict__ == other.__dict__ -class CustomEventDispatcher(object): +class CustomEventDispatcher: IMPRESSION_EVENT_NAME = 'campaign_activated' @@ -116,7 +116,7 @@ class BatchEventProcessorTest(base.BaseTest): MAX_BATCH_SIZE = 10 MAX_DURATION_SEC = 0.2 MAX_TIMEOUT_INTERVAL_SEC = 0.1 - TEST_TIMEOUT = 0.3 + TEST_TIMEOUT = 10 def setUp(self, *args, **kwargs): base.BaseTest.setUp(self, 'config_dict_with_multiple_experiments') @@ -155,7 +155,11 @@ def test_drain_on_stop(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events() or not self.event_processor.event_queue.empty(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -170,7 +174,11 @@ def test_flush_on_max_timeout(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, 
self.event_processor.event_queue.qsize()) @@ -187,7 +195,11 @@ def test_flush_once_max_timeout(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events() or mock_config_logging.debug.call_count < 3: + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -209,7 +221,11 @@ def test_flush_max_batch_size(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -229,7 +245,11 @@ def test_flush(self): self.event_processor.flush() event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -254,7 +274,11 @@ def test_flush_on_mismatch_revision(self): self.event_processor.process(user_event_2) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= 
self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -279,7 +303,11 @@ def test_flush_on_mismatch_project_id(self): self.event_processor.process(user_event_2) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.assertEqual(0, self.event_processor.event_queue.qsize()) @@ -294,7 +322,11 @@ def test_stop_and_start(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing, up to TEST_TIMEOUT + start_time = time.time() + while not event_dispatcher.compare_events(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break self.assertStrictTrue(event_dispatcher.compare_events()) self.event_processor.stop() @@ -517,15 +549,29 @@ def test_warning_log_level_on_queue_overflow(self): self.event_processor.process(user_event) event_dispatcher.expect_conversion(self.event_name, self.test_user_id) - time.sleep(self.TEST_TIMEOUT) + # wait for events to finish processing and queue to clear, up to TEST_TIMEOUT + start_time = time.time() + while not self.event_processor.event_queue.empty(): + if time.time() - start_time >= self.TEST_TIMEOUT: + break # queue is flushed, even though events overflow self.assertEqual(0, self.event_processor.event_queue.qsize()) - mock_config_logging.warning.assert_called_with('Payload not accepted by the queue. 
Current size: {}' - .format(str(test_max_queue_size))) + class AnyStringWith(str): + '''allows a partial match on the log message''' + def __eq__(self, other): + return self in other -class CustomForwardingEventDispatcher(object): + # the qsize method is approximate and since no lock is taken on the queue + # it can return an indeterminate count + # thus we can't rely on this error message to always report the max_queue_size + mock_config_logging.warning.assert_called_with( + AnyStringWith('Payload not accepted by the queue. Current size: ') + ) + + +class CustomForwardingEventDispatcher: def __init__(self, is_updated=False): self.is_updated = is_updated @@ -568,7 +614,7 @@ def test_event_processor__dispatch_raises_exception(self): event_processor.process(user_event) mock_client_logging.exception.assert_called_once_with( - 'Error dispatching event: ' + str(log_event) + ' Failed to send.' + f'Error dispatching event: {log_event} Failed to send.' ) def test_event_processor__with_test_event_dispatcher(self): diff --git a/tests/test_logger.py b/tests/test_logger.py index 64cd1378..ee432735 100644 --- a/tests/test_logger.py +++ b/tests/test_logger.py @@ -14,7 +14,7 @@ import unittest import uuid -import mock +from unittest import mock from optimizely import logger as _logger @@ -105,7 +105,7 @@ def test_reset_logger(self): def test_reset_logger__replaces_handlers(self): """Test that reset_logger replaces existing handlers with a StreamHandler.""" - logger_name = 'test-logger-{}'.format(uuid.uuid4()) + logger_name = f'test-logger-{uuid.uuid4()}' logger = logging.getLogger(logger_name) logger.handlers = [logging.StreamHandler() for _ in range(10)] @@ -121,7 +121,7 @@ def test_reset_logger__replaces_handlers(self): def test_reset_logger__with_handler__existing(self): """Test that reset_logger deals with provided handlers correctly.""" existing_handler = logging.NullHandler() - logger_name = 'test-logger-{}'.format(uuid.uuid4()) + logger_name = f'test-logger-{uuid.uuid4()}' 
reset_logger = _logger.reset_logger(logger_name, handler=existing_handler) self.assertEqual(1, len(reset_logger.handlers)) @@ -133,6 +133,6 @@ def test_reset_logger__with_handler__existing(self): def test_reset_logger__with_level(self): """Test that reset_logger sets log levels correctly.""" - logger_name = 'test-logger-{}'.format(uuid.uuid4()) + logger_name = f'test-logger-{uuid.uuid4()}' reset_logger = _logger.reset_logger(logger_name, level=logging.DEBUG) self.assertEqual(logging.DEBUG, reset_logger.level) diff --git a/tests/test_notification_center.py b/tests/test_notification_center.py index 2ac30903..02ef5951 100644 --- a/tests/test_notification_center.py +++ b/tests/test_notification_center.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock import unittest from optimizely import notification_center @@ -309,5 +309,5 @@ def some_listener(arg_1, arg_2): # Not providing any of the 2 expected arguments during send. test_notification_center.send_notifications(enums.NotificationTypes.ACTIVATE) mock_logger.exception.assert_called_once_with( - 'Unknown problem when sending "{}" type notification.'.format(enums.NotificationTypes.ACTIVATE) + f'Unknown problem when sending "{enums.NotificationTypes.ACTIVATE}" type notification.' ) diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index f1956cf1..a2a4e036 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -14,8 +14,7 @@ import json from operator import itemgetter -import mock -import six +from unittest import mock from optimizely import config_manager from optimizely import decision_service @@ -37,12 +36,12 @@ class OptimizelyTest(base.BaseTest): strTest = None try: - isinstance("test", six.string_types) # attempt to evaluate string + isinstance("test", str) # attempt to evaluate string _expected_notification_failure = 'Problem calling notify callback.' 
def isstr(self, s): - return isinstance(s, six.string_types) + return isinstance(s, str) strTest = isstr @@ -118,7 +117,7 @@ def test_init__empty_datafile__logs_error(self): def test_init__invalid_config_manager__logs_error(self): """ Test that invalid config_manager logs error on init. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass mock_client_logger = mock.MagicMock() @@ -131,7 +130,7 @@ class InvalidConfigManager(object): def test_init__invalid_event_dispatcher__logs_error(self): """ Test that invalid event_dispatcher logs error on init. """ - class InvalidDispatcher(object): + class InvalidDispatcher: pass mock_client_logger = mock.MagicMock() @@ -144,7 +143,7 @@ class InvalidDispatcher(object): def test_init__invalid_event_processor__logs_error(self): """ Test that invalid event_processor logs error on init. """ - class InvalidProcessor(object): + class InvalidProcessor: pass mock_client_logger = mock.MagicMock() @@ -157,7 +156,7 @@ class InvalidProcessor(object): def test_init__invalid_logger__logs_error(self): """ Test that invalid logger logs error on init. """ - class InvalidLogger(object): + class InvalidLogger: pass mock_client_logger = mock.MagicMock() @@ -170,7 +169,7 @@ class InvalidLogger(object): def test_init__invalid_error_handler__logs_error(self): """ Test that invalid error_handler logs error on init. """ - class InvalidErrorHandler(object): + class InvalidErrorHandler: pass mock_client_logger = mock.MagicMock() @@ -183,7 +182,7 @@ class InvalidErrorHandler(object): def test_init__invalid_notification_center__logs_error(self): """ Test that invalid notification_center logs error on init. 
""" - class InvalidNotificationCenter(object): + class InvalidNotificationCenter: pass mock_client_logger = mock.MagicMock() @@ -376,7 +375,7 @@ def on_activate(experiment, user_id, attributes, variation, event): self.assertTrue(isinstance(attributes, dict)) self.assertTrue(isinstance(variation, entities.Variation)) # self.assertTrue(isinstance(event, event_builder.Event)) - print("Activated experiment {0}".format(experiment.key)) + print(f"Activated experiment {experiment.key}") callbackhit[0] = True notification_id = self.optimizely.notification_center.add_notification_listener( @@ -1207,7 +1206,7 @@ def test_activate__bucketer_returns_none(self): def test_activate__invalid_object(self): """ Test that activate logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -1731,7 +1730,7 @@ def test_track__whitelisted_user_overrides_audience_check(self): def test_track__invalid_object(self): """ Test that track logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -1847,7 +1846,7 @@ def test_get_variation__returns_none(self): def test_get_variation__invalid_object(self): """ Test that get_variation logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -2458,7 +2457,7 @@ def test_is_feature_enabled__returns_false_when_variation_is_nil(self, ): def test_is_feature_enabled__invalid_object(self): """ Test that is_feature_enabled returns False and logs error if Optimizely instance is invalid. 
""" - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -2628,7 +2627,7 @@ def test_get_enabled_features__invalid_attributes(self): def test_get_enabled_features__invalid_object(self): """ Test that get_enabled_features returns empty list if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -4572,7 +4571,7 @@ def test_get_feature_variable_returns__default_value__complex_audience_match(sel def test_get_optimizely_config__invalid_object(self): """ Test that get_optimizely_config logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -4609,7 +4608,7 @@ def test_get_optimizely_config_with_custom_config_manager(self): some_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) return_config = some_obj.config_manager.get_config() - class SomeConfigManager(object): + class SomeConfigManager: def get_config(self): return return_config @@ -4721,7 +4720,7 @@ def test_track(self): self.optimizely.track(event_key, user_id) mock_client_logging.info.assert_has_calls( - [mock.call('Tracking event "%s" for user "%s".' % (event_key, user_id))] + [mock.call(f'Tracking event "{event_key}" for user "{user_id}".')] ) def test_activate__experiment_not_running(self): @@ -4960,7 +4959,7 @@ def test_get_variation__invalid_attributes__forced_bucketing(self): def test_set_forced_variation__invalid_object(self): """ Test that set_forced_variation logs error if Optimizely instance is invalid. 
""" - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) @@ -5008,7 +5007,7 @@ def test_set_forced_variation__invalid_user_id(self): def test_get_forced_variation__invalid_object(self): """ Test that get_forced_variation logs error if Optimizely instance is invalid. """ - class InvalidConfigManager(object): + class InvalidConfigManager: pass opt_obj = optimizely.Optimizely(json.dumps(self.config_dict), config_manager=InvalidConfigManager()) diff --git a/tests/test_optimizely_factory.py b/tests/test_optimizely_factory.py index 5db45680..7bed42af 100644 --- a/tests/test_optimizely_factory.py +++ b/tests/test_optimizely_factory.py @@ -11,7 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from optimizely.config_manager import PollingConfigManager from optimizely.error_handler import NoOpErrorHandler diff --git a/tests/test_user_context.py b/tests/test_user_context.py index 382ac999..25d58bc2 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -12,7 +12,7 @@ # limitations under the License. import json -import mock +from unittest import mock import threading from optimizely import optimizely, decision_service diff --git a/tests/testapp/user_profile_service.py b/tests/testapp/user_profile_service.py index 144697e5..381993dc 100644 --- a/tests/testapp/user_profile_service.py +++ b/tests/testapp/user_profile_service.py @@ -12,7 +12,7 @@ # limitations under the License. 
-class BaseUserProfileService(object): +class BaseUserProfileService: def __init__(self, user_profiles): self.user_profiles = {profile['user_id']: profile for profile in user_profiles} if user_profiles else {} From 6b590eb8deb6a2fc9ef1d1b57ebdadd9a1297039 Mon Sep 17 00:00:00 2001 From: Ozayr <54209343+ozayr-zaviar@users.noreply.github.com> Date: Tue, 28 Jun 2022 23:27:38 +0500 Subject: [PATCH 07/68] feat: BatchEventProcessor as Default Event Processor (#378) ForwardingEventProcessor sends calls in a synchronous manner so to reduce the time it is replaced with BatchEventProcessor which sends calls in an asynchronous manner. --- optimizely/optimizely.py | 21 +++++++-- tests/test_event_processor.py | 2 +- tests/test_optimizely.py | 80 ++++++++++++++++++----------------- 3 files changed, 60 insertions(+), 43 deletions(-) diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 7299129e..336cd151 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -25,7 +25,7 @@ from .decision_service import Decision from .error_handler import NoOpErrorHandler as noop_error_handler from .event import event_factory, user_event_factory -from .event.event_processor import ForwardingEventProcessor +from .event.event_processor import BatchEventProcessor from .event_dispatcher import EventDispatcher as default_event_dispatcher from .helpers import enums, validator from .helpers.enums import DecisionSources @@ -50,7 +50,8 @@ def __init__( notification_center=None, event_processor=None, datafile_access_token=None, - default_decide_options=None + default_decide_options=None, + event_processor_options=None ): """ Optimizely init method for managing Custom projects. @@ -78,6 +79,7 @@ def __init__( optimizely.event.event_processor.BatchEventProcessor. datafile_access_token: Optional string used to fetch authenticated datafile for a secure project environment. default_decide_options: Optional list of decide options used with the decide APIs. 
+ event_processor_options: Optional dict of options to be passed to the default batch event processor. """ self.logger_name = '.'.join([__name__, self.__class__.__name__]) self.is_valid = True @@ -86,8 +88,19 @@ def __init__( self.error_handler = error_handler or noop_error_handler self.config_manager = config_manager self.notification_center = notification_center or NotificationCenter(self.logger) - self.event_processor = event_processor or ForwardingEventProcessor( - self.event_dispatcher, logger=self.logger, notification_center=self.notification_center, + event_processor_defaults = { + 'batch_size': 1, + 'flush_interval': 30, + 'timeout_interval': 5, + 'start_on_init': True + } + if event_processor_options: + event_processor_defaults.update(event_processor_options) + self.event_processor = event_processor or BatchEventProcessor( + self.event_dispatcher, + logger=self.logger, + notification_center=self.notification_center, + **event_processor_defaults ) if default_decide_options is None: diff --git a/tests/test_event_processor.py b/tests/test_event_processor.py index 0656453c..4e45e6fc 100644 --- a/tests/test_event_processor.py +++ b/tests/test_event_processor.py @@ -116,7 +116,7 @@ class BatchEventProcessorTest(base.BaseTest): MAX_BATCH_SIZE = 10 MAX_DURATION_SEC = 0.2 MAX_TIMEOUT_INTERVAL_SEC = 0.1 - TEST_TIMEOUT = 10 + TEST_TIMEOUT = 15 def setUp(self, *args, **kwargs): base.BaseTest.setUp(self, 'config_dict_with_multiple_experiments') diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index a2a4e036..380a5088 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -307,7 +307,7 @@ def test_activate(self): ) as mock_decision, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: 
self.assertEqual('variation', self.optimizely.activate('test_experiment', 'test_user')) @@ -446,7 +446,7 @@ def on_activate(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=variation, - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast: self.assertEqual('variation', self.optimizely.activate('test_experiment', 'test_user')) @@ -487,7 +487,7 @@ def on_activate(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=variation, - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast: self.assertEqual( @@ -559,7 +559,7 @@ def on_track(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=(self.project_config.get_variation_from_id('test_experiment', '111128'), []), - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_event_tracked: self.optimizely.track('test_event', 'test_user') @@ -581,7 +581,7 @@ def on_track(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', 
return_value=(self.project_config.get_variation_from_id('test_experiment', '111128'), []), - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_event_tracked: self.optimizely.track('test_event', 'test_user', attributes={'test_attribute': 'test_value'}) @@ -608,7 +608,7 @@ def on_track(event_key, user_id, attributes, event_tags, event): with mock.patch( 'optimizely.decision_service.DecisionService.get_variation', return_value=(self.project_config.get_variation_from_id('test_experiment', '111128'), []), - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process, mock.patch( + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_event_tracked: self.optimizely.track( @@ -680,7 +680,7 @@ def on_activate(experiment, user_id, attributes, variation, event): return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.assertTrue(opt_obj.is_feature_enabled('test_feature_in_experiment', 'test_user')) @@ -701,7 +701,7 @@ def test_activate__with_attributes__audience_match(self): ) as mock_get_variation, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.assertEqual( 'variation', 
self.optimizely.activate('test_experiment', 'test_user', {'test_attribute': 'test_value'}), @@ -772,7 +772,7 @@ def test_activate__with_attributes_of_different_types(self): ) as mock_bucket, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: attributes = { 'test_attribute': 'test_value_1', @@ -849,7 +849,7 @@ def test_activate__with_attributes__typed_audience_match(self): variation when attributes are provided and typed audience conditions are met. """ opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match string audience with id '3468206642' self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'house': 'Gryffindor'}), @@ -865,7 +865,7 @@ def test_activate__with_attributes__typed_audience_match(self): mock_process.reset() - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match number audience with id '3468206646' self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'lasers': 45.5}), @@ -884,7 +884,7 @@ def test_activate__with_attributes__typed_audience_with_semver_match(self): variation when attributes are provided and typed audience conditions are met. 
""" opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match string audience with id '18278344267' self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'android-release': '1.0.1'}), @@ -900,7 +900,7 @@ def test_activate__with_attributes__typed_audience_with_semver_match(self): mock_process.reset() - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.assertEqual( 'A', opt_obj.activate('typed_audience_experiment', 'test_user', {'android-release': "1.2.2"}), ) @@ -935,7 +935,7 @@ def test_activate__with_attributes__complex_audience_match(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via substring match string audience with id '3988293898', and # exact match number audience with id '3468206646' user_attr = {'house': 'Welcome to Slytherin!', 'lasers': 45.5} @@ -978,7 +978,7 @@ def test_activate__with_attributes__audience_match__forced_bucketing(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.assertTrue(self.optimizely.set_forced_variation('test_experiment', 'test_user', 'control')) 
self.assertEqual( 'control', self.optimizely.activate('test_experiment', 'test_user', {'test_attribute': 'test_value'}), @@ -1044,7 +1044,7 @@ def test_activate__with_attributes__audience_match__bucketing_id_provided(self): ) as mock_get_variation, mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' ), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.assertEqual( 'variation', @@ -1233,7 +1233,7 @@ def test_track__with_attributes(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track('test_event', 'test_user', attributes={'test_attribute': 'test_value'}) expected_params = { @@ -1283,7 +1283,7 @@ def test_track__with_attributes__typed_audience_match(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via substring match string audience with id '3988293898' opt_obj.track('item_bought', 'test_user', {'house': 'Welcome to Slytherin!'}) @@ -1303,7 +1303,7 @@ def test_track__with_attributes__typed_audience_mismatch(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: opt_obj.track('item_bought', 
'test_user', {'house': 'Welcome to Hufflepuff!'}) self.assertEqual(1, mock_process.call_count) @@ -1314,7 +1314,7 @@ def test_track__with_attributes__complex_audience_match(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be included via exact match string audience with id '3468206642', and # exact match boolean audience with id '3468206643' user_attr = {'house': 'Gryffindor', 'should_do_it': True} @@ -1345,7 +1345,7 @@ def test_track__with_attributes__complex_audience_mismatch(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences)) - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: # Should be excluded - exact match boolean audience with id '3468206643' does not match, # so the overall conditions fail user_attr = {'house': 'Gryffindor', 'should_do_it': False} @@ -1359,7 +1359,7 @@ def test_track__with_attributes__bucketing_id_provided(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1417,7 +1417,7 @@ def test_track__with_attributes__no_audience_match(self): """ Test that track calls process even if audience conditions do not match. 
""" with mock.patch('time.time', return_value=42), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.optimizely.track( 'test_event', 'test_user', attributes={'test_attribute': 'wrong_test_value'}, @@ -1441,7 +1441,7 @@ def test_track__with_event_tags(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1498,7 +1498,7 @@ def test_track__with_event_tags_revenue(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1553,7 +1553,7 @@ def test_track__with_event_tags_numeric_metric(self): """ Test that track calls process with right params when only numeric metric event tags are provided. 
""" - with mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + with mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1584,7 +1584,7 @@ def test_track__with_event_tags__forced_bucketing(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.assertTrue(self.optimizely.set_forced_variation('test_experiment', 'test_user', 'variation')) self.optimizely.track( 'test_event', @@ -1642,7 +1642,7 @@ def test_track__with_invalid_event_tags(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track( 'test_event', 'test_user', @@ -1698,7 +1698,7 @@ def test_track__experiment_not_running(self): with mock.patch( 'optimizely.helpers.experiment.is_experiment_running', return_value=False ) as mock_is_experiment_running, mock.patch('time.time', return_value=42), mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process: self.optimizely.track('test_event', 'test_user') @@ -1722,7 +1722,7 @@ def test_track__whitelisted_user_overrides_audience_check(self): with mock.patch('time.time', return_value=42), mock.patch( 'uuid.uuid4', return_value='a68cf1ad-0393-4e18-af87-efe8f01a7c9c' - ), mock.patch('optimizely.event.event_processor.ForwardingEventProcessor.process') as 
mock_process: + ), mock.patch('optimizely.event.event_processor.BatchEventProcessor.process') as mock_process: self.optimizely.track('test_event', 'user_1') self.assertEqual(1, mock_process.call_count) @@ -1984,7 +1984,7 @@ def test_is_feature_enabled__returns_true_for_feature_experiment_if_feature_enab return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.FEATURE_TEST), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2084,7 +2084,7 @@ def test_is_feature_enabled__returns_false_for_feature_experiment_if_feature_dis return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.FEATURE_TEST), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2184,7 +2184,7 @@ def test_is_feature_enabled__returns_true_for_feature_rollout_if_feature_enabled return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2234,7 +2234,7 @@ def test_is_feature_enabled__returns_true_for_feature_rollout_if_feature_enabled return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) 
as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2336,7 +2336,7 @@ def test_is_feature_enabled__returns_false_for_feature_rollout_if_feature_disabl return_value=(decision_service.Decision(mock_experiment, mock_variation, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2378,7 +2378,7 @@ def test_is_feature_enabled__returns_false_when_user_is_not_bucketed_into_any_va 'optimizely.decision_service.DecisionService.get_variation_for_feature', return_value=(decision_service.Decision(None, None, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -2422,7 +2422,7 @@ def test_is_feature_enabled__returns_false_when_variation_is_nil(self, ): 'optimizely.decision_service.DecisionService.get_variation_for_feature', return_value=(decision_service.Decision(None, None, enums.DecisionSources.ROLLOUT), []), ) as mock_decision, mock.patch( - 'optimizely.event.event_processor.ForwardingEventProcessor.process' + 'optimizely.event.event_processor.BatchEventProcessor.process' ) as mock_process, mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -3335,7 +3335,11 
@@ def test_get_all_feature_variables_for_feature_in_rollout(self): def test_get_feature_variable_for_feature_in_rollout(self): """ Test that get_feature_variable returns value as expected and broadcasts decision with proper parameters. """ - opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) + opt_obj = optimizely.Optimizely( + json.dumps(self.config_dict_with_features), + # prevent event processor from injecting notification calls + event_processor_options={'start_on_init': False} + ) mock_experiment = opt_obj.config_manager.get_config().get_experiment_from_key('211127') mock_variation = opt_obj.config_manager.get_config().get_variation_from_id('211127', '211129') user_attributes = {'test_attribute': 'test_value'} From e2a77c57f086318938137db011fd453024de6f1e Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Fri, 1 Jul 2022 09:28:29 -0400 Subject: [PATCH 08/68] refactor: type hints public interface (#387) * add type hints * add null checks/disambiguation for typing * add type checking to workflows --- .github/workflows/python.yml | 34 ++++- mypy.ini | 23 +++ optimizely/entities.py | 3 +- optimizely/event/event_processor.py | 96 +++++++----- optimizely/event_dispatcher.py | 19 ++- optimizely/helpers/types.py | 78 ++++++++++ optimizely/logger.py | 7 +- optimizely/notification_center.py | 22 +-- optimizely/optimizely.py | 206 +++++++++++++++++--------- optimizely/optimizely_config.py | 111 +++++++++----- optimizely/optimizely_user_context.py | 57 ++++--- requirements/typing.txt | 4 + tests/test_optimizely.py | 6 +- 13 files changed, 479 insertions(+), 187 deletions(-) create mode 100644 mypy.ini create mode 100644 optimizely/helpers/types.py create mode 100644 requirements/typing.txt diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 574472de..9a801aea 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -24,11 +24,11 @@ jobs: gem 
install awesome_bot - name: Run tests run: find . -type f -name '*.md' -exec awesome_bot {} \; - + linting: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v3 - name: Set up Python 3.9 uses: actions/setup-python@v3 with: @@ -44,13 +44,13 @@ jobs: flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - + integration_tests: uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master secrets: CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} - + fullstack_production_suite: uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master with: @@ -58,13 +58,13 @@ jobs: secrets: CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} - + test: runs-on: ubuntu-latest strategy: fail-fast: false matrix: - python-version: ["pypy-3.7-v7.3.5", "3.7", "3.8", "3.9", "3.10.0"] + python-version: ["pypy-3.7-v7.3.5", "3.7", "3.8", "3.9", "3.10"] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} @@ -78,3 +78,25 @@ jobs: - name: Test with pytest run: | pytest --cov=optimizely + + type-check: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + python-version: ["3.7", "3.8", "3.9", "3.10"] + steps: + - uses: actions/checkout@v3 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements/typing.txt + - name: Type check with mypy + run: | + mypy . + # disabled until entire sdk is type hinted + # mypy . 
--exclude "tests/" --strict diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 00000000..51b2f56c --- /dev/null +++ b/mypy.ini @@ -0,0 +1,23 @@ +[mypy] +# regex to exclude: +# - docs folder +# - setup.py +# https://mypy.readthedocs.io/en/stable/config_file.html#confval-exclude +exclude = (?x)( + ^docs/ + | ^setup\.py$ + ) +show_error_codes = True +pretty = True + +# suppress error on conditional import of typing_extensions module +[mypy-optimizely.entities] +no_warn_unused_ignores = True + +# suppress error on conditional import of typing_extensions module +[mypy-event_dispatcher] +no_warn_unused_ignores = True + +# suppress error on conditional import of typing_extensions module +[mypy-optimizely.condition] +no_warn_unused_ignores = True diff --git a/optimizely/entities.py b/optimizely/entities.py index 483610e9..a5987e1b 100644 --- a/optimizely/entities.py +++ b/optimizely/entities.py @@ -1,4 +1,4 @@ -# Copyright 2016-2021, Optimizely +# Copyright 2016-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,7 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - class BaseEntity: def __eq__(self, other): return self.__dict__ == other.__dict__ diff --git a/optimizely/event/event_processor.py b/optimizely/event/event_processor.py index eb71287d..be0aca55 100644 --- a/optimizely/event/event_processor.py +++ b/optimizely/event/event_processor.py @@ -1,4 +1,4 @@ -# Copyright 2019-2021 Optimizely +# Copyright 2019-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,17 +11,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations from abc import ABC, abstractmethod import numbers import threading import time +from typing import Optional from datetime import timedelta import queue from optimizely import logger as _logging from optimizely import notification_center as _notification_center -from optimizely.event_dispatcher import EventDispatcher as default_event_dispatcher +from optimizely.event_dispatcher import EventDispatcher, CustomEventDispatcher from optimizely.helpers import enums from optimizely.helpers import validator from .event_factory import EventFactory @@ -32,7 +34,7 @@ class BaseEventProcessor(ABC): """ Class encapsulating event processing. Override with your own implementation. """ @abstractmethod - def process(self, user_event): + def process(self, user_event: UserEvent) -> None: """ Method to provide intermediary processing stage within event production. Args: user_event: UserEvent instance that needs to be processed and dispatched. @@ -49,24 +51,28 @@ class BatchEventProcessor(BaseEventProcessor): maximum duration before the resulting LogEvent is sent to the EventDispatcher. 
""" + class Signal: + '''Used to create unique objects for sending signals to event queue.''' + pass + _DEFAULT_QUEUE_CAPACITY = 1000 _DEFAULT_BATCH_SIZE = 10 _DEFAULT_FLUSH_INTERVAL = 30 _DEFAULT_TIMEOUT_INTERVAL = 5 - _SHUTDOWN_SIGNAL = object() - _FLUSH_SIGNAL = object() + _SHUTDOWN_SIGNAL = Signal() + _FLUSH_SIGNAL = Signal() LOCK = threading.Lock() def __init__( self, - event_dispatcher, - logger=None, - start_on_init=False, - event_queue=None, - batch_size=None, - flush_interval=None, - timeout_interval=None, - notification_center=None, + event_dispatcher: Optional[type[EventDispatcher] | CustomEventDispatcher] = None, + logger: Optional[_logging.Logger] = None, + start_on_init: bool = False, + event_queue: Optional[queue.Queue[UserEvent | Signal]] = None, + batch_size: Optional[int] = None, + flush_interval: Optional[float] = None, + timeout_interval: Optional[float] = None, + notification_center: Optional[_notification_center.NotificationCenter] = None, ): """ BatchEventProcessor init method to configure event batching. @@ -84,43 +90,48 @@ def __init__( thread. notification_center: Optional instance of notification_center.NotificationCenter. 
""" - self.event_dispatcher = event_dispatcher or default_event_dispatcher + self.event_dispatcher = event_dispatcher or EventDispatcher self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) self.event_queue = event_queue or queue.Queue(maxsize=self._DEFAULT_QUEUE_CAPACITY) - self.batch_size = ( - batch_size + self.batch_size: int = ( + batch_size # type: ignore if self._validate_instantiation_props(batch_size, 'batch_size', self._DEFAULT_BATCH_SIZE) else self._DEFAULT_BATCH_SIZE ) - self.flush_interval = ( - timedelta(seconds=flush_interval) + self.flush_interval: timedelta = ( + timedelta(seconds=flush_interval) # type: ignore if self._validate_instantiation_props(flush_interval, 'flush_interval', self._DEFAULT_FLUSH_INTERVAL) else timedelta(seconds=self._DEFAULT_FLUSH_INTERVAL) ) - self.timeout_interval = ( - timedelta(seconds=timeout_interval) + self.timeout_interval: timedelta = ( + timedelta(seconds=timeout_interval) # type: ignore if self._validate_instantiation_props(timeout_interval, 'timeout_interval', self._DEFAULT_TIMEOUT_INTERVAL) else timedelta(seconds=self._DEFAULT_TIMEOUT_INTERVAL) ) self.notification_center = notification_center or _notification_center.NotificationCenter(self.logger) - self._current_batch = list() + self._current_batch: list[UserEvent] = [] if not validator.is_notification_center_valid(self.notification_center): self.logger.error(enums.Errors.INVALID_INPUT.format('notification_center')) self.logger.debug('Creating notification center for use.') self.notification_center = _notification_center.NotificationCenter(self.logger) - self.executor = None + self.executor: Optional[threading.Thread] = None if start_on_init is True: self.start() @property - def is_running(self): + def is_running(self) -> bool: """ Property to check if consumer thread is alive or not. 
""" return self.executor.is_alive() if self.executor else False - def _validate_instantiation_props(self, prop, prop_name, default_value): + def _validate_instantiation_props( + self, + prop: Optional[numbers.Integral | int | float], + prop_name: str, + default_value: numbers.Integral | int | float + ) -> bool: """ Method to determine if instantiation properties like batch_size, flush_interval and timeout_interval are valid. @@ -147,7 +158,7 @@ def _validate_instantiation_props(self, prop, prop_name, default_value): return is_valid - def _get_time(self, _time=None): + def _get_time(self, _time: Optional[float] = None) -> float: """ Method to return time as float in seconds. If _time is None, uses current time. Args: @@ -161,7 +172,7 @@ def _get_time(self, _time=None): return _time - def start(self): + def start(self) -> None: """ Starts the batch processing thread to batch events. """ if hasattr(self, 'executor') and self.is_running: self.logger.warning('BatchEventProcessor already started.') @@ -172,7 +183,7 @@ def start(self): self.executor.daemon = True self.executor.start() - def _run(self): + def _run(self) -> None: """ Triggered as part of the thread which batches events or flushes event_queue and hangs on get for flush interval if queue is empty. """ @@ -215,12 +226,12 @@ def _run(self): self.logger.info('Exiting processing loop. Attempting to flush pending events.') self._flush_batch() - def flush(self): + def flush(self) -> None: """ Adds flush signal to event_queue. """ self.event_queue.put(self._FLUSH_SIGNAL) - def _flush_batch(self): + def _flush_batch(self) -> None: """ Flushes current batch by dispatching event. 
""" batch_len = len(self._current_batch) if batch_len == 0: @@ -237,12 +248,16 @@ def _flush_batch(self): self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event) + if log_event is None: + self.logger.exception('Error dispatching event: Cannot dispatch None event.') + return + try: self.event_dispatcher.dispatch_event(log_event) except Exception as e: self.logger.error(f'Error dispatching event: {log_event} {e}') - def process(self, user_event): + def process(self, user_event: UserEvent) -> None: """ Method to process the user_event by putting it in event_queue. Args: @@ -263,7 +278,7 @@ def process(self, user_event): f'Payload not accepted by the queue. Current size: {self.event_queue.qsize()}' ) - def _add_to_batch(self, user_event): + def _add_to_batch(self, user_event: UserEvent) -> None: """ Method to append received user event to current batch. Args: @@ -283,7 +298,7 @@ def _add_to_batch(self, user_event): self.logger.debug('Flushing on batch size.') self._flush_batch() - def _should_split(self, user_event): + def _should_split(self, user_event: UserEvent) -> bool: """ Method to check if current event batch should split into two. Args: @@ -308,7 +323,7 @@ def _should_split(self, user_event): return False - def stop(self): + def stop(self) -> None: """ Stops and disposes batch event processor. """ self.event_queue.put(self._SHUTDOWN_SIGNAL) self.logger.warning('Stopping Scheduler.') @@ -327,7 +342,12 @@ class ForwardingEventProcessor(BaseEventProcessor): The ForwardingEventProcessor sends the LogEvent to EventDispatcher as soon as it is received. """ - def __init__(self, event_dispatcher, logger=None, notification_center=None): + def __init__( + self, + event_dispatcher: type[EventDispatcher] | CustomEventDispatcher, + logger: Optional[_logging.Logger] = None, + notification_center: Optional[_notification_center.NotificationCenter] = None + ): """ ForwardingEventProcessor init method to configure event dispatching. 
Args: @@ -335,7 +355,7 @@ def __init__(self, event_dispatcher, logger=None, notification_center=None): logger: Optional component which provides a log method to log messages. By default nothing would be logged. notification_center: Optional instance of notification_center.NotificationCenter. """ - self.event_dispatcher = event_dispatcher or default_event_dispatcher + self.event_dispatcher = event_dispatcher or EventDispatcher self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) self.notification_center = notification_center or _notification_center.NotificationCenter(self.logger) @@ -343,7 +363,7 @@ def __init__(self, event_dispatcher, logger=None, notification_center=None): self.logger.error(enums.Errors.INVALID_INPUT.format('notification_center')) self.notification_center = _notification_center.NotificationCenter() - def process(self, user_event): + def process(self, user_event: UserEvent) -> None: """ Method to process the user_event by dispatching it. Args: @@ -361,6 +381,10 @@ def process(self, user_event): self.notification_center.send_notifications(enums.NotificationTypes.LOG_EVENT, log_event) + if log_event is None: + self.logger.exception('Error dispatching event: Cannot dispatch None event.') + return + try: self.event_dispatcher.dispatch_event(log_event) except Exception as e: diff --git a/optimizely/event_dispatcher.py b/optimizely/event_dispatcher.py index 1f922012..ed65d944 100644 --- a/optimizely/event_dispatcher.py +++ b/optimizely/event_dispatcher.py @@ -1,4 +1,4 @@ -# Copyright 2016, Optimizely +# Copyright 2016, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -16,21 +16,34 @@ import requests from requests import exceptions as request_exception +from sys import version_info from .helpers import enums +from . 
import event_builder + +if version_info >= (3, 8): + from typing import Protocol +else: + from typing_extensions import Protocol # type: ignore[misc] + REQUEST_TIMEOUT = 10 +class CustomEventDispatcher(Protocol): + """Interface for a custom event dispatcher and required method `dispatch_event`. """ + def dispatch_event(self, event: event_builder.Event) -> None: + ... + + class EventDispatcher: @staticmethod - def dispatch_event(event): + def dispatch_event(event: event_builder.Event) -> None: """ Dispatch the event being represented by the Event object. Args: event: Object holding information about the request to be dispatched to the Optimizely backend. """ - try: if event.http_verb == enums.HTTPVerbs.GET: requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status() diff --git a/optimizely/helpers/types.py b/optimizely/helpers/types.py new file mode 100644 index 00000000..10252e32 --- /dev/null +++ b/optimizely/helpers/types.py @@ -0,0 +1,78 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from __future__ import annotations + +from typing import Optional +from sys import version_info + + +if version_info >= (3, 8): + from typing import TypedDict # type: ignore[attr-defined] +else: + from typing_extensions import TypedDict + + +# Intermediate types for type checking deserialized datafile json before actual class instantiation. 
+# These aren't used for anything other than type signatures + +class BaseDict(TypedDict): + '''Base type for parsed datafile json, before instantiation of class objects.''' + id: str + key: str + + +class EventDict(BaseDict): + '''Event dict from parsed datafile json.''' + experimentIds: list[str] + + +class AttributeDict(BaseDict): + '''Attribute dict from parsed datafile json.''' + pass + + +class TrafficAllocation(TypedDict): + '''Traffic Allocation dict from parsed datafile json.''' + endOfRange: int + entityId: str + + +class VariableDict(BaseDict): + '''Variable dict from parsed datafile json.''' + value: str + type: str + defaultValue: str + subType: str + + +class VariationDict(BaseDict): + '''Variation dict from parsed datafile json.''' + variables: list[VariableDict] + featureEnabled: Optional[bool] + + +class ExperimentDict(BaseDict): + '''Experiment dict from parsed datafile json.''' + status: str + forcedVariations: dict[str, str] + variations: list[VariationDict] + layerId: str + audienceIds: list[str] + audienceConditions: list[str | list[str]] + trafficAllocation: list[TrafficAllocation] + + +class RolloutDict(TypedDict): + '''Rollout dict from parsed datafile json.''' + id: str + experiments: list[ExperimentDict] diff --git a/optimizely/logger.py b/optimizely/logger.py index 2220266d..009cb44c 100644 --- a/optimizely/logger.py +++ b/optimizely/logger.py @@ -1,4 +1,4 @@ -# Copyright 2016, 2018-2019, Optimizely +# Copyright 2016, 2018-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -12,6 +12,7 @@ # limitations under the License. 
import logging import warnings +from typing import Union from .helpers import enums @@ -60,6 +61,10 @@ def log(*args): pass # pragma: no cover +# type alias for optimizely logger +Logger = Union[logging.Logger, BaseLogger] + + class NoOpLogger(BaseLogger): """ Class providing log method which logs nothing. """ diff --git a/optimizely/notification_center.py b/optimizely/notification_center.py index 179e39f9..e0f26349 100644 --- a/optimizely/notification_center.py +++ b/optimizely/notification_center.py @@ -1,4 +1,4 @@ -# Copyright 2017-2019, Optimizely +# Copyright 2017-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,6 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Any, Callable, Optional from .helpers import enums from . import logger as optimizely_logger @@ -24,14 +26,14 @@ class NotificationCenter: """ Class encapsulating methods to manage notifications and their listeners. The enums.NotificationTypes includes predefined notifications.""" - def __init__(self, logger=None): + def __init__(self, logger: Optional[optimizely_logger.Logger] = None): self.listener_id = 1 - self.notification_listeners = {} + self.notification_listeners: dict[str, list[tuple[int, Callable[..., None]]]] = {} for notification_type in NOTIFICATION_TYPES: self.notification_listeners[notification_type] = [] self.logger = optimizely_logger.adapt_logger(logger or optimizely_logger.NoOpLogger()) - def add_notification_listener(self, notification_type, notification_callback): + def add_notification_listener(self, notification_type: str, notification_callback: Callable[..., None]) -> int: """ Add a notification callback to the notification center for a given notification type. 
Args: @@ -59,7 +61,7 @@ def add_notification_listener(self, notification_type, notification_callback): return current_listener_id - def remove_notification_listener(self, notification_id): + def remove_notification_listener(self, notification_id: int) -> bool: """ Remove a previously added notification callback. Args: @@ -77,7 +79,7 @@ def remove_notification_listener(self, notification_id): return False - def clear_notification_listeners(self, notification_type): + def clear_notification_listeners(self, notification_type: str) -> None: """ Remove notification listeners for a certain notification type. Args: @@ -90,7 +92,7 @@ def clear_notification_listeners(self, notification_type): ) self.notification_listeners[notification_type] = [] - def clear_notifications(self, notification_type): + def clear_notifications(self, notification_type: str) -> None: """ (DEPRECATED since 3.2.0, use clear_notification_listeners) Remove notification listeners for a certain notification type. @@ -99,17 +101,17 @@ def clear_notifications(self, notification_type): """ self.clear_notification_listeners(notification_type) - def clear_all_notification_listeners(self): + def clear_all_notification_listeners(self) -> None: """ Remove all notification listeners. """ for notification_type in self.notification_listeners.keys(): self.clear_notification_listeners(notification_type) - def clear_all_notifications(self): + def clear_all_notifications(self) -> None: """ (DEPRECATED since 3.2.0, use clear_all_notification_listeners) Remove all notification listeners. """ self.clear_all_notification_listeners() - def send_notifications(self, notification_type, *args): + def send_notifications(self, notification_type: str, *args: Any) -> None: """ Fires off the notification for the specific event. Uses var args to pass in a arbitrary list of parameter according to which notification type was fired. 
diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 336cd151..e33b14de 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -11,11 +11,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations + +from . import project_config from . import decision_service from . import entities from . import event_builder from . import exceptions from . import logger as _logging +from .config_manager import BaseConfigManager from .config_manager import AuthDatafilePollingConfigManager from .config_manager import PollingConfigManager from .config_manager import StaticConfigManager @@ -23,15 +27,18 @@ from .decision.optimizely_decision import OptimizelyDecision from .decision.optimizely_decision_message import OptimizelyDecisionMessage from .decision_service import Decision -from .error_handler import NoOpErrorHandler as noop_error_handler +from .error_handler import NoOpErrorHandler, BaseErrorHandler from .event import event_factory, user_event_factory -from .event.event_processor import BatchEventProcessor -from .event_dispatcher import EventDispatcher as default_event_dispatcher +from .event.event_processor import BatchEventProcessor, BaseEventProcessor +from .event_dispatcher import EventDispatcher, CustomEventDispatcher + from .helpers import enums, validator from .helpers.enums import DecisionSources from .notification_center import NotificationCenter -from .optimizely_config import OptimizelyConfigService +from .optimizely_config import OptimizelyConfig, OptimizelyConfigService from .optimizely_user_context import OptimizelyUserContext +from .user_profile import UserProfileService +from typing import Any, Optional, Sequence class Optimizely: @@ -39,20 +46,20 @@ class Optimizely: def __init__( self, - datafile=None, - event_dispatcher=None, - logger=None, - error_handler=None, - skip_json_validation=False, - user_profile_service=None, - sdk_key=None, - 
config_manager=None, - notification_center=None, - event_processor=None, - datafile_access_token=None, - default_decide_options=None, - event_processor_options=None - ): + datafile: Optional[str] = None, + event_dispatcher: Optional[CustomEventDispatcher] = None, + logger: Optional[_logging.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + skip_json_validation: Optional[bool] = False, + user_profile_service: Optional[UserProfileService] = None, + sdk_key: Optional[str] = None, + config_manager: Optional[BaseConfigManager] = None, + notification_center: Optional[NotificationCenter] = None, + event_processor: Optional[BaseEventProcessor] = None, + datafile_access_token: Optional[str] = None, + default_decide_options: Optional[list[str]] = None, + event_processor_options: Optional[dict[str, Any]] = None + ) -> None: """ Optimizely init method for managing Custom projects. Args: @@ -83,10 +90,10 @@ def __init__( """ self.logger_name = '.'.join([__name__, self.__class__.__name__]) self.is_valid = True - self.event_dispatcher = event_dispatcher or default_event_dispatcher + self.event_dispatcher = event_dispatcher or EventDispatcher self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) - self.error_handler = error_handler or noop_error_handler - self.config_manager = config_manager + self.error_handler = error_handler or NoOpErrorHandler + self.config_manager: BaseConfigManager = config_manager # type: ignore self.notification_center = notification_center or NotificationCenter(self.logger) event_processor_defaults = { 'batch_size': 1, @@ -96,12 +103,14 @@ def __init__( } if event_processor_options: event_processor_defaults.update(event_processor_options) + self.event_processor = event_processor or BatchEventProcessor( self.event_dispatcher, logger=self.logger, notification_center=self.notification_center, - **event_processor_defaults + **event_processor_defaults # type: ignore[arg-type] ) + self.default_decide_options: list[str] if 
default_decide_options is None: self.default_decide_options = [] @@ -146,7 +155,7 @@ def __init__( self.event_builder = event_builder.EventBuilder() self.decision_service = decision_service.DecisionService(self.logger, user_profile_service) - def _validate_instantiation_options(self): + def _validate_instantiation_options(self) -> None: """ Helper method to validate all instantiation parameters. Raises: @@ -170,7 +179,9 @@ def _validate_instantiation_options(self): if not validator.is_event_processor_valid(self.event_processor): raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('event_processor')) - def _validate_user_inputs(self, attributes=None, event_tags=None): + def _validate_user_inputs( + self, attributes: Optional[dict[str, Any]] = None, event_tags: Optional[dict[str, Any]] = None + ) -> bool: """ Helper method to validate user inputs. Args: @@ -194,8 +205,11 @@ def _validate_user_inputs(self, attributes=None, event_tags=None): return True - def _send_impression_event(self, project_config, experiment, variation, flag_key, rule_key, rule_type, enabled, - user_id, attributes): + def _send_impression_event( + self, project_config: project_config.ProjectConfig, experiment: Optional[entities.Experiment], + variation: Optional[entities.Variation], flag_key: str, rule_key: str, rule_type: str, + enabled: bool, user_id: str, attributes: Optional[dict[str, Any]] + ) -> None: """ Helper method to send impression event. Args: @@ -217,6 +231,10 @@ def _send_impression_event(self, project_config, experiment, variation, flag_key project_config, experiment, variation_id, flag_key, rule_key, rule_type, enabled, user_id, attributes ) + if user_event is None: + self.logger.error('Cannot process None event.') + return + self.event_processor.process(user_event) # Kept for backward compatibility. 
@@ -229,8 +247,9 @@ def _send_impression_event(self, project_config, experiment, variation, flag_key ) def _get_feature_variable_for_type( - self, project_config, feature_key, variable_key, variable_type, user_id, attributes - ): + self, project_config: project_config.ProjectConfig, feature_key: str, variable_key: str, + variable_type: Optional[str], user_id: str, attributes: Optional[dict[str, Any]] + ) -> Any: """ Helper method to determine value for a certain variable attached to a feature flag based on type of variable. Args: @@ -284,6 +303,9 @@ def _get_feature_variable_for_type( variable_value = variable.defaultValue user_context = self.create_user_context(user_id, attributes) + # error is logged in create_user_context + if user_context is None: + return None decision, _ = self.decision_service.get_variation_for_feature(project_config, feature_flag, user_context) if decision.variation: @@ -308,8 +330,8 @@ def _get_feature_variable_for_type( if decision.source == enums.DecisionSources.FEATURE_TEST: source_info = { - 'experiment_key': decision.experiment.key, - 'variation_key': decision.variation.key, + 'experiment_key': decision.experiment.key if decision.experiment else None, + 'variation_key': decision.variation.key if decision.variation else None, } try: @@ -336,8 +358,9 @@ def _get_feature_variable_for_type( return actual_value def _get_all_feature_variables_for_type( - self, project_config, feature_key, user_id, attributes, - ): + self, project_config: project_config.ProjectConfig, feature_key: str, + user_id: str, attributes: Optional[dict[str, Any]], + ) -> Optional[dict[str, Any]]: """ Helper method to determine value for all variables attached to a feature flag. 
Args: @@ -369,6 +392,9 @@ def _get_all_feature_variables_for_type( source_info = {} user_context = self.create_user_context(user_id, attributes) + # error is logged in create_user_context + if user_context is None: + return None decision, _ = self.decision_service.get_variation_for_feature(project_config, feature_flag, user_context) if decision.variation: @@ -389,8 +415,7 @@ def _get_all_feature_variables_for_type( ) all_variables = {} - for variable_key in feature_flag.variables: - variable = project_config.get_variable_for_feature(feature_key, variable_key) + for variable_key, variable in feature_flag.variables.items(): variable_value = variable.defaultValue if feature_enabled: variable_value = project_config.get_variable_value_for_variation(variable, decision.variation) @@ -409,8 +434,8 @@ def _get_all_feature_variables_for_type( if decision.source == enums.DecisionSources.FEATURE_TEST: source_info = { - 'experiment_key': decision.experiment.key, - 'variation_key': decision.variation.key, + 'experiment_key': decision.experiment.key if decision.experiment else None, + 'variation_key': decision.variation.key if decision.variation else None, } self.notification_center.send_notifications( @@ -428,7 +453,7 @@ def _get_all_feature_variables_for_type( ) return all_variables - def activate(self, experiment_key, user_id, attributes=None): + def activate(self, experiment_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None) -> Optional[str]: """ Buckets visitor and sends impression event to Optimizely. 
Args: @@ -466,6 +491,9 @@ def activate(self, experiment_key, user_id, attributes=None): experiment = project_config.get_experiment_from_key(experiment_key) variation = project_config.get_variation_from_key(experiment_key, variation_key) + if not variation or not experiment: + self.logger.info(f'Not activating user "{user_id}".') + return None # Create and dispatch impression event self.logger.info(f'Activating user "{user_id}" in experiment "{experiment.key}".') @@ -474,7 +502,11 @@ def activate(self, experiment_key, user_id, attributes=None): return variation.key - def track(self, event_key, user_id, attributes=None, event_tags=None): + def track( + self, event_key: str, user_id: str, + attributes: Optional[dict[str, Any]] = None, + event_tags: Optional[dict[str, Any]] = None + ) -> None: """ Send conversion event to Optimizely. Args: @@ -513,6 +545,10 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): project_config, event_key, user_id, attributes, event_tags ) + if user_event is None: + self.logger.error('Cannot process None event.') + return + self.event_processor.process(user_event) self.logger.info(f'Tracking event "{event_key}" for user "{user_id}".') @@ -522,7 +558,9 @@ def track(self, event_key, user_id, attributes=None, event_tags=None): enums.NotificationTypes.TRACK, event_key, user_id, attributes, event_tags, log_event.__dict__, ) - def get_variation(self, experiment_key, user_id, attributes=None): + def get_variation( + self, experiment_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[str]: """ Gets variation where user will be bucketed. 
Args: @@ -563,6 +601,9 @@ def get_variation(self, experiment_key, user_id, attributes=None): return None user_context = self.create_user_context(user_id, attributes) + # error is logged in create_user_context + if not user_context: + return None variation, _ = self.decision_service.get_variation(project_config, experiment, user_context) if variation: @@ -583,7 +624,7 @@ def get_variation(self, experiment_key, user_id, attributes=None): return variation_key - def is_feature_enabled(self, feature_key, user_id, attributes=None): + def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None) -> bool: """ Returns true if the feature is enabled for the given user. Args: @@ -622,6 +663,10 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): feature_enabled = False source_info = {} user_context = self.create_user_context(user_id, attributes) + # error is logged in create_user_context + if not user_context: + return False + decision, _ = self.decision_service.get_variation_for_feature(project_config, feature, user_context) is_source_experiment = decision.source == enums.DecisionSources.FEATURE_TEST is_source_rollout = decision.source == enums.DecisionSources.ROLLOUT @@ -637,7 +682,7 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): ) # Send event if Decision came from an experiment. - if is_source_experiment and decision.variation: + if is_source_experiment and decision.variation and decision.experiment: source_info = { 'experiment_key': decision.experiment.key, 'variation_key': decision.variation.key, @@ -667,7 +712,7 @@ def is_feature_enabled(self, feature_key, user_id, attributes=None): return feature_enabled - def get_enabled_features(self, user_id, attributes=None): + def get_enabled_features(self, user_id: str, attributes: Optional[dict[str, Any]] = None) -> list[str]: """ Returns the list of features that are enabled for the user. 
Args: @@ -678,7 +723,7 @@ def get_enabled_features(self, user_id, attributes=None): A list of the keys of the features that are enabled for the user. """ - enabled_features = [] + enabled_features: list[str] = [] if not self.is_valid: self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('get_enabled_features')) return enabled_features @@ -701,7 +746,9 @@ def get_enabled_features(self, user_id, attributes=None): return enabled_features - def get_feature_variable(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Any: """ Returns value for a variable attached to a feature flag. Args: @@ -722,7 +769,9 @@ def get_feature_variable(self, feature_key, variable_key, user_id, attributes=No return self._get_feature_variable_for_type(project_config, feature_key, variable_key, None, user_id, attributes) - def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_boolean( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[bool]: """ Returns value for a certain boolean variable attached to a feature flag. 
Args: @@ -744,11 +793,13 @@ def get_feature_variable_boolean(self, feature_key, variable_key, user_id, attri self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_boolean')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_double(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_double( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[float]: """ Returns value for a certain double variable attached to a feature flag. Args: @@ -770,11 +821,13 @@ def get_feature_variable_double(self, feature_key, variable_key, user_id, attrib self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_double')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_integer(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_integer( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[int]: """ Returns value for a certain integer variable attached to a feature flag. 
Args: @@ -796,11 +849,13 @@ def get_feature_variable_integer(self, feature_key, variable_key, user_id, attri self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_integer')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_string( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[str]: """ Returns value for a certain string variable attached to a feature. Args: @@ -822,11 +877,13 @@ def get_feature_variable_string(self, feature_key, variable_key, user_id, attrib self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_string')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_feature_variable_json(self, feature_key, variable_key, user_id, attributes=None): + def get_feature_variable_json( + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[dict[str, Any]]: """ Returns value for a certain JSON variable attached to a feature. 
Args: @@ -848,11 +905,13 @@ def get_feature_variable_json(self, feature_key, variable_key, user_id, attribut self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('get_feature_variable_json')) return None - return self._get_feature_variable_for_type( + return self._get_feature_variable_for_type( # type: ignore[no-any-return] project_config, feature_key, variable_key, variable_type, user_id, attributes, ) - def get_all_feature_variables(self, feature_key, user_id, attributes=None): + def get_all_feature_variables( + self, feature_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[dict[str, Any]]: """ Returns dictionary of all variables and their corresponding values in the context of a feature. Args: @@ -874,7 +933,7 @@ def get_all_feature_variables(self, feature_key, user_id, attributes=None): project_config, feature_key, user_id, attributes, ) - def set_forced_variation(self, experiment_key, user_id, variation_key): + def set_forced_variation(self, experiment_key: str, user_id: str, variation_key: Optional[str]) -> bool: """ Force a user into a variation for a given experiment. Args: @@ -906,7 +965,7 @@ def set_forced_variation(self, experiment_key, user_id, variation_key): return self.decision_service.set_forced_variation(project_config, experiment_key, user_id, variation_key) - def get_forced_variation(self, experiment_key, user_id): + def get_forced_variation(self, experiment_key: str, user_id: str) -> Optional[str]: """ Gets the forced variation for a given user and experiment. Args: @@ -937,7 +996,7 @@ def get_forced_variation(self, experiment_key, user_id): forced_variation, _ = self.decision_service.get_forced_variation(project_config, experiment_key, user_id) return forced_variation.key if forced_variation else None - def get_optimizely_config(self): + def get_optimizely_config(self) -> Optional[OptimizelyConfig]: """ Gets OptimizelyConfig instance for the current project config. 
Returns: @@ -955,11 +1014,13 @@ def get_optimizely_config(self): # Customized Config Manager may not have optimizely_config defined. if hasattr(self.config_manager, 'optimizely_config'): - return self.config_manager.optimizely_config + return self.config_manager.optimizely_config # type: ignore return OptimizelyConfigService(project_config).get_config() - def create_user_context(self, user_id, attributes=None): + def create_user_context( + self, user_id: str, attributes: Optional[dict[str, Any]] = None + ) -> Optional[OptimizelyUserContext]: """ We do not check for is_valid here as a user context can be created successfully even when the SDK is not fully configured. @@ -981,7 +1042,10 @@ def create_user_context(self, user_id, attributes=None): return OptimizelyUserContext(self, self.logger, user_id, attributes) - def _decide(self, user_context, key, decide_options=None): + def _decide( + self, user_context: Optional[OptimizelyUserContext], key: str, + decide_options: Optional[Sequence[OptimizelyDecideOption | str]] = None + ) -> OptimizelyDecision: """ decide calls optimizely decide with feature key provided Args: @@ -1042,7 +1106,7 @@ def _decide(self, user_context, key, decide_options=None): all_variables = {} experiment = None decision_source = DecisionSources.ROLLOUT - source_info = {} + source_info: dict[str, Any] = {} decision_event_dispatched = False # Check forced decisions first @@ -1087,8 +1151,7 @@ def _decide(self, user_context, key, decide_options=None): # Generate all variables map if decide options doesn't include excludeVariables if OptimizelyDecideOption.EXCLUDE_VARIABLES not in decide_options: - for variable_key in feature_flag.variables: - variable = config.get_variable_for_feature(flag_key, variable_key) + for variable_key, variable in feature_flag.variables.items(): variable_value = variable.defaultValue if feature_enabled: variable_value = config.get_variable_value_for_variation(variable, decision.variation) @@ -1130,7 +1193,11 @@ def 
_decide(self, user_context, key, decide_options=None): user_context=user_context, reasons=reasons if should_include_reasons else [] ) - def _decide_all(self, user_context, decide_options=None): + def _decide_all( + self, + user_context: Optional[OptimizelyUserContext], + decide_options: Optional[list[str]] = None + ) -> dict[str, OptimizelyDecision]: """ decide_all will return a decision for every feature key in the current config Args: @@ -1159,7 +1226,12 @@ def _decide_all(self, user_context, decide_options=None): keys.append(f['key']) return self._decide_for_keys(user_context, keys, decide_options) - def _decide_for_keys(self, user_context, keys, decide_options=None): + def _decide_for_keys( + self, + user_context: Optional[OptimizelyUserContext], + keys: list[str], + decide_options: Optional[list[str]] = None + ) -> dict[str, OptimizelyDecision]: """ Args: user_context: UserContent @@ -1179,7 +1251,7 @@ def _decide_for_keys(self, user_context, keys, decide_options=None): return {} # merge decide_options and default_decide_options - merged_decide_options = [] + merged_decide_options: list[str] = [] if isinstance(decide_options, list): merged_decide_options = decide_options[:] merged_decide_options += self.default_decide_options diff --git a/optimizely/optimizely_config.py b/optimizely/optimizely_config.py index 16cf4fce..397ddba5 100644 --- a/optimizely/optimizely_config.py +++ b/optimizely/optimizely_config.py @@ -1,4 +1,4 @@ -# Copyright 2020-2021, Optimizely +# Copyright 2020-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,16 +11,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations import copy -from .helpers.condition import ConditionOperatorTypes +from typing import Any, Optional +from .helpers.condition import ConditionOperatorTypes +from .helpers.types import VariationDict, ExperimentDict, RolloutDict, AttributeDict, EventDict from .project_config import ProjectConfig class OptimizelyConfig: - def __init__(self, revision, experiments_map, features_map, datafile=None, - sdk_key=None, environment_key=None, attributes=None, events=None, - audiences=None): + def __init__( + self, revision: str, + experiments_map: dict[str, OptimizelyExperiment], + features_map: dict[str, OptimizelyFeature], + datafile: Optional[str] = None, + sdk_key: Optional[str] = None, + environment_key: Optional[str] = None, + attributes: Optional[list[OptimizelyAttribute]] = None, + events: Optional[list[OptimizelyEvent]] = None, + audiences: Optional[list[OptimizelyAudience]] = None + ): self.revision = revision # This experiments_map is for experiments of legacy projects only. @@ -37,7 +48,7 @@ def __init__(self, revision, experiments_map, features_map, datafile=None, self.events = events or [] self.audiences = audiences or [] - def get_datafile(self): + def get_datafile(self) -> Optional[str]: """ Get the datafile associated with OptimizelyConfig. 
Returns: @@ -47,7 +58,7 @@ def get_datafile(self): class OptimizelyExperiment: - def __init__(self, id, key, variations_map, audiences=''): + def __init__(self, id: str, key: str, variations_map: dict[str, OptimizelyVariation], audiences: str = ''): self.id = id self.key = key self.variations_map = variations_map @@ -55,7 +66,13 @@ def __init__(self, id, key, variations_map, audiences=''): class OptimizelyFeature: - def __init__(self, id, key, experiments_map, variables_map): + def __init__( + self, + id: str, + key: str, + experiments_map: dict[str, OptimizelyExperiment], + variables_map: dict[str, OptimizelyVariable] + ): self.id = id self.key = key @@ -64,12 +81,14 @@ def __init__(self, id, key, experiments_map, variables_map): self.experiments_map = experiments_map self.variables_map = variables_map - self.delivery_rules = [] - self.experiment_rules = [] + self.delivery_rules: list[OptimizelyExperiment] = [] + self.experiment_rules: list[OptimizelyExperiment] = [] class OptimizelyVariation: - def __init__(self, id, key, feature_enabled, variables_map): + def __init__( + self, id: str, key: str, feature_enabled: Optional[bool], variables_map: dict[str, OptimizelyVariable] + ): self.id = id self.key = key self.feature_enabled = feature_enabled @@ -77,7 +96,7 @@ def __init__(self, id, key, feature_enabled, variables_map): class OptimizelyVariable: - def __init__(self, id, key, variable_type, value): + def __init__(self, id: str, key: str, variable_type: str, value: Any): self.id = id self.key = key self.type = variable_type @@ -85,20 +104,20 @@ def __init__(self, id, key, variable_type, value): class OptimizelyAttribute: - def __init__(self, id, key): + def __init__(self, id: str, key: str): self.id = id self.key = key class OptimizelyEvent: - def __init__(self, id, key, experiment_ids): + def __init__(self, id: str, key: str, experiment_ids: list[str]): self.id = id self.key = key self.experiment_ids = experiment_ids class OptimizelyAudience: - def __init__(self, 
id, name, conditions): + def __init__(self, id: Optional[str], name: Optional[str], conditions: Optional[list[Any] | str]): self.id = id self.name = name self.conditions = conditions @@ -107,7 +126,7 @@ def __init__(self, id, name, conditions): class OptimizelyConfigService: """ Class encapsulating methods to be used in creating instance of OptimizelyConfig. """ - def __init__(self, project_config): + def __init__(self, project_config: ProjectConfig): """ Args: project_config ProjectConfig @@ -135,7 +154,7 @@ def __init__(self, project_config): Merging typed_audiences with audiences from project_config. The typed_audiences has higher precedence. ''' - optly_typed_audiences = [] + optly_typed_audiences: list[OptimizelyAudience] = [] id_lookup_dict = {} for typed_audience in project_config.typed_audiences: optly_audience = OptimizelyAudience( @@ -159,7 +178,7 @@ def __init__(self, project_config): self.audiences = optly_typed_audiences - def replace_ids_with_names(self, conditions, audiences_map): + def replace_ids_with_names(self, conditions: str | list[Any], audiences_map: dict[str, str]) -> str: ''' Gets conditions and audiences_map [id:name] @@ -173,7 +192,7 @@ def replace_ids_with_names(self, conditions, audiences_map): else: return '' - def lookup_name_from_id(self, audience_id, audiences_map): + def lookup_name_from_id(self, audience_id: str, audiences_map: dict[str, str]) -> str: ''' Gets and audience ID and audiences map @@ -189,7 +208,7 @@ def lookup_name_from_id(self, audience_id, audiences_map): return name - def stringify_conditions(self, conditions, audiences_map): + def stringify_conditions(self, conditions: str | list[Any], audiences_map: dict[str, str]) -> str: ''' Gets a list of conditions from an entities.Experiment and an audiences_map [id:name] @@ -246,7 +265,7 @@ def stringify_conditions(self, conditions, audiences_map): return conditions_str or '' - def get_config(self): + def get_config(self) -> Optional[OptimizelyConfig]: """ Gets instance of 
OptimizelyConfig Returns: @@ -271,7 +290,7 @@ def get_config(self): self.audiences ) - def _create_lookup_maps(self): + def _create_lookup_maps(self) -> None: """ Creates lookup maps to avoid redundant iteration of config objects. """ self.exp_id_to_feature_map = {} @@ -298,7 +317,9 @@ def _create_lookup_maps(self): self.feature_key_variable_key_to_variable_map[feature['key']] = variables_key_map self.feature_key_variable_id_to_variable_map[feature['key']] = variables_id_map - def _get_variables_map(self, experiment, variation, feature_id=None): + def _get_variables_map( + self, experiment: ExperimentDict, variation: VariationDict, feature_id: Optional[str] = None + ) -> dict[str, OptimizelyVariable]: """ Gets variables map for given experiment and variation. Args: @@ -308,7 +329,7 @@ def _get_variables_map(self, experiment, variation, feature_id=None): Returns: dict - Map of variable key to OptimizelyVariable for the given variation. """ - variables_map = {} + variables_map: dict[str, OptimizelyVariable] = {} feature_flag = self.exp_id_to_feature_map.get(experiment['id'], None) if feature_flag is None and feature_id is None: @@ -317,7 +338,7 @@ def _get_variables_map(self, experiment, variation, feature_id=None): # set default variables for each variation if feature_id: variables_map = copy.deepcopy(self.feature_id_variable_key_to_feature_variables_map[feature_id]) - else: + elif feature_flag: variables_map = copy.deepcopy(self.feature_key_variable_key_to_variable_map[feature_flag['key']]) # set variation specific variable value if any @@ -328,7 +349,9 @@ def _get_variables_map(self, experiment, variation, feature_id=None): return variables_map - def _get_variations_map(self, experiment, feature_id=None): + def _get_variations_map( + self, experiment: ExperimentDict, feature_id: Optional[str] = None + ) -> dict[str, OptimizelyVariation]: """ Gets variation map for the given experiment. 
Args: @@ -337,7 +360,7 @@ def _get_variations_map(self, experiment, feature_id=None): Returns: dict -- Map of variation key to OptimizelyVariation. """ - variations_map = {} + variations_map: dict[str, OptimizelyVariation] = {} for variation in experiment.get('variations', []): variables_map = self._get_variables_map(experiment, variation, feature_id) @@ -351,7 +374,7 @@ def _get_variations_map(self, experiment, feature_id=None): return variations_map - def _get_all_experiments(self): + def _get_all_experiments(self) -> list[ExperimentDict]: """ Gets all experiments in the project config. Returns: @@ -364,7 +387,7 @@ def _get_all_experiments(self): return experiments - def _get_experiments_maps(self): + def _get_experiments_maps(self) -> tuple[dict[str, OptimizelyExperiment], dict[str, OptimizelyExperiment]]: """ Gets maps for all the experiments in the project config and updates the experiment with updated experiment audiences string. @@ -376,11 +399,14 @@ def _get_experiments_maps(self): # Id map comes in handy to figure out feature experiment. experiments_id_map = {} # Audiences map to use for updating experiments with new audience conditions string - audiences_map = {} + audiences_map: dict[str, str] = {} # Build map from OptimizelyAudience array for optly_audience in self.audiences: - audiences_map[optly_audience.id] = optly_audience.name + audience_id = optly_audience.id + audience_name = optly_audience.name + if audience_id is not None: + audiences_map[audience_id] = audience_name if audience_name is not None else '' all_experiments = self._get_all_experiments() for exp in all_experiments: @@ -396,7 +422,7 @@ def _get_experiments_maps(self): return experiments_key_map, experiments_id_map - def _get_features_map(self, experiments_id_map): + def _get_features_map(self, experiments_id_map: dict[str, OptimizelyExperiment]) -> dict[str, OptimizelyFeature]: """ Gets features map for the project config. 
Args: @@ -406,7 +432,7 @@ def _get_features_map(self, experiments_id_map): dict -- feaure key to OptimizelyFeature map """ features_map = {} - experiment_rules = [] + experiment_rules: list[OptimizelyExperiment] = [] for feature in self.feature_flags: @@ -431,7 +457,9 @@ def _get_features_map(self, experiments_id_map): return features_map - def _get_delivery_rules(self, rollouts, rollout_id, feature_id): + def _get_delivery_rules( + self, rollouts: list[RolloutDict], rollout_id: Optional[str], feature_id: str + ) -> list[OptimizelyExperiment]: """ Gets an array of rollouts for the project config returns: @@ -440,19 +468,22 @@ def _get_delivery_rules(self, rollouts, rollout_id, feature_id): # Return list for delivery rules delivery_rules = [] # Audiences map to use for updating experiments with new audience conditions string - audiences_map = {} + audiences_map: dict[str, str] = {} # Gets a rollout based on provided rollout_id rollout = [rollout for rollout in rollouts if rollout.get('id') == rollout_id] if rollout: - rollout = rollout[0] + found_rollout = rollout[0] # Build map from OptimizelyAudience array for optly_audience in self.audiences: - audiences_map[optly_audience.id] = optly_audience.name + audience_id = optly_audience.id + audience_name = optly_audience.name + if audience_id is not None: + audiences_map[audience_id] = audience_name if audience_name is not None else '' # Get the experiments for that rollout - experiments = rollout.get('experiments') + experiments = found_rollout.get('experiments') if experiments: for experiment in experiments: optly_exp = OptimizelyExperiment( @@ -465,7 +496,7 @@ def _get_delivery_rules(self, rollouts, rollout_id, feature_id): return delivery_rules - def _get_attributes_list(self, attributes): + def _get_attributes_list(self, attributes: list[AttributeDict]) -> list[OptimizelyAttribute]: """ Gets attributes list for the project config Returns: @@ -482,7 +513,7 @@ def _get_attributes_list(self, attributes): return 
attributes_list - def _get_events_list(self, events): + def _get_events_list(self, events: list[EventDict]) -> list[OptimizelyEvent]: """ Gets events list for the project_config Returns: diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index 32a06a8e..2a0e0ee2 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -12,9 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. # - +from __future__ import annotations import copy import threading +from typing import Any, Optional + +from optimizely.decision import optimizely_decision +from . import optimizely +from .logger import Logger class OptimizelyUserContext: @@ -22,7 +27,10 @@ class OptimizelyUserContext: Representation of an Optimizely User Context using which APIs are to be called. """ - def __init__(self, optimizely_client, logger, user_id, user_attributes=None): + def __init__( + self, optimizely_client: optimizely.Optimizely, logger: Logger, + user_id: str, user_attributes: Optional[dict[str, Any]] = None + ): """ Create an instance of the Optimizely User Context. Args: @@ -44,7 +52,10 @@ def __init__(self, optimizely_client, logger, user_id, user_attributes=None): self._user_attributes = user_attributes.copy() if user_attributes else {} self.lock = threading.Lock() - self.forced_decisions_map = {} + self.forced_decisions_map: dict[ + OptimizelyUserContext.OptimizelyDecisionContext, + OptimizelyUserContext.OptimizelyForcedDecision + ] = {} # decision context class OptimizelyDecisionContext: @@ -52,22 +63,22 @@ class OptimizelyDecisionContext: class is extensible, it's easy to add another attribute if we wanted to extend decision context. 
""" - def __init__(self, flag_key, rule_key=None): + def __init__(self, flag_key: str, rule_key: Optional[str] = None): self.flag_key = flag_key self.rule_key = rule_key - def __hash__(self): + def __hash__(self) -> int: return hash((self.flag_key, self.rule_key)) - def __eq__(self, other): + def __eq__(self, other: OptimizelyUserContext.OptimizelyDecisionContext) -> bool: # type: ignore return (self.flag_key, self.rule_key) == (other.flag_key, other.rule_key) # forced decision class OptimizelyForcedDecision: - def __init__(self, variation_key): + def __init__(self, variation_key: str): self.variation_key = variation_key - def _clone(self): + def _clone(self) -> Optional[OptimizelyUserContext]: if not self.client: return None @@ -79,11 +90,11 @@ def _clone(self): return user_context - def get_user_attributes(self): + def get_user_attributes(self) -> dict[str, Any]: with self.lock: return self._user_attributes.copy() - def set_attribute(self, attribute_key, attribute_value): + def set_attribute(self, attribute_key: str, attribute_value: Any) -> None: """ sets a attribute by key for this user context. 
Args: @@ -96,7 +107,9 @@ def set_attribute(self, attribute_key, attribute_value): with self.lock: self._user_attributes[attribute_key] = attribute_value - def decide(self, key, options=None): + def decide( + self, key: str, options: Optional[list[str]] = None + ) -> optimizely_decision.OptimizelyDecision: """ Call decide on contained Optimizely object Args: @@ -111,7 +124,9 @@ def decide(self, key, options=None): return self.client._decide(self._clone(), key, options) - def decide_for_keys(self, keys, options=None): + def decide_for_keys( + self, keys: list[str], options: Optional[list[str]] = None + ) -> dict[str, optimizely_decision.OptimizelyDecision]: """ Call decide_for_keys on contained optimizely object Args: @@ -126,7 +141,7 @@ def decide_for_keys(self, keys, options=None): return self.client._decide_for_keys(self._clone(), keys, options) - def decide_all(self, options=None): + def decide_all(self, options: Optional[list[str]] = None) -> dict[str, optimizely_decision.OptimizelyDecision]: """ Call decide_all on contained optimizely instance Args: @@ -140,16 +155,18 @@ def decide_all(self, options=None): return self.client._decide_all(self._clone(), options) - def track_event(self, event_key, event_tags=None): + def track_event(self, event_key: str, event_tags: Optional[dict[str, Any]] = None) -> None: return self.client.track(event_key, self.user_id, self.get_user_attributes(), event_tags) - def as_json(self): + def as_json(self) -> dict[str, Any]: return { 'user_id': self.user_id, 'attributes': self.get_user_attributes(), } - def set_forced_decision(self, decision_context, decision): + def set_forced_decision( + self, decision_context: OptimizelyDecisionContext, decision: OptimizelyForcedDecision + ) -> bool: """ Sets the forced decision for a given decision context. 
@@ -165,7 +182,7 @@ def set_forced_decision(self, decision_context, decision): return True - def get_forced_decision(self, decision_context): + def get_forced_decision(self, decision_context: OptimizelyDecisionContext) -> Optional[OptimizelyForcedDecision]: """ Gets the forced decision (variation key) for a given decision context. @@ -178,7 +195,7 @@ def get_forced_decision(self, decision_context): forced_decision = self.find_forced_decision(decision_context) return forced_decision - def remove_forced_decision(self, decision_context): + def remove_forced_decision(self, decision_context: OptimizelyDecisionContext) -> bool: """ Removes the forced decision for a given decision context. @@ -195,7 +212,7 @@ def remove_forced_decision(self, decision_context): return False - def remove_all_forced_decisions(self): + def remove_all_forced_decisions(self) -> bool: """ Removes all forced decisions bound to this user context. @@ -207,7 +224,7 @@ def remove_all_forced_decisions(self): return True - def find_forced_decision(self, decision_context): + def find_forced_decision(self, decision_context: OptimizelyDecisionContext) -> Optional[OptimizelyForcedDecision]: """ Gets forced decision from forced decision map. diff --git a/requirements/typing.txt b/requirements/typing.txt new file mode 100644 index 00000000..ba65f536 --- /dev/null +++ b/requirements/typing.txt @@ -0,0 +1,4 @@ +mypy +types-jsonschema +types-requests +types-Flask \ No newline at end of file diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index 380a5088..fae2992c 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -3021,7 +3021,8 @@ def test_get_feature_variable(self): 'Got variable value "staging" for variable "environment" of feature flag "test_feature_in_experiment".' 
) - mock_broadcast_decision.assert_called_once_with( + # sometimes event processor flushes before this check, so can't assert called once + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3696,7 +3697,8 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "count" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + # sometimes event processor flushes before this check, so can't assert called once + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', From 48af0732db0667fb6e9e25901e0cbda38e88c3a1 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Thu, 7 Jul 2022 15:25:50 -0400 Subject: [PATCH 09/68] chore: prepare for 4.1.0 release (#391) * prep for 4.1.0 release --- CHANGELOG.md | 12 +++++++++--- optimizely/version.py | 4 ++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 892d8ad3..aafa1f33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Optimizely Python SDK Changelog +## 4.1.0 +July 7th, 2022 + +### Bug Fixes +* Fix invalid datafile returned from `ProjectConfig.to_datafile` and `OptimizelyConfig.get_datafile` ([#321](https://github.com/optimizely/python-sdk/pull/321), [#384](https://github.com/optimizely/python-sdk/pull/384)) + ## 4.0.0 January 12th, 2022 @@ -22,10 +28,10 @@ January 12th, 2022 September 16th, 2021 ### New Features -* Added new public properties to OptimizelyConfig. +* Added new public properties to OptimizelyConfig. 
- sdk_key and environment_key [#338] (https://github.com/optimizely/python-sdk/pull/338) - attributes and events [#339] (https://github.com/optimizely/python-sdk/pull/339) - - experiment_rules, delivery_rules, audiences and audiences in OptimizelyExperiment + - experiment_rules, delivery_rules, audiences and audiences in OptimizelyExperiment - [#342] (https://github.com/optimizely/python-sdk/pull/342) - [#351] (https://github.com/optimizely/python-sdk/pull/351/files) * For details please refer to our documentation page: @@ -150,7 +156,7 @@ October 28th, 2019 * To configure event batching, set the `batch_size` and `flush_interval` properties when initializing instance of [BatchEventProcessor](https://github.com/optimizely/python-sdk/blob/3.3.x/optimizely/event/event_processor.py#L45). * Event batching is disabled by default. You can pass in instance of `BatchEventProcessor` when creating `Optimizely` instance to enable event batching. * Users can subscribe to `LogEvent` notification to be notified of whenever a payload consisting of a batch of user events is handed off to the event dispatcher to send to Optimizely's backend. -* Introduced blocking timeout in `PollingConfigManager`. By default, calls to `get_config` will block for maximum of 10 seconds until config is available. +* Introduced blocking timeout in `PollingConfigManager`. By default, calls to `get_config` will block for maximum of 10 seconds until config is available. ### Bug Fixes: * Fixed incorrect log message when numeric metric is not used. ([#217](https://github.com/optimizely/python-sdk/pull/217)) diff --git a/optimizely/version.py b/optimizely/version.py index d6504ce4..f3265ea2 100644 --- a/optimizely/version.py +++ b/optimizely/version.py @@ -1,4 +1,4 @@ -# Copyright 2016-2020, Optimizely +# Copyright 2016-2020, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -11,5 +11,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -version_info = (4, 0, 0) +version_info = (4, 1, 0) __version__ = '.'.join(str(v) for v in version_info) From 2a8d2e78a90681f8f1e9a4acc948a6c508f3f9c8 Mon Sep 17 00:00:00 2001 From: Ozayr <54209343+ozayr-zaviar@users.noreply.github.com> Date: Mon, 11 Jul 2022 23:57:11 +0500 Subject: [PATCH 10/68] feat: updated for fsc git action (#388) * variables and branche changed * updated branch to master Co-authored-by: Mirza Sohail Hussain --- .github/workflows/integration_test.yml | 13 ++++++------- .github/workflows/python.yml | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index c0bc8908..9a4e5eb1 100644 --- a/.github/workflows/integration_test.yml +++ b/.github/workflows/integration_test.yml @@ -38,16 +38,15 @@ jobs: BUILD_NUMBER: ${{ github.run_id }} TESTAPP_BRANCH: master GITHUB_TOKEN: ${{ secrets.CI_USER_TOKEN }} - TRAVIS_EVENT_TYPE: ${{ github.event_name }} + EVENT_TYPE: ${{ github.event_name }} GITHUB_CONTEXT: ${{ toJson(github) }} - TRAVIS_REPO_SLUG: ${{ github.repository }} - TRAVIS_PULL_REQUEST_SLUG: ${{ github.repository }} + #REPO_SLUG: ${{ github.repository }} + PULL_REQUEST_SLUG: ${{ github.repository }} UPSTREAM_REPO: ${{ github.repository }} - TRAVIS_COMMIT: ${{ github.sha }} - TRAVIS_PULL_REQUEST_SHA: ${{ github.event.pull_request.head.sha }} - TRAVIS_PULL_REQUEST: ${{ github.event.pull_request.number }} + PULL_REQUEST_SHA: ${{ github.event.pull_request.head.sha }} + PULL_REQUEST_NUMBER: ${{ github.event.pull_request.number }} UPSTREAM_SHA: ${{ github.sha }} - TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} + TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} EVENT_MESSAGE: ${{ github.event.message }} HOME: 'home/runner' run: | diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 
9a801aea..80971bf5 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -46,7 +46,7 @@ jobs: flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics integration_tests: - uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master + uses: optimizely/python-sdk/.github/workflows/integration_test.yml@uzair/test-with-fsc secrets: CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} From f539d7d64626a93dc3837cbdf895fe84d5f70a74 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Tue, 12 Jul 2022 10:34:01 -0400 Subject: [PATCH 11/68] refactor: type hints private interface (#389) * add type hints * add null checks/disambiguation for typing * enable mypy strict check * bucket returns None instead of empty dict --- .github/workflows/python.yml | 5 +- mypy.ini | 10 +- optimizely/bucketer.py | 50 +++-- optimizely/config_manager.py | 104 ++++++---- .../decision/optimizely_decide_option.py | 19 +- optimizely/decision/optimizely_decision.py | 27 ++- .../decision/optimizely_decision_message.py | 15 +- optimizely/decision_service.py | 104 +++++++--- optimizely/entities.py | 94 ++++++--- optimizely/error_handler.py | 6 +- optimizely/event/event_factory.py | 54 +++-- optimizely/event/event_processor.py | 27 ++- optimizely/event/log_event.py | 25 ++- optimizely/event/payload.py | 57 ++++-- optimizely/event/user_event.py | 50 ++++- optimizely/event/user_event_factory.py | 42 +++- optimizely/event_builder.py | 113 +++++++---- optimizely/event_dispatcher.py | 8 +- optimizely/helpers/audience.py | 37 ++-- optimizely/helpers/condition.py | 124 +++++++----- .../helpers/condition_tree_evaluator.py | 16 +- optimizely/helpers/enums.py | 165 ++++++++------- optimizely/helpers/event_tag_utils.py | 29 ++- optimizely/helpers/experiment.py | 11 +- optimizely/helpers/types.py | 40 +++- optimizely/helpers/validator.py | 46 +++-- 
optimizely/lib/pymmh3.py | 22 +- optimizely/logger.py | 42 +++- optimizely/notification_center.py | 8 +- optimizely/optimizely.py | 55 ++--- optimizely/optimizely_factory.py | 90 +++++---- optimizely/optimizely_user_context.py | 28 ++- optimizely/project_config.py | 190 ++++++++++-------- optimizely/user_profile.py | 34 +++- tests/test_config.py | 24 +++ 35 files changed, 1155 insertions(+), 616 deletions(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 80971bf5..798648d1 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -46,7 +46,7 @@ jobs: flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics integration_tests: - uses: optimizely/python-sdk/.github/workflows/integration_test.yml@uzair/test-with-fsc + uses: optimizely/python-sdk/.github/workflows/integration_test.yml@master secrets: CI_USER_TOKEN: ${{ secrets.CI_USER_TOKEN }} TRAVIS_COM_TOKEN: ${{ secrets.TRAVIS_COM_TOKEN }} @@ -98,5 +98,4 @@ jobs: - name: Type check with mypy run: | mypy . - # disabled until entire sdk is type hinted - # mypy . --exclude "tests/" --strict + mypy . 
--exclude "tests/" --strict diff --git a/mypy.ini b/mypy.ini index 51b2f56c..5de83593 100644 --- a/mypy.ini +++ b/mypy.ini @@ -11,13 +11,5 @@ show_error_codes = True pretty = True # suppress error on conditional import of typing_extensions module -[mypy-optimizely.entities] -no_warn_unused_ignores = True - -# suppress error on conditional import of typing_extensions module -[mypy-event_dispatcher] -no_warn_unused_ignores = True - -# suppress error on conditional import of typing_extensions module -[mypy-optimizely.condition] +[mypy-optimizely.helpers.types] no_warn_unused_ignores = True diff --git a/optimizely/bucketer.py b/optimizely/bucketer.py index 24ecf266..38da3798 100644 --- a/optimizely/bucketer.py +++ b/optimizely/bucketer.py @@ -1,4 +1,4 @@ -# Copyright 2016-2017, 2019-2021 Optimizely +# Copyright 2016-2017, 2019-2022 Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,28 +11,44 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations +from typing import Optional, TYPE_CHECKING import math +from sys import version_info from .lib import pymmh3 as mmh3 -MAX_TRAFFIC_VALUE = 10000 -UNSIGNED_MAX_32_BIT_VALUE = 0xFFFFFFFF -MAX_HASH_VALUE = math.pow(2, 32) -HASH_SEED = 1 -BUCKETING_ID_TEMPLATE = '{bucketing_id}{parent_id}' -GROUP_POLICIES = ['random'] +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .project_config import ProjectConfig + from .entities import Experiment, Variation + from .helpers.types import TrafficAllocation + + +MAX_TRAFFIC_VALUE: Final = 10000 +UNSIGNED_MAX_32_BIT_VALUE: Final = 0xFFFFFFFF +MAX_HASH_VALUE: Final = math.pow(2, 32) +HASH_SEED: Final = 1 +BUCKETING_ID_TEMPLATE: Final = '{bucketing_id}{parent_id}' +GROUP_POLICIES: Final = ['random'] class Bucketer: """ Optimizely bucketing algorithm that evenly distributes visitors. """ - def __init__(self): + def __init__(self) -> None: """ Bucketer init method to set bucketing seed and logger instance. """ self.bucket_seed = HASH_SEED - def _generate_unsigned_hash_code_32_bit(self, bucketing_id): + def _generate_unsigned_hash_code_32_bit(self, bucketing_id: str) -> int: """ Helper method to retrieve hash code. Args: @@ -45,7 +61,7 @@ def _generate_unsigned_hash_code_32_bit(self, bucketing_id): # Adjusting MurmurHash code to be unsigned return mmh3.hash(bucketing_id, self.bucket_seed) & UNSIGNED_MAX_32_BIT_VALUE - def _generate_bucket_value(self, bucketing_id): + def _generate_bucket_value(self, bucketing_id: str) -> int: """ Helper function to generate bucket value in half-closed interval [0, MAX_TRAFFIC_VALUE). 
Args: @@ -58,7 +74,10 @@ def _generate_bucket_value(self, bucketing_id): ratio = float(self._generate_unsigned_hash_code_32_bit(bucketing_id)) / MAX_HASH_VALUE return math.floor(ratio * MAX_TRAFFIC_VALUE) - def find_bucket(self, project_config, bucketing_id, parent_id, traffic_allocations): + def find_bucket( + self, project_config: ProjectConfig, bucketing_id: str, + parent_id: Optional[str], traffic_allocations: list[TrafficAllocation] + ) -> Optional[str]: """ Determine entity based on bucket value and traffic allocations. Args: @@ -78,12 +97,15 @@ def find_bucket(self, project_config, bucketing_id, parent_id, traffic_allocatio for traffic_allocation in traffic_allocations: current_end_of_range = traffic_allocation.get('endOfRange') - if bucketing_number < current_end_of_range: + if current_end_of_range is not None and bucketing_number < current_end_of_range: return traffic_allocation.get('entityId') return None - def bucket(self, project_config, experiment, user_id, bucketing_id): + def bucket( + self, project_config: ProjectConfig, + experiment: Experiment, user_id: str, bucketing_id: str + ) -> tuple[Optional[Variation], list[str]]: """ For a given experiment and bucketing ID determines variation to be shown to user. Args: @@ -97,7 +119,7 @@ def bucket(self, project_config, experiment, user_id, bucketing_id): and array of log messages representing decision making. */. """ - decide_reasons = [] + decide_reasons: list[str] = [] if not experiment: return None, decide_reasons diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index 5ef8a530..68a04b26 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -1,4 +1,4 @@ -# Copyright 2019-2020, Optimizely +# Copyright 2019-2020, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -11,8 +11,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations from abc import ABC, abstractmethod import numbers +from typing import TYPE_CHECKING, Any, Optional import requests import threading import time @@ -22,17 +24,27 @@ from . import exceptions as optimizely_exceptions from . import logger as optimizely_logger from . import project_config -from .error_handler import NoOpErrorHandler +from .error_handler import NoOpErrorHandler, BaseErrorHandler from .notification_center import NotificationCenter from .helpers import enums from .helpers import validator -from .optimizely_config import OptimizelyConfigService +from .optimizely_config import OptimizelyConfig, OptimizelyConfigService + + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from requests.models import CaseInsensitiveDict class BaseConfigManager(ABC): """ Base class for Optimizely's config manager. """ - def __init__(self, logger=None, error_handler=None, notification_center=None): + def __init__( + self, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + notification_center: Optional[NotificationCenter] = None + ): """ Initialize config manager. Args: @@ -43,9 +55,10 @@ def __init__(self, logger=None, error_handler=None, notification_center=None): self.logger = optimizely_logger.adapt_logger(logger or optimizely_logger.NoOpLogger()) self.error_handler = error_handler or NoOpErrorHandler() self.notification_center = notification_center or NotificationCenter(self.logger) + self.optimizely_config: Optional[OptimizelyConfig] self._validate_instantiation_options() - def _validate_instantiation_options(self): + def _validate_instantiation_options(self) -> None: """ Helper method to validate all parameters. 
Raises: @@ -61,7 +74,7 @@ def _validate_instantiation_options(self): raise optimizely_exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('notification_center')) @abstractmethod - def get_config(self): + def get_config(self) -> Optional[project_config.ProjectConfig]: """ Get config for use by optimizely.Optimizely. The config should be an instance of project_config.ProjectConfig.""" pass @@ -71,7 +84,12 @@ class StaticConfigManager(BaseConfigManager): """ Config manager that returns ProjectConfig based on provided datafile. """ def __init__( - self, datafile=None, logger=None, error_handler=None, notification_center=None, skip_json_validation=False, + self, + datafile: Optional[str] = None, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + notification_center: Optional[NotificationCenter] = None, + skip_json_validation: Optional[bool] = False, ): """ Initialize config manager. Datafile has to be provided to use. @@ -87,12 +105,12 @@ def __init__( super().__init__( logger=logger, error_handler=error_handler, notification_center=notification_center, ) - self._config = None - self.optimizely_config = None + self._config: project_config.ProjectConfig = None # type: ignore[assignment] + self.optimizely_config: Optional[OptimizelyConfig] = None self.validate_schema = not skip_json_validation self._set_config(datafile) - def _set_config(self, datafile): + def _set_config(self, datafile: Optional[str | bytes]) -> None: """ Looks up and sets datafile and config based on response body. 
Args: @@ -105,10 +123,11 @@ def _set_config(self, datafile): return error_msg = None - error_to_handle = None + error_to_handle: Optional[Exception] = None config = None try: + assert datafile is not None config = project_config.ProjectConfig(datafile, self.logger, self.error_handler) except optimizely_exceptions.UnsupportedDatafileVersionException as error: error_msg = error.args[0] @@ -117,9 +136,9 @@ def _set_config(self, datafile): error_msg = enums.Errors.INVALID_INPUT.format('datafile') error_to_handle = optimizely_exceptions.InvalidInputException(error_msg) finally: - if error_msg: + if error_msg or config is None: self.logger.error(error_msg) - self.error_handler.handle_error(error_to_handle) + self.error_handler.handle_error(error_to_handle or Exception('Unknown Error')) return previous_revision = self._config.get_revision() if self._config else None @@ -135,7 +154,7 @@ def _set_config(self, datafile): f'Old revision number: {previous_revision}. New revision number: {config.get_revision()}.' ) - def get_config(self): + def get_config(self) -> Optional[project_config.ProjectConfig]: """ Returns instance of ProjectConfig. Returns: @@ -152,16 +171,16 @@ class PollingConfigManager(StaticConfigManager): def __init__( self, - sdk_key=None, - datafile=None, - update_interval=None, - blocking_timeout=None, - url=None, - url_template=None, - logger=None, - error_handler=None, - notification_center=None, - skip_json_validation=False, + sdk_key: Optional[str] = None, + datafile: Optional[str] = None, + update_interval: Optional[float] = None, + blocking_timeout: Optional[int] = None, + url: Optional[str] = None, + url_template: Optional[str] = None, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + notification_center: Optional[NotificationCenter] = None, + skip_json_validation: Optional[bool] = False, ): """ Initialize config manager. One of sdk_key or url has to be set to be able to use. 
@@ -196,13 +215,13 @@ def __init__( ) self.set_update_interval(update_interval) self.set_blocking_timeout(blocking_timeout) - self.last_modified = None + self.last_modified: Optional[str] = None self._polling_thread = threading.Thread(target=self._run) self._polling_thread.daemon = True self._polling_thread.start() @staticmethod - def get_datafile_url(sdk_key, url, url_template): + def get_datafile_url(sdk_key: Optional[str], url: Optional[str], url_template: Optional[str]) -> str: """ Helper method to determine URL from where to fetch the datafile. Args: @@ -226,15 +245,16 @@ def get_datafile_url(sdk_key, url, url_template): # Return URL if one is provided or use template and SDK key to get it. if url is None: try: + assert url_template is not None return url_template.format(sdk_key=sdk_key) - except (AttributeError, KeyError): + except (AssertionError, AttributeError, KeyError): raise optimizely_exceptions.InvalidInputException( f'Invalid url_template {url_template} provided.' ) return url - def _set_config(self, datafile): + def _set_config(self, datafile: Optional[str | bytes]) -> None: """ Looks up and sets datafile and config based on response body. Args: @@ -244,7 +264,7 @@ def _set_config(self, datafile): super()._set_config(datafile=datafile) self._config_ready_event.set() - def get_config(self): + def get_config(self) -> Optional[project_config.ProjectConfig]: """ Returns instance of ProjectConfig. Returns immediately if project config is ready otherwise blocks maximum for value of blocking_timeout in seconds. @@ -255,7 +275,7 @@ def get_config(self): self._config_ready_event.wait(self.blocking_timeout) return self._config - def set_update_interval(self, update_interval): + def set_update_interval(self, update_interval: Optional[int | float]) -> None: """ Helper method to set frequency at which datafile has to be polled and ProjectConfig updated. 
Args: @@ -280,7 +300,7 @@ def set_update_interval(self, update_interval): self.update_interval = update_interval - def set_blocking_timeout(self, blocking_timeout): + def set_blocking_timeout(self, blocking_timeout: Optional[int | float]) -> None: """ Helper method to set time in seconds to block the config call until config has been initialized. Args: @@ -305,7 +325,7 @@ def set_blocking_timeout(self, blocking_timeout): self.blocking_timeout = blocking_timeout - def set_last_modified(self, response_headers): + def set_last_modified(self, response_headers: CaseInsensitiveDict[str]) -> None: """ Looks up and sets last modified time based on Last-Modified header in the response. Args: @@ -313,7 +333,7 @@ def set_last_modified(self, response_headers): """ self.last_modified = response_headers.get(enums.HTTPHeaders.LAST_MODIFIED) - def _handle_response(self, response): + def _handle_response(self, response: requests.Response) -> None: """ Helper method to handle response containing datafile. Args: @@ -333,7 +353,7 @@ def _handle_response(self, response): self.set_last_modified(response.headers) self._set_config(response.content) - def fetch_datafile(self): + def fetch_datafile(self) -> None: """ Fetch datafile and set ProjectConfig. """ request_headers = {} @@ -351,11 +371,11 @@ def fetch_datafile(self): self._handle_response(response) @property - def is_running(self): + def is_running(self) -> bool: """ Check if polling thread is alive or not. """ return self._polling_thread.is_alive() - def _run(self): + def _run(self) -> None: """ Triggered as part of the thread which fetches the datafile and sleeps until next update interval. """ try: while self.is_running: @@ -367,7 +387,7 @@ def _run(self): ) raise - def start(self): + def start(self) -> None: """ Start the config manager and the thread to periodically fetch datafile. 
""" if not self.is_running: self._polling_thread.start() @@ -380,9 +400,9 @@ class AuthDatafilePollingConfigManager(PollingConfigManager): def __init__( self, - datafile_access_token, - *args, - **kwargs + datafile_access_token: str, + *args: Any, + **kwargs: Any ): """ Initialize config manager. One of sdk_key or url has to be set to be able to use. @@ -394,14 +414,14 @@ def __init__( self._set_datafile_access_token(datafile_access_token) super().__init__(*args, **kwargs) - def _set_datafile_access_token(self, datafile_access_token): + def _set_datafile_access_token(self, datafile_access_token: str) -> None: """ Checks for valid access token input and sets it. """ if not datafile_access_token: raise optimizely_exceptions.InvalidInputException( 'datafile_access_token cannot be empty or None.') self.datafile_access_token = datafile_access_token - def fetch_datafile(self): + def fetch_datafile(self) -> None: """ Fetch authenticated datafile and set ProjectConfig. """ request_headers = { enums.HTTPHeaders.AUTHORIZATION: enums.ConfigManager.AUTHORIZATION_HEADER_DATA_TEMPLATE.format( diff --git a/optimizely/decision/optimizely_decide_option.py b/optimizely/decision/optimizely_decide_option.py index e409befa..8b091d96 100644 --- a/optimizely/decision/optimizely_decide_option.py +++ b/optimizely/decision/optimizely_decide_option.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,10 +11,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + class OptimizelyDecideOption: - DISABLE_DECISION_EVENT = 'DISABLE_DECISION_EVENT' - ENABLED_FLAGS_ONLY = 'ENABLED_FLAGS_ONLY' - IGNORE_USER_PROFILE_SERVICE = 'IGNORE_USER_PROFILE_SERVICE' - INCLUDE_REASONS = 'INCLUDE_REASONS' - EXCLUDE_VARIABLES = 'EXCLUDE_VARIABLES' + DISABLE_DECISION_EVENT: Final = 'DISABLE_DECISION_EVENT' + ENABLED_FLAGS_ONLY: Final = 'ENABLED_FLAGS_ONLY' + IGNORE_USER_PROFILE_SERVICE: Final = 'IGNORE_USER_PROFILE_SERVICE' + INCLUDE_REASONS: Final = 'INCLUDE_REASONS' + EXCLUDE_VARIABLES: Final = 'EXCLUDE_VARIABLES' diff --git a/optimizely/decision/optimizely_decision.py b/optimizely/decision/optimizely_decision.py index cbca9558..7ae3f136 100644 --- a/optimizely/decision/optimizely_decision.py +++ b/optimizely/decision/optimizely_decision.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,25 +11,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations +from typing import Optional, Any, TYPE_CHECKING + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.optimizely_user_context import OptimizelyUserContext + class OptimizelyDecision: - def __init__(self, variation_key=None, enabled=None, - variables=None, rule_key=None, flag_key=None, user_context=None, reasons=None): + def __init__( + self, + variation_key: Optional[str] = None, + enabled: bool = False, + variables: Optional[dict[str, Any]] = None, + rule_key: Optional[str] = None, + flag_key: Optional[str] = None, + user_context: Optional[OptimizelyUserContext] = None, + reasons: Optional[list[str]] = None + ): self.variation_key = variation_key - self.enabled = enabled or False + self.enabled = enabled self.variables = variables or {} self.rule_key = rule_key self.flag_key = flag_key self.user_context = user_context self.reasons = reasons or [] - def as_json(self): + def as_json(self) -> dict[str, Any]: return { 'variation_key': self.variation_key, 'enabled': self.enabled, 'variables': self.variables, 'rule_key': self.rule_key, 'flag_key': self.flag_key, - 'user_context': self.user_context.as_json(), + 'user_context': self.user_context.as_json() if self.user_context else None, 'reasons': self.reasons } diff --git a/optimizely/decision/optimizely_decision_message.py b/optimizely/decision/optimizely_decision_message.py index 0c038196..20231ea5 100644 --- a/optimizely/decision/optimizely_decision_message.py +++ b/optimizely/decision/optimizely_decision_message.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,8 +11,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + class OptimizelyDecisionMessage: - SDK_NOT_READY = 'Optimizely SDK not configured properly yet.' - FLAG_KEY_INVALID = 'No flag was found for key "{}".' - VARIABLE_VALUE_INVALID = 'Variable value for key "{}" is invalid or wrong type.' + SDK_NOT_READY: Final = 'Optimizely SDK not configured properly yet.' + FLAG_KEY_INVALID: Final = 'No flag was found for key "{}".' + VARIABLE_VALUE_INVALID: Final = 'Variable value for key "{}" is invalid or wrong type.' diff --git a/optimizely/decision_service.py b/optimizely/decision_service.py index f7e07cae..15532fe0 100644 --- a/optimizely/decision_service.py +++ b/optimizely/decision_service.py @@ -11,24 +11,37 @@ # See the License for the specific language governing permissions and # limitations under the License. -from collections import namedtuple +from __future__ import annotations +from typing import TYPE_CHECKING, NamedTuple, Optional, Sequence from . import bucketer +from . import entities from .decision.optimizely_decide_option import OptimizelyDecideOption from .helpers import audience as audience_helper from .helpers import enums from .helpers import experiment as experiment_helper from .helpers import validator -from .optimizely_user_context import OptimizelyUserContext -from .user_profile import UserProfile +from .optimizely_user_context import OptimizelyUserContext, UserAttributes +from .user_profile import UserProfile, UserProfileService -Decision = namedtuple('Decision', 'experiment variation source') +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .project_config import ProjectConfig + from .logger import Logger + + +class Decision(NamedTuple): + """Named tuple containing selected experiment, variation and source. 
+ None if no experiment/variation was selected.""" + experiment: Optional[entities.Experiment] + variation: Optional[entities.Variation] + source: str class DecisionService: """ Class encapsulating all decision related capabilities. """ - def __init__(self, logger, user_profile_service): + def __init__(self, logger: Logger, user_profile_service: Optional[UserProfileService]): self.bucketer = bucketer.Bucketer() self.logger = logger self.user_profile_service = user_profile_service @@ -37,9 +50,9 @@ def __init__(self, logger, user_profile_service): # This contains all the forced variations set by the user # by calling set_forced_variation (it is not the same as the # whitelisting forcedVariations data structure). - self.forced_variation_map = {} + self.forced_variation_map: dict[str, dict[str, str]] = {} - def _get_bucketing_id(self, user_id, attributes): + def _get_bucketing_id(self, user_id: str, attributes: Optional[UserAttributes]) -> tuple[str, list[str]]: """ Helper method to determine bucketing ID for the user. Args: @@ -50,8 +63,8 @@ def _get_bucketing_id(self, user_id, attributes): String representing bucketing ID if it is a String type in attributes else return user ID array of log messages representing decision making. """ - decide_reasons = [] - attributes = attributes or {} + decide_reasons: list[str] = [] + attributes = attributes or UserAttributes({}) bucketing_id = attributes.get(enums.ControlAttributes.BUCKETING_ID) if bucketing_id is not None: @@ -63,7 +76,10 @@ def _get_bucketing_id(self, user_id, attributes): return user_id, decide_reasons - def set_forced_variation(self, project_config, experiment_key, user_id, variation_key): + def set_forced_variation( + self, project_config: ProjectConfig, experiment_key: str, + user_id: str, variation_key: Optional[str] + ) -> bool: """ Sets users to a map of experiments to forced variations. 
Args: @@ -83,7 +99,7 @@ def set_forced_variation(self, project_config, experiment_key, user_id, variatio experiment_id = experiment.id if variation_key is None: if user_id in self.forced_variation_map: - experiment_to_variation_map = self.forced_variation_map.get(user_id) + experiment_to_variation_map = self.forced_variation_map[user_id] if experiment_id in experiment_to_variation_map: del self.forced_variation_map[user_id][experiment_id] self.logger.debug( @@ -120,7 +136,9 @@ def set_forced_variation(self, project_config, experiment_key, user_id, variatio ) return True - def get_forced_variation(self, project_config, experiment_key, user_id): + def get_forced_variation( + self, project_config: ProjectConfig, experiment_key: str, user_id: str + ) -> tuple[Optional[entities.Variation], list[str]]: """ Gets the forced variation key for the given user and experiment. Args: @@ -132,7 +150,7 @@ def get_forced_variation(self, project_config, experiment_key, user_id): The variation which the given user and experiment should be forced into and array of log messages representing decision making. """ - decide_reasons = [] + decide_reasons: list[str] = [] if user_id not in self.forced_variation_map: message = f'User "{user_id}" is not in the forced variation map.' 
self.logger.debug(message) @@ -157,13 +175,19 @@ def get_forced_variation(self, project_config, experiment_key, user_id): return None, decide_reasons variation = project_config.get_variation_from_id(experiment_key, variation_id) + # this case is logged in get_variation_from_id + if variation is None: + return None, decide_reasons + message = f'Variation "{variation.key}" is mapped to experiment "{experiment_key}" and ' \ f'user "{user_id}" in the forced variation map' self.logger.debug(message) decide_reasons.append(message) return variation, decide_reasons - def get_whitelisted_variation(self, project_config, experiment, user_id): + def get_whitelisted_variation( + self, project_config: ProjectConfig, experiment: entities.Experiment, user_id: str + ) -> tuple[Optional[entities.Variation], list[str]]: """ Determine if a user is forced into a variation (through whitelisting) for the given experiment and return that variation. @@ -180,7 +204,7 @@ def get_whitelisted_variation(self, project_config, experiment, user_id): forced_variations = experiment.forcedVariations if forced_variations and user_id in forced_variations: - forced_variation_key = forced_variations.get(user_id) + forced_variation_key = forced_variations[user_id] forced_variation = project_config.get_variation_from_key(experiment.key, forced_variation_key) if forced_variation: @@ -192,7 +216,9 @@ def get_whitelisted_variation(self, project_config, experiment, user_id): return None, decide_reasons - def get_stored_variation(self, project_config, experiment, user_profile): + def get_stored_variation( + self, project_config: ProjectConfig, experiment: entities.Experiment, user_profile: UserProfile + ) -> Optional[entities.Variation]: """ Determine if the user has a stored variation available for the given experiment and return that. 
Args: @@ -216,7 +242,13 @@ def get_stored_variation(self, project_config, experiment, user_profile): return None - def get_variation(self, project_config, experiment, user_context, options=None): + def get_variation( + self, + project_config: ProjectConfig, + experiment: entities.Experiment, + user_context: OptimizelyUserContext, + options: Optional[Sequence[str]] = None + ) -> tuple[Optional[entities.Variation], list[str]]: """ Top-level function to help determine variation user should be put in. First, check if experiment is running. @@ -252,6 +284,7 @@ def get_variation(self, project_config, experiment, user_context, options=None): return None, decide_reasons # Check if the user is forced into a variation + variation: Optional[entities.Variation] variation, reasons_received = self.get_forced_variation(project_config, experiment.key, user_id) decide_reasons += reasons_received if variation: @@ -272,7 +305,7 @@ def get_variation(self, project_config, experiment, user_context, options=None): self.logger.exception(f'Unable to retrieve user profile for user "{user_id}" as lookup failed.') retrieved_profile = None - if validator.is_user_profile_valid(retrieved_profile): + if retrieved_profile and validator.is_user_profile_valid(retrieved_profile): user_profile = UserProfile(**retrieved_profile) variation = self.get_stored_variation(project_config, experiment, user_profile) if variation: @@ -303,7 +336,7 @@ def get_variation(self, project_config, experiment, user_context, options=None): decide_reasons += bucketing_id_reasons variation, bucket_reasons = self.bucketer.bucket(project_config, experiment, user_id, bucketing_id) decide_reasons += bucket_reasons - if variation: + if isinstance(variation, entities.Variation): message = f'User "{user_id}" is in variation "{variation.key}" of experiment {experiment.key}.' 
self.logger.info(message) decide_reasons.append(message) @@ -320,7 +353,9 @@ def get_variation(self, project_config, experiment, user_context, options=None): decide_reasons.append(message) return None, decide_reasons - def get_variation_for_rollout(self, project_config, feature, user): + def get_variation_for_rollout( + self, project_config: ProjectConfig, feature: entities.FeatureFlag, user: OptimizelyUserContext + ) -> tuple[Decision, list[str]]: """ Determine which experiment/variation the user is in for a given rollout. Returns the variation of the first experiment the user qualifies for. @@ -335,7 +370,7 @@ def get_variation_for_rollout(self, project_config, feature, user): Decision namedtuple consisting of experiment and variation for the user and array of log messages representing decision making. """ - decide_reasons = [] + decide_reasons: list[str] = [] user_id = user.user_id attributes = user.get_user_attributes() @@ -380,6 +415,9 @@ def get_variation_for_rollout(self, project_config, feature, user): logging_key = "Everyone Else" if everyone_else else str(index + 1) rollout_rule = project_config.get_experiment_from_id(rule.id) + # error is logged in get_experiment_from_id + if rollout_rule is None: + continue audience_conditions = rollout_rule.get_audience_conditions_or_ids() audience_decision_response, reasons_received_audience = audience_helper.does_user_meet_audience_conditions( @@ -424,14 +462,19 @@ def get_variation_for_rollout(self, project_config, feature, user): return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons - def get_variation_for_feature(self, project_config, feature, user_context, options=None): + def get_variation_for_feature( + self, + project_config: ProjectConfig, + feature: entities.FeatureFlag, + user_context: OptimizelyUserContext, + options: Optional[list[str]] = None + ) -> tuple[Decision, list[str]]: """ Returns the experiment/variation the user is bucketed in for the given feature. 
Args: project_config: Instance of ProjectConfig. feature: Feature for which we are determining if it is enabled or not for the given user. - user: user context for user. - attributes: Dict representing user attributes. + user_context: user context for user. options: Decide options. Returns: @@ -442,8 +485,8 @@ def get_variation_for_feature(self, project_config, feature, user_context, optio # Check if the feature flag is under an experiment and the the user is bucketed into one of these experiments if feature.experimentIds: # Evaluate each experiment ID and return the first bucketed experiment variation - for experiment in feature.experimentIds: - experiment = project_config.get_experiment_from_id(experiment) + for experiment_id in feature.experimentIds: + experiment = project_config.get_experiment_from_id(experiment_id) decision_variation = None if experiment: @@ -476,7 +519,12 @@ def get_variation_for_feature(self, project_config, feature, user_context, optio decide_reasons += rollout_variation_reasons return variation, decide_reasons - def validated_forced_decision(self, project_config, decision_context, user_context): + def validated_forced_decision( + self, + project_config: ProjectConfig, + decision_context: OptimizelyUserContext.OptimizelyDecisionContext, + user_context: OptimizelyUserContext + ) -> tuple[Optional[entities.Variation], list[str]]: """ Gets forced decisions based on flag key, rule key and variation. @@ -488,7 +536,7 @@ def validated_forced_decision(self, project_config, decision_context, user_conte Returns: Variation of the forced decision. """ - reasons = [] + reasons: list[str] = [] forced_decision = user_context.get_forced_decision(decision_context) diff --git a/optimizely/entities.py b/optimizely/entities.py index a5987e1b..c0eb602a 100644 --- a/optimizely/entities.py +++ b/optimizely/entities.py @@ -10,20 +10,42 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Any, Optional, Sequence +from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .helpers.types import ExperimentDict, TrafficAllocation, VariableDict, VariationDict + class BaseEntity: - def __eq__(self, other): + def __eq__(self, other: object) -> bool: return self.__dict__ == other.__dict__ class Attribute(BaseEntity): - def __init__(self, id, key, **kwargs): + def __init__(self, id: str, key: str, **kwargs: Any): self.id = id self.key = key class Audience(BaseEntity): - def __init__(self, id, name, conditions, conditionStructure=None, conditionList=None, **kwargs): + def __init__( + self, + id: str, + name: str, + conditions: str, + conditionStructure: Optional[list[str | list[str]]] = None, + conditionList: Optional[list[str | list[str]]] = None, + **kwargs: Any + ): self.id = id self.name = name self.conditions = conditions @@ -32,7 +54,7 @@ def __init__(self, id, name, conditions, conditionStructure=None, conditionList= class Event(BaseEntity): - def __init__(self, id, key, experimentIds, **kwargs): + def __init__(self, id: str, key: str, experimentIds: list[str], **kwargs: Any): self.id = id self.key = key self.experimentIds = experimentIds @@ -41,18 +63,18 @@ def __init__(self, id, key, experimentIds, **kwargs): class Experiment(BaseEntity): def __init__( self, - id, - key, - status, - audienceIds, - variations, - forcedVariations, - trafficAllocation, - layerId, - audienceConditions=None, - groupId=None, - groupPolicy=None, - **kwargs + id: str, + key: str, + status: str, + audienceIds: list[str], + variations: list[VariationDict], + forcedVariations: dict[str, str], + trafficAllocation: 
list[TrafficAllocation], + layerId: str, + audienceConditions: Optional[Sequence[str | list[str]]] = None, + groupId: Optional[str] = None, + groupPolicy: Optional[str] = None, + **kwargs: Any ): self.id = id self.key = key @@ -66,15 +88,15 @@ def __init__( self.groupId = groupId self.groupPolicy = groupPolicy - def get_audience_conditions_or_ids(self): + def get_audience_conditions_or_ids(self) -> Sequence[str | list[str]]: """ Returns audienceConditions if present, otherwise audienceIds. """ return self.audienceConditions if self.audienceConditions is not None else self.audienceIds - def __str__(self): + def __str__(self) -> str: return self.key @staticmethod - def get_default(): + def get_default() -> Experiment: """ returns an empty experiment object. """ experiment = Experiment( id='', @@ -92,17 +114,23 @@ def get_default(): class FeatureFlag(BaseEntity): - def __init__(self, id, key, experimentIds, rolloutId, variables, groupId=None, **kwargs): + def __init__( + self, id: str, key: str, experimentIds: list[str], rolloutId: str, + variables: list[VariableDict], groupId: Optional[str] = None, **kwargs: Any + ): self.id = id self.key = key self.experimentIds = experimentIds self.rolloutId = rolloutId - self.variables = variables + self.variables: dict[str, Variable] = variables # type: ignore[assignment] self.groupId = groupId class Group(BaseEntity): - def __init__(self, id, policy, experiments, trafficAllocation, **kwargs): + def __init__( + self, id: str, policy: str, experiments: list[Experiment], + trafficAllocation: list[TrafficAllocation], **kwargs: Any + ): self.id = id self.policy = policy self.experiments = experiments @@ -111,20 +139,20 @@ def __init__(self, id, policy, experiments, trafficAllocation, **kwargs): class Layer(BaseEntity): """Layer acts as rollout.""" - def __init__(self, id, experiments, **kwargs): + def __init__(self, id: str, experiments: list[ExperimentDict], **kwargs: Any): self.id = id self.experiments = experiments class 
Variable(BaseEntity): class Type: - BOOLEAN = 'boolean' - DOUBLE = 'double' - INTEGER = 'integer' - JSON = 'json' - STRING = 'string' + BOOLEAN: Final = 'boolean' + DOUBLE: Final = 'double' + INTEGER: Final = 'integer' + JSON: Final = 'json' + STRING: Final = 'string' - def __init__(self, id, key, type, defaultValue, **kwargs): + def __init__(self, id: str, key: str, type: str, defaultValue: Any, **kwargs: Any): self.id = id self.key = key self.type = type @@ -133,15 +161,17 @@ def __init__(self, id, key, type, defaultValue, **kwargs): class Variation(BaseEntity): class VariableUsage(BaseEntity): - def __init__(self, id, value, **kwargs): + def __init__(self, id: str, value: str, **kwargs: Any): self.id = id self.value = value - def __init__(self, id, key, featureEnabled=False, variables=None, **kwargs): + def __init__( + self, id: str, key: str, featureEnabled: bool = False, variables: Optional[list[Variable]] = None, **kwargs: Any + ): self.id = id self.key = key self.featureEnabled = featureEnabled self.variables = variables or [] - def __str__(self): + def __str__(self) -> str: return self.key diff --git a/optimizely/error_handler.py b/optimizely/error_handler.py index 8fe631f3..69411fb0 100644 --- a/optimizely/error_handler.py +++ b/optimizely/error_handler.py @@ -1,4 +1,4 @@ -# Copyright 2016, Optimizely +# Copyright 2016, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -17,7 +17,7 @@ class BaseErrorHandler: Override with your own exception handler providing handle_error method. """ @staticmethod - def handle_error(*args): + def handle_error(error: Exception) -> None: pass @@ -29,5 +29,5 @@ class RaiseExceptionErrorHandler(BaseErrorHandler): """ Class providing handle_error method which raises provided exception. 
""" @staticmethod - def handle_error(error): + def handle_error(error: Exception) -> None: raise error diff --git a/optimizely/event/event_factory.py b/optimizely/event/event_factory.py index 237bdbe9..8a4bb0cf 100644 --- a/optimizely/event/event_factory.py +++ b/optimizely/event/event_factory.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,6 +11,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Optional, Sequence, cast, List +from sys import version_info +from optimizely import entities from optimizely.helpers import enums from optimizely.helpers import event_tag_utils from optimizely.helpers import validator @@ -18,7 +22,18 @@ from . import payload from . 
import user_event -CUSTOM_ATTRIBUTE_FEATURE_TYPE = 'custom' +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.project_config import ProjectConfig + from optimizely.optimizely_user_context import UserAttributes + from optimizely.logger import Logger + +CUSTOM_ATTRIBUTE_FEATURE_TYPE: Final = 'custom' class EventFactory: @@ -27,13 +42,17 @@ class EventFactory: to record the events via the Optimizely Events API ("https://developers.optimizely.com/x/events/api/index.html") """ - EVENT_ENDPOINT = 'https://logx.optimizely.com/v1/events' - HTTP_VERB = 'POST' - HTTP_HEADERS = {'Content-Type': 'application/json'} - ACTIVATE_EVENT_KEY = 'campaign_activated' + EVENT_ENDPOINT: Final = 'https://logx.optimizely.com/v1/events' + HTTP_VERB: Final = 'POST' + HTTP_HEADERS: Final = {'Content-Type': 'application/json'} + ACTIVATE_EVENT_KEY: Final = 'campaign_activated' @classmethod - def create_log_event(cls, user_events, logger): + def create_log_event( + cls, + user_events: Sequence[Optional[user_event.UserEvent]] | Optional[user_event.UserEvent], + logger: Logger + ) -> Optional[log_event.LogEvent]: """ Create LogEvent instance. 
Args: @@ -45,7 +64,7 @@ def create_log_event(cls, user_events, logger): """ if not isinstance(user_events, list): - user_events = [user_events] + user_events = cast(List[Optional[user_event.UserEvent]], [user_events]) visitors = [] @@ -58,7 +77,12 @@ def create_log_event(cls, user_events, logger): if len(visitors) == 0: return None - user_context = user_events[0].event_context + first_event = user_events[0] + + if not first_event: + return None + + user_context = first_event.event_context event_batch = payload.EventBatch( user_context.account_id, user_context.project_id, @@ -76,7 +100,7 @@ def create_log_event(cls, user_events, logger): return log_event.LogEvent(cls.EVENT_ENDPOINT, event_params, cls.HTTP_VERB, cls.HTTP_HEADERS) @classmethod - def _create_visitor(cls, event, logger): + def _create_visitor(cls, event: Optional[user_event.UserEvent], logger: Logger) -> Optional[payload.Visitor]: """ Helper method to create Visitor instance for event_batch. Args: @@ -91,7 +115,7 @@ def _create_visitor(cls, event, logger): if isinstance(event, user_event.ImpressionEvent): experiment_layerId, experiment_id, variation_id, variation_key = '', '', '', '' - if event.variation: + if isinstance(event.variation, entities.Variation): variation_id = event.variation.id variation_key = event.variation.key @@ -111,7 +135,7 @@ def _create_visitor(cls, event, logger): return visitor - elif isinstance(event, user_event.ConversionEvent): + elif isinstance(event, user_event.ConversionEvent) and event.event: revenue = event_tag_utils.get_revenue_value(event.event_tags) value = event_tag_utils.get_numeric_value(event.event_tags, logger) @@ -130,7 +154,9 @@ def _create_visitor(cls, event, logger): return None @staticmethod - def build_attribute_list(attributes, project_config): + def build_attribute_list( + attributes: Optional[UserAttributes], project_config: ProjectConfig + ) -> list[payload.VisitorAttribute]: """ Create Vistor Attribute List. 
Args: @@ -141,7 +167,7 @@ def build_attribute_list(attributes, project_config): List consisting of valid attributes for the user. Empty otherwise. """ - attributes_list = [] + attributes_list: list[payload.VisitorAttribute] = [] if project_config is None: return attributes_list diff --git a/optimizely/event/event_processor.py b/optimizely/event/event_processor.py index be0aca55..0341c1e4 100644 --- a/optimizely/event/event_processor.py +++ b/optimizely/event/event_processor.py @@ -20,6 +20,7 @@ from typing import Optional from datetime import timedelta import queue +from sys import version_info from optimizely import logger as _logging from optimizely import notification_center as _notification_center @@ -30,6 +31,12 @@ from .user_event import UserEvent +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + class BaseEventProcessor(ABC): """ Class encapsulating event processing. Override with your own implementation. """ @@ -55,13 +62,13 @@ class Signal: '''Used to create unique objects for sending signals to event queue.''' pass - _DEFAULT_QUEUE_CAPACITY = 1000 - _DEFAULT_BATCH_SIZE = 10 - _DEFAULT_FLUSH_INTERVAL = 30 - _DEFAULT_TIMEOUT_INTERVAL = 5 - _SHUTDOWN_SIGNAL = Signal() - _FLUSH_SIGNAL = Signal() - LOCK = threading.Lock() + _DEFAULT_QUEUE_CAPACITY: Final = 1000 + _DEFAULT_BATCH_SIZE: Final = 10 + _DEFAULT_FLUSH_INTERVAL: Final = 30 + _DEFAULT_TIMEOUT_INTERVAL: Final = 5 + _SHUTDOWN_SIGNAL: Final = Signal() + _FLUSH_SIGNAL: Final = Signal() + LOCK: Final = threading.Lock() def __init__( self, @@ -94,17 +101,17 @@ def __init__( self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) self.event_queue = event_queue or queue.Queue(maxsize=self._DEFAULT_QUEUE_CAPACITY) self.batch_size: int = ( - batch_size # type: ignore + batch_size # type: ignore[assignment] if self._validate_instantiation_props(batch_size, 'batch_size', self._DEFAULT_BATCH_SIZE) else self._DEFAULT_BATCH_SIZE ) 
self.flush_interval: timedelta = ( - timedelta(seconds=flush_interval) # type: ignore + timedelta(seconds=flush_interval) # type: ignore[arg-type] if self._validate_instantiation_props(flush_interval, 'flush_interval', self._DEFAULT_FLUSH_INTERVAL) else timedelta(seconds=self._DEFAULT_FLUSH_INTERVAL) ) self.timeout_interval: timedelta = ( - timedelta(seconds=timeout_interval) # type: ignore + timedelta(seconds=timeout_interval) # type: ignore[arg-type] if self._validate_instantiation_props(timeout_interval, 'timeout_interval', self._DEFAULT_TIMEOUT_INTERVAL) else timedelta(seconds=self._DEFAULT_TIMEOUT_INTERVAL) ) diff --git a/optimizely/event/log_event.py b/optimizely/event/log_event.py index 2a6b8b78..7c0beeb6 100644 --- a/optimizely/event/log_event.py +++ b/optimizely/event/log_event.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,15 +11,32 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Optional, Any +from sys import version_info +from optimizely import event_builder -class LogEvent: + +if version_info < (3, 8): + from typing_extensions import Literal +else: + from typing import Literal # type: ignore + + +class LogEvent(event_builder.Event): """ Representation of an event which can be sent to Optimizely events API. 
""" - def __init__(self, url, params, http_verb=None, headers=None): + def __init__( + self, + url: str, + params: dict[str, Any], + http_verb: Optional[Literal['POST', 'GET']] = None, + headers: Optional[dict[str, str]] = None + ): self.url = url self.params = params self.http_verb = http_verb or 'POST' self.headers = headers - def __str__(self): + def __str__(self) -> str: return f'{self.__class__}: {self.__dict__}' diff --git a/optimizely/event/payload.py b/optimizely/event/payload.py index 15e23db2..ac6f35e4 100644 --- a/optimizely/event/payload.py +++ b/optimizely/event/payload.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,7 +11,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import json +from numbers import Integral +from typing import TYPE_CHECKING, Any, Optional + + +if TYPE_CHECKING: + from optimizely.helpers.event_tag_utils import EventTags class EventBatch: @@ -19,14 +26,14 @@ class EventBatch: def __init__( self, - account_id, - project_id, - revision, - client_name, - client_version, - anonymize_ip, - enrich_decisions=True, - visitors=None, + account_id: str, + project_id: str, + revision: str, + client_name: str, + client_version: str, + anonymize_ip: bool, + enrich_decisions: bool = True, + visitors: Optional[list[Visitor]] = None, ): self.account_id = account_id self.project_id = project_id @@ -37,11 +44,11 @@ def __init__( self.enrich_decisions = enrich_decisions self.visitors = visitors or [] - def __eq__(self, other): + def __eq__(self, other: object) -> bool: batch_obj = self.get_event_params() return batch_obj == other - def _dict_clean(self, obj): + def _dict_clean(self, obj: list[tuple[str, Any]]) -> dict[str, Any]: """ Helper 
method to remove keys from dictionary with None values. """ result = {} @@ -52,16 +59,19 @@ def _dict_clean(self, obj): result[k] = v return result - def get_event_params(self): + def get_event_params(self) -> dict[str, Any]: """ Method to return valid params for LogEvent payload. """ - return json.loads(json.dumps(self.__dict__, default=lambda o: o.__dict__), object_pairs_hook=self._dict_clean,) + return json.loads( # type: ignore[no-any-return] + json.dumps(self.__dict__, default=lambda o: o.__dict__), + object_pairs_hook=self._dict_clean, + ) class Decision: """ Class respresenting Decision. """ - def __init__(self, campaign_id, experiment_id, variation_id, metadata): + def __init__(self, campaign_id: str, experiment_id: str, variation_id: str, metadata: Metadata): self.campaign_id = campaign_id self.experiment_id = experiment_id self.variation_id = variation_id @@ -71,7 +81,7 @@ def __init__(self, campaign_id, experiment_id, variation_id, metadata): class Metadata: """ Class respresenting Metadata. """ - def __init__(self, flag_key, rule_key, rule_type, variation_key, enabled): + def __init__(self, flag_key: str, rule_key: str, rule_type: str, variation_key: str, enabled: bool): self.flag_key = flag_key self.rule_key = rule_key self.rule_type = rule_type @@ -82,7 +92,7 @@ def __init__(self, flag_key, rule_key, rule_type, variation_key, enabled): class Snapshot: """ Class representing Snapshot. """ - def __init__(self, events, decisions=None): + def __init__(self, events: list[SnapshotEvent], decisions: Optional[list[Decision]] = None): self.events = events self.decisions = decisions @@ -90,7 +100,16 @@ def __init__(self, events, decisions=None): class SnapshotEvent: """ Class representing Snapshot Event. 
""" - def __init__(self, entity_id, uuid, key, timestamp, revenue=None, value=None, tags=None): + def __init__( + self, + entity_id: str, + uuid: str, + key: str, + timestamp: int, + revenue: Optional[Integral] = None, + value: Any = None, + tags: Optional[EventTags] = None + ): self.entity_id = entity_id self.uuid = uuid self.key = key @@ -103,7 +122,7 @@ def __init__(self, entity_id, uuid, key, timestamp, revenue=None, value=None, ta class Visitor: """ Class representing Visitor. """ - def __init__(self, snapshots, attributes, visitor_id): + def __init__(self, snapshots: list[Snapshot], attributes: list[VisitorAttribute], visitor_id: str): self.snapshots = snapshots self.attributes = attributes self.visitor_id = visitor_id @@ -112,7 +131,7 @@ def __init__(self, snapshots, attributes, visitor_id): class VisitorAttribute: """ Class representing Visitor Attribute. """ - def __init__(self, entity_id, key, attribute_type, value): + def __init__(self, entity_id: str, key: str, attribute_type: str, value: Any): self.entity_id = entity_id self.key = key self.type = attribute_type diff --git a/optimizely/event/user_event.py b/optimizely/event/user_event.py index 67838410..9cdb623a 100644 --- a/optimizely/event/user_event.py +++ b/optimizely/event/user_event.py @@ -1,4 +1,4 @@ -# Copyright 2019 Optimizely +# Copyright 2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,19 +10,38 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +from __future__ import annotations import time import uuid +from typing import TYPE_CHECKING, Optional +from sys import version_info from optimizely import version -CLIENT_NAME = 'python-sdk' + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.entities import Experiment, Variation, Event + from optimizely.event.payload import VisitorAttribute + from optimizely.helpers.event_tag_utils import EventTags + + +CLIENT_NAME: Final = 'python-sdk' class UserEvent: """ Class respresenting User Event. """ - def __init__(self, event_context, user_id, visitor_attributes, bot_filtering=None): + def __init__( + self, event_context: EventContext, user_id: str, + visitor_attributes: list[VisitorAttribute], bot_filtering: Optional[bool] = None + ): self.event_context = event_context self.user_id = user_id self.visitor_attributes = visitor_attributes @@ -30,10 +49,10 @@ def __init__(self, event_context, user_id, visitor_attributes, bot_filtering=Non self.uuid = self._get_uuid() self.timestamp = self._get_time() - def _get_time(self): + def _get_time(self) -> int: return int(round(time.time() * 1000)) - def _get_uuid(self): + def _get_uuid(self) -> str: return str(uuid.uuid4()) @@ -41,8 +60,17 @@ class ImpressionEvent(UserEvent): """ Class representing Impression Event. 
""" def __init__( - self, event_context, user_id, experiment, visitor_attributes, variation, flag_key, - rule_key, rule_type, enabled, bot_filtering=None + self, + event_context: EventContext, + user_id: str, + experiment: Experiment, + visitor_attributes: list[VisitorAttribute], + variation: Optional[Variation], + flag_key: str, + rule_key: str, + rule_type: str, + enabled: bool, + bot_filtering: Optional[bool] = None ): super().__init__(event_context, user_id, visitor_attributes, bot_filtering) self.experiment = experiment @@ -57,7 +85,9 @@ class ConversionEvent(UserEvent): """ Class representing Conversion Event. """ def __init__( - self, event_context, event, user_id, visitor_attributes, event_tags, bot_filtering=None, + self, event_context: EventContext, event: Optional[Event], user_id: str, + visitor_attributes: list[VisitorAttribute], event_tags: Optional[EventTags], + bot_filtering: Optional[bool] = None, ): super().__init__(event_context, user_id, visitor_attributes, bot_filtering) self.event = event @@ -67,7 +97,7 @@ def __init__( class EventContext: """ Class respresenting User Event Context. """ - def __init__(self, account_id, project_id, revision, anonymize_ip): + def __init__(self, account_id: str, project_id: str, revision: str, anonymize_ip: bool): self.account_id = account_id self.project_id = project_id self.revision = revision diff --git a/optimizely/event/user_event_factory.py b/optimizely/event/user_event_factory.py index 75741aef..ef07d06b 100644 --- a/optimizely/event/user_event_factory.py +++ b/optimizely/event/user_event_factory.py @@ -1,4 +1,4 @@ -# Copyright 2019, 2021 Optimizely +# Copyright 2019, 2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,19 +11,37 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations +from typing import TYPE_CHECKING, Optional +from optimizely.helpers.event_tag_utils import EventTags from . import event_factory from . import user_event from optimizely.helpers import enums +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.optimizely_user_context import UserAttributes + from optimizely.project_config import ProjectConfig + from optimizely.entities import Experiment, Variation + + class UserEventFactory: """ UserEventFactory builds impression and conversion events from a given UserEvent. """ @classmethod def create_impression_event( - cls, project_config, activated_experiment, variation_id, flag_key, rule_key, rule_type, - enabled, user_id, user_attributes - ): + cls, + project_config: ProjectConfig, + activated_experiment: Experiment, + variation_id: Optional[str], + flag_key: str, + rule_key: str, + rule_type: str, + enabled: bool, + user_id: str, + user_attributes: Optional[UserAttributes] + ) -> Optional[user_event.ImpressionEvent]: """ Create impression Event to be sent to the logging endpoint. Args: @@ -35,7 +53,7 @@ def create_impression_event( rule_type: type for the source. enabled: boolean representing if feature is enabled user_id: ID for user. - attributes: Dict representing user attributes and values which need to be recorded. + user_attributes: Dict representing user attributes and values which need to be recorded. Returns: Event object encapsulating the impression event. 
None if: @@ -45,7 +63,8 @@ def create_impression_event( if not activated_experiment and rule_type is not enums.DecisionSources.ROLLOUT: return None - variation, experiment_id = None, None + variation: Optional[Variation] = None + experiment_id = None if activated_experiment: experiment_id = activated_experiment.id @@ -74,14 +93,21 @@ def create_impression_event( ) @classmethod - def create_conversion_event(cls, project_config, event_key, user_id, user_attributes, event_tags): + def create_conversion_event( + cls, + project_config: ProjectConfig, + event_key: str, + user_id: str, + user_attributes: Optional[UserAttributes], + event_tags: Optional[EventTags] + ) -> Optional[user_event.ConversionEvent]: """ Create conversion Event to be sent to the logging endpoint. Args: project_config: Instance of ProjectConfig. event_key: Key representing the event which needs to be recorded. user_id: ID for user. - attributes: Dict representing user attributes and values. + user_attributes: Dict representing user attributes and values. event_tags: Dict representing metadata associated with the event. Returns: diff --git a/optimizely/event_builder.py b/optimizely/event_builder.py index 882f8518..ecabf14c 100644 --- a/optimizely/event_builder.py +++ b/optimizely/event_builder.py @@ -1,4 +1,4 @@ -# Copyright 2016-2019, Optimizely +# Copyright 2016-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,19 +11,39 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import time +from typing import TYPE_CHECKING, Any, Optional import uuid +from sys import version_info from . 
import version from .helpers import enums from .helpers import event_tag_utils from .helpers import validator +if version_info < (3, 8): + from typing_extensions import Final, Literal +else: + from typing import Final, Literal # type: ignore + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .entities import Experiment + from .optimizely_user_context import UserAttributes + from .project_config import ProjectConfig + class Event: """ Representation of an event which can be sent to the Optimizely logging endpoint. """ - def __init__(self, url, params, http_verb=None, headers=None): + def __init__( + self, + url: str, + params: dict[str, Any], + http_verb: Optional[Literal['POST', 'GET']] = None, + headers: Optional[dict[str, str]] = None + ): self.url = url self.params = params self.http_verb = http_verb or 'GET' @@ -34,35 +54,37 @@ class EventBuilder: """ Class which encapsulates methods to build events for tracking impressions and conversions using the new V3 event API (batch). 
""" - EVENTS_URL = 'https://logx.optimizely.com/v1/events' - HTTP_VERB = 'POST' - HTTP_HEADERS = {'Content-Type': 'application/json'} + EVENTS_URL: Final = 'https://logx.optimizely.com/v1/events' + HTTP_VERB: Final = 'POST' + HTTP_HEADERS: Final = {'Content-Type': 'application/json'} class EventParams: - ACCOUNT_ID = 'account_id' - PROJECT_ID = 'project_id' - EXPERIMENT_ID = 'experiment_id' - CAMPAIGN_ID = 'campaign_id' - VARIATION_ID = 'variation_id' - END_USER_ID = 'visitor_id' - ENRICH_DECISIONS = 'enrich_decisions' - EVENTS = 'events' - EVENT_ID = 'entity_id' - ATTRIBUTES = 'attributes' - DECISIONS = 'decisions' - TIME = 'timestamp' - KEY = 'key' - TAGS = 'tags' - UUID = 'uuid' - USERS = 'visitors' - SNAPSHOTS = 'snapshots' - SOURCE_SDK_TYPE = 'client_name' - SOURCE_SDK_VERSION = 'client_version' - CUSTOM = 'custom' - ANONYMIZE_IP = 'anonymize_ip' - REVISION = 'revision' - - def _get_attributes_data(self, project_config, attributes): + ACCOUNT_ID: Final = 'account_id' + PROJECT_ID: Final = 'project_id' + EXPERIMENT_ID: Final = 'experiment_id' + CAMPAIGN_ID: Final = 'campaign_id' + VARIATION_ID: Final = 'variation_id' + END_USER_ID: Final = 'visitor_id' + ENRICH_DECISIONS: Final = 'enrich_decisions' + EVENTS: Final = 'events' + EVENT_ID: Final = 'entity_id' + ATTRIBUTES: Final = 'attributes' + DECISIONS: Final = 'decisions' + TIME: Final = 'timestamp' + KEY: Final = 'key' + TAGS: Final = 'tags' + UUID: Final = 'uuid' + USERS: Final = 'visitors' + SNAPSHOTS: Final = 'snapshots' + SOURCE_SDK_TYPE: Final = 'client_name' + SOURCE_SDK_VERSION: Final = 'client_version' + CUSTOM: Final = 'custom' + ANONYMIZE_IP: Final = 'anonymize_ip' + REVISION: Final = 'revision' + + def _get_attributes_data( + self, project_config: ProjectConfig, attributes: UserAttributes + ) -> list[dict[str, Any]]: """ Get attribute(s) information. 
Args: @@ -105,7 +127,7 @@ def _get_attributes_data(self, project_config, attributes): return params - def _get_time(self): + def _get_time(self) -> int: """ Get time in milliseconds to be added. Returns: @@ -114,7 +136,9 @@ def _get_time(self): return int(round(time.time() * 1000)) - def _get_common_params(self, project_config, user_id, attributes): + def _get_common_params( + self, project_config: ProjectConfig, user_id: str, attributes: UserAttributes + ) -> dict[str, Any]: """ Get params which are used same in both conversion and impression events. Args: @@ -125,7 +149,7 @@ def _get_common_params(self, project_config, user_id, attributes): Returns: Dict consisting of parameters common to both impression and conversion events. """ - common_params = { + common_params: dict[str, Any] = { self.EventParams.PROJECT_ID: project_config.get_project_id(), self.EventParams.ACCOUNT_ID: project_config.get_account_id(), } @@ -149,7 +173,9 @@ def _get_common_params(self, project_config, user_id, attributes): return common_params - def _get_required_params_for_impression(self, experiment, variation_id): + def _get_required_params_for_impression( + self, experiment: Experiment, variation_id: str + ) -> dict[str, list[dict[str, str | int]]]: """ Get parameters that are required for the impression event to register. Args: @@ -159,7 +185,7 @@ def _get_required_params_for_impression(self, experiment, variation_id): Returns: Dict consisting of decisions and events info for impression event. 
""" - snapshot = {} + snapshot: dict[str, list[dict[str, str | int]]] = {} snapshot[self.EventParams.DECISIONS] = [ { @@ -180,7 +206,9 @@ def _get_required_params_for_impression(self, experiment, variation_id): return snapshot - def _get_required_params_for_conversion(self, project_config, event_key, event_tags): + def _get_required_params_for_conversion( + self, project_config: ProjectConfig, event_key: str, event_tags: event_tag_utils.EventTags + ) -> dict[str, list[dict[str, Any]]]: """ Get parameters that are required for the conversion event to register. Args: @@ -192,9 +220,10 @@ def _get_required_params_for_conversion(self, project_config, event_key, event_t Dict consisting of the decisions and events info for conversion event. """ snapshot = {} + event = project_config.get_event(event_key) - event_dict = { - self.EventParams.EVENT_ID: project_config.get_event(event_key).id, + event_dict: dict[str, Any] = { + self.EventParams.EVENT_ID: event.id if event else None, self.EventParams.TIME: self._get_time(), self.EventParams.KEY: event_key, self.EventParams.UUID: str(uuid.uuid4()), @@ -215,7 +244,10 @@ def _get_required_params_for_conversion(self, project_config, event_key, event_t snapshot[self.EventParams.EVENTS] = [event_dict] return snapshot - def create_impression_event(self, project_config, experiment, variation_id, user_id, attributes): + def create_impression_event( + self, project_config: ProjectConfig, experiment: Experiment, + variation_id: str, user_id: str, attributes: UserAttributes + ) -> Event: """ Create impression Event to be sent to the logging endpoint. 
Args: @@ -236,7 +268,10 @@ def create_impression_event(self, project_config, experiment, variation_id, user return Event(self.EVENTS_URL, params, http_verb=self.HTTP_VERB, headers=self.HTTP_HEADERS) - def create_conversion_event(self, project_config, event_key, user_id, attributes, event_tags): + def create_conversion_event( + self, project_config: ProjectConfig, event_key: str, + user_id: str, attributes: UserAttributes, event_tags: event_tag_utils.EventTags + ) -> Event: """ Create conversion Event to be sent to the logging endpoint. Args: diff --git a/optimizely/event_dispatcher.py b/optimizely/event_dispatcher.py index ed65d944..e744cafd 100644 --- a/optimizely/event_dispatcher.py +++ b/optimizely/event_dispatcher.py @@ -21,13 +21,13 @@ from .helpers import enums from . import event_builder -if version_info >= (3, 8): - from typing import Protocol +if version_info < (3, 8): + from typing_extensions import Protocol, Final else: - from typing_extensions import Protocol # type: ignore[misc] + from typing import Protocol, Final # type: ignore -REQUEST_TIMEOUT = 10 +REQUEST_TIMEOUT: Final = 10 class CustomEventDispatcher(Protocol): diff --git a/optimizely/helpers/audience.py b/optimizely/helpers/audience.py index e9914c66..39ec69c5 100644 --- a/optimizely/helpers/audience.py +++ b/optimizely/helpers/audience.py @@ -1,4 +1,4 @@ -# Copyright 2016, 2018-2021, Optimizely +# Copyright 2016, 2018-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,18 +11,29 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import json +from typing import TYPE_CHECKING, Optional, Sequence, Type from . import condition as condition_helper from . 
import condition_tree_evaluator - - -def does_user_meet_audience_conditions(config, - audience_conditions, - audience_logs, - logging_key, - attributes, - logger): +from optimizely import optimizely_user_context + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.project_config import ProjectConfig + from optimizely.logger import Logger + from optimizely.helpers.enums import ExperimentAudienceEvaluationLogs, RolloutRuleAudienceEvaluationLogs + + +def does_user_meet_audience_conditions( + config: ProjectConfig, + audience_conditions: Optional[Sequence[str | list[str]]], + audience_logs: Type[ExperimentAudienceEvaluationLogs | RolloutRuleAudienceEvaluationLogs], + logging_key: str, + attributes: Optional[optimizely_user_context.UserAttributes], + logger: Logger +) -> tuple[bool, list[str]]: """ Determine for given experiment if user satisfies the audiences for the experiment. Args: @@ -52,17 +63,19 @@ def does_user_meet_audience_conditions(config, return True, decide_reasons if attributes is None: - attributes = {} + attributes = optimizely_user_context.UserAttributes({}) - def evaluate_custom_attr(audience_id, index): + def evaluate_custom_attr(audience_id: str, index: int) -> Optional[bool]: audience = config.get_audience(audience_id) + if not audience or audience.conditionList is None: + return None custom_attr_condition_evaluator = condition_helper.CustomAttributeConditionEvaluator( audience.conditionList, attributes, logger ) return custom_attr_condition_evaluator.evaluate(index) - def evaluate_audience(audience_id): + def evaluate_audience(audience_id: str) -> Optional[bool]: audience = config.get_audience(audience_id) if audience is None: diff --git a/optimizely/helpers/condition.py b/optimizely/helpers/condition.py index 48dc00d9..a6b8057c 100644 --- a/optimizely/helpers/condition.py +++ b/optimizely/helpers/condition.py @@ -1,4 +1,4 @@ -# Copyright 2016, 2018-2020, Optimizely +# Copyright 2016, 2018-2020, 
2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,48 +11,68 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations import json import numbers +from typing import TYPE_CHECKING, Any, Callable, Optional +from sys import version_info from . import validator +from optimizely import optimizely_user_context from .enums import CommonAudienceEvaluationLogs as audience_logs from .enums import Errors from .enums import VersionType +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.logger import Logger + + +if version_info < (3, 8): + from typing_extensions import Literal, Final +else: + from typing import Literal, Final # type: ignore + + class ConditionOperatorTypes: - AND = 'and' - OR = 'or' - NOT = 'not' + AND: Final = 'and' + OR: Final = 'or' + NOT: Final = 'not' operators = [AND, OR, NOT] class ConditionMatchTypes: - EXACT = 'exact' - EXISTS = 'exists' - GREATER_THAN = 'gt' - GREATER_THAN_OR_EQUAL = 'ge' - LESS_THAN = 'lt' - LESS_THAN_OR_EQUAL = 'le' - SEMVER_EQ = 'semver_eq' - SEMVER_GE = 'semver_ge' - SEMVER_GT = 'semver_gt' - SEMVER_LE = 'semver_le' - SEMVER_LT = 'semver_lt' - SUBSTRING = 'substring' + EXACT: Final = 'exact' + EXISTS: Final = 'exists' + GREATER_THAN: Final = 'gt' + GREATER_THAN_OR_EQUAL: Final = 'ge' + LESS_THAN: Final = 'lt' + LESS_THAN_OR_EQUAL: Final = 'le' + SEMVER_EQ: Final = 'semver_eq' + SEMVER_GE: Final = 'semver_ge' + SEMVER_GT: Final = 'semver_gt' + SEMVER_LE: Final = 'semver_le' + SEMVER_LT: Final = 'semver_lt' + SUBSTRING: Final = 'substring' class CustomAttributeConditionEvaluator: """ Class encapsulating methods to be used in audience leaf condition evaluation. 
""" - CUSTOM_ATTRIBUTE_CONDITION_TYPE = 'custom_attribute' + CUSTOM_ATTRIBUTE_CONDITION_TYPE: Final = 'custom_attribute' - def __init__(self, condition_data, attributes, logger): + def __init__( + self, + condition_data: list[str | list[str]], + attributes: Optional[optimizely_user_context.UserAttributes], + logger: Logger + ): self.condition_data = condition_data - self.attributes = attributes or {} + self.attributes = attributes or optimizely_user_context.UserAttributes({}) self.logger = logger - def _get_condition_json(self, index): + def _get_condition_json(self, index: int) -> str: """ Method to generate json for logging audience condition. Args: @@ -71,7 +91,7 @@ def _get_condition_json(self, index): return json.dumps(condition_log) - def is_value_type_valid_for_exact_conditions(self, value): + def is_value_type_valid_for_exact_conditions(self, value: Any) -> bool: """ Method to validate if the value is valid for exact match type evaluation. Args: @@ -86,13 +106,13 @@ def is_value_type_valid_for_exact_conditions(self, value): return False - def is_value_a_number(self, value): + def is_value_a_number(self, value: Any) -> bool: if isinstance(value, (numbers.Integral, float)) and not isinstance(value, bool): return True return False - def is_pre_release_version(self, version): + def is_pre_release_version(self, version: str) -> bool: """ Method to check if given version is pre-release. Criteria for pre-release includes: - Version includes "-" @@ -112,7 +132,7 @@ def is_pre_release_version(self, version): return True return False - def is_build_version(self, version): + def is_build_version(self, version: str) -> bool: """ Method to check given version is a build version. 
Criteria for build version includes: - Version includes "+" @@ -132,7 +152,7 @@ def is_build_version(self, version): return True return False - def has_white_space(self, version): + def has_white_space(self, version: str) -> bool: """ Method to check if the given version contains " " (white space) Args: @@ -145,7 +165,9 @@ def has_white_space(self, version): """ return ' ' in version - def compare_user_version_with_target_version(self, target_version, user_version): + def compare_user_version_with_target_version( + self, target_version: str, user_version: str + ) -> Optional[Literal[0] | Literal[1] | Literal[-1]]: """ Method to compare user version with target version. Args: @@ -198,7 +220,7 @@ def compare_user_version_with_target_version(self, target_version, user_version) return -1 return 0 - def exact_evaluator(self, index): + def exact_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given exact match condition for the user attributes. Args: @@ -238,7 +260,7 @@ def exact_evaluator(self, index): return condition_value == user_value - def exists_evaluator(self, index): + def exists_evaluator(self, index: int) -> bool: """ Evaluate the given exists match condition for the user attributes. Args: @@ -251,7 +273,7 @@ def exists_evaluator(self, index): attr_name = self.condition_data[index][0] return self.attributes.get(attr_name) is not None - def greater_than_evaluator(self, index): + def greater_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given greater than match condition for the user attributes. Args: @@ -283,9 +305,9 @@ def greater_than_evaluator(self, index): ) return None - return user_value > condition_value + return user_value > condition_value # type: ignore[operator] - def greater_than_or_equal_evaluator(self, index): + def greater_than_or_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given greater than or equal to match condition for the user attributes. 
Args: @@ -317,9 +339,9 @@ def greater_than_or_equal_evaluator(self, index): ) return None - return user_value >= condition_value + return user_value >= condition_value # type: ignore[operator] - def less_than_evaluator(self, index): + def less_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given less than match condition for the user attributes. Args: @@ -351,9 +373,9 @@ def less_than_evaluator(self, index): ) return None - return user_value < condition_value + return user_value < condition_value # type: ignore[operator] - def less_than_or_equal_evaluator(self, index): + def less_than_or_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given less than or equal to match condition for the user attributes. Args: @@ -385,9 +407,9 @@ def less_than_or_equal_evaluator(self, index): ) return None - return user_value <= condition_value + return user_value <= condition_value # type: ignore[operator] - def substring_evaluator(self, index): + def substring_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given substring match condition for the given user attributes. Args: @@ -415,7 +437,7 @@ def substring_evaluator(self, index): return condition_value in user_value - def semver_equal_evaluator(self, index): + def semver_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version equal match target version for the user version. Args: @@ -451,7 +473,7 @@ def semver_equal_evaluator(self, index): return result == 0 - def semver_greater_than_evaluator(self, index): + def semver_greater_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version greater than match target version for the user version. 
Args: @@ -486,7 +508,7 @@ def semver_greater_than_evaluator(self, index): return result > 0 - def semver_less_than_evaluator(self, index): + def semver_less_than_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version less than match target version for the user version. Args: @@ -521,7 +543,7 @@ def semver_less_than_evaluator(self, index): return result < 0 - def semver_less_than_or_equal_evaluator(self, index): + def semver_less_than_or_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version less than or equal to match target version for the user version. Args: @@ -556,7 +578,7 @@ def semver_less_than_or_equal_evaluator(self, index): return result <= 0 - def semver_greater_than_or_equal_evaluator(self, index): + def semver_greater_than_or_equal_evaluator(self, index: int) -> Optional[bool]: """ Evaluate the given semantic version greater than or equal to match target version for the user version. Args: @@ -606,7 +628,7 @@ def semver_greater_than_or_equal_evaluator(self, index): ConditionMatchTypes.SUBSTRING: substring_evaluator } - def split_version(self, version): + def split_version(self, version: str) -> Optional[list[str]]: """ Method to split the given version. Args: @@ -619,7 +641,7 @@ def split_version(self, version): - if the given version is invalid in format """ target_prefix = version - target_suffix = "" + target_suffix = [] target_parts = [] # check that version shouldn't have white space @@ -660,7 +682,7 @@ def split_version(self, version): target_version_parts.extend(target_suffix) return target_version_parts - def evaluate(self, index): + def evaluate(self, index: int) -> Optional[bool]: """ Given a custom attribute audience condition and user attributes, evaluate the condition against the attributes. @@ -707,12 +729,12 @@ class ConditionDecoder: """ Class which provides an object_hook method for decoding dict objects into a list when given a condition_decoder. 
""" - def __init__(self, condition_decoder): - self.condition_list = [] + def __init__(self, condition_decoder: Callable[[dict[str, str]], list[Optional[str]]]): + self.condition_list: list[Optional[str] | list[str]] = [] self.index = -1 self.decoder = condition_decoder - def object_hook(self, object_dict): + def object_hook(self, object_dict: dict[str, str]) -> int: """ Hook which when passed into a json.JSONDecoder will replace each dict in a json string with its index and convert the dict to an object as defined by the passed in condition_decoder. The newly created condition object is @@ -725,12 +747,12 @@ def object_hook(self, object_dict): An index which will be used as the placeholder in the condition_structure """ instance = self.decoder(object_dict) - self.condition_list.append(instance) + self.condition_list.append(instance) # type: ignore[arg-type] self.index += 1 return self.index -def _audience_condition_deserializer(obj_dict): +def _audience_condition_deserializer(obj_dict: dict[str, str]) -> list[Optional[str]]: """ Deserializer defining how dict objects need to be decoded for audience conditions. Args: @@ -747,7 +769,7 @@ def _audience_condition_deserializer(obj_dict): ] -def loads(conditions_string): +def loads(conditions_string: str) -> tuple[list[str | list[str]], list[Optional[list[str] | str]]]: """ Deserializes the conditions property into its corresponding components: the condition_structure and the condition_list. diff --git a/optimizely/helpers/condition_tree_evaluator.py b/optimizely/helpers/condition_tree_evaluator.py index c0fe7b87..1e9a95c0 100644 --- a/optimizely/helpers/condition_tree_evaluator.py +++ b/optimizely/helpers/condition_tree_evaluator.py @@ -1,4 +1,4 @@ -# Copyright 2018-2019, Optimizely +# Copyright 2018-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -11,10 +11,16 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Any, Callable, Optional, Sequence + from .condition import ConditionOperatorTypes -def and_evaluator(conditions, leaf_evaluator): +LeafEvaluator = Callable[[Any], Optional[bool]] + + +def and_evaluator(conditions: Sequence[str | list[str]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Evaluates a list of conditions as if the evaluator had been applied to each entry and the results AND-ed together. @@ -40,7 +46,7 @@ def and_evaluator(conditions, leaf_evaluator): return None if saw_null_result else True -def or_evaluator(conditions, leaf_evaluator): +def or_evaluator(conditions: Sequence[str | list[str]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Evaluates a list of conditions as if the evaluator had been applied to each entry and the results OR-ed together. @@ -66,7 +72,7 @@ def or_evaluator(conditions, leaf_evaluator): return None if saw_null_result else False -def not_evaluator(conditions, leaf_evaluator): +def not_evaluator(conditions: Sequence[str | list[str]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Evaluates a list of conditions as if the evaluator had been applied to a single entry and NOT was applied to the result. @@ -94,7 +100,7 @@ def not_evaluator(conditions, leaf_evaluator): } -def evaluate(conditions, leaf_evaluator): +def evaluate(conditions: Optional[Sequence[str | list[str]]], leaf_evaluator: LeafEvaluator) -> Optional[bool]: """ Top level method to evaluate conditions. Args: diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 54145f9c..65af4843 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -12,138 +12,149 @@ # limitations under the License. 
import logging +from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore class CommonAudienceEvaluationLogs: - AUDIENCE_EVALUATION_RESULT = 'Audience "{}" evaluated to {}.' - EVALUATING_AUDIENCE = 'Starting to evaluate audience "{}" with conditions: {}.' - INFINITE_ATTRIBUTE_VALUE = ( + AUDIENCE_EVALUATION_RESULT: Final = 'Audience "{}" evaluated to {}.' + EVALUATING_AUDIENCE: Final = 'Starting to evaluate audience "{}" with conditions: {}.' + INFINITE_ATTRIBUTE_VALUE: Final = ( 'Audience condition "{}" evaluated to UNKNOWN because the number value ' 'for user attribute "{}" is not in the range [-2^53, +2^53].' ) - MISSING_ATTRIBUTE_VALUE = ( + MISSING_ATTRIBUTE_VALUE: Final = ( 'Audience condition {} evaluated to UNKNOWN because no value was passed for ' 'user attribute "{}".' ) - NULL_ATTRIBUTE_VALUE = ( + NULL_ATTRIBUTE_VALUE: Final = ( 'Audience condition "{}" evaluated to UNKNOWN because a null value was passed ' 'for user attribute "{}".' ) - UNEXPECTED_TYPE = ( + UNEXPECTED_TYPE: Final = ( 'Audience condition "{}" evaluated to UNKNOWN because a value of type "{}" was passed ' 'for user attribute "{}".' ) - UNKNOWN_CONDITION_TYPE = ( + UNKNOWN_CONDITION_TYPE: Final = ( 'Audience condition "{}" uses an unknown condition type. You may need to upgrade to a ' 'newer release of the Optimizely SDK.' ) - UNKNOWN_CONDITION_VALUE = ( + UNKNOWN_CONDITION_VALUE: Final = ( 'Audience condition "{}" has an unsupported condition value. You may need to upgrade to a ' 'newer release of the Optimizely SDK.' ) - UNKNOWN_MATCH_TYPE = ( + UNKNOWN_MATCH_TYPE: Final = ( 'Audience condition "{}" uses an unknown match type. You may need to upgrade to a ' 'newer release of the Optimizely SDK.' ) class ExperimentAudienceEvaluationLogs(CommonAudienceEvaluationLogs): - AUDIENCE_EVALUATION_RESULT_COMBINED = 'Audiences for experiment "{}" collectively evaluated to {}.' 
- EVALUATING_AUDIENCES_COMBINED = 'Evaluating audiences for experiment "{}": {}.' + AUDIENCE_EVALUATION_RESULT_COMBINED: Final = 'Audiences for experiment "{}" collectively evaluated to {}.' + EVALUATING_AUDIENCES_COMBINED: Final = 'Evaluating audiences for experiment "{}": {}.' class RolloutRuleAudienceEvaluationLogs(CommonAudienceEvaluationLogs): - AUDIENCE_EVALUATION_RESULT_COMBINED = 'Audiences for rule {} collectively evaluated to {}.' - EVALUATING_AUDIENCES_COMBINED = 'Evaluating audiences for rule {}: {}.' + AUDIENCE_EVALUATION_RESULT_COMBINED: Final = 'Audiences for rule {} collectively evaluated to {}.' + EVALUATING_AUDIENCES_COMBINED: Final = 'Evaluating audiences for rule {}: {}.' class ConfigManager: - AUTHENTICATED_DATAFILE_URL_TEMPLATE = 'https://config.optimizely.com/datafiles/auth/{sdk_key}.json' - AUTHORIZATION_HEADER_DATA_TEMPLATE = 'Bearer {datafile_access_token}' - DATAFILE_URL_TEMPLATE = 'https://cdn.optimizely.com/datafiles/{sdk_key}.json' + AUTHENTICATED_DATAFILE_URL_TEMPLATE: Final = 'https://config.optimizely.com/datafiles/auth/{sdk_key}.json' + AUTHORIZATION_HEADER_DATA_TEMPLATE: Final = 'Bearer {datafile_access_token}' + DATAFILE_URL_TEMPLATE: Final = 'https://cdn.optimizely.com/datafiles/{sdk_key}.json' # Default time in seconds to block the 'get_config' method call until 'config' instance has been initialized. 
- DEFAULT_BLOCKING_TIMEOUT = 10 + DEFAULT_BLOCKING_TIMEOUT: Final = 10 # Default config update interval of 5 minutes - DEFAULT_UPDATE_INTERVAL = 5 * 60 + DEFAULT_UPDATE_INTERVAL: Final = 5 * 60 # Time in seconds before which request for datafile times out - REQUEST_TIMEOUT = 10 + REQUEST_TIMEOUT: Final = 10 class ControlAttributes: - BOT_FILTERING = '$opt_bot_filtering' - BUCKETING_ID = '$opt_bucketing_id' - USER_AGENT = '$opt_user_agent' + BOT_FILTERING: Final = '$opt_bot_filtering' + BUCKETING_ID: Final = '$opt_bucketing_id' + USER_AGENT: Final = '$opt_user_agent' class DatafileVersions: - V2 = '2' - V3 = '3' - V4 = '4' + V2: Final = '2' + V3: Final = '3' + V4: Final = '4' class DecisionNotificationTypes: - AB_TEST = 'ab-test' - ALL_FEATURE_VARIABLES = 'all-feature-variables' - FEATURE = 'feature' - FEATURE_TEST = 'feature-test' - FEATURE_VARIABLE = 'feature-variable' - FLAG = 'flag' + AB_TEST: Final = 'ab-test' + ALL_FEATURE_VARIABLES: Final = 'all-feature-variables' + FEATURE: Final = 'feature' + FEATURE_TEST: Final = 'feature-test' + FEATURE_VARIABLE: Final = 'feature-variable' + FLAG: Final = 'flag' class DecisionSources: - EXPERIMENT = 'experiment' - FEATURE_TEST = 'feature-test' - ROLLOUT = 'rollout' + EXPERIMENT: Final = 'experiment' + FEATURE_TEST: Final = 'feature-test' + ROLLOUT: Final = 'rollout' class Errors: - INVALID_ATTRIBUTE = 'Provided attribute is not in datafile.' - INVALID_ATTRIBUTE_FORMAT = 'Attributes provided are in an invalid format.' - INVALID_AUDIENCE = 'Provided audience is not in datafile.' - INVALID_EVENT_TAG_FORMAT = 'Event tags provided are in an invalid format.' - INVALID_EXPERIMENT_KEY = 'Provided experiment is not in datafile.' - INVALID_EVENT_KEY = 'Provided event is not in datafile.' - INVALID_FEATURE_KEY = 'Provided feature key is not in the datafile.' - INVALID_GROUP_ID = 'Provided group is not in datafile.' - INVALID_INPUT = 'Provided "{}" is in an invalid format.' - INVALID_OPTIMIZELY = 'Optimizely instance is not valid. 
Failing "{}".' - INVALID_PROJECT_CONFIG = 'Invalid config. Optimizely instance is not valid. Failing "{}".' - INVALID_VARIATION = 'Provided variation is not in datafile.' - INVALID_VARIABLE_KEY = 'Provided variable key is not in the feature flag.' - NONE_FEATURE_KEY_PARAMETER = '"None" is an invalid value for feature key.' - NONE_USER_ID_PARAMETER = '"None" is an invalid value for user ID.' - NONE_VARIABLE_KEY_PARAMETER = '"None" is an invalid value for variable key.' - UNSUPPORTED_DATAFILE_VERSION = 'This version of the Python SDK does not support the given datafile version: "{}".' + INVALID_ATTRIBUTE: Final = 'Provided attribute is not in datafile.' + INVALID_ATTRIBUTE_FORMAT: Final = 'Attributes provided are in an invalid format.' + INVALID_AUDIENCE: Final = 'Provided audience is not in datafile.' + INVALID_EVENT_TAG_FORMAT: Final = 'Event tags provided are in an invalid format.' + INVALID_EXPERIMENT_KEY: Final = 'Provided experiment is not in datafile.' + INVALID_EVENT_KEY: Final = 'Provided event is not in datafile.' + INVALID_FEATURE_KEY: Final = 'Provided feature key is not in the datafile.' + INVALID_GROUP_ID: Final = 'Provided group is not in datafile.' + INVALID_INPUT: Final = 'Provided "{}" is in an invalid format.' + INVALID_OPTIMIZELY: Final = 'Optimizely instance is not valid. Failing "{}".' + INVALID_PROJECT_CONFIG: Final = 'Invalid config. Optimizely instance is not valid. Failing "{}".' + INVALID_VARIATION: Final = 'Provided variation is not in datafile.' + INVALID_VARIABLE_KEY: Final = 'Provided variable key is not in the feature flag.' + NONE_FEATURE_KEY_PARAMETER: Final = '"None" is an invalid value for feature key.' + NONE_USER_ID_PARAMETER: Final = '"None" is an invalid value for user ID.' + NONE_VARIABLE_KEY_PARAMETER: Final = '"None" is an invalid value for variable key.' 
+ UNSUPPORTED_DATAFILE_VERSION: Final = ( + 'This version of the Python SDK does not support the given datafile version: "{}".') class ForcedDecisionLogs: - USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED = 'Variation ({}) is mapped to flag ({}), rule ({}) and user ({}) ' \ - 'in the forced decision map.' - USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED = 'Variation ({}) is mapped to flag ({}) and user ({}) ' \ - 'in the forced decision map.' - USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED_BUT_INVALID = 'Invalid variation is mapped to flag ({}), rule ({}) ' \ - 'and user ({}) in the forced decision map.' - USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED_BUT_INVALID = 'Invalid variation is mapped to flag ({}) ' \ - 'and user ({}) in the forced decision map.' + USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED: Final = ( + 'Variation ({}) is mapped to flag ({}), rule ({}) and user ({}) ' + 'in the forced decision map.') + USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED: Final = ( + 'Variation ({}) is mapped to flag ({}) and user ({}) ' + 'in the forced decision map.') + USER_HAS_FORCED_DECISION_WITH_RULE_SPECIFIED_BUT_INVALID: Final = ( + 'Invalid variation is mapped to flag ({}), rule ({}) ' + 'and user ({}) in the forced decision map.') + USER_HAS_FORCED_DECISION_WITHOUT_RULE_SPECIFIED_BUT_INVALID: Final = ( + 'Invalid variation is mapped to flag ({}) ' + 'and user ({}) in the forced decision map.') class HTTPHeaders: - AUTHORIZATION = 'Authorization' - IF_MODIFIED_SINCE = 'If-Modified-Since' - LAST_MODIFIED = 'Last-Modified' + AUTHORIZATION: Final = 'Authorization' + IF_MODIFIED_SINCE: Final = 'If-Modified-Since' + LAST_MODIFIED: Final = 'Last-Modified' class HTTPVerbs: - GET = 'GET' - POST = 'POST' + GET: Final = 'GET' + POST: Final = 'POST' class LogLevels: - NOTSET = logging.NOTSET - DEBUG = logging.DEBUG - INFO = logging.INFO - WARNING = logging.WARNING - ERROR = logging.ERROR - CRITICAL = logging.CRITICAL + NOTSET: Final = logging.NOTSET + DEBUG: Final = 
logging.DEBUG + INFO: Final = logging.INFO + WARNING: Final = logging.WARNING + ERROR: Final = logging.ERROR + CRITICAL: Final = logging.CRITICAL class NotificationTypes: @@ -165,13 +176,13 @@ class NotificationTypes: LogEvent log_event """ - ACTIVATE = 'ACTIVATE:experiment, user_id, attributes, variation, event' - DECISION = 'DECISION:type, user_id, attributes, decision_info' - OPTIMIZELY_CONFIG_UPDATE = 'OPTIMIZELY_CONFIG_UPDATE' - TRACK = 'TRACK:event_key, user_id, attributes, event_tags, event' - LOG_EVENT = 'LOG_EVENT:log_event' + ACTIVATE: Final = 'ACTIVATE:experiment, user_id, attributes, variation, event' + DECISION: Final = 'DECISION:type, user_id, attributes, decision_info' + OPTIMIZELY_CONFIG_UPDATE: Final = 'OPTIMIZELY_CONFIG_UPDATE' + TRACK: Final = 'TRACK:event_key, user_id, attributes, event_tags, event' + LOG_EVENT: Final = 'LOG_EVENT:log_event' class VersionType: - IS_PRE_RELEASE = '-' - IS_BUILD = '+' + IS_PRE_RELEASE: Final = '-' + IS_BUILD: Final = '+' diff --git a/optimizely/helpers/event_tag_utils.py b/optimizely/helpers/event_tag_utils.py index cecf1008..0efbafb7 100644 --- a/optimizely/helpers/event_tag_utils.py +++ b/optimizely/helpers/event_tag_utils.py @@ -1,4 +1,4 @@ -# Copyright 2017, Optimizely +# Copyright 2017, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,15 +11,32 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Any, Optional, NewType, Dict from . 
import enums import math import numbers +from sys import version_info -REVENUE_METRIC_TYPE = 'revenue' -NUMERIC_METRIC_TYPE = 'value' +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -def get_revenue_value(event_tags): +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.logger import Logger + + +REVENUE_METRIC_TYPE: Final = 'revenue' +NUMERIC_METRIC_TYPE: Final = 'value' + +# type for tracking event tags (essentially a sub-type of dict) +EventTags = NewType('EventTags', Dict[str, Any]) + + +def get_revenue_value(event_tags: Optional[EventTags]) -> Optional[numbers.Integral]: if event_tags is None: return None @@ -40,7 +57,7 @@ def get_revenue_value(event_tags): return raw_value -def get_numeric_value(event_tags, logger=None): +def get_numeric_value(event_tags: Optional[EventTags], logger: Optional[Logger] = None) -> Optional[float]: """ A smart getter of the numeric value from the event tags. @@ -124,4 +141,4 @@ def get_numeric_value(event_tags, logger=None): ' is in an invalid format and will not be sent to results.' ) - return numeric_metric_value + return numeric_metric_value # type: ignore[no-any-return] diff --git a/optimizely/helpers/experiment.py b/optimizely/helpers/experiment.py index 45bdd1b5..8a644b43 100644 --- a/optimizely/helpers/experiment.py +++ b/optimizely/helpers/experiment.py @@ -1,4 +1,4 @@ -# Copyright 2016, Optimizely +# Copyright 2016, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,11 +10,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.entities import Experiment + ALLOWED_EXPERIMENT_STATUS = ['Running'] -def is_experiment_running(experiment): +def is_experiment_running(experiment: Experiment) -> bool: """ Determine for given experiment if experiment is running. Args: diff --git a/optimizely/helpers/types.py b/optimizely/helpers/types.py index 10252e32..814bc1aa 100644 --- a/optimizely/helpers/types.py +++ b/optimizely/helpers/types.py @@ -12,20 +12,24 @@ # limitations under the License. from __future__ import annotations -from typing import Optional +from typing import Optional, Any from sys import version_info -if version_info >= (3, 8): - from typing import TypedDict # type: ignore[attr-defined] -else: +if version_info < (3, 8): from typing_extensions import TypedDict +else: + from typing import TypedDict # type: ignore # Intermediate types for type checking deserialized datafile json before actual class instantiation. 
# These aren't used for anything other than type signatures -class BaseDict(TypedDict): +class BaseEntity(TypedDict): + pass + + +class BaseDict(BaseEntity): '''Base type for parsed datafile json, before instantiation of class objects.''' id: str key: str @@ -41,7 +45,7 @@ class AttributeDict(BaseDict): pass -class TrafficAllocation(TypedDict): +class TrafficAllocation(BaseEntity): '''Traffic Allocation dict from parsed datafile json.''' endOfRange: int entityId: str @@ -72,7 +76,29 @@ class ExperimentDict(BaseDict): trafficAllocation: list[TrafficAllocation] -class RolloutDict(TypedDict): +class RolloutDict(BaseEntity): '''Rollout dict from parsed datafile json.''' id: str experiments: list[ExperimentDict] + + +class FeatureFlagDict(BaseDict): + '''Feature flag dict from parsed datafile json.''' + rolloutId: str + variables: list[VariableDict] + experimentIds: list[str] + + +class GroupDict(BaseEntity): + '''Group dict from parsed datafile json.''' + id: str + policy: str + experiments: list[ExperimentDict] + trafficAllocation: list[TrafficAllocation] + + +class AudienceDict(BaseEntity): + '''Audience dict from parsed datafile json.''' + id: str + name: str + conditions: list[Any] | str diff --git a/optimizely/helpers/validator.py b/optimizely/helpers/validator.py index 7d1e4f00..244337b0 100644 --- a/optimizely/helpers/validator.py +++ b/optimizely/helpers/validator.py @@ -1,4 +1,4 @@ -# Copyright 2016-2019, Optimizely +# Copyright 2016-2019, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,7 +11,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from __future__ import annotations import json +from typing import TYPE_CHECKING, Any, Optional, Type import jsonschema import math import numbers @@ -20,8 +22,18 @@ from optimizely.user_profile import UserProfile from . import constants +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from optimizely.logger import Logger + from optimizely.event_dispatcher import CustomEventDispatcher + from optimizely.error_handler import BaseErrorHandler + from optimizely.config_manager import BaseConfigManager + from optimizely.event.event_processor import BaseEventProcessor + from optimizely.helpers.event_tag_utils import EventTags + from optimizely.optimizely_user_context import UserAttributes -def is_datafile_valid(datafile): + +def is_datafile_valid(datafile: Optional[str | bytes]) -> bool: """ Given a datafile determine if it is valid or not. Args: @@ -30,6 +42,8 @@ def is_datafile_valid(datafile): Returns: Boolean depending upon whether datafile is valid or not. """ + if datafile is None: + return False try: datafile_json = json.loads(datafile) @@ -44,7 +58,7 @@ def is_datafile_valid(datafile): return True -def _has_method(obj, method): +def _has_method(obj: object, method: str) -> bool: """ Given an object determine if it supports the method. Args: @@ -58,7 +72,7 @@ def _has_method(obj, method): return getattr(obj, method, None) is not None -def is_config_manager_valid(config_manager): +def is_config_manager_valid(config_manager: BaseConfigManager) -> bool: """ Given a config_manager determine if it is valid or not i.e. provides a get_config method. Args: @@ -71,7 +85,7 @@ def is_config_manager_valid(config_manager): return _has_method(config_manager, 'get_config') -def is_event_processor_valid(event_processor): +def is_event_processor_valid(event_processor: BaseEventProcessor) -> bool: """ Given an event_processor, determine if it is valid or not i.e. provides a process method. 
Args: @@ -84,7 +98,7 @@ def is_event_processor_valid(event_processor): return _has_method(event_processor, 'process') -def is_error_handler_valid(error_handler): +def is_error_handler_valid(error_handler: Type[BaseErrorHandler] | BaseErrorHandler) -> bool: """ Given a error_handler determine if it is valid or not i.e. provides a handle_error method. Args: @@ -97,7 +111,7 @@ def is_error_handler_valid(error_handler): return _has_method(error_handler, 'handle_error') -def is_event_dispatcher_valid(event_dispatcher): +def is_event_dispatcher_valid(event_dispatcher: Type[CustomEventDispatcher] | CustomEventDispatcher) -> bool: """ Given a event_dispatcher determine if it is valid or not i.e. provides a dispatch_event method. Args: @@ -110,7 +124,7 @@ def is_event_dispatcher_valid(event_dispatcher): return _has_method(event_dispatcher, 'dispatch_event') -def is_logger_valid(logger): +def is_logger_valid(logger: Logger) -> bool: """ Given a logger determine if it is valid or not i.e. provides a log method. Args: @@ -123,7 +137,7 @@ def is_logger_valid(logger): return _has_method(logger, 'log') -def is_notification_center_valid(notification_center): +def is_notification_center_valid(notification_center: NotificationCenter) -> bool: """ Given notification_center determine if it is valid or not. Args: @@ -136,7 +150,7 @@ def is_notification_center_valid(notification_center): return isinstance(notification_center, NotificationCenter) -def are_attributes_valid(attributes): +def are_attributes_valid(attributes: UserAttributes) -> bool: """ Determine if attributes provided are dict or not. Args: @@ -149,7 +163,7 @@ def are_attributes_valid(attributes): return type(attributes) is dict -def are_event_tags_valid(event_tags): +def are_event_tags_valid(event_tags: EventTags) -> bool: """ Determine if event tags provided are dict or not. 
Args: @@ -162,7 +176,7 @@ def are_event_tags_valid(event_tags): return type(event_tags) is dict -def is_user_profile_valid(user_profile): +def is_user_profile_valid(user_profile: dict[str, Any]) -> bool: """ Determine if provided user profile is valid or not. Args: @@ -195,7 +209,7 @@ def is_user_profile_valid(user_profile): return True -def is_non_empty_string(input_id_key): +def is_non_empty_string(input_id_key: str) -> bool: """ Determine if provided input_id_key is a non-empty string or not. Args: @@ -210,7 +224,7 @@ def is_non_empty_string(input_id_key): return False -def is_attribute_valid(attribute_key, attribute_value): +def is_attribute_valid(attribute_key: str, attribute_value: Any) -> bool: """ Determine if given attribute is valid. Args: @@ -235,7 +249,7 @@ def is_attribute_valid(attribute_key, attribute_value): return False -def is_finite_number(value): +def is_finite_number(value: Any) -> bool: """ Validates if the given value is a number, enforces absolute limit of 2^53 and restricts NAN, INF, -INF. @@ -264,7 +278,7 @@ def is_finite_number(value): return True -def are_values_same_type(first_val, second_val): +def are_values_same_type(first_val: Any, second_val: Any) -> bool: """ Method to verify that both values belong to same type. Float and integer are considered as same type. diff --git a/optimizely/lib/pymmh3.py b/optimizely/lib/pymmh3.py index 1a3de699..b37bf944 100755 --- a/optimizely/lib/pymmh3.py +++ b/optimizely/lib/pymmh3.py @@ -16,21 +16,21 @@ https://pypi.python.org/pypi/mmh3/2.3.1 ''' +from __future__ import annotations - -def xencode(x): +def xencode(x: bytes | bytearray | str) -> bytes | bytearray: if isinstance(x, bytes) or isinstance(x, bytearray): return x else: return x.encode() -def hash(key, seed=0x0): +def hash(key: str | bytearray, seed: int = 0x0) -> int: ''' Implements 32bit murmur3 hash. 
''' key = bytearray(xencode(key)) - def fmix(h): + def fmix(h: int) -> int: h ^= h >> 16 h = (h * 0x85EBCA6B) & 0xFFFFFFFF h ^= h >> 13 @@ -85,13 +85,13 @@ def fmix(h): return -((unsigned_val ^ 0xFFFFFFFF) + 1) -def hash128(key, seed=0x0, x64arch=True): +def hash128(key: bytes, seed: int = 0x0, x64arch: bool = True) -> int: ''' Implements 128bit murmur3 hash. ''' - def hash128_x64(key, seed): + def hash128_x64(key: bytes, seed: int) -> int: ''' Implements 128bit murmur3 hash for x64. ''' - def fmix(k): + def fmix(k: int) -> int: k ^= k >> 33 k = (k * 0xFF51AFD7ED558CCD) & 0xFFFFFFFFFFFFFFFF k ^= k >> 33 @@ -216,10 +216,10 @@ def fmix(k): return h2 << 64 | h1 - def hash128_x86(key, seed): + def hash128_x86(key: bytes, seed: int) -> int: ''' Implements 128bit murmur3 hash for x86. ''' - def fmix(h): + def fmix(h: int) -> int: h ^= h >> 16 h = (h * 0x85EBCA6B) & 0xFFFFFFFF h ^= h >> 13 @@ -407,7 +407,7 @@ def fmix(h): return hash128_x86(key, seed) -def hash64(key, seed=0x0, x64arch=True): +def hash64(key: bytes, seed: int = 0x0, x64arch: bool = True) -> tuple[int, int]: ''' Implements 64bit murmur3 hash. Returns a tuple. ''' hash_128 = hash128(key, seed, x64arch) @@ -427,7 +427,7 @@ def hash64(key, seed=0x0, x64arch=True): return (int(signed_val1), int(signed_val2)) -def hash_bytes(key, seed=0x0, x64arch=True): +def hash_bytes(key: bytes, seed: int = 0x0, x64arch: bool = True) -> str: ''' Implements 128bit murmur3 hash. Returns a byte string. ''' hash_128 = hash128(key, seed, x64arch) diff --git a/optimizely/logger.py b/optimizely/logger.py index 009cb44c..33d3660c 100644 --- a/optimizely/logger.py +++ b/optimizely/logger.py @@ -11,16 +11,22 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +from typing import Any, Optional, Union import warnings -from typing import Union +from sys import version_info from .helpers import enums +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -_DEFAULT_LOG_FORMAT = '%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s:%(message)s' +_DEFAULT_LOG_FORMAT: Final = '%(levelname)-8s %(asctime)s %(filename)s:%(lineno)s:%(message)s' -def reset_logger(name, level=None, handler=None): + +def reset_logger(name: str, level: Optional[int] = None, handler: Optional[logging.Handler] = None) -> logging.Logger: """ Make a standard python logger object with default formatter, handler, etc. @@ -57,7 +63,27 @@ class BaseLogger: """ Class encapsulating logging functionality. Override with your own logger providing log method. """ @staticmethod - def log(*args): + def log(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def error(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def warning(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def info(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def debug(*args: Any) -> None: + pass # pragma: no cover + + @staticmethod + def exception(*args: Any) -> None: pass # pragma: no cover @@ -68,7 +94,7 @@ def log(*args): class NoOpLogger(BaseLogger): """ Class providing log method which logs nothing. """ - def __init__(self): + def __init__(self) -> None: self.logger = reset_logger( name='.'.join([__name__, self.__class__.__name__]), level=logging.NOTSET, handler=logging.NullHandler(), ) @@ -77,11 +103,11 @@ def __init__(self): class SimpleLogger(BaseLogger): """ Class providing log method which logs to stdout. 
""" - def __init__(self, min_level=enums.LogLevels.INFO): + def __init__(self, min_level: int = enums.LogLevels.INFO): self.level = min_level self.logger = reset_logger(name='.'.join([__name__, self.__class__.__name__]), level=min_level) - def log(self, log_level, message): + def log(self, log_level: int, message: object) -> None: # type: ignore[override] # Log a deprecation/runtime warning. # Clients should be using standard loggers instead of this wrapper. warning = f'{self.__class__} is deprecated. Please use standard python loggers.' @@ -91,7 +117,7 @@ def log(self, log_level, message): self.logger.log(log_level, message) -def adapt_logger(logger): +def adapt_logger(logger: Logger) -> Logger: """ Adapt our custom logger.BaseLogger object into a standard logging.Logger object. diff --git a/optimizely/notification_center.py b/optimizely/notification_center.py index e0f26349..322a5862 100644 --- a/optimizely/notification_center.py +++ b/optimizely/notification_center.py @@ -15,9 +15,15 @@ from typing import Any, Callable, Optional from .helpers import enums from . 
import logger as optimizely_logger +from sys import version_info +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore -NOTIFICATION_TYPES = tuple( + +NOTIFICATION_TYPES: Final = tuple( getattr(enums.NotificationTypes, attr) for attr in dir(enums.NotificationTypes) if not attr.startswith('__') ) diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index e33b14de..86e54aa0 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -36,9 +36,14 @@ from .helpers.enums import DecisionSources from .notification_center import NotificationCenter from .optimizely_config import OptimizelyConfig, OptimizelyConfigService -from .optimizely_user_context import OptimizelyUserContext -from .user_profile import UserProfileService -from typing import Any, Optional, Sequence +from .optimizely_user_context import OptimizelyUserContext, UserAttributes +from typing import TYPE_CHECKING, Any, Optional + + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .user_profile import UserProfileService + from .helpers.event_tag_utils import EventTags class Optimizely: @@ -93,7 +98,7 @@ def __init__( self.event_dispatcher = event_dispatcher or EventDispatcher self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) self.error_handler = error_handler or NoOpErrorHandler - self.config_manager: BaseConfigManager = config_manager # type: ignore + self.config_manager: BaseConfigManager = config_manager # type: ignore[assignment] self.notification_center = notification_center or NotificationCenter(self.logger) event_processor_defaults = { 'batch_size': 1, @@ -133,7 +138,7 @@ def __init__( self.logger.exception(str(error)) return - config_manager_options = { + config_manager_options: dict[str, Any] = { 'datafile': datafile, 'logger': self.logger, 'error_handler': self.error_handler, @@ -180,7 +185,7 @@ def _validate_instantiation_options(self) -> None: raise 
exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('event_processor')) def _validate_user_inputs( - self, attributes: Optional[dict[str, Any]] = None, event_tags: Optional[dict[str, Any]] = None + self, attributes: Optional[UserAttributes] = None, event_tags: Optional[EventTags] = None ) -> bool: """ Helper method to validate user inputs. @@ -208,7 +213,7 @@ def _validate_user_inputs( def _send_impression_event( self, project_config: project_config.ProjectConfig, experiment: Optional[entities.Experiment], variation: Optional[entities.Variation], flag_key: str, rule_key: str, rule_type: str, - enabled: bool, user_id: str, attributes: Optional[dict[str, Any]] + enabled: bool, user_id: str, attributes: Optional[UserAttributes] ) -> None: """ Helper method to send impression event. @@ -248,7 +253,7 @@ def _send_impression_event( def _get_feature_variable_for_type( self, project_config: project_config.ProjectConfig, feature_key: str, variable_key: str, - variable_type: Optional[str], user_id: str, attributes: Optional[dict[str, Any]] + variable_type: Optional[str], user_id: str, attributes: Optional[UserAttributes] ) -> Any: """ Helper method to determine value for a certain variable attached to a feature flag based on type of variable. @@ -359,7 +364,7 @@ def _get_feature_variable_for_type( def _get_all_feature_variables_for_type( self, project_config: project_config.ProjectConfig, feature_key: str, - user_id: str, attributes: Optional[dict[str, Any]], + user_id: str, attributes: Optional[UserAttributes], ) -> Optional[dict[str, Any]]: """ Helper method to determine value for all variables attached to a feature flag. 
@@ -453,7 +458,7 @@ def _get_all_feature_variables_for_type( ) return all_variables - def activate(self, experiment_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None) -> Optional[str]: + def activate(self, experiment_key: str, user_id: str, attributes: Optional[UserAttributes] = None) -> Optional[str]: """ Buckets visitor and sends impression event to Optimizely. Args: @@ -504,8 +509,8 @@ def activate(self, experiment_key: str, user_id: str, attributes: Optional[dict[ def track( self, event_key: str, user_id: str, - attributes: Optional[dict[str, Any]] = None, - event_tags: Optional[dict[str, Any]] = None + attributes: Optional[UserAttributes] = None, + event_tags: Optional[EventTags] = None ) -> None: """ Send conversion event to Optimizely. @@ -559,7 +564,7 @@ def track( ) def get_variation( - self, experiment_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, experiment_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[str]: """ Gets variation where user will be bucketed. @@ -624,7 +629,7 @@ def get_variation( return variation_key - def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None) -> bool: + def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optional[UserAttributes] = None) -> bool: """ Returns true if the feature is enabled for the given user. Args: @@ -712,7 +717,7 @@ def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optiona return feature_enabled - def get_enabled_features(self, user_id: str, attributes: Optional[dict[str, Any]] = None) -> list[str]: + def get_enabled_features(self, user_id: str, attributes: Optional[UserAttributes] = None) -> list[str]: """ Returns the list of features that are enabled for the user. 
Args: @@ -747,7 +752,7 @@ def get_enabled_features(self, user_id: str, attributes: Optional[dict[str, Any] return enabled_features def get_feature_variable( - self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Any: """ Returns value for a variable attached to a feature flag. @@ -770,7 +775,7 @@ def get_feature_variable( return self._get_feature_variable_for_type(project_config, feature_key, variable_key, None, user_id, attributes) def get_feature_variable_boolean( - self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[bool]: """ Returns value for a certain boolean variable attached to a feature flag. @@ -798,7 +803,7 @@ def get_feature_variable_boolean( ) def get_feature_variable_double( - self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[float]: """ Returns value for a certain double variable attached to a feature flag. @@ -826,7 +831,7 @@ def get_feature_variable_double( ) def get_feature_variable_integer( - self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[int]: """ Returns value for a certain integer variable attached to a feature flag. 
@@ -854,7 +859,7 @@ def get_feature_variable_integer( ) def get_feature_variable_string( - self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[str]: """ Returns value for a certain string variable attached to a feature. @@ -882,7 +887,7 @@ def get_feature_variable_string( ) def get_feature_variable_json( - self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, feature_key: str, variable_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[dict[str, Any]]: """ Returns value for a certain JSON variable attached to a feature. @@ -910,7 +915,7 @@ def get_feature_variable_json( ) def get_all_feature_variables( - self, feature_key: str, user_id: str, attributes: Optional[dict[str, Any]] = None + self, feature_key: str, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[dict[str, Any]]: """ Returns dictionary of all variables and their corresponding values in the context of a feature. @@ -1014,12 +1019,12 @@ def get_optimizely_config(self) -> Optional[OptimizelyConfig]: # Customized Config Manager may not have optimizely_config defined. 
if hasattr(self.config_manager, 'optimizely_config'): - return self.config_manager.optimizely_config # type: ignore + return self.config_manager.optimizely_config return OptimizelyConfigService(project_config).get_config() def create_user_context( - self, user_id: str, attributes: Optional[dict[str, Any]] = None + self, user_id: str, attributes: Optional[UserAttributes] = None ) -> Optional[OptimizelyUserContext]: """ We do not check for is_valid here as a user context can be created successfully @@ -1044,7 +1049,7 @@ def create_user_context( def _decide( self, user_context: Optional[OptimizelyUserContext], key: str, - decide_options: Optional[Sequence[OptimizelyDecideOption | str]] = None + decide_options: Optional[list[str]] = None ) -> OptimizelyDecision: """ decide calls optimizely decide with feature key provided diff --git a/optimizely/optimizely_factory.py b/optimizely/optimizely_factory.py index a5ff2995..5060780e 100644 --- a/optimizely/optimizely_factory.py +++ b/optimizely/optimizely_factory.py @@ -1,4 +1,4 @@ -# Copyright 2021, Optimizely +# Copyright 2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,26 +10,33 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import TYPE_CHECKING, Optional + from . 
import logger as optimizely_logger -from .config_manager import PollingConfigManager -from .error_handler import NoOpErrorHandler +from .config_manager import BaseConfigManager, PollingConfigManager +from .error_handler import BaseErrorHandler, NoOpErrorHandler from .event.event_processor import BatchEventProcessor -from .event_dispatcher import EventDispatcher +from .event_dispatcher import EventDispatcher, CustomEventDispatcher from .notification_center import NotificationCenter from .optimizely import Optimizely +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .user_profile import UserProfileService + class OptimizelyFactory: """ Optimizely factory to provides basic utility to instantiate the Optimizely SDK with a minimal number of configuration options.""" - max_event_batch_size = None - max_event_flush_interval = None - polling_interval = None - blocking_timeout = None + max_event_batch_size: Optional[int] = None + max_event_flush_interval: Optional[int] = None + polling_interval: Optional[float] = None + blocking_timeout: Optional[int] = None @staticmethod - def set_batch_size(batch_size): + def set_batch_size(batch_size: int) -> int: """ Convenience method for setting the maximum number of events contained within a batch. Args: batch_size: Sets size of event_queue. @@ -39,7 +46,7 @@ def set_batch_size(batch_size): return OptimizelyFactory.max_event_batch_size @staticmethod - def set_flush_interval(flush_interval): + def set_flush_interval(flush_interval: int) -> int: """ Convenience method for setting the maximum time interval in milliseconds between event dispatches. Args: flush_interval: Time interval between event dispatches. @@ -49,7 +56,7 @@ def set_flush_interval(flush_interval): return OptimizelyFactory.max_event_flush_interval @staticmethod - def set_polling_interval(polling_interval): + def set_polling_interval(polling_interval: int) -> int: """ Method to set frequency at which datafile has to be polled. 
Args: polling_interval: Time in seconds after which to update datafile. @@ -58,7 +65,7 @@ def set_polling_interval(polling_interval): return OptimizelyFactory.polling_interval @staticmethod - def set_blocking_timeout(blocking_timeout): + def set_blocking_timeout(blocking_timeout: int) -> int: """ Method to set time in seconds to block the config call until config has been initialized. Args: blocking_timeout: Time in seconds to block the config call. @@ -67,7 +74,7 @@ def set_blocking_timeout(blocking_timeout): return OptimizelyFactory.blocking_timeout @staticmethod - def default_instance(sdk_key, datafile=None): + def default_instance(sdk_key: str, datafile: Optional[str] = None) -> Optimizely: """ Returns a new optimizely instance.. Args: sdk_key: Required string uniquely identifying the fallback datafile corresponding to project. @@ -77,17 +84,15 @@ def default_instance(sdk_key, datafile=None): logger = optimizely_logger.NoOpLogger() notification_center = NotificationCenter(logger) - config_manager_options = { - 'sdk_key': sdk_key, - 'update_interval': OptimizelyFactory.polling_interval, - 'blocking_timeout': OptimizelyFactory.blocking_timeout, - 'datafile': datafile, - 'logger': logger, - 'error_handler': error_handler, - 'notification_center': notification_center, - } - - config_manager = PollingConfigManager(**config_manager_options) + config_manager = PollingConfigManager( + sdk_key=sdk_key, + update_interval=OptimizelyFactory.polling_interval, + blocking_timeout=OptimizelyFactory.blocking_timeout, + datafile=datafile, + logger=logger, + error_handler=error_handler, + notification_center=notification_center + ) event_processor = BatchEventProcessor( event_dispatcher=EventDispatcher(), @@ -104,15 +109,23 @@ def default_instance(sdk_key, datafile=None): return optimizely @staticmethod - def default_instance_with_config_manager(config_manager): + def default_instance_with_config_manager(config_manager: BaseConfigManager) -> Optimizely: return Optimizely( 
config_manager=config_manager ) @staticmethod - def custom_instance(sdk_key, datafile=None, event_dispatcher=None, logger=None, error_handler=None, - skip_json_validation=None, user_profile_service=None, config_manager=None, - notification_center=None): + def custom_instance( + sdk_key: str, + datafile: Optional[str] = None, + event_dispatcher: Optional[CustomEventDispatcher] = None, + logger: Optional[optimizely_logger.Logger] = None, + error_handler: Optional[BaseErrorHandler] = None, + skip_json_validation: Optional[bool] = None, + user_profile_service: Optional[UserProfileService] = None, + config_manager: Optional[BaseConfigManager] = None, + notification_center: Optional[NotificationCenter] = None + ) -> Optimizely: """ Returns a new optimizely instance. if max_event_batch_size and max_event_flush_interval are None then default batch_size and flush_interval will be used to setup BatchEventProcessor. @@ -146,17 +159,16 @@ def custom_instance(sdk_key, datafile=None, event_dispatcher=None, logger=None, notification_center=notification_center, ) - config_manager_options = { - 'sdk_key': sdk_key, - 'update_interval': OptimizelyFactory.polling_interval, - 'blocking_timeout': OptimizelyFactory.blocking_timeout, - 'datafile': datafile, - 'logger': logger, - 'error_handler': error_handler, - 'skip_json_validation': skip_json_validation, - 'notification_center': notification_center, - } - config_manager = config_manager or PollingConfigManager(**config_manager_options) + config_manager = config_manager or PollingConfigManager( + sdk_key=sdk_key, + update_interval=OptimizelyFactory.polling_interval, + blocking_timeout=OptimizelyFactory.blocking_timeout, + datafile=datafile, + logger=logger, + error_handler=error_handler, + skip_json_validation=skip_json_validation, + notification_center=notification_center, + ) return Optimizely( datafile, event_dispatcher, logger, error_handler, skip_json_validation, user_profile_service, diff --git 
a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index 2a0e0ee2..c5d769f5 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -15,11 +15,19 @@ from __future__ import annotations import copy import threading -from typing import Any, Optional +from typing import TYPE_CHECKING, Any, Optional, NewType, Dict from optimizely.decision import optimizely_decision -from . import optimizely -from .logger import Logger + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from . import optimizely + from optimizely.helpers.event_tag_utils import EventTags + from .logger import Logger + + +# type for tracking user attributes (essentially a sub-type of dict) +UserAttributes = NewType('UserAttributes', Dict[str, Any]) class OptimizelyUserContext: @@ -29,7 +37,7 @@ class OptimizelyUserContext: def __init__( self, optimizely_client: optimizely.Optimizely, logger: Logger, - user_id: str, user_attributes: Optional[dict[str, Any]] = None + user_id: str, user_attributes: Optional[UserAttributes] = None ): """ Create an instance of the Optimizely User Context. 
@@ -48,9 +56,9 @@ def __init__( self.user_id = user_id if not isinstance(user_attributes, dict): - user_attributes = {} + user_attributes = UserAttributes({}) - self._user_attributes = user_attributes.copy() if user_attributes else {} + self._user_attributes = UserAttributes(user_attributes.copy() if user_attributes else {}) self.lock = threading.Lock() self.forced_decisions_map: dict[ OptimizelyUserContext.OptimizelyDecisionContext, @@ -70,7 +78,7 @@ def __init__(self, flag_key: str, rule_key: Optional[str] = None): def __hash__(self) -> int: return hash((self.flag_key, self.rule_key)) - def __eq__(self, other: OptimizelyUserContext.OptimizelyDecisionContext) -> bool: # type: ignore + def __eq__(self, other: OptimizelyUserContext.OptimizelyDecisionContext) -> bool: # type: ignore[override] return (self.flag_key, self.rule_key) == (other.flag_key, other.rule_key) # forced decision @@ -90,9 +98,9 @@ def _clone(self) -> Optional[OptimizelyUserContext]: return user_context - def get_user_attributes(self) -> dict[str, Any]: + def get_user_attributes(self) -> UserAttributes: with self.lock: - return self._user_attributes.copy() + return UserAttributes(self._user_attributes.copy()) def set_attribute(self, attribute_key: str, attribute_value: Any) -> None: """ @@ -155,7 +163,7 @@ def decide_all(self, options: Optional[list[str]] = None) -> dict[str, optimizel return self.client._decide_all(self._clone(), options) - def track_event(self, event_key: str, event_tags: Optional[dict[str, Any]] = None) -> None: + def track_event(self, event_key: str, event_tags: Optional[EventTags] = None) -> None: return self.client.track(event_key, self.user_id, self.get_user_attributes(), event_tags) def as_json(self) -> dict[str, Any]: diff --git a/optimizely/project_config.py b/optimizely/project_config.py index 9c0afe7a..7fbbdf99 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -1,4 +1,4 @@ -# Copyright 2016-2019, 2021, Optimizely +# Copyright 2016-2019, 
2021-2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -10,14 +10,26 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from __future__ import annotations import json -from collections import OrderedDict +from typing import TYPE_CHECKING, Optional, Type, TypeVar, cast, Any, Iterable, List +from sys import version_info from . import entities from . import exceptions from .helpers import condition as condition_helper from .helpers import enums +from .helpers import types + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + +if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .logger import Logger + SUPPORTED_VERSIONS = [ enums.DatafileVersions.V2, @@ -25,13 +37,15 @@ enums.DatafileVersions.V4, ] -RESERVED_ATTRIBUTE_PREFIX = '$opt_' +RESERVED_ATTRIBUTE_PREFIX: Final = '$opt_' + +EntityClass = TypeVar('EntityClass') class ProjectConfig: """ Representation of the Optimizely project config. """ - def __init__(self, datafile, logger, error_handler): + def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): """ ProjectConfig init method to load and set project config data. 
Args: @@ -44,36 +58,42 @@ def __init__(self, datafile, logger, error_handler): self._datafile = datafile.decode('utf-8') if isinstance(datafile, bytes) else datafile self.logger = logger self.error_handler = error_handler - self.version = config.get('version') + self.version: str = config.get('version') if self.version not in SUPPORTED_VERSIONS: raise exceptions.UnsupportedDatafileVersionException( enums.Errors.UNSUPPORTED_DATAFILE_VERSION.format(self.version) ) - self.account_id = config.get('accountId') - self.project_id = config.get('projectId') - self.revision = config.get('revision') - self.sdk_key = config.get('sdkKey', None) - self.environment_key = config.get('environmentKey', None) - self.groups = config.get('groups', []) - self.experiments = config.get('experiments', []) - self.events = config.get('events', []) - self.attributes = config.get('attributes', []) - self.audiences = config.get('audiences', []) - self.typed_audiences = config.get('typedAudiences', []) - self.feature_flags = config.get('featureFlags', []) - self.rollouts = config.get('rollouts', []) - self.anonymize_ip = config.get('anonymizeIP', False) - self.send_flag_decisions = config.get('sendFlagDecisions', False) - self.bot_filtering = config.get('botFiltering', None) + self.account_id: str = config.get('accountId') + self.project_id: str = config.get('projectId') + self.revision: str = config.get('revision') + self.sdk_key: Optional[str] = config.get('sdkKey', None) + self.environment_key: Optional[str] = config.get('environmentKey', None) + self.groups: list[types.GroupDict] = config.get('groups', []) + self.experiments: list[types.ExperimentDict] = config.get('experiments', []) + self.events: list[types.EventDict] = config.get('events', []) + self.attributes: list[types.AttributeDict] = config.get('attributes', []) + self.audiences: list[types.AudienceDict] = config.get('audiences', []) + self.typed_audiences: list[types.AudienceDict] = config.get('typedAudiences', []) + 
self.feature_flags: list[types.FeatureFlagDict] = config.get('featureFlags', []) + self.rollouts: list[types.RolloutDict] = config.get('rollouts', []) + self.anonymize_ip: bool = config.get('anonymizeIP', False) + self.send_flag_decisions: bool = config.get('sendFlagDecisions', False) + self.bot_filtering: Optional[bool] = config.get('botFiltering', None) # Utility maps for quick lookup - self.group_id_map = self._generate_key_map(self.groups, 'id', entities.Group) - self.experiment_id_map = self._generate_key_map(self.experiments, 'id', entities.Experiment) - self.event_key_map = self._generate_key_map(self.events, 'key', entities.Event) - self.attribute_key_map = self._generate_key_map(self.attributes, 'key', entities.Attribute) + self.group_id_map: dict[str, entities.Group] = self._generate_key_map(self.groups, 'id', entities.Group) + self.experiment_id_map: dict[str, entities.Experiment] = self._generate_key_map( + self.experiments, 'id', entities.Experiment + ) + self.event_key_map: dict[str, entities.Event] = self._generate_key_map(self.events, 'key', entities.Event) + self.attribute_key_map: dict[str, entities.Attribute] = self._generate_key_map( + self.attributes, 'key', entities.Attribute + ) - self.audience_id_map = self._generate_key_map(self.audiences, 'id', entities.Audience) + self.audience_id_map: dict[str, entities.Audience] = self._generate_key_map( + self.audiences, 'id', entities.Audience + ) # Conditions of audiences in typedAudiences are not expected # to be string-encoded as they are in audiences. 
@@ -84,8 +104,8 @@ def __init__(self, datafile, logger, error_handler): self.rollout_id_map = self._generate_key_map(self.rollouts, 'id', entities.Layer) for layer in self.rollout_id_map.values(): - for experiment in layer.experiments: - self.experiment_id_map[experiment['id']] = entities.Experiment(**experiment) + for experiment_dict in layer.experiments: + self.experiment_id_map[experiment_dict['id']] = entities.Experiment(**experiment_dict) self.audience_id_map = self._deserialize_audience(self.audience_id_map) for group in self.group_id_map.values(): @@ -94,13 +114,13 @@ def __init__(self, datafile, logger, error_handler): experiment.__dict__.update({'groupId': group.id, 'groupPolicy': group.policy}) self.experiment_id_map.update(experiments_in_group_id_map) - self.experiment_key_map = {} - self.variation_key_map = {} - self.variation_id_map = {} - self.variation_variable_usage_map = {} - self.variation_id_map_by_experiment_id = {} - self.variation_key_map_by_experiment_id = {} - self.flag_variations_map = {} + self.experiment_key_map: dict[str, entities.Experiment] = {} + self.variation_key_map: dict[str, dict[str, entities.Variation]] = {} + self.variation_id_map: dict[str, dict[str, entities.Variation]] = {} + self.variation_variable_usage_map: dict[str, dict[str, entities.Variation.VariableUsage]] = {} + self.variation_id_map_by_experiment_id: dict[str, dict[str, entities.Variation]] = {} + self.variation_key_map_by_experiment_id: dict[str, dict[str, entities.Variation]] = {} + self.flag_variations_map: dict[str, list[entities.Variation]] = {} for experiment in self.experiment_id_map.values(): self.experiment_key_map[experiment.key] = experiment @@ -112,7 +132,7 @@ def __init__(self, datafile, logger, error_handler): self.variation_id_map_by_experiment_id[experiment.id] = {} self.variation_key_map_by_experiment_id[experiment.id] = {} - for variation in self.variation_key_map.get(experiment.key).values(): + for variation in 
self.variation_key_map[experiment.key].values(): self.variation_id_map[experiment.key][variation.id] = variation self.variation_id_map_by_experiment_id[experiment.id][variation.id] = variation self.variation_key_map_by_experiment_id[experiment.id][variation.key] = variation @@ -124,20 +144,20 @@ def __init__(self, datafile, logger, error_handler): # Dictionary containing dictionary of experiment ID to feature ID. # for checking that experiment is a feature experiment or not. - self.experiment_feature_map = {} + self.experiment_feature_map: dict[str, list[str]] = {} for feature in self.feature_key_map.values(): # As we cannot create json variables in datafile directly, here we convert # the variables of string type and json subType to json type # This is needed to fully support json variables - for variable in self.feature_key_map[feature.key].variables: + for variable in cast(List[types.VariableDict], self.feature_key_map[feature.key].variables): sub_type = variable.get('subType', '') if variable['type'] == entities.Variable.Type.STRING and sub_type == entities.Variable.Type.JSON: variable['type'] = entities.Variable.Type.JSON feature.variables = self._generate_key_map(feature.variables, 'key', entities.Variable) - rules = [] - variations = [] + rules: list[entities.Experiment] = [] + variations: list[entities.Variation] = [] for exp_id in feature.experimentIds: # Add this experiment in experiment-feature map. 
self.experiment_feature_map[exp_id] = [feature.id] @@ -150,13 +170,15 @@ def __init__(self, datafile, logger, error_handler): for rule in rules: # variation_id_map_by_experiment_id gives variation entity object while # experiment_id_map will give us dictionary - for rule_variation in self.variation_id_map_by_experiment_id.get(rule.id).values(): + for rule_variation in self.variation_id_map_by_experiment_id[rule.id].values(): if len(list(filter(lambda variation: variation.id == rule_variation.id, variations))) == 0: variations.append(rule_variation) self.flag_variations_map[feature.key] = variations @staticmethod - def _generate_key_map(entity_list, key, entity_class): + def _generate_key_map( + entity_list: Iterable[Any], key: str, entity_class: Type[EntityClass] + ) -> dict[str, EntityClass]: """ Helper method to generate map from key to entity object for given list of dicts. Args: @@ -168,17 +190,14 @@ def _generate_key_map(entity_list, key, entity_class): Map mapping key to entity object. """ - # using ordered dict here to preserve insertion order of entities - # OrderedDict() is needed for Py versions 3.5 and less to work. - # Insertion order has been made default in dicts since Py 3.6 - key_map = OrderedDict() + key_map = {} for obj in entity_list: key_map[obj[key]] = entity_class(**obj) return key_map @staticmethod - def _deserialize_audience(audience_map): + def _deserialize_audience(audience_map: dict[str, entities.Audience]) -> dict[str, entities.Audience]: """ Helper method to de-serialize and populate audience map with the condition list and structure. Args: @@ -194,7 +213,7 @@ def _deserialize_audience(audience_map): return audience_map - def get_rollout_experiments(self, rollout): + def get_rollout_experiments(self, rollout: entities.Layer) -> list[entities.Experiment]: """ Helper method to get rollout experiments. 
Args: @@ -209,7 +228,7 @@ def get_rollout_experiments(self, rollout): return rollout_experiments - def get_typecast_value(self, value, type): + def get_typecast_value(self, value: str, type: str) -> Any: """ Helper method to determine actual value based on type of feature variable. Args: @@ -231,7 +250,7 @@ def get_typecast_value(self, value, type): else: return value - def to_datafile(self): + def to_datafile(self) -> str: """ Get the datafile corresponding to ProjectConfig. Returns: @@ -240,7 +259,7 @@ def to_datafile(self): return self._datafile - def get_version(self): + def get_version(self) -> str: """ Get version of the datafile. Returns: @@ -249,7 +268,7 @@ def get_version(self): return self.version - def get_revision(self): + def get_revision(self) -> str: """ Get revision of the datafile. Returns: @@ -258,7 +277,7 @@ def get_revision(self): return self.revision - def get_sdk_key(self): + def get_sdk_key(self) -> Optional[str]: """ Get sdk key from the datafile. Returns: @@ -267,7 +286,7 @@ def get_sdk_key(self): return self.sdk_key - def get_environment_key(self): + def get_environment_key(self) -> Optional[str]: """ Get environment key from the datafile. Returns: @@ -276,7 +295,7 @@ def get_environment_key(self): return self.environment_key - def get_account_id(self): + def get_account_id(self) -> str: """ Get account ID from the config. Returns: @@ -285,7 +304,7 @@ def get_account_id(self): return self.account_id - def get_project_id(self): + def get_project_id(self) -> str: """ Get project ID from the config. Returns: @@ -294,7 +313,7 @@ def get_project_id(self): return self.project_id - def get_experiment_from_key(self, experiment_key): + def get_experiment_from_key(self, experiment_key: str) -> Optional[entities.Experiment]: """ Get experiment for the provided experiment key. 
Args: @@ -313,7 +332,7 @@ def get_experiment_from_key(self, experiment_key): self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_experiment_from_id(self, experiment_id): + def get_experiment_from_id(self, experiment_id: str) -> Optional[entities.Experiment]: """ Get experiment for the provided experiment ID. Args: @@ -332,7 +351,7 @@ def get_experiment_from_id(self, experiment_id): self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_group(self, group_id): + def get_group(self, group_id: Optional[str]) -> Optional[entities.Group]: """ Get group for the provided group ID. Args: @@ -342,7 +361,7 @@ def get_group(self, group_id): Group corresponding to the provided group ID. """ - group = self.group_id_map.get(group_id) + group = self.group_id_map.get(group_id) # type: ignore[arg-type] if group: return group @@ -351,7 +370,7 @@ def get_group(self, group_id): self.error_handler.handle_error(exceptions.InvalidGroupException(enums.Errors.INVALID_GROUP_ID)) return None - def get_audience(self, audience_id): + def get_audience(self, audience_id: str) -> Optional[entities.Audience]: """ Get audience object for the provided audience ID. Args: @@ -367,8 +386,9 @@ def get_audience(self, audience_id): self.logger.error(f'Audience ID "{audience_id}" is not in datafile.') self.error_handler.handle_error(exceptions.InvalidAudienceException((enums.Errors.INVALID_AUDIENCE))) + return None - def get_variation_from_key(self, experiment_key, variation_key): + def get_variation_from_key(self, experiment_key: str, variation_key: str) -> Optional[entities.Variation]: """ Get variation given experiment and variation key. 
Args: @@ -395,7 +415,7 @@ def get_variation_from_key(self, experiment_key, variation_key): self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_variation_from_id(self, experiment_key, variation_id): + def get_variation_from_id(self, experiment_key: str, variation_id: str) -> Optional[entities.Variation]: """ Get variation given experiment and variation ID. Args: @@ -421,7 +441,7 @@ def get_variation_from_id(self, experiment_key, variation_id): self.error_handler.handle_error(exceptions.InvalidExperimentException(enums.Errors.INVALID_EXPERIMENT_KEY)) return None - def get_event(self, event_key): + def get_event(self, event_key: str) -> Optional[entities.Event]: """ Get event for the provided event key. Args: @@ -440,7 +460,7 @@ def get_event(self, event_key): self.error_handler.handle_error(exceptions.InvalidEventException(enums.Errors.INVALID_EVENT_KEY)) return None - def get_attribute_id(self, attribute_key): + def get_attribute_id(self, attribute_key: str) -> Optional[str]: """ Get attribute ID for the provided attribute key. Args: @@ -471,7 +491,7 @@ def get_attribute_id(self, attribute_key): self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE)) return None - def get_feature_from_key(self, feature_key): + def get_feature_from_key(self, feature_key: str) -> Optional[entities.FeatureFlag]: """ Get feature for the provided feature key. Args: @@ -489,7 +509,7 @@ def get_feature_from_key(self, feature_key): self.logger.error(f'Feature "{feature_key}" is not in datafile.') return None - def get_rollout_from_id(self, rollout_id): + def get_rollout_from_id(self, rollout_id: str) -> Optional[entities.Layer]: """ Get rollout for the provided ID. 
Args: @@ -507,7 +527,9 @@ def get_rollout_from_id(self, rollout_id): self.logger.error(f'Rollout with ID "{rollout_id}" is not in datafile.') return None - def get_variable_value_for_variation(self, variable, variation): + def get_variable_value_for_variation( + self, variable: Optional[entities.Variable], variation: Optional[entities.Variation] + ) -> Optional[str]: """ Get the variable value for the given variation. Args: @@ -540,7 +562,7 @@ def get_variable_value_for_variation(self, variable, variation): return variable_value - def get_variable_for_feature(self, feature_key, variable_key): + def get_variable_for_feature(self, feature_key: str, variable_key: str) -> Optional[entities.Variable]: """ Get the variable with the given variable key for the given feature. Args: @@ -562,7 +584,7 @@ def get_variable_for_feature(self, feature_key, variable_key): return feature.variables.get(variable_key) - def get_anonymize_ip_value(self): + def get_anonymize_ip_value(self) -> bool: """ Gets the anonymize IP value. Returns: @@ -571,7 +593,7 @@ def get_anonymize_ip_value(self): return self.anonymize_ip - def get_send_flag_decisions_value(self): + def get_send_flag_decisions_value(self) -> bool: """ Gets the Send Flag Decisions value. Returns: @@ -580,7 +602,7 @@ def get_send_flag_decisions_value(self): return self.send_flag_decisions - def get_bot_filtering_value(self): + def get_bot_filtering_value(self) -> Optional[bool]: """ Gets the bot filtering value. Returns: @@ -589,7 +611,7 @@ def get_bot_filtering_value(self): return self.bot_filtering - def is_feature_experiment(self, experiment_id): + def is_feature_experiment(self, experiment_id: str) -> bool: """ Determines if given experiment is a feature test. 
Args: @@ -601,12 +623,14 @@ def is_feature_experiment(self, experiment_id): return experiment_id in self.experiment_feature_map - def get_variation_from_id_by_experiment_id(self, experiment_id, variation_id): + def get_variation_from_id_by_experiment_id( + self, experiment_id: str, variation_id: str + ) -> Optional[entities.Variation]: """ Gets variation from variation id and specific experiment id Returns: The variation for the experiment id and variation id - or empty dict if not found + or None if not found """ if (experiment_id in self.variation_id_map_by_experiment_id and variation_id in self.variation_id_map_by_experiment_id[experiment_id]): @@ -616,14 +640,16 @@ def get_variation_from_id_by_experiment_id(self, experiment_id, variation_id): f'Variation with id "{variation_id}" not defined in the datafile for experiment "{experiment_id}".' ) - return {} + return None - def get_variation_from_key_by_experiment_id(self, experiment_id, variation_key): + def get_variation_from_key_by_experiment_id( + self, experiment_id: str, variation_key: str + ) -> Optional[entities.Variation]: """ Gets variation from variation key and specific experiment id Returns: The variation for the experiment id and variation key - or empty dict if not found + or None if not found """ if (experiment_id in self.variation_key_map_by_experiment_id and variation_key in self.variation_key_map_by_experiment_id[experiment_id]): @@ -633,9 +659,11 @@ def get_variation_from_key_by_experiment_id(self, experiment_id, variation_key): f'Variation with key "{variation_key}" not defined in the datafile for experiment "{experiment_id}".' ) - return {} + return None - def get_flag_variation(self, flag_key, variation_attribute, target_value): + def get_flag_variation( + self, flag_key: str, variation_attribute: str, target_value: str + ) -> Optional[entities.Variation]: """ Gets variation by specified variation attribute. 
For example if variation_attribute is id, the function gets variation by using variation_id. diff --git a/optimizely/user_profile.py b/optimizely/user_profile.py index 2ff9e038..0410bcf7 100644 --- a/optimizely/user_profile.py +++ b/optimizely/user_profile.py @@ -1,4 +1,4 @@ -# Copyright 2017, Optimizely +# Copyright 2017, 2022, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,6 +11,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import annotations +from typing import Any, Optional +from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + class UserProfile: """ Class encapsulating information representing a user's profile. @@ -20,18 +29,23 @@ class UserProfile: variation ID identifying the variation for the user. """ - USER_ID_KEY = 'user_id' - EXPERIMENT_BUCKET_MAP_KEY = 'experiment_bucket_map' - VARIATION_ID_KEY = 'variation_id' + USER_ID_KEY: Final = 'user_id' + EXPERIMENT_BUCKET_MAP_KEY: Final = 'experiment_bucket_map' + VARIATION_ID_KEY: Final = 'variation_id' - def __init__(self, user_id, experiment_bucket_map=None, **kwargs): + def __init__( + self, + user_id: str, + experiment_bucket_map: Optional[dict[str, dict[str, Optional[str]]]] = None, + **kwargs: Any + ): self.user_id = user_id self.experiment_bucket_map = experiment_bucket_map or {} - def __eq__(self, other): + def __eq__(self, other: object) -> bool: return self.__dict__ == other.__dict__ - def get_variation_for_experiment(self, experiment_id): + def get_variation_for_experiment(self, experiment_id: str) -> Optional[str]: """ Helper method to retrieve variation ID for given experiment. 
Args: @@ -43,7 +57,7 @@ def get_variation_for_experiment(self, experiment_id): return self.experiment_bucket_map.get(experiment_id, {self.VARIATION_ID_KEY: None}).get(self.VARIATION_ID_KEY) - def save_variation_for_experiment(self, experiment_id, variation_id): + def save_variation_for_experiment(self, experiment_id: str, variation_id: str) -> None: """ Helper method to save new experiment/variation as part of the user's profile. Args: @@ -58,7 +72,7 @@ class UserProfileService: """ Class encapsulating user profile service functionality. Override with your own implementation for storing and retrieving the user profile. """ - def lookup(self, user_id): + def lookup(self, user_id: str) -> dict[str, Any]: """ Fetch the user profile dict corresponding to the user ID. Args: @@ -69,7 +83,7 @@ def lookup(self, user_id): """ return UserProfile(user_id).__dict__ - def save(self, user_profile): + def save(self, user_profile: dict[str, Any]) -> None: """ Save the user profile dict sent to this method. 
Args: diff --git a/tests/test_config.py b/tests/test_config.py index bf324052..47cce405 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -1239,6 +1239,18 @@ def test_get_variation_from_id_by_experiment_id(self): self.assertIsInstance(variation, entities.Variation) + def test_get_variation_from_id_by_experiment_id_missing(self): + + opt_obj = optimizely.Optimizely(json.dumps(self.config_dict)) + project_config = opt_obj.config_manager.get_config() + + experiment_id = '111127' + variation_id = 'missing' + + variation = project_config.get_variation_from_id_by_experiment_id(experiment_id, variation_id) + + self.assertIsNone(variation) + def test_get_variation_from_key_by_experiment_id(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict)) @@ -1250,3 +1262,15 @@ def test_get_variation_from_key_by_experiment_id(self): variation = project_config.get_variation_from_key_by_experiment_id(experiment_id, variation_key) self.assertIsInstance(variation, entities.Variation) + + def test_get_variation_from_key_by_experiment_id_missing(self): + + opt_obj = optimizely.Optimizely(json.dumps(self.config_dict)) + project_config = opt_obj.config_manager.get_config() + + experiment_id = '111127' + variation_key = 'missing' + + variation = project_config.get_variation_from_key_by_experiment_id(experiment_id, variation_key) + + self.assertIsNone(variation) From ec3d846c922235777304d42b200af7ed9737ea49 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Wed, 13 Jul 2022 16:34:51 -0400 Subject: [PATCH 12/68] docs: fix readme badge (#392) * switch build badge from travis to github actions --- .github/workflows/python.yml | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 798648d1..7e17c5ff 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -1,7 +1,7 @@ # This workflow will install 
Python dependencies, run tests and lint with a variety of Python versions # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: Python package +name: build on: push: diff --git a/README.md b/README.md index 70dd0771..f2013e68 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ # Optimizely Python SDK [![PyPI version](https://badge.fury.io/py/optimizely-sdk.svg)](https://pypi.org/project/optimizely-sdk) -[![Build Status](https://travis-ci.org/optimizely/python-sdk.svg?branch=master)](https://travis-ci.org/optimizely/python-sdk) +[![Build Status](https://github.com/optimizely/python-sdk/actions/workflows/python.yml/badge.svg?branch=master)](https://github.com/optimizely/python-sdk/actions/workflows/python.yml?query=branch%3Amaster) [![Coverage Status](https://coveralls.io/repos/github/optimizely/python-sdk/badge.svg)](https://coveralls.io/github/optimizely/python-sdk) [![Documentation Status](https://readthedocs.org/projects/optimizely-python-sdk/badge/?version=latest)](https://optimizely-python-sdk.readthedocs.io/en/latest/?badge=latest) [![Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0) From d9c7905a2b013b300152915039f7e322013762a2 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Wed, 20 Jul 2022 10:06:29 -0400 Subject: [PATCH 13/68] feat: odp datafile parsing and audience evaluation (#393) * swap user attributes for user_context * add integrations * add qualified segments --- optimizely/decision_service.py | 15 +- optimizely/entities.py | 17 + optimizely/helpers/audience.py | 7 +- optimizely/helpers/condition.py | 37 +- optimizely/helpers/constants.py | 8 + optimizely/helpers/types.py | 29 +- optimizely/optimizely_user_context.py | 41 ++ optimizely/project_config.py | 14 + tests/base.py | 190 +++++++++ tests/helpers_tests/test_audience.py | 116 +++--- 
tests/helpers_tests/test_condition.py | 541 ++++++++++++++++---------- tests/helpers_tests/test_validator.py | 5 + tests/test_config.py | 61 +++ tests/test_decision_service.py | 28 +- tests/test_optimizely.py | 2 +- tests/test_user_context.py | 64 ++- 16 files changed, 856 insertions(+), 319 deletions(-) diff --git a/optimizely/decision_service.py b/optimizely/decision_service.py index 15532fe0..72254ce9 100644 --- a/optimizely/decision_service.py +++ b/optimizely/decision_service.py @@ -268,7 +268,6 @@ def get_variation( And an array of log messages representing decision making. """ user_id = user_context.user_id - attributes = user_context.get_user_attributes() if options: ignore_user_profile = OptimizelyDecideOption.IGNORE_USER_PROFILE_SERVICE in options @@ -323,7 +322,7 @@ def get_variation( project_config, audience_conditions, enums.ExperimentAudienceEvaluationLogs, experiment.key, - attributes, self.logger) + user_context, self.logger) decide_reasons += reasons_received if not user_meets_audience_conditions: message = f'User "{user_id}" does not meet conditions to be in experiment "{experiment.key}".' 
@@ -332,7 +331,7 @@ def get_variation( return None, decide_reasons # Determine bucketing ID to be used - bucketing_id, bucketing_id_reasons = self._get_bucketing_id(user_id, attributes) + bucketing_id, bucketing_id_reasons = self._get_bucketing_id(user_id, user_context.get_user_attributes()) decide_reasons += bucketing_id_reasons variation, bucket_reasons = self.bucketer.bucket(project_config, experiment, user_id, bucketing_id) decide_reasons += bucket_reasons @@ -354,7 +353,7 @@ def get_variation( return None, decide_reasons def get_variation_for_rollout( - self, project_config: ProjectConfig, feature: entities.FeatureFlag, user: OptimizelyUserContext + self, project_config: ProjectConfig, feature: entities.FeatureFlag, user_context: OptimizelyUserContext ) -> tuple[Decision, list[str]]: """ Determine which experiment/variation the user is in for a given rollout. Returns the variation of the first experiment the user qualifies for. @@ -371,8 +370,8 @@ def get_variation_for_rollout( array of log messages representing decision making. 
""" decide_reasons: list[str] = [] - user_id = user.user_id - attributes = user.get_user_attributes() + user_id = user_context.user_id + attributes = user_context.get_user_attributes() if not feature or not feature.rolloutId: return Decision(None, None, enums.DecisionSources.ROLLOUT), decide_reasons @@ -401,7 +400,7 @@ def get_variation_for_rollout( rule = rollout_rules[index] optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(feature.key, rule.key) forced_decision_variation, reasons_received = self.validated_forced_decision( - project_config, optimizely_decision_context, user) + project_config, optimizely_decision_context, user_context) decide_reasons += reasons_received if forced_decision_variation: @@ -422,7 +421,7 @@ def get_variation_for_rollout( audience_decision_response, reasons_received_audience = audience_helper.does_user_meet_audience_conditions( project_config, audience_conditions, enums.RolloutRuleAudienceEvaluationLogs, - logging_key, attributes, self.logger) + logging_key, user_context, self.logger) decide_reasons += reasons_received_audience diff --git a/optimizely/entities.py b/optimizely/entities.py index c0eb602a..63b54f68 100644 --- a/optimizely/entities.py +++ b/optimizely/entities.py @@ -52,6 +52,16 @@ def __init__( self.conditionStructure = conditionStructure self.conditionList = conditionList + def get_segments(self) -> list[str]: + """ Extract all audience segments used in the this audience's conditions. + + Returns: + List of segment names. 
+ """ + if not self.conditionList: + return [] + return list({c[1] for c in self.conditionList if c[3] == 'qualified'}) + class Event(BaseEntity): def __init__(self, id: str, key: str, experimentIds: list[str], **kwargs: Any): @@ -175,3 +185,10 @@ def __init__( def __str__(self) -> str: return self.key + + +class Integration(BaseEntity): + def __init__(self, key: str, host: Optional[str] = None, publicKey: Optional[str] = None): + self.key = key + self.host = host + self.publicKey = publicKey diff --git a/optimizely/helpers/audience.py b/optimizely/helpers/audience.py index 39ec69c5..190a38f8 100644 --- a/optimizely/helpers/audience.py +++ b/optimizely/helpers/audience.py @@ -31,7 +31,7 @@ def does_user_meet_audience_conditions( audience_conditions: Optional[Sequence[str | list[str]]], audience_logs: Type[ExperimentAudienceEvaluationLogs | RolloutRuleAudienceEvaluationLogs], logging_key: str, - attributes: Optional[optimizely_user_context.UserAttributes], + user_context: optimizely_user_context.OptimizelyUserContext, logger: Logger ) -> tuple[bool, list[str]]: """ Determine for given experiment if user satisfies the audiences for the experiment. 
@@ -62,15 +62,12 @@ def does_user_meet_audience_conditions( return True, decide_reasons - if attributes is None: - attributes = optimizely_user_context.UserAttributes({}) - def evaluate_custom_attr(audience_id: str, index: int) -> Optional[bool]: audience = config.get_audience(audience_id) if not audience or audience.conditionList is None: return None custom_attr_condition_evaluator = condition_helper.CustomAttributeConditionEvaluator( - audience.conditionList, attributes, logger + audience.conditionList, user_context, logger ) return custom_attr_condition_evaluator.evaluate(index) diff --git a/optimizely/helpers/condition.py b/optimizely/helpers/condition.py index a6b8057c..58000a90 100644 --- a/optimizely/helpers/condition.py +++ b/optimizely/helpers/condition.py @@ -55,21 +55,23 @@ class ConditionMatchTypes: SEMVER_LE: Final = 'semver_le' SEMVER_LT: Final = 'semver_lt' SUBSTRING: Final = 'substring' + QUALIFIED: Final = 'qualified' class CustomAttributeConditionEvaluator: """ Class encapsulating methods to be used in audience leaf condition evaluation. """ - CUSTOM_ATTRIBUTE_CONDITION_TYPE: Final = 'custom_attribute' + CONDITION_TYPES: Final = ('custom_attribute', 'third_party_dimension') def __init__( self, condition_data: list[str | list[str]], - attributes: Optional[optimizely_user_context.UserAttributes], + user_context: optimizely_user_context.OptimizelyUserContext, logger: Logger ): self.condition_data = condition_data - self.attributes = attributes or optimizely_user_context.UserAttributes({}) + self.user_context = user_context + self.attributes = user_context.get_user_attributes() self.logger = logger def _get_condition_json(self, index: int) -> str: @@ -613,7 +615,27 @@ def semver_greater_than_or_equal_evaluator(self, index: int) -> Optional[bool]: return result >= 0 - EVALUATORS_BY_MATCH_TYPE = { + def qualified_evaluator(self, index: int) -> Optional[bool]: + """ Check if the user is qualifed for the given segment. 
+ + Args: + index: Index of the condition to be evaluated. + + Returns: + Boolean: + - True if the user is qualified. + - False if the user is not qualified. + None: if the condition value isn't a string. + """ + condition_value = self.condition_data[index][1] + + if not isinstance(condition_value, str): + self.logger.warning(audience_logs.UNKNOWN_CONDITION_VALUE.format(self._get_condition_json(index),)) + return None + + return self.user_context.is_qualified_for(condition_value) + + EVALUATORS_BY_MATCH_TYPE: dict[str, Callable[[CustomAttributeConditionEvaluator, int], Optional[bool]]] = { ConditionMatchTypes.EXACT: exact_evaluator, ConditionMatchTypes.EXISTS: exists_evaluator, ConditionMatchTypes.GREATER_THAN: greater_than_evaluator, @@ -625,7 +647,8 @@ def semver_greater_than_or_equal_evaluator(self, index: int) -> Optional[bool]: ConditionMatchTypes.SEMVER_GT: semver_greater_than_evaluator, ConditionMatchTypes.SEMVER_LE: semver_less_than_or_equal_evaluator, ConditionMatchTypes.SEMVER_LT: semver_less_than_evaluator, - ConditionMatchTypes.SUBSTRING: substring_evaluator + ConditionMatchTypes.SUBSTRING: substring_evaluator, + ConditionMatchTypes.QUALIFIED: qualified_evaluator } def split_version(self, version: str) -> Optional[list[str]]: @@ -696,7 +719,7 @@ def evaluate(self, index: int) -> Optional[bool]: None: if the user attributes and condition can't be evaluated. 
""" - if self.condition_data[index][2] != self.CUSTOM_ATTRIBUTE_CONDITION_TYPE: + if self.condition_data[index][2] not in self.CONDITION_TYPES: self.logger.warning(audience_logs.UNKNOWN_CONDITION_TYPE.format(self._get_condition_json(index))) return None @@ -708,7 +731,7 @@ def evaluate(self, index: int) -> Optional[bool]: self.logger.warning(audience_logs.UNKNOWN_MATCH_TYPE.format(self._get_condition_json(index))) return None - if condition_match != ConditionMatchTypes.EXISTS: + if condition_match not in (ConditionMatchTypes.EXISTS, ConditionMatchTypes.QUALIFIED): attribute_key = self.condition_data[index][0] if attribute_key not in self.attributes: self.logger.debug( diff --git a/optimizely/helpers/constants.py b/optimizely/helpers/constants.py index 06803152..06f2cb93 100644 --- a/optimizely/helpers/constants.py +++ b/optimizely/helpers/constants.py @@ -149,6 +149,14 @@ }, "version": {"type": "string"}, "revision": {"type": "string"}, + "integrations": { + "type": "array", + "items": { + "type": "object", + "properties": {"key": {"type": "string"}, "host": {"type": "string"}, "publicKey": {"type": "string"}}, + "required": ["key"], + } + } }, "required": [ "projectId", diff --git a/optimizely/helpers/types.py b/optimizely/helpers/types.py index 814bc1aa..a28aca67 100644 --- a/optimizely/helpers/types.py +++ b/optimizely/helpers/types.py @@ -30,29 +30,29 @@ class BaseEntity(TypedDict): class BaseDict(BaseEntity): - '''Base type for parsed datafile json, before instantiation of class objects.''' + """Base type for parsed datafile json, before instantiation of class objects.""" id: str key: str class EventDict(BaseDict): - '''Event dict from parsed datafile json.''' + """Event dict from parsed datafile json.""" experimentIds: list[str] class AttributeDict(BaseDict): - '''Attribute dict from parsed datafile json.''' + """Attribute dict from parsed datafile json.""" pass class TrafficAllocation(BaseEntity): - '''Traffic Allocation dict from parsed datafile json.''' + 
"""Traffic Allocation dict from parsed datafile json.""" endOfRange: int entityId: str class VariableDict(BaseDict): - '''Variable dict from parsed datafile json.''' + """Variable dict from parsed datafile json.""" value: str type: str defaultValue: str @@ -60,13 +60,13 @@ class VariableDict(BaseDict): class VariationDict(BaseDict): - '''Variation dict from parsed datafile json.''' + """Variation dict from parsed datafile json.""" variables: list[VariableDict] featureEnabled: Optional[bool] class ExperimentDict(BaseDict): - '''Experiment dict from parsed datafile json.''' + """Experiment dict from parsed datafile json.""" status: str forcedVariations: dict[str, str] variations: list[VariationDict] @@ -77,20 +77,20 @@ class ExperimentDict(BaseDict): class RolloutDict(BaseEntity): - '''Rollout dict from parsed datafile json.''' + """Rollout dict from parsed datafile json.""" id: str experiments: list[ExperimentDict] class FeatureFlagDict(BaseDict): - '''Feature flag dict from parsed datafile json.''' + """Feature flag dict from parsed datafile json.""" rolloutId: str variables: list[VariableDict] experimentIds: list[str] class GroupDict(BaseEntity): - '''Group dict from parsed datafile json.''' + """Group dict from parsed datafile json.""" id: str policy: str experiments: list[ExperimentDict] @@ -98,7 +98,14 @@ class GroupDict(BaseEntity): class AudienceDict(BaseEntity): - '''Audience dict from parsed datafile json.''' + """Audience dict from parsed datafile json.""" id: str name: str conditions: list[Any] | str + + +class IntegrationDict(BaseEntity): + """Integration dict from parsed datafile json.""" + key: str + host: str + publicKey: str diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index c5d769f5..11b8af9d 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -54,6 +54,7 @@ def __init__( self.client = optimizely_client self.logger = logger self.user_id = user_id + 
self._qualified_segments: list[str] = [] if not isinstance(user_attributes, dict): user_attributes = UserAttributes({}) @@ -94,7 +95,11 @@ def _clone(self) -> Optional[OptimizelyUserContext]: with self.lock: if self.forced_decisions_map: + # makes sure forced_decisions_map is duplicated without any references user_context.forced_decisions_map = copy.deepcopy(self.forced_decisions_map) + if self._qualified_segments: + # no need to use deepcopy here as qualified_segments does not contain anything other than strings + user_context._qualified_segments = self._qualified_segments.copy() return user_context @@ -248,3 +253,39 @@ def find_forced_decision(self, decision_context: OptimizelyDecisionContext) -> O # must allow None to be returned for the Flags only case return self.forced_decisions_map.get(decision_context) + + def is_qualified_for(self, segment: str) -> bool: + """ + Checks is the provided segment is in the qualified_segments list. + + Args: + segment: a segment name. + + Returns: + Returns: true if the segment is in the qualified segments list. + """ + with self.lock: + return segment in self._qualified_segments + + def get_qualified_segments(self) -> list[str]: + """ + Gets the qualified segments. + + Returns: + A list of qualified segment names. + """ + with self.lock: + return self._qualified_segments.copy() + + def set_qualified_segments(self, segments: list[str]) -> None: + """ + Replaces any qualified segments with the provided list of segments. + + Args: + segments: a list of segment names. + + Returns: + None. 
+ """ + with self.lock: + self._qualified_segments = segments.copy() diff --git a/optimizely/project_config.py b/optimizely/project_config.py index 7fbbdf99..9490e735 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -77,9 +77,13 @@ def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): self.typed_audiences: list[types.AudienceDict] = config.get('typedAudiences', []) self.feature_flags: list[types.FeatureFlagDict] = config.get('featureFlags', []) self.rollouts: list[types.RolloutDict] = config.get('rollouts', []) + self.integrations: list[types.IntegrationDict] = config.get('integrations', []) self.anonymize_ip: bool = config.get('anonymizeIP', False) self.send_flag_decisions: bool = config.get('sendFlagDecisions', False) self.bot_filtering: Optional[bool] = config.get('botFiltering', None) + self.public_key_for_odp: Optional[str] = None + self.host_for_odp: Optional[str] = None + self.all_segments: list[str] = [] # Utility maps for quick lookup self.group_id_map: dict[str, entities.Group] = self._generate_key_map(self.groups, 'id', entities.Group) @@ -107,6 +111,13 @@ def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): for experiment_dict in layer.experiments: self.experiment_id_map[experiment_dict['id']] = entities.Experiment(**experiment_dict) + if self.integrations: + self.integration_key_map = self._generate_key_map(self.integrations, 'key', entities.Integration) + odp_integration = self.integration_key_map.get('odp') + if odp_integration: + self.public_key_for_odp = odp_integration.publicKey + self.host_for_odp = odp_integration.host + self.audience_id_map = self._deserialize_audience(self.audience_id_map) for group in self.group_id_map.values(): experiments_in_group_id_map = self._generate_key_map(group.experiments, 'id', entities.Experiment) @@ -114,6 +125,9 @@ def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): experiment.__dict__.update({'groupId': 
group.id, 'groupPolicy': group.policy}) self.experiment_id_map.update(experiments_in_group_id_map) + for audience in self.audience_id_map.values(): + self.all_segments += audience.get_segments() + self.experiment_key_map: dict[str, entities.Experiment] = {} self.variation_key_map: dict[str, dict[str, entities.Variation]] = {} self.variation_id_map: dict[str, dict[str, entities.Variation]] = {} diff --git a/tests/base.py b/tests/base.py index d2bc9692..e793d1c3 100644 --- a/tests/base.py +++ b/tests/base.py @@ -1048,6 +1048,196 @@ def setUp(self, config_dict='config_dict'): 'revision': '3', } + self.config_dict_with_audience_segments = { + 'version': '4', + 'sendFlagDecisions': True, + 'rollouts': [ + { + 'experiments': [ + { + 'audienceIds': ['13389130056'], + 'forcedVariations': {}, + 'id': '3332020515', + 'key': 'rollout-rule-1', + 'layerId': '3319450668', + 'status': 'Running', + 'trafficAllocation': [ + { + 'endOfRange': 10000, + 'entityId': '3324490633' + } + ], + 'variations': [ + { + 'featureEnabled': True, + 'id': '3324490633', + 'key': 'rollout-variation-on', + 'variables': [] + } + ] + }, + { + 'audienceIds': [], + 'forcedVariations': {}, + 'id': '3332020556', + 'key': 'rollout-rule-2', + 'layerId': '3319450668', + 'status': 'Running', + 'trafficAllocation': [ + { + 'endOfRange': 10000, + 'entityId': '3324490644' + } + ], + 'variations': [ + { + 'featureEnabled': False, + 'id': '3324490644', + 'key': 'rollout-variation-off', + 'variables': [] + } + ] + } + ], + 'id': '3319450668' + } + ], + 'anonymizeIP': True, + 'botFiltering': True, + 'projectId': '10431130345', + 'variables': [], + 'featureFlags': [ + { + 'experimentIds': ['10390977673'], + 'id': '4482920077', + 'key': 'flag-segment', + 'rolloutId': '3319450668', + 'variables': [ + { + 'defaultValue': '42', + 'id': '2687470095', + 'key': 'i_42', + 'type': 'integer' + } + ] + } + ], + 'experiments': [ + { + 'status': 'Running', + 'key': 'experiment-segment', + 'layerId': '10420273888', + 
'trafficAllocation': [ + { + 'entityId': '10389729780', + 'endOfRange': 10000 + } + ], + 'audienceIds': ['$opt_dummy_audience'], + 'audienceConditions': ['or', '13389142234', '13389141123'], + 'variations': [ + { + 'variables': [], + 'featureEnabled': True, + 'id': '10389729780', + 'key': 'variation-a' + }, + { + 'variables': [], + 'id': '10416523121', + 'key': 'variation-b' + } + ], + 'forcedVariations': {}, + 'id': '10390977673' + } + ], + 'groups': [], + 'integrations': [ + { + 'key': 'odp', + 'host': 'https://api.zaius.com', + 'publicKey': 'W4WzcEs-ABgXorzY7h1LCQ' + } + ], + 'typedAudiences': [ + { + 'id': '13389142234', + 'conditions': [ + 'and', + [ + 'or', + [ + 'or', + { + 'value': 'odp-segment-1', + 'type': 'third_party_dimension', + 'name': 'odp.audiences', + 'match': 'qualified' + } + ] + ] + ], + 'name': 'odp-segment-1' + }, + { + 'id': '13389130056', + 'conditions': [ + 'and', + [ + 'or', + [ + 'or', + { + 'value': 'odp-segment-2', + 'type': 'third_party_dimension', + 'name': 'odp.audiences', + 'match': 'qualified' + }, + { + 'value': 'us', + 'type': 'custom_attribute', + 'name': 'country', + 'match': 'exact' + } + ], + [ + 'or', + { + 'value': 'odp-segment-3', + 'type': 'third_party_dimension', + 'name': 'odp.audiences', + 'match': 'qualified' + } + ] + ] + ], + 'name': 'odp-segment-2' + } + ], + 'audiences': [ + { + 'id': '13389141123', + 'name': 'adult', + 'conditions': '["and", ["or", ["or", ' + '{"match": "gt", "name": "age", "type": "custom_attribute", "value": 20}]]]' + } + ], + 'attributes': [ + { + 'id': '10401066117', + 'key': 'gender' + }, + { + 'id': '10401066170', + 'key': 'testvar' + } + ], + 'accountId': '10367498574', + 'events': [], + 'revision': '101' + } + config = getattr(self, config_dict) self.optimizely = optimizely.Optimizely(json.dumps(config)) self.project_config = self.optimizely.config_manager.get_config() diff --git a/tests/helpers_tests/test_audience.py b/tests/helpers_tests/test_audience.py index 9c29bb72..bab80380 100644 
--- a/tests/helpers_tests/test_audience.py +++ b/tests/helpers_tests/test_audience.py @@ -15,6 +15,7 @@ from unittest import mock from optimizely import optimizely +from optimizely.entities import Audience from optimizely.helpers import audience from optimizely.helpers import enums from tests import base @@ -24,12 +25,11 @@ class AudienceTest(base.BaseTest): def setUp(self): base.BaseTest.setUp(self) self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_does_user_meet_audience_conditions__no_audience(self): """ Test that does_user_meet_audience_conditions returns True when experiment is using no audience. """ - user_attributes = {} - # Both Audience Ids and Conditions are Empty experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = [] @@ -39,7 +39,7 @@ def test_does_user_meet_audience_conditions__no_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -55,7 +55,7 @@ def test_does_user_meet_audience_conditions__no_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -71,7 +71,7 @@ def test_does_user_meet_audience_conditions__no_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -84,7 +84,7 @@ def test_does_user_meet_audience_conditions__with_audience(self): Test that does_user_meet_audience_conditions uses audienceIds when audienceConditions is None. 
""" - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = ['11154'] @@ -101,7 +101,7 @@ def test_does_user_meet_audience_conditions__with_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) @@ -116,7 +116,7 @@ def test_does_user_meet_audience_conditions__with_audience(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) @@ -124,41 +124,23 @@ def test_does_user_meet_audience_conditions__with_audience(self): def test_does_user_meet_audience_conditions__no_attributes(self): """ Test that does_user_meet_audience_conditions evaluates audience when attributes are empty. - Test that does_user_meet_audience_conditions defaults attributes to empty dict when attributes is None. 
""" experiment = self.project_config.get_experiment_from_key('test_experiment') - # attributes set to empty dict - with mock.patch('optimizely.helpers.condition.CustomAttributeConditionEvaluator') as custom_attr_eval: - audience.does_user_meet_audience_conditions( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - 'test_experiment', - {}, - self.mock_client_logger - ) - - self.assertEqual({}, custom_attr_eval.call_args[0][1]) - - # attributes set to None - with mock.patch('optimizely.helpers.condition.CustomAttributeConditionEvaluator') as custom_attr_eval: - audience.does_user_meet_audience_conditions( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - 'test_experiment', - None, - self.mock_client_logger - ) - - self.assertEqual({}, custom_attr_eval.call_args[0][1]) + audience.does_user_meet_audience_conditions( + self.project_config, + experiment.get_audience_conditions_or_ids(), + enums.ExperimentAudienceEvaluationLogs, + 'test_experiment', + self.user_context, + self.mock_client_logger + ) def test_does_user_meet_audience_conditions__returns_true__when_condition_tree_evaluator_returns_true(self): """ Test that does_user_meet_audience_conditions returns True when call to condition_tree_evaluator returns True. 
""" - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') with mock.patch('optimizely.helpers.condition_tree_evaluator.evaluate', return_value=True): user_meets_audience_conditions, _ = audience.does_user_meet_audience_conditions( @@ -166,7 +148,7 @@ def test_does_user_meet_audience_conditions__returns_true__when_condition_tree_e experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictTrue( @@ -177,7 +159,7 @@ def test_does_user_meet_audience_conditions_returns_false_when_condition_tree_ev """ Test that does_user_meet_audience_conditions returns False when call to condition_tree_evaluator returns None or False. """ - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') with mock.patch('optimizely.helpers.condition_tree_evaluator.evaluate', return_value=None): user_meets_audience_conditions, _ = audience.does_user_meet_audience_conditions( @@ -185,7 +167,7 @@ def test_does_user_meet_audience_conditions_returns_false_when_condition_tree_ev experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictFalse( @@ -198,7 +180,7 @@ def test_does_user_meet_audience_conditions_returns_false_when_condition_tree_ev experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) self.assertStrictFalse( @@ -219,7 +201,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): 
experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -227,8 +209,8 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): audience_11159 = self.project_config.get_audience('11159') custom_attr_eval.assert_has_calls( [ - mock.call(audience_11154.conditionList, {}, self.mock_client_logger), - mock.call(audience_11159.conditionList, {}, self.mock_client_logger), + mock.call(audience_11154.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_11159.conditionList, self.user_context, self.mock_client_logger), mock.call().evaluate(0), mock.call().evaluate(0), ], @@ -255,7 +237,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'audience_combinations_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -266,10 +248,10 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) custom_attr_eval.assert_has_calls( [ - mock.call(audience_3468206642.conditionList, {}, self.mock_client_logger), - mock.call(audience_3988293898.conditionList, {}, self.mock_client_logger), - mock.call(audience_3988293899.conditionList, {}, self.mock_client_logger), - mock.call(audience_3468206646.conditionList, {}, self.mock_client_logger), + mock.call(audience_3468206642.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_3988293898.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_3988293899.conditionList, self.user_context, self.mock_client_logger), + mock.call(audience_3468206646.conditionList, self.user_context, self.mock_client_logger), mock.call().evaluate(0), mock.call().evaluate(0), mock.call().evaluate(0), @@ -292,7 +274,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions_leaf_ 
experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'audience_combinations_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -300,18 +282,41 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions_leaf_ custom_attr_eval.assert_has_calls( [ - mock.call(audience_3468206645.conditionList, {}, self.mock_client_logger), + mock.call(audience_3468206645.conditionList, self.user_context, self.mock_client_logger), mock.call().evaluate(0), mock.call().evaluate(1), ], any_order=True, ) + def test_get_segments(self): + seg1 = ['odp.audiences', 'seg1', 'third_party_dimension', 'qualified'] + seg2 = ['odp.audiences', 'seg2', 'third_party_dimension', 'qualified'] + seg3 = ['odp.audiences', 'seg3', 'third_party_dimension', 'qualified'] + other = ['other', 'a', 'custom_attribute', 'eq'] + + def make_audience(conditions): + return Audience('12345', 'group-a', '', conditionList=conditions) + + audience = make_audience([seg1]) + self.assertEqual(['seg1'], audience.get_segments()) + + audience = make_audience([seg1, seg2, other]) + self.assertEqual(['seg1', 'seg2'], sorted(audience.get_segments())) + + audience = make_audience([seg1, other, seg2]) + self.assertEqual(['seg1', 'seg2'], sorted(audience.get_segments())) + + audience = make_audience([seg1, other, seg2, seg1, seg2, seg3]) + self.assertEqual(3, len(audience.get_segments())) + self.assertEqual(['seg1', 'seg2', 'seg3'], sorted(audience.get_segments())) + class ExperimentAudienceLoggingTest(base.BaseTest): def setUp(self): base.BaseTest.setUp(self) self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_does_user_meet_audience_conditions__with_no_audience(self): experiment = self.project_config.get_experiment_from_key('test_experiment') @@ -335,7 +340,7 @@ def test_does_user_meet_audience_conditions__with_no_audience(self): ) def 
test_does_user_meet_audience_conditions__evaluates_audience_ids(self): - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = ['11154', '11159'] experiment.audienceConditions = None @@ -350,7 +355,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - user_attributes, + self.user_context, self.mock_client_logger ) @@ -393,7 +398,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'audience_combinations_experiment', - {}, + self.user_context, self.mock_client_logger ) @@ -433,6 +438,7 @@ class RolloutRuleAudienceLoggingTest(base.BaseTest): def setUp(self): base.BaseTest.setUp(self) self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_does_user_meet_audience_conditions__with_no_audience(self): # Using experiment as rule for testing log messages @@ -458,7 +464,7 @@ def test_does_user_meet_audience_conditions__with_no_audience(self): def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): # Using experiment as rule for testing log messages - user_attributes = {'test_attribute': 'test_value_1'} + self.user_context._user_attributes = {'test_attribute': 'test_value_1'} experiment = self.project_config.get_experiment_from_key('test_experiment') experiment.audienceIds = ['11154', '11159'] experiment.audienceConditions = None @@ -473,7 +479,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_ids(self): experiment.get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, 'test_rule', - user_attributes, + self.user_context, 
self.mock_client_logger ) @@ -517,7 +523,7 @@ def test_does_user_meet_audience_conditions__evaluates_audience_conditions(self) experiment.get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, 'test_rule', - {}, + self.user_context, self.mock_client_logger ) diff --git a/tests/helpers_tests/test_condition.py b/tests/helpers_tests/test_condition.py index 3f8c6c16..9d7ae52f 100644 --- a/tests/helpers_tests/test_condition.py +++ b/tests/helpers_tests/test_condition.py @@ -37,6 +37,7 @@ lt_float_condition_list = [['meters_travelled', 48.2, 'custom_attribute', 'lt']] le_int_condition_list = [['meters_travelled', 48, 'custom_attribute', 'le']] le_float_condition_list = [['meters_travelled', 48.2, 'custom_attribute', 'le']] +qualified_condition_list = [['odp.audiences', 'odp-segment-2', 'third_party_dimension', 'qualified']] class CustomAttributeConditionEvaluatorTest(base.BaseTest): @@ -49,23 +50,26 @@ def setUp(self): doubleCondition, ] self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_evaluate__returns_true__when_attributes_pass_audience_condition(self): + self.user_context._user_attributes = {'browser_type': 'safari'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - self.condition_list, {'browser_type': 'safari'}, self.mock_client_logger + self.condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_evaluate__returns_false__when_attributes_fail_audience_condition(self): + self.user_context._user_attributes = {'browser_type': 'chrome'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - self.condition_list, {'browser_type': 'chrome'}, self.mock_client_logger + self.condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_evaluate__evaluates__different_typed_attributes(self): - userAttributes = { + self.user_context._user_attributes 
= { 'browser_type': 'safari', 'is_firefox': True, 'num_users': 10, @@ -73,7 +77,7 @@ def test_evaluate__evaluates__different_typed_attributes(self): } evaluator = condition_helper.CustomAttributeConditionEvaluator( - self.condition_list, userAttributes, self.mock_client_logger + self.condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) @@ -84,9 +88,9 @@ def test_evaluate__evaluates__different_typed_attributes(self): def test_evaluate__returns_null__when_condition_has_an_invalid_match_property(self): condition_list = [['weird_condition', 'hi', 'custom_attribute', 'weird_match']] - + self.user_context._user_attributes = {'weird_condition': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, {'weird_condition': 'hi'}, self.mock_client_logger + condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -94,9 +98,9 @@ def test_evaluate__returns_null__when_condition_has_an_invalid_match_property(se def test_evaluate__assumes_exact__when_condition_match_property_is_none(self): condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', None]] - + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, {'favorite_constellation': 'Lacerta'}, self.mock_client_logger, + condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) @@ -104,9 +108,9 @@ def test_evaluate__assumes_exact__when_condition_match_property_is_none(self): def test_evaluate__returns_null__when_condition_has_an_invalid_type_property(self): condition_list = [['weird_condition', 'hi', 'weird_type', 'exact']] - + self.user_context._user_attributes = {'weird_condition': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, {'weird_condition': 'hi'}, self.mock_client_logger + 
condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -115,8 +119,9 @@ def test_semver_eq__returns_true(self): semver_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_eq']] user_versions = ['2.0.0', '2.0'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) @@ -125,8 +130,9 @@ def test_semver_eq__returns_false(self): semver_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_eq']] user_versions = ['2.9', '1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) @@ -135,8 +141,9 @@ def test_semver_le__returns_true(self): semver_less_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_le']] user_versions = ['2.0.0', '1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) @@ -145,8 +152,9 @@ def test_semver_le__returns_false(self): semver_less_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_le']] user_versions = ['2.5.1'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) @@ -155,8 +163,9 @@ def test_semver_ge__returns_true(self): semver_greater_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_ge']] user_versions = ['2.0.0', '2.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) @@ -165,8 +174,9 @@ def test_semver_ge__returns_false(self): semver_greater_than_or_equal_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_ge']] user_versions = ['1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_or_equal_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_or_equal_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) @@ -175,8 +185,9 @@ def test_semver_lt__returns_true(self): semver_less_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_lt']] user_versions = ['1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) @@ -185,8 +196,9 @@ def test_semver_lt__returns_false(self): semver_less_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_lt']] user_versions = ['2.0.0', '2.5.1'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) @@ -195,8 +207,9 @@ def test_semver_gt__returns_true(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = ['2.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertTrue(result, custom_err_msg) @@ -205,8 +218,9 @@ def test_semver_gt__returns_false(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = ['2.0.0', '1.9'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertFalse(result, custom_err_msg) @@ -215,8 +229,9 @@ def test_evaluate__returns_None__when_user_version_is_not_string(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = [True, 37] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) @@ -225,8 +240,9 @@ def test_evaluate__returns_None__when_user_version_with_invalid_semantic(self): semver_greater_than_2_0_condition_list = [['Android', "2.0", 'custom_attribute', 'semver_gt']] user_versions = ['3.7.2.2', '+'] for user_version in user_versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) @@ -242,8 +258,9 @@ def test_compare_user_version_with_target_version_equal_to_0(self): ('2.9.1', '2.9.1+beta') ] for target_version, user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) custom_err_msg = f"Got {result} in result. Failed for user version:" \ f" {user_version} and target version: {target_version}" @@ -264,8 +281,9 @@ def test_compare_user_version_with_target_version_greater_than_0(self): ('2.2.3+beta2-beta1', '2.2.3+beta3-beta2') ] for target_version, user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) custom_err_msg = f"Got {result} in result. 
Failed for user version:" \ f" {user_version} and target version: {target_version}" @@ -286,8 +304,9 @@ def test_compare_user_version_with_target_version_less_than_0(self): ('2.1.3-beta1+beta3', '2.1.3-beta1+beta2') ] for target_version, user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(target_version, user_version) custom_err_msg = f"Got {result} in result. Failed for user version:" \ f" {user_version} and target version: {target_version}" @@ -300,8 +319,9 @@ def test_compare_invalid_user_version_with(self): target_version = '2.1.0' for user_version in versions: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_greater_than_2_0_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_greater_than_2_0_condition_list, self.user_context, self.mock_client_logger) result = evaluator.compare_user_version_with_target_version(user_version, target_version) custom_err_msg = f"Got {result} in result. 
Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) @@ -309,69 +329,71 @@ def test_compare_invalid_user_version_with(self): def test_exists__returns_false__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exists__returns_false__when_user_provided_value_is_null(self): - + self.user_context._user_attributes = {'input_value': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': None}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exists__returns_true__when_user_provided_value_is_string(self): + self.user_context._user_attributes = {'input_value': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': 'hi'}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exists__returns_true__when_user_provided_value_is_number(self): - + self.user_context._user_attributes = {'input_value': 10} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': 10}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'input_value': 10.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': 10.0}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exists__returns_true__when_user_provided_value_is_boolean(self): - + 
self.user_context._user_attributes = {'input_value': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, {'input_value': False}, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_string__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {'favorite_constellation': 'Lacerta'}, self.mock_client_logger, + exact_string_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_string__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'favorite_constellation': 'The Big Dipper'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {'favorite_constellation': 'The Big Dipper'}, self.mock_client_logger, + exact_string_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_string__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'favorite_constellation': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {'favorite_constellation': False}, self.mock_client_logger, + exact_string_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -379,79 +401,83 @@ def test_exact_string__returns_null__when_user_provided_value_is_different_type_ def test_exact_string__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_string_condition_list, {}, 
self.mock_client_logger + exact_string_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_exact_int__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 9000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 9000}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': 9000.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 9000.0}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_float__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 9000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 9000}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': 9000.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 9000.0}, self.mock_client_logger, + exact_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_int__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 8000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 8000}, self.mock_client_logger + exact_int_condition_list, self.user_context, 
self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_float__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 8000.0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 8000.0}, self.mock_client_logger, + exact_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_int__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 'hi'}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': True}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_exact_float__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'lasers_count': 'hi'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': 'hi'}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'lasers_count': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {'lasers_count': True}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) 
@@ -459,7 +485,7 @@ def test_exact_float__returns_null__when_user_provided_value_is_different_type_f def test_exact_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -467,7 +493,7 @@ def test_exact_int__returns_null__when_no_user_provided_value(self): def test_exact_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_float_condition_list, {}, self.mock_client_logger + exact_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -475,9 +501,9 @@ def test_exact_float__returns_null__when_no_user_provided_value(self): def test_exact__given_number_values__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'lasers_count': 9000} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_int_condition_list, {'lasers_count': 9000}, self.mock_client_logger + exact_int_condition_list, self.user_context, self.mock_client_logger ) # assert that isFiniteNumber only needs to reject condition value to stop evaluation. 
@@ -500,57 +526,56 @@ def test_exact__given_number_values__calls_is_finite_number(self): mock_is_finite.assert_has_calls([mock.call(9000), mock.call(9000)]) def test_exact_bool__returns_true__when_user_provided_value_is_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'did_register_user': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {'did_register_user': False}, self.mock_client_logger, + exact_bool_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def test_exact_bool__returns_false__when_user_provided_value_is_not_equal_to_condition_value(self, ): - + self.user_context._user_attributes = {'did_register_user': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {'did_register_user': True}, self.mock_client_logger, + exact_bool_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_exact_bool__returns_null__when_user_provided_value_is_different_type_from_condition_value(self, ): - + self.user_context._user_attributes = {'did_register_user': 0} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {'did_register_user': 0}, self.mock_client_logger + exact_bool_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_exact_bool__returns_null__when_no_user_provided_value(self): - evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_bool_condition_list, {}, self.mock_client_logger + exact_bool_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_substring__returns_true__when_condition_value_is_substring_of_user_value(self, ): - + self.user_context._user_attributes = {'headline_text': 'Limited time, buy now!'} evaluator = 
condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {'headline_text': 'Limited time, buy now!'}, self.mock_client_logger, + substring_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictTrue(evaluator.evaluate(0)) def test_substring__returns_false__when_condition_value_is_not_a_substring_of_user_value(self, ): - + self.user_context._user_attributes = {'headline_text': 'Breaking news!'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {'headline_text': 'Breaking news!'}, self.mock_client_logger, + substring_condition_list, self.user_context, self.mock_client_logger, ) self.assertStrictFalse(evaluator.evaluate(0)) def test_substring__returns_null__when_user_provided_value_not_a_string(self): - + self.user_context._user_attributes = {'headline_text': 10} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {'headline_text': 10}, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -558,91 +583,96 @@ def test_substring__returns_null__when_user_provided_value_not_a_string(self): def test_substring__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, {}, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_greater_than_int__returns_true__when_user_value_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} 
evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_greater_than_float__returns_true__when_user_value_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.3} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 48.3}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 49}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_greater_than_int__returns_false__when_user_value_not_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) - + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_float__returns_false__when_user_value_not_greater_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - 
gt_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + gt_int_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': False}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_greater_than_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + gt_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {'meters_travelled': False}, self.mock_client_logger, + gt_float_condition_list, self.user_context, self.mock_client_logger, ) 
self.assertIsNone(evaluator.evaluate(0)) @@ -650,7 +680,7 @@ def test_greater_than_float__returns_null__when_user_value_is_not_a_number(self) def test_greater_than_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -658,105 +688,113 @@ def test_greater_than_int__returns_null__when_no_user_provided_value(self): def test_greater_than_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_float_condition_list, {}, self.mock_client_logger + gt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_greater_than_or_equal_int__returns_true__when_user_value_greater_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 48}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def 
test_greater_than_or_equal_float__returns_true__when_user_value_greater_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.3} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48.3}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 49}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_greater_than_or_equal_int__returns_false__when_user_value_not_greater_than_or_equal_condition_value( self): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_or_equal_float__returns_false__when_user_value_not_greater_than_or_equal_condition_value( self): - + 
self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_greater_than_or_equal_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + ge_int_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': False}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_greater_than_or_equal_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': 'a long way'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {'meters_travelled': 'a long way'}, self.mock_client_logger, + ge_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, 
{'meters_travelled': False}, self.mock_client_logger, + ge_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -764,7 +802,7 @@ def test_greater_than_or_equal_float__returns_null__when_user_value_is_not_a_num def test_greater_than_or_equal_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -772,79 +810,84 @@ def test_greater_than_or_equal_int__returns_null__when_no_user_provided_value(se def test_greater_than_or_equal_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_float_condition_list, {}, self.mock_client_logger + ge_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_int__returns_true__when_user_value_less_than_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_float__returns_true__when_user_value_less_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, 
{'meters_travelled': 48.1}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_int__returns_false__when_user_value_not_less_than_condition_value(self, ): + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_less_than_float__returns_false__when_user_value_not_less_than_condition_value(self, ): - + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': 49}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def 
test_less_than_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': False}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {'meters_travelled': False}, self.mock_client_logger, + lt_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -852,7 +895,7 @@ def test_less_than_float__returns_null__when_user_value_is_not_a_number(self): def test_less_than_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -860,91 +903,97 @@ def test_less_than_int__returns_null__when_no_user_provided_value(self): def test_less_than_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_float_condition_list, {}, self.mock_client_logger + lt_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_or_equal_int__returns_true__when_user_value_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 47.9} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 47.9}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) 
self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 48}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_or_equal_float__returns_true__when_user_value_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 41} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48.2} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48.2}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictTrue(evaluator.evaluate(0)) def test_less_than_or_equal_int__returns_false__when_user_value_not_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = 
condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 49}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_less_than_or_equal_float__returns_false__when_user_value_not_less_than_or_equal_condition_value(self): - + self.user_context._user_attributes = {'meters_travelled': 48.3} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 48.3}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) + self.user_context._user_attributes = {'meters_travelled': 49} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {'meters_travelled': 49}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) def test_less_than_or_equal_int__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': False}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) def test_less_than_or_equal_float__returns_null__when_user_value_is_not_a_number(self): - + self.user_context._user_attributes = {'meters_travelled': False} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, 
{'meters_travelled': False}, self.mock_client_logger, + le_float_condition_list, self.user_context, self.mock_client_logger, ) self.assertIsNone(evaluator.evaluate(0)) @@ -952,7 +1001,7 @@ def test_less_than_or_equal_float__returns_null__when_user_value_is_not_a_number def test_less_than_or_equal_int__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -960,7 +1009,7 @@ def test_less_than_or_equal_int__returns_null__when_no_user_provided_value(self) def test_less_than_or_equal_float__returns_null__when_no_user_provided_value(self): evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_float_condition_list, {}, self.mock_client_logger + le_float_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -968,9 +1017,9 @@ def test_less_than_or_equal_float__returns_null__when_no_user_provided_value(sel def test_greater_than__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + gt_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1012,9 +1061,9 @@ def is_finite_number__accepting_both_values(value): def test_less_than__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. 
""" - + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + lt_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1056,9 +1105,9 @@ def is_finite_number__accepting_both_values(value): def test_greater_than_or_equal__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. """ - + self.user_context._user_attributes = {'meters_travelled': 48.1} evaluator = condition_helper.CustomAttributeConditionEvaluator( - ge_int_condition_list, {'meters_travelled': 48.1}, self.mock_client_logger + ge_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1100,9 +1149,9 @@ def is_finite_number__accepting_both_values(value): def test_less_than_or_equal__calls_is_finite_number(self): """ Test that CustomAttributeConditionEvaluator.evaluate returns True if is_finite_number returns True. Returns None if is_finite_number returns False. 
""" - + self.user_context._user_attributes = {'meters_travelled': 47} evaluator = condition_helper.CustomAttributeConditionEvaluator( - le_int_condition_list, {'meters_travelled': 47}, self.mock_client_logger + le_int_condition_list, self.user_context, self.mock_client_logger ) def is_finite_number__rejecting_condition_value(value): @@ -1148,13 +1197,55 @@ def test_invalid_semver__returns_None__when_semver_is_invalid(self): "+build-prerelease", "2..0"] for user_version in invalid_test_cases: + self.user_context._user_attributes = {'Android': user_version} evaluator = condition_helper.CustomAttributeConditionEvaluator( - semver_less_than_or_equal_2_0_1_condition_list, {'Android': user_version}, self.mock_client_logger) + semver_less_than_or_equal_2_0_1_condition_list, self.user_context, self.mock_client_logger) result = evaluator.evaluate(0) custom_err_msg = f"Got {result} in result. Failed for user version: {user_version}" self.assertIsNone(result, custom_err_msg) + def test_qualified__returns_true__when_user_is_qualified(self, ): + self.user_context.set_qualified_segments(['odp-segment-2']) + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger, + ) + + self.assertStrictTrue(evaluator.evaluate(0)) + + def test_qualified__returns_false__when_user_is_not_qualified(self, ): + self.user_context.set_qualified_segments(['odp-segment-1']) + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger, + ) + + self.assertStrictFalse(evaluator.evaluate(0)) + + def test_qualified__returns_false__with_no_qualified_segments(self): + + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + self.assertStrictFalse(evaluator.evaluate(0)) + + def test_qualified__returns_null__when_condition_value_is_not_string(self): + 
qualified_condition_list = [['odp.audiences', 5, 'third_party_dimension', 'qualified']] + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + self.assertIsNone(evaluator.evaluate(0)) + + def test_qualified__returns_true__when_name_is_different(self): + self.user_context.set_qualified_segments(['odp-segment-2']) + qualified_condition_list = [['other-name', 'odp-segment-2', 'third_party_dimension', 'qualified']] + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + self.assertStrictTrue(evaluator.evaluate(0)) + class ConditionDecoderTests(base.BaseTest): def test_loads(self): @@ -1183,14 +1274,14 @@ class CustomAttributeConditionEvaluatorLogging(base.BaseTest): def setUp(self): base.BaseTest.setUp(self) self.mock_client_logger = mock.MagicMock() + self.user_context = self.optimizely.create_user_context('any-user') def test_evaluate__match_type__invalid(self): log_level = 'warning' condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'regex']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, user_attributes, self.mock_client_logger + condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1211,10 +1302,9 @@ def test_evaluate__match_type__invalid(self): def test_evaluate__condition_type__invalid(self): log_level = 'warning' condition_list = [['favorite_constellation', 'Lacerta', 'sdk_version', 'exact']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - condition_list, user_attributes, self.mock_client_logger + condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1235,10 +1325,9 @@ def test_evaluate__condition_type__invalid(self): def test_exact__user_value__missing(self): log_level = 'debug' 
exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1259,10 +1348,9 @@ def test_exact__user_value__missing(self): def test_greater_than__user_value__missing(self): log_level = 'debug' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1283,10 +1371,9 @@ def test_greater_than__user_value__missing(self): def test_less_than__user_value__missing(self): log_level = 'debug' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1307,10 +1394,9 @@ def test_less_than__user_value__missing(self): def test_substring__user_value__missing(self): log_level = 'debug' substring_condition_list = [['headline_text', 'buy now', 'custom_attribute', 'substring']] - user_attributes = {} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1330,10 +1416,9 @@ def test_substring__user_value__missing(self): def test_exists__user_value__missing(self): exists_condition_list = [['input_value', None, 'custom_attribute', 'exists']] - user_attributes = {} evaluator = 
condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, user_attributes, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) @@ -1345,10 +1430,10 @@ def test_exists__user_value__missing(self): def test_exact__user_value__None(self): log_level = 'debug' exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': None} + self.user_context._user_attributes = {'favorite_constellation': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1369,10 +1454,10 @@ def test_exact__user_value__None(self): def test_greater_than__user_value__None(self): log_level = 'debug' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': None} + self.user_context._user_attributes = {'meters_travelled': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1393,10 +1478,10 @@ def test_greater_than__user_value__None(self): def test_less_than__user_value__None(self): log_level = 'debug' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': None} + self.user_context._user_attributes = {'meters_travelled': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1417,10 +1502,10 @@ def test_less_than__user_value__None(self): def test_substring__user_value__None(self): 
log_level = 'debug' substring_condition_list = [['headline_text', '12', 'custom_attribute', 'substring']] - user_attributes = {'headline_text': None} + self.user_context._user_attributes = {'headline_text': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1440,10 +1525,10 @@ def test_substring__user_value__None(self): def test_exists__user_value__None(self): exists_condition_list = [['input_value', None, 'custom_attribute', 'exists']] - user_attributes = {'input_value': None} + self.user_context._user_attributes = {'input_value': None} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exists_condition_list, user_attributes, self.mock_client_logger + exists_condition_list, self.user_context, self.mock_client_logger ) self.assertStrictFalse(evaluator.evaluate(0)) @@ -1455,10 +1540,10 @@ def test_exists__user_value__None(self): def test_exact__user_value__unexpected_type(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': {}} + self.user_context._user_attributes = {'favorite_constellation': {}} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1479,10 +1564,10 @@ def test_exact__user_value__unexpected_type(self): def test_greater_than__user_value__unexpected_type(self): log_level = 'warning' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': '48'} + self.user_context._user_attributes = {'meters_travelled': '48'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, 
user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1503,10 +1588,10 @@ def test_greater_than__user_value__unexpected_type(self): def test_less_than__user_value__unexpected_type(self): log_level = 'warning' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': True} + self.user_context._user_attributes = {'meters_travelled': True} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1527,10 +1612,10 @@ def test_less_than__user_value__unexpected_type(self): def test_substring__user_value__unexpected_type(self): log_level = 'warning' substring_condition_list = [['headline_text', '12', 'custom_attribute', 'substring']] - user_attributes = {'headline_text': 1234} + self.user_context._user_attributes = {'headline_text': 1234} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1551,10 +1636,10 @@ def test_substring__user_value__unexpected_type(self): def test_exact__user_value__infinite(self): log_level = 'warning' exact_condition_list = [['meters_travelled', 48, 'custom_attribute', 'exact']] - user_attributes = {'meters_travelled': float("inf")} + self.user_context._user_attributes = {'meters_travelled': float("inf")} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) self.assertIsNone(evaluator.evaluate(0)) @@ -1575,10 +1660,10 @@ def test_exact__user_value__infinite(self): def test_greater_than__user_value__infinite(self): 
log_level = 'warning' gt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': float("nan")} + self.user_context._user_attributes = {'meters_travelled': float("nan")} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1600,10 +1685,10 @@ def test_greater_than__user_value__infinite(self): def test_less_than__user_value__infinite(self): log_level = 'warning' lt_condition_list = [['meters_travelled', 48, 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': float('-inf')} + self.user_context._user_attributes = {'meters_travelled': float('-inf')} evaluator = condition_helper.CustomAttributeConditionEvaluator( - lt_condition_list, user_attributes, self.mock_client_logger + lt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1625,10 +1710,10 @@ def test_less_than__user_value__infinite(self): def test_exact__user_value_type_mismatch(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', 'Lacerta', 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': 5} + self.user_context._user_attributes = {'favorite_constellation': 5} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1649,10 +1734,10 @@ def test_exact__user_value_type_mismatch(self): def test_exact__condition_value_invalid(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', {}, 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': 'Lacerta'} + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = 
condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1673,10 +1758,10 @@ def test_exact__condition_value_invalid(self): def test_exact__condition_value_infinite(self): log_level = 'warning' exact_condition_list = [['favorite_constellation', float('inf'), 'custom_attribute', 'exact']] - user_attributes = {'favorite_constellation': 'Lacerta'} + self.user_context._user_attributes = {'favorite_constellation': 'Lacerta'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - exact_condition_list, user_attributes, self.mock_client_logger + exact_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1697,10 +1782,10 @@ def test_exact__condition_value_infinite(self): def test_greater_than__condition_value_invalid(self): log_level = 'warning' gt_condition_list = [['meters_travelled', True, 'custom_attribute', 'gt']] - user_attributes = {'meters_travelled': 48} + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1721,10 +1806,10 @@ def test_greater_than__condition_value_invalid(self): def test_less_than__condition_value_invalid(self): log_level = 'warning' gt_condition_list = [['meters_travelled', float('nan'), 'custom_attribute', 'lt']] - user_attributes = {'meters_travelled': 48} + self.user_context._user_attributes = {'meters_travelled': 48} evaluator = condition_helper.CustomAttributeConditionEvaluator( - gt_condition_list, user_attributes, self.mock_client_logger + gt_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1745,10 +1830,10 @@ def test_less_than__condition_value_invalid(self): 
def test_substring__condition_value_invalid(self): log_level = 'warning' substring_condition_list = [['headline_text', False, 'custom_attribute', 'substring']] - user_attributes = {'headline_text': 'breaking news'} + self.user_context._user_attributes = {'headline_text': 'breaking news'} evaluator = condition_helper.CustomAttributeConditionEvaluator( - substring_condition_list, user_attributes, self.mock_client_logger + substring_condition_list, self.user_context, self.mock_client_logger ) expected_condition_log = { @@ -1765,3 +1850,27 @@ def test_substring__condition_value_invalid(self): f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' 'You may need to upgrade to a newer release of the Optimizely SDK.' ) + + def test_qualified__condition_value_invalid(self): + log_level = 'warning' + qualified_condition_list = [['odp.audiences', False, 'third_party_dimension', 'qualified']] + self.user_context.qualified_segments = ['segment1'] + + evaluator = condition_helper.CustomAttributeConditionEvaluator( + qualified_condition_list, self.user_context, self.mock_client_logger + ) + + expected_condition_log = { + "name": 'odp.audiences', + "value": False, + "type": 'third_party_dimension', + "match": 'qualified', + } + + self.assertIsNone(evaluator.evaluate(0)) + + mock_log = getattr(self.mock_client_logger, log_level) + mock_log.assert_called_once_with( + f'Audience condition "{json.dumps(expected_condition_log)}" has an unsupported condition value. ' + 'You may need to upgrade to a newer release of the Optimizely SDK.' 
+ ) diff --git a/tests/helpers_tests/test_validator.py b/tests/helpers_tests/test_validator.py index ecee3b74..6d9e3f20 100644 --- a/tests/helpers_tests/test_validator.py +++ b/tests/helpers_tests/test_validator.py @@ -59,6 +59,11 @@ def test_is_datafile_valid__returns_true(self): self.assertTrue(validator.is_datafile_valid(json.dumps(self.config_dict))) + def test_is_datafile_valid__returns_true_with_audience_segments(self): + """ Test that valid datafile with audience segments returns True. """ + + self.assertTrue(validator.is_datafile_valid(json.dumps(self.config_dict_with_audience_segments))) + def test_is_datafile_valid__returns_false(self): """ Test that invalid datafile returns False. """ diff --git a/tests/test_config.py b/tests/test_config.py index 47cce405..3b95b02e 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -13,6 +13,7 @@ import json from unittest import mock +import copy from optimizely import entities from optimizely import error_handler @@ -20,6 +21,7 @@ from optimizely import logger from optimizely import optimizely from optimizely.helpers import enums +from optimizely.project_config import ProjectConfig from . 
import base @@ -1024,6 +1026,65 @@ def test_to_datafile_from_bytes(self): self.assertEqual(expected_datafile, actual_datafile) + def test_datafile_with_integrations(self): + """ Test to confirm that integration conversion works and has expected output """ + opt_obj = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments) + ) + project_config = opt_obj.config_manager.get_config() + self.assertIsInstance(project_config, ProjectConfig) + + for integration in project_config.integration_key_map.values(): + self.assertIsInstance(integration, entities.Integration) + + integrations = self.config_dict_with_audience_segments['integrations'] + self.assertGreater(len(integrations), 0) + self.assertEqual(len(project_config.integrations), len(integrations)) + + integration = integrations[0] + self.assertEqual(project_config.host_for_odp, integration['host']) + self.assertEqual(project_config.public_key_for_odp, integration['publicKey']) + + self.assertEqual(sorted(project_config.all_segments), ['odp-segment-1', 'odp-segment-2', 'odp-segment-3']) + + def test_datafile_with_no_integrations(self): + """ Test to confirm that datafile with empty integrations still works """ + config_dict_with_audience_segments = copy.deepcopy(self.config_dict_with_audience_segments) + config_dict_with_audience_segments['integrations'] = [] + opt_obj = optimizely.Optimizely( + json.dumps(config_dict_with_audience_segments) + ) + + project_config = opt_obj.config_manager.get_config() + + self.assertIsInstance(project_config, ProjectConfig) + self.assertEqual(len(project_config.integrations), 0) + + def test_datafile_with_integrations_missing_key(self): + """ Test to confirm that datafile without key fails""" + config_dict_with_audience_segments = copy.deepcopy(self.config_dict_with_audience_segments) + del config_dict_with_audience_segments['integrations'][0]['key'] + opt_obj = optimizely.Optimizely( + json.dumps(config_dict_with_audience_segments) + ) + + project_config = 
opt_obj.config_manager.get_config() + + self.assertIsNone(project_config) + + def test_datafile_with_integrations_only_key(self): + """ Test to confirm that datafile with integrations and only key field still work """ + config_dict_with_audience_segments = copy.deepcopy(self.config_dict_with_audience_segments) + config_dict_with_audience_segments['integrations'].clear() + config_dict_with_audience_segments['integrations'].append({'key': '123'}) + opt_obj = optimizely.Optimizely( + json.dumps(config_dict_with_audience_segments) + ) + + project_config = opt_obj.config_manager.get_config() + + self.assertIsInstance(project_config, ProjectConfig) + class ConfigLoggingTest(base.BaseTest): def setUp(self): diff --git a/tests/test_decision_service.py b/tests/test_decision_service.py index dd1f7fee..4d755de5 100644 --- a/tests/test_decision_service.py +++ b/tests/test_decision_service.py @@ -647,7 +647,7 @@ def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_a experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) mock_bucket.assert_called_once_with( @@ -710,7 +710,7 @@ def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_n experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) mock_bucket.assert_called_once_with( @@ -764,7 +764,7 @@ def test_get_variation__user_does_not_meet_audience_conditions(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) self.assertEqual(0, mock_bucket.call_count) @@ -816,7 +816,7 @@ def test_get_variation__user_profile_in_invalid_format(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 
"test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) mock_decision_service_logging.warning.assert_called_once_with( @@ -878,7 +878,7 @@ def test_get_variation__user_profile_lookup_fails(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) mock_decision_service_logging.exception.assert_called_once_with( @@ -939,7 +939,7 @@ def test_get_variation__user_profile_save_fails(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) @@ -999,7 +999,7 @@ def test_get_variation__ignore_user_profile_when_specified(self): experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "test_experiment", - user.get_user_attributes(), + user, mock_decision_service_logging ) mock_bucket.assert_called_once_with( @@ -1163,7 +1163,7 @@ def test_get_variation_for_rollout__skips_to_everyone_else_rule(self): self.project_config.get_experiment_from_key("211127").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, '1', - user.get_user_attributes(), + user, mock_decision_service_logging, ), mock.call( @@ -1171,7 +1171,7 @@ def test_get_variation_for_rollout__skips_to_everyone_else_rule(self): self.project_config.get_experiment_from_key("211147").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, 'Everyone Else', - user.get_user_attributes(), + user, mock_decision_service_logging, ), ], @@ -1216,7 +1216,7 @@ def test_get_variation_for_rollout__returns_none_for_user_not_in_rollout(self): self.project_config.get_experiment_from_key("211127").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "1", - user.get_user_attributes(), + user, mock_decision_service_logging, ), mock.call( @@ -1224,7 +1224,7 @@ def 
test_get_variation_for_rollout__returns_none_for_user_not_in_rollout(self): self.project_config.get_experiment_from_key("211137").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "2", - user.get_user_attributes(), + user, mock_decision_service_logging, ), mock.call( @@ -1232,7 +1232,7 @@ def test_get_variation_for_rollout__returns_none_for_user_not_in_rollout(self): self.project_config.get_experiment_from_key("211147").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "Everyone Else", - user.get_user_attributes(), + user, mock_decision_service_logging, ), ], @@ -1370,7 +1370,7 @@ def test_get_variation_for_feature__returns_variation_if_user_not_in_experiment_ self.project_config.get_experiment_from_key("group_exp_2").get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, "group_exp_2", - {}, + user, mock_decision_service_logging, ) @@ -1379,7 +1379,7 @@ def test_get_variation_for_feature__returns_variation_if_user_not_in_experiment_ self.project_config.get_experiment_from_key("211127").get_audience_conditions_or_ids(), enums.RolloutRuleAudienceEvaluationLogs, "1", - user.get_user_attributes(), + user, mock_decision_service_logging, ) diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index fae2992c..d356b3d7 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -1130,7 +1130,7 @@ def test_activate__with_attributes__no_audience_match(self): expected_experiment.get_audience_conditions_or_ids(), enums.ExperimentAudienceEvaluationLogs, 'test_experiment', - {'test_attribute': 'test_value'}, + mock.ANY, self.optimizely.logger, ) diff --git a/tests/test_user_context.py b/tests/test_user_context.py index 25d58bc2..f61c5420 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -1784,12 +1784,14 @@ def test_forced_decision_return_status(self): status = user_context.remove_all_forced_decisions() self.assertTrue(status) - def 
test_forced_decision_clone_return_valid_forced_decision(self): + def test_user_context__clone_return_valid(self): """ - Should return valid forced decision on cloning. + Should return valid objects. """ opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) user_context = opt_obj.create_user_context("test_user", {}) + qualified_segments = ['seg1', 'seg2'] + user_context.set_qualified_segments(qualified_segments) context_with_flag = OptimizelyUserContext.OptimizelyDecisionContext('f1', None) decision_for_flag = OptimizelyUserContext.OptimizelyForcedDecision('v1') @@ -1806,6 +1808,11 @@ def test_forced_decision_clone_return_valid_forced_decision(self): self.assertEqual(user_context_2.user_id, 'test_user') self.assertEqual(user_context_2.get_user_attributes(), {}) self.assertIsNotNone(user_context_2.forced_decisions_map) + self.assertIsNot(user_context.forced_decisions_map, user_context_2.forced_decisions_map) + + self.assertTrue(user_context_2.get_qualified_segments()) + self.assertEqual(user_context_2.get_qualified_segments(), qualified_segments) + self.assertIsNot(user_context.get_qualified_segments(), user_context_2.get_qualified_segments()) self.assertEqual(user_context_2.get_forced_decision(context_with_flag).variation_key, 'v1') self.assertEqual(user_context_2.get_forced_decision(context_with_rule).variation_key, 'v2') @@ -1915,3 +1922,56 @@ def increment(self, *args): self.assertEqual(200, remove_forced_decision_counter.call_count) self.assertEqual(100, remove_all_forced_decisions_counter.call_count) self.assertEqual(100, clone_counter.call_count) + + def test_decide_with_qualified_segments__segment_hit_in_ab_test(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.set_qualified_segments(["odp-segment-1", "odp-segment-none"]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, 
"variation-a") + + def test_decide_with_qualified_segments__other_audience_hit_in_ab_test(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id', {"age": 30}) + user.set_qualified_segments(["odp-segment-none"]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "variation-a") + + def test_decide_with_qualified_segments__segment_hit_in_rollout(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.set_qualified_segments(["odp-segment-2"]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-on") + + def test_decide_with_qualified_segments__segment_miss_in_rollout(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.qualified_segments = ["odp-segment-none"] + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-off") + + def test_decide_with_qualified_segments__empty_segments(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + user.set_qualified_segments([]) + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-off") + + def test_decide_with_qualified_segments__default(self): + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments)) + user = client.create_user_context('user-id') + + decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) + + self.assertEqual(decision.variation_key, "rollout-variation-off") From 
9912671ae20becce75fa3309a425cb36ec5ba554 Mon Sep 17 00:00:00 2001 From: Zeeshan Ashraf <35262377+zashraf1985@users.noreply.github.com> Date: Fri, 22 Jul 2022 16:07:42 -0700 Subject: [PATCH 14/68] chore: Check Jira ticket number in PR description (#394) ## Summary Added a check to verify PR description contains a Jira ticket number. ## Ticket: [OASIS-8321](https://optimizely.atlassian.net/browse/OASIS-8321) --- .github/workflows/ticket_reference_check.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 .github/workflows/ticket_reference_check.yml diff --git a/.github/workflows/ticket_reference_check.yml b/.github/workflows/ticket_reference_check.yml new file mode 100644 index 00000000..d2829e0c --- /dev/null +++ b/.github/workflows/ticket_reference_check.yml @@ -0,0 +1,16 @@ +name: Jira ticket reference check + +on: + pull_request: + types: [opened, edited, reopened, synchronize] + +jobs: + + jira_ticket_reference_check: + runs-on: ubuntu-latest + + steps: + - name: Check for Jira ticket reference + uses: optimizely/github-action-ticket-reference-checker-public@master + with: + bodyRegex: 'OASIS-(?\d+)' From aee87a569370f85039a0b51bde5ec52b18a69960 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Fri, 29 Jul 2022 11:47:17 -0400 Subject: [PATCH 15/68] feat: add lru cache (#395) * add lru cache --- optimizely/odp/__init__.py | 12 ++++ optimizely/odp/lru_cache.py | 120 ++++++++++++++++++++++++++++++++ tests/test_lru_cache.py | 135 ++++++++++++++++++++++++++++++++++++ 3 files changed, 267 insertions(+) create mode 100644 optimizely/odp/__init__.py create mode 100644 optimizely/odp/lru_cache.py create mode 100644 tests/test_lru_cache.py diff --git a/optimizely/odp/__init__.py b/optimizely/odp/__init__.py new file mode 100644 index 00000000..cd898c0e --- /dev/null +++ b/optimizely/odp/__init__.py @@ -0,0 +1,12 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/optimizely/odp/lru_cache.py b/optimizely/odp/lru_cache.py new file mode 100644 index 00000000..e7fc32af --- /dev/null +++ b/optimizely/odp/lru_cache.py @@ -0,0 +1,120 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations +from dataclasses import dataclass, field +import threading +from time import time +from collections import OrderedDict +from typing import Optional, Generic, TypeVar, Hashable +from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Protocol +else: + from typing import Protocol # type: ignore + +# generic type definitions for LRUCache parameters +K = TypeVar('K', bound=Hashable, contravariant=True) +V = TypeVar('V') + + +class LRUCache(Generic[K, V]): + """Least Recently Used cache that invalidates entries older than the timeout.""" + + def __init__(self, capacity: int, timeout_in_secs: int): + self.lock = threading.Lock() + self.map: OrderedDict[K, CacheElement[V]] = OrderedDict() + self.capacity = capacity + self.timeout = timeout_in_secs + + def lookup(self, key: K) -> Optional[V]: + """Return the non-stale value associated with the provided key and move the + element to the end of the cache. If the selected value is stale, remove it from + the cache and clear the entire cache if stale. + """ + if self.capacity <= 0: + return None + + with self.lock: + if key not in self.map: + return None + + self.map.move_to_end(key) + element = self.map[key] + + if element._is_stale(self.timeout): + del self.map[key] + return None + + return element.value + + def save(self, key: K, value: V) -> None: + """Insert and/or move the provided key/value pair to the most recent end of the cache. + If the cache grows beyond the cache capacity, the least recently used element will be + removed. 
+ """ + if self.capacity <= 0: + return + + with self.lock: + if key in self.map: + self.map.move_to_end(key) + + self.map[key] = CacheElement(value) + + if len(self.map) > self.capacity: + self.map.popitem(last=False) + + def reset(self) -> None: + """ Clear the cache.""" + if self.capacity <= 0: + return + with self.lock: + self.map.clear() + + def peek(self, key: K) -> Optional[V]: + """Returns the value associated with the provided key without updating the cache.""" + if self.capacity <= 0: + return None + with self.lock: + element = self.map.get(key) + return element.value if element is not None else None + + +@dataclass +class CacheElement(Generic[V]): + """Individual element for the LRUCache.""" + value: V + timestamp: float = field(default_factory=time) + + def _is_stale(self, timeout: float) -> bool: + """Returns True if the provided timeout has passed since the element's timestamp.""" + if timeout <= 0: + return False + return time() - self.timestamp >= timeout + + +class OptimizelySegmentsCache(Protocol): + """Protocol for implementing custom cache.""" + def reset(self) -> None: + """ Clear the cache.""" + ... + + def lookup(self, key: str) -> Optional[list[str]]: + """Return the value associated with the provided key.""" + ... + + def save(self, key: str, value: list[str]) -> None: + """Save the key/value pair in the cache.""" + ... diff --git a/tests/test_lru_cache.py b/tests/test_lru_cache.py new file mode 100644 index 00000000..acaf07cc --- /dev/null +++ b/tests/test_lru_cache.py @@ -0,0 +1,135 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +import time +from unittest import TestCase +from optimizely.odp.lru_cache import LRUCache, OptimizelySegmentsCache + + +class LRUCacheTest(TestCase): + def test_min_config(self): + cache = LRUCache(1000, 2000) + self.assertEqual(1000, cache.capacity) + self.assertEqual(2000, cache.timeout) + + cache = LRUCache(0, 0) + self.assertEqual(0, cache.capacity) + self.assertEqual(0, cache.timeout) + + def test_save_and_lookup(self): + max_size = 2 + cache = LRUCache(max_size, 1000) + + self.assertIsNone(cache.peek(1)) + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + cache.save(3, 300) # [2, 3] + self.assertIsNone(cache.peek(1)) + self.assertEqual(200, cache.peek(2)) + self.assertEqual(300, cache.peek(3)) + + cache.save(2, 201) # [3, 2] + cache.save(1, 101) # [2, 1] + self.assertEqual(101, cache.peek(1)) + self.assertEqual(201, cache.peek(2)) + self.assertIsNone(cache.peek(3)) + + self.assertIsNone(cache.lookup(3)) # [2, 1] + self.assertEqual(201, cache.lookup(2)) # [1, 2] + cache.save(3, 302) # [2, 3] + self.assertIsNone(cache.peek(1)) + self.assertEqual(201, cache.peek(2)) + self.assertEqual(302, cache.peek(3)) + + self.assertEqual(302, cache.lookup(3)) # [2, 3] + cache.save(1, 103) # [3, 1] + self.assertEqual(103, cache.peek(1)) + self.assertIsNone(cache.peek(2)) + self.assertEqual(302, cache.peek(3)) + + self.assertEqual(len(cache.map), max_size) + self.assertEqual(len(cache.map), cache.capacity) + + def test_size_zero(self): + cache = LRUCache(0, 1000) + + self.assertIsNone(cache.lookup(1)) + cache.save(1, 100) # 
[1] + self.assertIsNone(cache.lookup(1)) + + def test_size_less_than_zero(self): + cache = LRUCache(-2, 1000) + + self.assertIsNone(cache.lookup(1)) + cache.save(1, 100) # [1] + self.assertIsNone(cache.lookup(1)) + + def test_timeout(self): + max_timeout = .5 + + cache = LRUCache(1000, max_timeout) + + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + cache.save(3, 300) # [1, 2, 3] + time.sleep(1.1) # wait to expire + cache.save(4, 400) # [1, 2, 3, 4] + cache.save(1, 101) # [2, 3, 4, 1] + + self.assertEqual(101, cache.lookup(1)) # [4, 1] + self.assertIsNone(cache.lookup(2)) + self.assertIsNone(cache.lookup(3)) + self.assertEqual(400, cache.lookup(4)) + + def test_timeout_zero(self): + max_timeout = 0 + cache = LRUCache(1000, max_timeout) + + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + time.sleep(1) # wait to expire + + self.assertEqual(100, cache.lookup(1), "should not expire when timeout is 0") + self.assertEqual(200, cache.lookup(2)) + + def test_timeout_less_than_zero(self): + max_timeout = -2 + cache = LRUCache(1000, max_timeout) + + cache.save(1, 100) # [1] + cache.save(2, 200) # [1, 2] + time.sleep(1) # wait to expire + + self.assertEqual(100, cache.lookup(1), "should not expire when timeout is less than 0") + self.assertEqual(200, cache.lookup(2)) + + def test_reset(self): + cache = LRUCache(1000, 600) + cache.save('wow', 'great') + cache.save('tow', 'freight') + + self.assertEqual(cache.lookup('wow'), 'great') + self.assertEqual(len(cache.map), 2) + + cache.reset() + + self.assertEqual(cache.lookup('wow'), None) + self.assertEqual(len(cache.map), 0) + + cache.save('cow', 'crate') + self.assertEqual(cache.lookup('cow'), 'crate') + + # type checker test + # confirm that LRUCache matches OptimizelySegmentsCache protocol + _: OptimizelySegmentsCache = LRUCache(0, 0) From 893d173a93fe788ce6ce4fda9eeac53213ea2055 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Wed, 3 Aug 2022 15:51:42 -0700 Subject: [PATCH 16/68] (feat) add zaius graphql 
api manager w tests (#396) * (feat) add zaius graphql api manager w tests * fix linting error in py 3.9 line too long * remove refactor extract_component(), use simpler dict query * address PR comments, fix excepton handling, add tests * optimized tests, exceptions and enums * refactor fake_server_response function * add fake_srver_response to 400 error test --- optimizely/event_dispatcher.py | 25 +- optimizely/helpers/enums.py | 19 + optimizely/odp/zaius_graphql_api_manager.py | 197 ++++++++ optimizely/optimizely.py | 3 +- tests/test_event_dispatcher.py | 7 +- tests/test_odp_zaius_graphql_api_manager.py | 468 ++++++++++++++++++++ 6 files changed, 703 insertions(+), 16 deletions(-) create mode 100644 optimizely/odp/zaius_graphql_api_manager.py create mode 100644 tests/test_odp_zaius_graphql_api_manager.py diff --git a/optimizely/event_dispatcher.py b/optimizely/event_dispatcher.py index e744cafd..e2ca54f0 100644 --- a/optimizely/event_dispatcher.py +++ b/optimizely/event_dispatcher.py @@ -13,30 +13,29 @@ import json import logging -import requests +from sys import version_info +import requests from requests import exceptions as request_exception -from sys import version_info -from .helpers import enums from . import event_builder +from .helpers.enums import HTTPVerbs, EventDispatchConfig if version_info < (3, 8): - from typing_extensions import Protocol, Final + from typing_extensions import Protocol else: - from typing import Protocol, Final # type: ignore - - -REQUEST_TIMEOUT: Final = 10 + from typing import Protocol # type: ignore class CustomEventDispatcher(Protocol): """Interface for a custom event dispatcher and required method `dispatch_event`. """ + def dispatch_event(self, event: event_builder.Event) -> None: ... class EventDispatcher: + @staticmethod def dispatch_event(event: event_builder.Event) -> None: """ Dispatch the event being represented by the Event object. 
@@ -45,11 +44,13 @@ def dispatch_event(event: event_builder.Event) -> None: event: Object holding information about the request to be dispatched to the Optimizely backend. """ try: - if event.http_verb == enums.HTTPVerbs.GET: - requests.get(event.url, params=event.params, timeout=REQUEST_TIMEOUT).raise_for_status() - elif event.http_verb == enums.HTTPVerbs.POST: + if event.http_verb == HTTPVerbs.GET: + requests.get(event.url, params=event.params, + timeout=EventDispatchConfig.REQUEST_TIMEOUT).raise_for_status() + elif event.http_verb == HTTPVerbs.POST: requests.post( - event.url, data=json.dumps(event.params), headers=event.headers, timeout=REQUEST_TIMEOUT, + event.url, data=json.dumps(event.params), headers=event.headers, + timeout=EventDispatchConfig.REQUEST_TIMEOUT, ).raise_for_status() except request_exception.RequestException as error: diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 65af4843..a82d6a98 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -120,6 +120,10 @@ class Errors: NONE_VARIABLE_KEY_PARAMETER: Final = '"None" is an invalid value for variable key.' UNSUPPORTED_DATAFILE_VERSION: Final = ( 'This version of the Python SDK does not support the given datafile version: "{}".') + INVALID_SEGMENT_IDENTIFIER = 'Audience segments fetch failed (invalid identifier).' + FETCH_SEGMENTS_FAILED = 'Audience segments fetch failed ({}).' + ODP_EVENT_FAILED = 'ODP event send failed (invalid url).' + ODP_NOT_ENABLED = 'ODP is not enabled. 
' class ForcedDecisionLogs: @@ -186,3 +190,18 @@ class NotificationTypes: class VersionType: IS_PRE_RELEASE: Final = '-' IS_BUILD: Final = '+' + + +class EventDispatchConfig: + """Event dispatching configs.""" + REQUEST_TIMEOUT: Final = 10 + + +class OdpRestApiConfig: + """ODP Rest API configs.""" + REQUEST_TIMEOUT: Final = 10 + + +class OdpGraphQLApiConfig: + """ODP GraphQL API configs.""" + REQUEST_TIMEOUT: Final = 10 diff --git a/optimizely/odp/zaius_graphql_api_manager.py b/optimizely/odp/zaius_graphql_api_manager.py new file mode 100644 index 00000000..ae6e7653 --- /dev/null +++ b/optimizely/odp/zaius_graphql_api_manager.py @@ -0,0 +1,197 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +import json +from typing import Optional + +import requests +from requests.exceptions import RequestException, ConnectionError, Timeout, JSONDecodeError + +from optimizely import logger as optimizely_logger +from optimizely.helpers.enums import Errors, OdpGraphQLApiConfig + +""" + ODP GraphQL API + - https://api.zaius.com/v3/graphql + - test ODP public API key = "W4WzcEs-ABgXorzY7h1LCQ" + + + [GraphQL Request] + + # fetch info with fs_user_id for ["has_email", "has_email_opted_in", "push_on_sale"] segments + curl -i -H 'Content-Type: application/json' -H 'x-api-key: W4WzcEs-ABgXorzY7h1LCQ' -X POST -d + '{"query":"query {customer(fs_user_id: \"tester-101\") {audiences(subset:[\"has_email\", + \"has_email_opted_in\", \"push_on_sale\"]) {edges {node {name state}}}}}"}' https://api.zaius.com/v3/graphql + # fetch info with vuid for ["has_email", "has_email_opted_in", "push_on_sale"] segments + curl -i -H 'Content-Type: application/json' -H 'x-api-key: W4WzcEs-ABgXorzY7h1LCQ' -X POST -d + '{"query":"query {customer(vuid: \"d66a9d81923d4d2f99d8f64338976322\") {audiences(subset:[\"has_email\", + \"has_email_opted_in\", \"push_on_sale\"]) {edges {node {name state}}}}}"}' https://api.zaius.com/v3/graphql + + query MyQuery { + customer(vuid: "d66a9d81923d4d2f99d8f64338976322") { + audiences(subset:["has_email", "has_email_opted_in", "push_on_sale"]) { + edges { + node { + name + state + } + } + } + } + } + + + [GraphQL Response] + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "has_email", + "state": "qualified", + } + }, + { + "node": { + "name": "has_email_opted_in", + "state": "qualified", + } + }, + ... 
+ ] + } + } + } + } + + [GraphQL Error Response] + { + "errors": [ + { + "message": "Exception while fetching data (/customer) : java.lang.RuntimeException: + could not resolve _fs_user_id = asdsdaddddd", + "locations": [ + { + "line": 2, + "column": 3 + } + ], + "path": [ + "customer" + ], + "extensions": { + "classification": "InvalidIdentifierException" + } + } + ], + "data": { + "customer": null + } + } +""" + + +class ZaiusGraphQLApiManager: + """Interface for manging the fetching of audience segments.""" + + def __init__(self, logger: Optional[optimizely_logger.Logger] = None): + self.logger = logger or optimizely_logger.NoOpLogger() + + def fetch_segments(self, api_key: str, api_host: str, user_key: str, + user_value: str, segments_to_check: list[str]) -> Optional[list[str]]: + """ + Fetch segments from ODP GraphQL API. + + Args: + api_key: public api key + api_host: domain url of the host + user_key: vuid or fs_user_id (client device id or fullstack id) + user_value: vaue of user_key + segments_to_check: lit of segments to check + + Returns: + Audience segments from GraphQL. + """ + url = f'{api_host}/v3/graphql' + request_headers = {'content-type': 'application/json', + 'x-api-key': str(api_key)} + + segments_filter = self.make_subset_filter(segments_to_check) + payload_dict = { + 'query': 'query {customer(' + str(user_key) + ': "' + str(user_value) + '") ' + '{audiences' + segments_filter + ' {edges {node {name state}}}}}' + } + + try: + response = requests.post(url=url, + headers=request_headers, + data=json.dumps(payload_dict), + timeout=OdpGraphQLApiConfig.REQUEST_TIMEOUT) + + response.raise_for_status() + response_dict = response.json() + + # There is no status code with network issues such as ConnectionError or Timeouts + # (i.e. no internet, server can't be reached). 
+ except (ConnectionError, Timeout) as err: + self.logger.debug(f'GraphQL download failed: {err}') + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('network error')) + return None + except JSONDecodeError: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('JSON decode error')) + return None + except RequestException as err: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(err)) + return None + + if response_dict and 'errors' in response_dict: + try: + error_class = response_dict['errors'][0]['extensions']['classification'] + except (KeyError, IndexError): + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('decode error')) + return None + + if error_class == 'InvalidIdentifierException': + self.logger.error(Errors.INVALID_SEGMENT_IDENTIFIER) + return None + else: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(error_class)) + return None + else: + try: + audiences = response_dict['data']['customer']['audiences']['edges'] + segments = [edge['node']['name'] for edge in audiences if edge['node']['state'] == 'qualified'] + return segments + except KeyError: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('decode error')) + return None + + @staticmethod + def make_subset_filter(segments: list[str]) -> str: + """ + segments = []: (fetch none) + --> subsetFilter = "(subset:[])" + segments = ["a"]: (fetch one segment) + --> subsetFilter = '(subset:["a"])' + + Purposely using .join() method to deal with special cases of + any words with apostrophes (i.e. don't). .join() method enquotes + correctly without conflicting with the apostrophe. 
+ """ + if segments == []: + return '(subset:[])' + return '(subset:["' + '", "'.join(segments) + '"]' + ')' diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 86e54aa0..7edbe6e3 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -255,7 +255,8 @@ def _get_feature_variable_for_type( self, project_config: project_config.ProjectConfig, feature_key: str, variable_key: str, variable_type: Optional[str], user_id: str, attributes: Optional[UserAttributes] ) -> Any: - """ Helper method to determine value for a certain variable attached to a feature flag based on type of variable. + """ Helper method to determine value for a certain variable attached to a feature flag based on + type of variable. Args: project_config: Instance of ProjectConfig. diff --git a/tests/test_event_dispatcher.py b/tests/test_event_dispatcher.py index aa6ddc32..7e075f47 100644 --- a/tests/test_event_dispatcher.py +++ b/tests/test_event_dispatcher.py @@ -18,6 +18,7 @@ from optimizely import event_builder from optimizely import event_dispatcher +from optimizely.helpers.enums import EventDispatchConfig class EventDispatcherTest(unittest.TestCase): @@ -31,7 +32,7 @@ def test_dispatch_event__get_request(self): with mock.patch('requests.get') as mock_request_get: event_dispatcher.EventDispatcher.dispatch_event(event) - mock_request_get.assert_called_once_with(url, params=params, timeout=event_dispatcher.REQUEST_TIMEOUT) + mock_request_get.assert_called_once_with(url, params=params, timeout=EventDispatchConfig.REQUEST_TIMEOUT) def test_dispatch_event__post_request(self): """ Test that dispatch event fires off requests call with provided URL, params, HTTP verb and headers. 
""" @@ -52,7 +53,7 @@ def test_dispatch_event__post_request(self): url, data=json.dumps(params), headers={'Content-Type': 'application/json'}, - timeout=event_dispatcher.REQUEST_TIMEOUT, + timeout=EventDispatchConfig.REQUEST_TIMEOUT, ) def test_dispatch_event__handle_request_exception(self): @@ -76,6 +77,6 @@ def test_dispatch_event__handle_request_exception(self): url, data=json.dumps(params), headers={'Content-Type': 'application/json'}, - timeout=event_dispatcher.REQUEST_TIMEOUT, + timeout=EventDispatchConfig.REQUEST_TIMEOUT, ) mock_log_error.assert_called_once_with('Dispatch event failed. Error: Failed Request') diff --git a/tests/test_odp_zaius_graphql_api_manager.py b/tests/test_odp_zaius_graphql_api_manager.py new file mode 100644 index 00000000..3c8ec367 --- /dev/null +++ b/tests/test_odp_zaius_graphql_api_manager.py @@ -0,0 +1,468 @@ +import json +from unittest import mock + +from requests import Response +from requests import exceptions as request_exception +from optimizely.helpers.enums import OdpGraphQLApiConfig + +from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager +from . 
import base + + +class ZaiusGraphQLApiManagerTest(base.BaseTest): + user_key = "vuid" + user_value = "test-user-value" + api_key = "test-api-key" + api_host = "test-host" + + def test_fetch_qualified_segments__valid_request(self): + with mock.patch('requests.post') as mock_request_post: + api = ZaiusGraphQLApiManager() + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + test_payload = { + 'query': 'query {customer(' + self.user_key + ': "' + self.user_value + '") ' + '{audiences(subset:["a", "b", "c"]) {edges {node {name state}}}}}' + } + request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} + mock_request_post.assert_called_once_with(url=self.api_host + "/v3/graphql", + headers=request_headers, + data=json.dumps(test_payload), + timeout=OdpGraphQLApiConfig.REQUEST_TIMEOUT) + + def test_fetch_qualified_segments__success(self): + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.good_response_data) + + api = ZaiusGraphQLApiManager() + response = api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy1', 'dummy2', 'dummy3']) + + self.assertEqual(response, ['a', 'b']) + + def test_fetch_qualified_segments__node_missing(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.node_missing_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy1', 'dummy2', 'dummy3']) + + mock_request_post.assert_called_once() + 
mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__mixed_missing_keys(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, + content=self.mixed_missing_keys_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy1', 'dummy2', 'dummy3']) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__success_with_empty_segments(self): + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.good_empty_response_data) + + api = ZaiusGraphQLApiManager() + response = api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=['dummy']) + + self.assertEqual(response, []) + + def test_fetch_qualified_segments__invalid_identifier(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, + content=self.invalid_identifier_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (invalid identifier).') + + def test_fetch_qualified_segments__other_exception(self): + with mock.patch('requests.post') 
as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.other_exception_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (TestExceptionClass).') + + def test_fetch_qualified_segments__bad_response(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.bad_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__name_invalid(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = \ + self.fake_server_response(status_code=200, content=self.name_invalid_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (JSON decode error).') + + def test_fetch_qualified_segments__invalid_key(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + 
mock_request_post.return_value.json.return_value = json.loads(self.invalid_edges_key_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__invalid_key_in_error_body(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value.json.return_value = json.loads(self.invalid_key_for_error_response_data) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (decode error).') + + def test_fetch_qualified_segments__network_error(self): + with mock.patch('requests.post', + side_effect=request_exception.ConnectionError('Connection error')) as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=[]) + + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('Audience segments fetch failed (network error).') + mock_logger.debug.assert_called_once_with('GraphQL download failed: Connection error') + + def test_fetch_qualified_segments__400(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=403, url=self.api_host) + + api = 
ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + # make sure that fetch_segments() is called (once). + # could use assert_called_once_with() but it's not needed, + # we already it assert_called_once_with() in test_fetch_qualified_segments__valid_request() + mock_request_post.assert_called_once() + # assert 403 error log + mock_logger.error.assert_called_once_with('Audience segments fetch failed ' + f'(403 Client Error: None for url: {self.api_host}).') + + def test_fetch_qualified_segments__500(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=500, url=self.api_host) + + api = ZaiusGraphQLApiManager(logger=mock_logger) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + # make sure that fetch_segments() is called (once). 
+ mock_request_post.assert_called_once() + # assert 500 error log + mock_logger.error.assert_called_once_with('Audience segments fetch failed ' + f'(500 Server Error: None for url: {self.api_host}).') + + def test_make_subset_filter(self): + api = ZaiusGraphQLApiManager() + + self.assertEqual("(subset:[])", api.make_subset_filter([])) + self.assertEqual("(subset:[\"a\"])", api.make_subset_filter(["a"])) + self.assertEqual("(subset:[\"a\", \"b\", \"c\"])", api.make_subset_filter(['a', 'b', 'c'])) + self.assertEqual("(subset:[\"a\", \"b\", \"c\"])", api.make_subset_filter(["a", "b", "c"])) + self.assertEqual("(subset:[\"a\", \"b\", \"don't\"])", api.make_subset_filter(["a", "b", "don't"])) + + # fake server response function and test json responses + + @staticmethod + def fake_server_response(status_code=None, content=None, url=None): + """Mock the server response.""" + response = Response() + response.status_code = status_code + if content: + response._content = content.encode('utf-8') + response.url = url + return response + + good_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualifed sample 1" + } + }, + { + "node": { + "name": "b", + "state": "qualified", + "description": "qualifed sample 2" + } + }, + { + "node": { + "name": "c", + "state": "not_qualified", + "description": "not-qualified sample" + } + } + ] + } + } + } + } + """ + + good_empty_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [] + } + } + } + } + """ + + invalid_identifier_response_data = """ + { + "errors": [ + { + "message": "Exception while fetching data (/customer) :\ + java.lang.RuntimeException: could not resolve _fs_user_id = asdsdaddddd", + "locations": [ + { + "line": 2, + "column": 3 + } + ], + "path": [ + "customer" + ], + "extensions": { + "classification": "InvalidIdentifierException" + } + } + ], + "data": { + "customer": null + } + } + """ + 
+ other_exception_response_data = """ + { + "errors": [ + { + "message": "Exception while fetching data (/customer) :\ + java.lang.RuntimeException: could not resolve _fs_user_id = asdsdaddddd", + "extensions": { + "classification": "TestExceptionClass" + } + } + ], + "data": { + "customer": null + } + } + """ + + bad_response_data = """ + { + "data": {} + } + """ + + invalid_edges_key_response_data = """ + { + "data": { + "customer": { + "audiences": { + "invalid_test_key": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualifed sample 1" + } + } + ] + } + } + } + } + """ + + invalid_key_for_error_response_data = """ + { + "errors": [ + { + "message": "Exception while fetching data (/customer) :\ + java.lang.RuntimeException: could not resolve _fs_user_id = asdsdaddddd", + "locations": [ + { + "line": 2, + "column": 3 + } + ], + "path": [ + "customer" + ], + "invalid_test_key": { + "classification": "InvalidIdentifierException" + } + } + ], + "data": { + "customer": null + } + } + """ + name_invalid_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a":::invalid-part-here:::, + "state": "qualified", + "description": "qualifed sample 1" + } + } + ] + } + } + } + } + """ + + node_missing_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + {} + ] + } + } + } + } + """ + + mixed_missing_keys_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "state": "qualified" + } + }, + { + "node": { + "name": "a" + } + }, + { + "other-name": { + "name": "a", + "state": "qualified" + } + } + ] + } + } + } + } + """ From 2015e5549aa9df9b30c83dbd6b14b6168dcbf5a3 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Thu, 4 Aug 2022 10:55:12 -0700 Subject: [PATCH 17/68] Add license notice to graphgl test file (#397) --- tests/test_odp_zaius_graphql_api_manager.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff 
--git a/tests/test_odp_zaius_graphql_api_manager.py b/tests/test_odp_zaius_graphql_api_manager.py index 3c8ec367..cb728962 100644 --- a/tests/test_odp_zaius_graphql_api_manager.py +++ b/tests/test_odp_zaius_graphql_api_manager.py @@ -1,3 +1,16 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import json from unittest import mock From 998dbc772696059b372a6619c6f32b738cffd37d Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Fri, 12 Aug 2022 09:00:11 -0700 Subject: [PATCH 18/68] feat: add odp rest api manager (#398) * feat: add odp rest api manager * fix: fix linting, str type * fix white space * addressed PR comments * moved helper test funciton to base.py * fix graphql tests becasue helper method moved to base.py * remove unnecessary url parsing exceptions * remove print statement * fixed type hints --- optimizely/helpers/enums.py | 2 +- optimizely/odp/odp_event.py | 27 ++++ optimizely/odp/zaius_rest_api_manager.py | 94 +++++++++++++ tests/base.py | 18 +++ tests/test_odp_zaius_graphql_api_manager.py | 21 +-- tests/test_odp_zaius_rest_api_manager.py | 139 ++++++++++++++++++++ 6 files changed, 285 insertions(+), 16 deletions(-) create mode 100644 optimizely/odp/odp_event.py create mode 100644 optimizely/odp/zaius_rest_api_manager.py create mode 100644 tests/test_odp_zaius_rest_api_manager.py diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index a82d6a98..ab63d1e3 100644 --- a/optimizely/helpers/enums.py +++ 
b/optimizely/helpers/enums.py @@ -122,7 +122,7 @@ class Errors: 'This version of the Python SDK does not support the given datafile version: "{}".') INVALID_SEGMENT_IDENTIFIER = 'Audience segments fetch failed (invalid identifier).' FETCH_SEGMENTS_FAILED = 'Audience segments fetch failed ({}).' - ODP_EVENT_FAILED = 'ODP event send failed (invalid url).' + ODP_EVENT_FAILED = 'ODP event send failed ({}).' ODP_NOT_ENABLED = 'ODP is not enabled. ' diff --git a/optimizely/odp/odp_event.py b/optimizely/odp/odp_event.py new file mode 100644 index 00000000..23015db5 --- /dev/null +++ b/optimizely/odp/odp_event.py @@ -0,0 +1,27 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Any, Dict + + +class OdpEvent: + """ Representation of an odp event which can be sent to the Optimizely odp platform. """ + + def __init__(self, type: str, action: str, + identifiers: Dict[str, str], data: Dict[str, Any]) -> None: + self.type = type, + self.action = action, + self.identifiers = identifiers, + self.data = data diff --git a/optimizely/odp/zaius_rest_api_manager.py b/optimizely/odp/zaius_rest_api_manager.py new file mode 100644 index 00000000..9cbe2638 --- /dev/null +++ b/optimizely/odp/zaius_rest_api_manager.py @@ -0,0 +1,94 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +import json +from typing import Optional + +import requests +from requests.exceptions import RequestException, ConnectionError, Timeout + +from optimizely import logger as optimizely_logger +from optimizely.helpers.enums import Errors, OdpRestApiConfig +from optimizely.odp.odp_event import OdpEvent + +""" + ODP REST Events API + - https://api.zaius.com/v3/events + - test ODP public API key = "W4WzcEs-ABgXorzY7h1LCQ" + + [Event Request] + curl -i -H 'Content-Type: application/json' -H 'x-api-key: W4WzcEs-ABgXorzY7h1LCQ' -X POST -d + '{"type":"fullstack","action":"identified","identifiers":{"vuid": "123","fs_user_id": "abc"}, + "data":{"idempotence_id":"xyz","source":"swift-sdk"}}' https://api.zaius.com/v3/events + [Event Response] + {"title":"Accepted","status":202,"timestamp":"2022-06-30T20:59:52.046Z"} +""" + + +class ZaiusRestApiManager: + """Provides an internal service for ODP event REST api access.""" + + def __init__(self, logger: Optional[optimizely_logger.Logger] = None): + self.logger = logger or optimizely_logger.NoOpLogger() + + def send_odp_events(self, api_key: str, api_host: str, events: list[OdpEvent]) -> bool: + """ + Dispatch the event being represented by the OdpEvent object. + + Args: + api_key: public api key + api_host: domain url of the host + events: list of odp events to be sent to optimizely's odp platform. 
+ + Returns: + retry is True - if network or server error (5xx), otherwise False + """ + should_retry = False + url = f'{api_host}/v3/events' + request_headers = {'content-type': 'application/json', 'x-api-key': api_key} + + try: + payload_dict = json.dumps(events) + except TypeError as err: + self.logger.error(Errors.ODP_EVENT_FAILED.format(err)) + return should_retry + + try: + response = requests.post(url=url, + headers=request_headers, + data=payload_dict, + timeout=OdpRestApiConfig.REQUEST_TIMEOUT) + + response.raise_for_status() + + except (ConnectionError, Timeout): + self.logger.error(Errors.ODP_EVENT_FAILED.format('network error')) + # retry on network errors + should_retry = True + except RequestException as err: + if err.response is not None: + if 400 <= err.response.status_code < 500: + # log 4xx + self.logger.error(Errors.ODP_EVENT_FAILED.format(err.response.text)) + else: + # log 5xx + self.logger.error(Errors.ODP_EVENT_FAILED.format(err)) + # retry on 500 exceptions + should_retry = True + else: + # log exceptions without response body (i.e. 
invalid url) + self.logger.error(Errors.ODP_EVENT_FAILED.format(err)) + + return should_retry diff --git a/tests/base.py b/tests/base.py index e793d1c3..65ae1fe1 100644 --- a/tests/base.py +++ b/tests/base.py @@ -13,6 +13,9 @@ import json import unittest +from typing import Optional + +from requests import Response from optimizely import optimizely @@ -28,6 +31,21 @@ def assertStrictTrue(self, to_assert): def assertStrictFalse(self, to_assert): self.assertIs(to_assert, False) + def fake_server_response(self, status_code: Optional[int] = None, + content: Optional[str] = None, + url: Optional[str] = None) -> Response: + """Mock the server response.""" + response = Response() + + if status_code: + response.status_code = status_code + if content: + response._content = content.encode('utf-8') + if url: + response.url = url + + return response + def setUp(self, config_dict='config_dict'): self.config_dict = { 'revision': '42', diff --git a/tests/test_odp_zaius_graphql_api_manager.py b/tests/test_odp_zaius_graphql_api_manager.py index cb728962..5ac85b2a 100644 --- a/tests/test_odp_zaius_graphql_api_manager.py +++ b/tests/test_odp_zaius_graphql_api_manager.py @@ -14,10 +14,9 @@ import json from unittest import mock -from requests import Response from requests import exceptions as request_exception -from optimizely.helpers.enums import OdpGraphQLApiConfig +from optimizely.helpers.enums import OdpGraphQLApiConfig from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager from . 
import base @@ -176,7 +175,8 @@ def test_fetch_qualified_segments__name_invalid(self): def test_fetch_qualified_segments__invalid_key(self): with mock.patch('requests.post') as mock_request_post, \ mock.patch('optimizely.logger') as mock_logger: - mock_request_post.return_value.json.return_value = json.loads(self.invalid_edges_key_response_data) + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.invalid_edges_key_response_data) api = ZaiusGraphQLApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, @@ -191,7 +191,8 @@ def test_fetch_qualified_segments__invalid_key(self): def test_fetch_qualified_segments__invalid_key_in_error_body(self): with mock.patch('requests.post') as mock_request_post, \ mock.patch('optimizely.logger') as mock_logger: - mock_request_post.return_value.json.return_value = json.loads(self.invalid_key_for_error_response_data) + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.invalid_key_for_error_response_data) api = ZaiusGraphQLApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, @@ -265,17 +266,7 @@ def test_make_subset_filter(self): self.assertEqual("(subset:[\"a\", \"b\", \"c\"])", api.make_subset_filter(["a", "b", "c"])) self.assertEqual("(subset:[\"a\", \"b\", \"don't\"])", api.make_subset_filter(["a", "b", "don't"])) - # fake server response function and test json responses - - @staticmethod - def fake_server_response(status_code=None, content=None, url=None): - """Mock the server response.""" - response = Response() - response.status_code = status_code - if content: - response._content = content.encode('utf-8') - response.url = url - return response + # test json responses good_response_data = """ { diff --git a/tests/test_odp_zaius_rest_api_manager.py b/tests/test_odp_zaius_rest_api_manager.py new file mode 100644 index 00000000..e7327d6f --- /dev/null +++ b/tests/test_odp_zaius_rest_api_manager.py @@ -0,0 +1,139 @@ 
+# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +from unittest import mock + +from requests import exceptions as request_exception + +from optimizely.helpers.enums import OdpRestApiConfig +from optimizely.odp.zaius_rest_api_manager import ZaiusRestApiManager +from . import base + + +class ZaiusRestApiManagerTest(base.BaseTest): + user_key = "vuid" + user_value = "test-user-value" + api_key = "test-api-key" + api_host = "test-host" + + events = [ + {"type": "t1", "action": "a1", "identifiers": {"id-key-1": "id-value-1"}, "data": {"key-1": "value1"}}, + {"type": "t2", "action": "a2", "identifiers": {"id-key-2": "id-value-2"}, "data": {"key-2": "value2"}}, + ] + + def test_send_odp_events__valid_request(self): + with mock.patch('requests.post') as mock_request_post: + api = ZaiusRestApiManager() + api.send_odp_events(api_key=self.api_key, + api_host=self.api_host, + events=self.events) + + request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} + mock_request_post.assert_called_once_with(url=self.api_host + "/v3/events", + headers=request_headers, + data=json.dumps(self.events), + timeout=OdpRestApiConfig.REQUEST_TIMEOUT) + + def test_send_odp_ovents_success(self): + with mock.patch('requests.post') as mock_request_post: + # no need to mock url and content because we're not returning the response + mock_request_post.return_value = self.fake_server_response(status_code=200) + + api = ZaiusRestApiManager() + 
should_retry = api.send_odp_events(api_key=self.api_key, + api_host=self.api_host, + events=self.events) # content of events doesn't matter for the test + + self.assertFalse(should_retry) + + def test_send_odp_events_invalid_json_no_retry(self): + events = {1, 2, 3} # using a set to trigger JSON-not-serializable error + + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + api = ZaiusRestApiManager(logger=mock_logger) + should_retry = api.send_odp_events(api_key=self.api_key, + api_host=self.api_host, + events=events) + + self.assertFalse(should_retry) + mock_request_post.assert_not_called() + mock_logger.error.assert_called_once_with( + 'ODP event send failed (Object of type set is not JSON serializable).') + + def test_send_odp_events_invalid_url_no_retry(self): + invalid_url = 'https://*api.zaius.com' + + with mock.patch('requests.post', + side_effect=request_exception.InvalidURL('Invalid URL')) as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + api = ZaiusRestApiManager(logger=mock_logger) + should_retry = api.send_odp_events(api_key=self.api_key, + api_host=invalid_url, + events=self.events) + + self.assertFalse(should_retry) + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('ODP event send failed (Invalid URL).') + + def test_send_odp_events_network_error_retry(self): + with mock.patch('requests.post', + side_effect=request_exception.ConnectionError('Connection error')) as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + api = ZaiusRestApiManager(logger=mock_logger) + should_retry = api.send_odp_events(api_key=self.api_key, + api_host=self.api_host, + events=self.events) + + self.assertTrue(should_retry) + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('ODP event send failed (network error).') + + def test_send_odp_events_400_no_retry(self): + with mock.patch('requests.post') as 
mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=400, + url=self.api_host, + content=self.failure_response_data) + + api = ZaiusRestApiManager(logger=mock_logger) + should_retry = api.send_odp_events(api_key=self.api_key, + api_host=self.api_host, + events=self.events) + + self.assertFalse(should_retry) + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('ODP event send failed ({"title":"Bad Request","status":400,' + '"timestamp":"2022-07-01T20:44:00.945Z","detail":{"invalids":' + '[{"event":0,"message":"missing \'type\' field"}]}}).') + + def test_send_odp_events_500_retry(self): + with mock.patch('requests.post') as mock_request_post, \ + mock.patch('optimizely.logger') as mock_logger: + mock_request_post.return_value = self.fake_server_response(status_code=500, url=self.api_host) + + api = ZaiusRestApiManager(logger=mock_logger) + should_retry = api.send_odp_events(api_key=self.api_key, + api_host=self.api_host, + events=self.events) + + self.assertTrue(should_retry) + mock_request_post.assert_called_once() + mock_logger.error.assert_called_once_with('ODP event send failed (500 Server Error: None for url: test-host).') + + # test json responses + success_response_data = '{"title":"Accepted","status":202,"timestamp":"2022-07-01T16:04:06.786Z"}' + + failure_response_data = '{"title":"Bad Request","status":400,"timestamp":"2022-07-01T20:44:00.945Z",' \ + '"detail":{"invalids":[{"event":0,"message":"missing \'type\' field"}]}}' From 9a010566e221c42a9e94d4dcec9ae608b137321f Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Mon, 15 Aug 2022 10:49:12 -0700 Subject: [PATCH 19/68] add json encoding validation (#399) --- optimizely/odp/zaius_graphql_api_manager.py | 12 +++++++++--- tests/test_odp_zaius_graphql_api_manager.py | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/optimizely/odp/zaius_graphql_api_manager.py 
b/optimizely/odp/zaius_graphql_api_manager.py index ae6e7653..4f2ae38a 100644 --- a/optimizely/odp/zaius_graphql_api_manager.py +++ b/optimizely/odp/zaius_graphql_api_manager.py @@ -131,15 +131,21 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, 'x-api-key': str(api_key)} segments_filter = self.make_subset_filter(segments_to_check) - payload_dict = { + query = { 'query': 'query {customer(' + str(user_key) + ': "' + str(user_value) + '") ' '{audiences' + segments_filter + ' {edges {node {name state}}}}}' } + try: + payload_dict = json.dumps(query) + except TypeError as err: + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(err)) + return None + try: response = requests.post(url=url, headers=request_headers, - data=json.dumps(payload_dict), + data=payload_dict, timeout=OdpGraphQLApiConfig.REQUEST_TIMEOUT) response.raise_for_status() @@ -166,7 +172,7 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, return None if error_class == 'InvalidIdentifierException': - self.logger.error(Errors.INVALID_SEGMENT_IDENTIFIER) + self.logger.warning(Errors.INVALID_SEGMENT_IDENTIFIER) return None else: self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(error_class)) diff --git a/tests/test_odp_zaius_graphql_api_manager.py b/tests/test_odp_zaius_graphql_api_manager.py index 5ac85b2a..e4ec76c4 100644 --- a/tests/test_odp_zaius_graphql_api_manager.py +++ b/tests/test_odp_zaius_graphql_api_manager.py @@ -122,7 +122,7 @@ def test_fetch_qualified_segments__invalid_identifier(self): segments_to_check=[]) mock_request_post.assert_called_once() - mock_logger.error.assert_called_once_with('Audience segments fetch failed (invalid identifier).') + mock_logger.warning.assert_called_once_with('Audience segments fetch failed (invalid identifier).') def test_fetch_qualified_segments__other_exception(self): with mock.patch('requests.post') as mock_request_post, \ From 81a5bfeb67f1f0b223de1570e8270f490443123c Mon Sep 17 00:00:00 2001 From: Andy Leap 
<104936100+andrewleap-optimizely@users.noreply.github.com> Date: Mon, 22 Aug 2022 13:42:41 -0400 Subject: [PATCH 20/68] feat: add odp config (#401) * add odp_config * fix odp event --- optimizely/odp/odp_config.py | 79 ++++++++++++++++++++++++++++++++++++ optimizely/odp/odp_event.py | 10 ++--- tests/test_odp_config.py | 41 +++++++++++++++++++ 3 files changed, 125 insertions(+), 5 deletions(-) create mode 100644 optimizely/odp/odp_config.py create mode 100644 tests/test_odp_config.py diff --git a/optimizely/odp/odp_config.py b/optimizely/odp/odp_config.py new file mode 100644 index 00000000..64809626 --- /dev/null +++ b/optimizely/odp/odp_config.py @@ -0,0 +1,79 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional +from threading import Lock + + +class OdpConfig: + """ + Contains configuration used for ODP integration. + + Args: + api_host: The host URL for the ODP audience segments API (optional). + api_key: The public API key for the ODP account from which the audience segments will be fetched (optional). + segments_to_check: A list of all ODP segments used in the current datafile + (associated with api_host/api_key). 
+ """ + def __init__( + self, + api_key: Optional[str] = None, + api_host: Optional[str] = None, + segments_to_check: Optional[list[str]] = None + ) -> None: + self._api_key = api_key + self._api_host = api_host + self._segments_to_check = segments_to_check or [] + self.lock = Lock() + + def update(self, api_key: Optional[str], api_host: Optional[str], segments_to_check: list[str]) -> bool: + """ + Override the ODP configuration. + + Args: + api_host: The host URL for the ODP audience segments API (optional). + api_key: The public API key for the ODP account from which the audience segments will be fetched (optional). + segments_to_check: A list of all ODP segments used in the current datafile + (associated with api_host/api_key). + + Returns: + True if the provided values were different than the existing values. + """ + updated = False + with self.lock: + if self._api_key != api_key or self._api_host != api_host or self._segments_to_check != segments_to_check: + self._api_key = api_key + self._api_host = api_host + self._segments_to_check = segments_to_check + updated = True + + return updated + + def get_api_host(self) -> Optional[str]: + with self.lock: + return self._api_host + + def get_api_key(self) -> Optional[str]: + with self.lock: + return self._api_key + + def get_segments_to_check(self) -> list[str]: + with self.lock: + return self._segments_to_check.copy() + + def odp_integrated(self) -> bool: + """Returns True if ODP is integrated.""" + with self.lock: + return self._api_key is not None and self._api_host is not None diff --git a/optimizely/odp/odp_event.py b/optimizely/odp/odp_event.py index 23015db5..ac3e5d93 100644 --- a/optimizely/odp/odp_event.py +++ b/optimizely/odp/odp_event.py @@ -13,15 +13,15 @@ from __future__ import annotations -from typing import Any, Dict +from typing import Any class OdpEvent: """ Representation of an odp event which can be sent to the Optimizely odp platform. 
""" def __init__(self, type: str, action: str, - identifiers: Dict[str, str], data: Dict[str, Any]) -> None: - self.type = type, - self.action = action, - self.identifiers = identifiers, + identifiers: dict[str, str], data: dict[str, Any]) -> None: + self.type = type + self.action = action + self.identifiers = identifiers self.data = data diff --git a/tests/test_odp_config.py b/tests/test_odp_config.py new file mode 100644 index 00000000..d72a7321 --- /dev/null +++ b/tests/test_odp_config.py @@ -0,0 +1,41 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations +from tests import base +from optimizely.odp.odp_config import OdpConfig + + +class OdpConfigTest(base.BaseTest): + api_host = 'test-host' + api_key = 'test-key' + segments_to_check = ['test-segment'] + + def test_init_config(self): + config = OdpConfig(self.api_key, self.api_host, self.segments_to_check) + + self.assertEqual(config.get_api_key(), self.api_key) + self.assertEqual(config.get_api_host(), self.api_host) + self.assertEqual(config.get_segments_to_check(), self.segments_to_check) + + def test_update_config(self): + config = OdpConfig() + updated = config.update(self.api_key, self.api_host, self.segments_to_check) + + self.assertStrictTrue(updated) + self.assertEqual(config.get_api_key(), self.api_key) + self.assertEqual(config.get_api_host(), self.api_host) + self.assertEqual(config.get_segments_to_check(), self.segments_to_check) + + updated = config.update(self.api_key, self.api_host, self.segments_to_check) + self.assertStrictFalse(updated) From 415a6663998cff9c7f49d66d80ac706a99c19d86 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Fri, 26 Aug 2022 12:28:52 -0700 Subject: [PATCH 21/68] feat: odp segment manager (#402) * feat: add odp_segment_manager * feat: add segment manager * fix pr comments * fix tests * refacored tests * fix PR comments * refactor logs in tests for cache miss/ignore * cleanup --- optimizely/odp/odp_segment_manager.py | 90 ++++++++++ optimizely/odp/optimizely_odp_option.py | 25 +++ tests/test_odp_segment_manager.py | 211 ++++++++++++++++++++++++ 3 files changed, 326 insertions(+) create mode 100644 optimizely/odp/odp_segment_manager.py create mode 100644 optimizely/odp/optimizely_odp_option.py create mode 100644 tests/test_odp_segment_manager.py diff --git a/optimizely/odp/odp_segment_manager.py b/optimizely/odp/odp_segment_manager.py new file mode 100644 index 00000000..33c829a1 --- /dev/null +++ b/optimizely/odp/odp_segment_manager.py @@ -0,0 +1,90 @@ +# Copyright 2022, Optimizely +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional + +from optimizely import logger as optimizely_logger +from optimizely.helpers.enums import Errors +from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption +from optimizely.odp.lru_cache import OptimizelySegmentsCache +from optimizely.odp.odp_config import OdpConfig +from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager + + +class OdpSegmentManager: + """Schedules connections to ODP for audience segmentation and caches the results.""" + + def __init__(self, odp_config: OdpConfig, segments_cache: OptimizelySegmentsCache, + zaius_manager: ZaiusGraphQLApiManager, + logger: Optional[optimizely_logger.Logger] = None) -> None: + + self.odp_config = odp_config + self.segments_cache = segments_cache + self.zaius_manager = zaius_manager + self.logger = logger or optimizely_logger.NoOpLogger() + + def fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str]) -> \ + Optional[list[str]]: + """ + Args: + user_key: The key for identifying the id type. + user_value: The id itself. + options: An array of OptimizelySegmentOptions used to ignore and/or reset the cache. + + Returns: + Qualified segments for the user from the cache or the ODP server if not in the cache. 
+ """ + odp_api_key = self.odp_config.get_api_key() + odp_api_host = self.odp_config.get_api_host() + odp_segments_to_check = self.odp_config.get_segments_to_check() + + if not (odp_api_key and odp_api_host): + self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('api_key/api_host not defined')) + return None + + if not odp_segments_to_check: + self.logger.debug('No segments are used in the project. Returning empty list.') + return [] + + cache_key = self.make_cache_key(user_key, user_value) + + ignore_cache = OptimizelyOdpOption.IGNORE_CACHE in options + reset_cache = OptimizelyOdpOption.RESET_CACHE in options + + if reset_cache: + self._reset() + + if not ignore_cache and not reset_cache: + segments = self.segments_cache.lookup(cache_key) + if segments: + self.logger.debug('ODP cache hit. Returning segments from cache.') + return segments + self.logger.debug('ODP cache miss.') + + self.logger.debug('Making a call to ODP server.') + + segments = self.zaius_manager.fetch_segments(odp_api_key, odp_api_host, user_key, user_value, + odp_segments_to_check) + + if segments and not ignore_cache: + self.segments_cache.save(cache_key, segments) + + return segments + + def _reset(self) -> None: + self.segments_cache.reset() + + def make_cache_key(self, user_key: str, user_value: str) -> str: + return f'{user_key}-$-{user_value}' diff --git a/optimizely/odp/optimizely_odp_option.py b/optimizely/odp/optimizely_odp_option.py new file mode 100644 index 00000000..ce6eaf00 --- /dev/null +++ b/optimizely/odp/optimizely_odp_option.py @@ -0,0 +1,25 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sys import version_info + +if version_info < (3, 8): + from typing_extensions import Final +else: + from typing import Final # type: ignore + + +class OptimizelyOdpOption: + """Options for the OdpSegmentManager.""" + IGNORE_CACHE: Final = 'IGNORE_CACHE' + RESET_CACHE: Final = 'RESET_CACHE' diff --git a/tests/test_odp_segment_manager.py b/tests/test_odp_segment_manager.py new file mode 100644 index 00000000..1dad6fdd --- /dev/null +++ b/tests/test_odp_segment_manager.py @@ -0,0 +1,211 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from unittest import mock +from unittest.mock import call + +from requests import exceptions as request_exception + +from optimizely.odp.lru_cache import LRUCache +from optimizely.odp.odp_config import OdpConfig +from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption +from optimizely.odp.odp_segment_manager import OdpSegmentManager +from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager +from tests import base + + +class OdpSegmentManagerTest(base.BaseTest): + api_host = 'host' + api_key = 'valid' + user_key = 'fs_user_id' + user_value = 'test-user-value' + + def test_empty_list_with_no_segments_to_check(self): + odp_config = OdpConfig(self.api_key, self.api_host, []) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + api = ZaiusGraphQLApiManager() + segment_manager = OdpSegmentManager(odp_config, segments_cache, api, mock_logger) + + with mock.patch.object(api, 'fetch_segments') as mock_fetch_segments: + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, []) + mock_logger.debug.assert_called_once_with('No segments are used in the project. 
Returning empty list.') + mock_logger.error.assert_not_called() + mock_fetch_segments.assert_not_called() + + def test_fetch_segments_success_cache_miss(self): + """ + we are fetching user key/value 'fs_user_id'/'test-user-value' + which is different from what we have passed to cache (fs_user_id-$-123/['d']) + ---> hence we trigger a cache miss + """ + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + api = ZaiusGraphQLApiManager() + + segment_manager = OdpSegmentManager(odp_config, segments_cache, api, mock_logger) + cache_key = segment_manager.make_cache_key(self.user_key, '123') + segment_manager.segments_cache.save(cache_key, ["d"]) + + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.good_response_data) + + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, ["a", "b"]) + actual_cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + self.assertEqual(segment_manager.segments_cache.lookup(actual_cache_key), ["a", "b"]) + + self.assertEqual(mock_logger.debug.call_count, 2) + mock_logger.debug.assert_has_calls([call('ODP cache miss.'), call('Making a call to ODP server.')]) + mock_logger.error.assert_not_called() + + def test_fetch_segments_success_cache_hit(self): + odp_config = OdpConfig() + odp_config.update(self.api_key, self.api_host, ['c']) + mock_logger = mock.MagicMock() + api = ZaiusGraphQLApiManager() + segments_cache = LRUCache(1000, 1000) + + segment_manager = OdpSegmentManager(odp_config, segments_cache, None, mock_logger) + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + segment_manager.segments_cache.save(cache_key, ['c']) + + with mock.patch.object(api, 'fetch_segments') as mock_fetch_segments: + segments = 
segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, ['c']) + mock_logger.debug.assert_called_once_with('ODP cache hit. Returning segments from cache.') + mock_logger.error.assert_not_called() + mock_fetch_segments.assert_not_called() + + def test_fetch_segments_missing_api_host_api_key(self): + with mock.patch('optimizely.logger') as mock_logger: + segment_manager = OdpSegmentManager(OdpConfig(), LRUCache(1000, 1000), None, mock_logger) + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, None) + mock_logger.error.assert_called_once_with('Audience segments fetch failed (api_key/api_host not defined).') + + def test_fetch_segments_network_error(self): + """ + Trigger connection error with mock side_effect. Note that Python's requests don't + have a status code for connection error, that's why we need to trigger the exception + instead of returning a fake server response with status code 500. + The error log should come form the GraphQL API manager, not from ODP Segment Manager. + The active mock logger should be placed as parameter in ZaiusGraphQLApiManager object. 
+ """ + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + api = ZaiusGraphQLApiManager(mock_logger) + segment_manager = OdpSegmentManager(odp_config, segments_cache, api, None) + + with mock.patch('requests.post', + side_effect=request_exception.ConnectionError('Connection error')): + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) + + self.assertEqual(segments, None) + mock_logger.error.assert_called_once_with('Audience segments fetch failed (network error).') + + def test_options_ignore_cache(self): + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + api = ZaiusGraphQLApiManager() + + segment_manager = OdpSegmentManager(odp_config, segments_cache, api, mock_logger) + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + segment_manager.segments_cache.save(cache_key, ['d']) + + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.good_response_data) + + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, + [OptimizelyOdpOption.IGNORE_CACHE]) + + self.assertEqual(segments, ["a", "b"]) + self.assertEqual(segment_manager.segments_cache.lookup(cache_key), ['d']) + mock_logger.debug.assert_called_once_with('Making a call to ODP server.') + mock_logger.error.assert_not_called() + + def test_options_reset_cache(self): + odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) + mock_logger = mock.MagicMock() + segments_cache = LRUCache(1000, 1000) + api = ZaiusGraphQLApiManager() + + segment_manager = OdpSegmentManager(odp_config, segments_cache, api, mock_logger) + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + segment_manager.segments_cache.save(cache_key, 
['d']) + segment_manager.segments_cache.save('123', ['c', 'd']) + + with mock.patch('requests.post') as mock_request_post: + mock_request_post.return_value = self.fake_server_response(status_code=200, + content=self.good_response_data) + + segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, + [OptimizelyOdpOption.RESET_CACHE]) + + self.assertEqual(segments, ["a", "b"]) + self.assertEqual(segment_manager.segments_cache.lookup(cache_key), ['a', 'b']) + self.assertTrue(len(segment_manager.segments_cache.map) == 1) + mock_logger.debug.assert_called_once_with('Making a call to ODP server.') + mock_logger.error.assert_not_called() + + def test_make_correct_cache_key(self): + segment_manager = OdpSegmentManager(None, None, None, None) + cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) + self.assertEqual(cache_key, 'fs_user_id-$-test-user-value') + + # test json response + good_response_data = """ + { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualifed sample 1" + } + }, + { + "node": { + "name": "b", + "state": "qualified", + "description": "qualifed sample 2" + } + }, + { + "node": { + "name": "c", + "state": "not_qualified", + "description": "not-qualified sample" + } + } + ] + } + } + } + } + """ From 967471bf6ecfdfcf967483ad80ba16ac31c63543 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Fri, 26 Aug 2022 15:34:45 -0700 Subject: [PATCH 22/68] update py version to 3.10 for gitactions linting (#404) * pdate py version to 3.10 for gitactions linting * add third digit to py version * make py version a string --- .github/workflows/python.yml | 4 ++-- tests/base.py | 4 ---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 7e17c5ff..2df01f72 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -29,10 +29,10 @@ jobs: 
runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Set up Python 3.9 + - name: Set up Python 3.10 uses: actions/setup-python@v3 with: - python-version: 3.9 + python-version: '3.10' # flake8 version should be same as the version in requirements/test.txt # to avoid lint errors on CI - name: pip install flak8 diff --git a/tests/base.py b/tests/base.py index 65ae1fe1..d4aeae8e 100644 --- a/tests/base.py +++ b/tests/base.py @@ -20,10 +20,6 @@ from optimizely import optimizely -def long(a): - raise NotImplementedError('Tests should only call `long` if running in PY2') - - class BaseTest(unittest.TestCase): def assertStrictTrue(self, to_assert): self.assertIs(to_assert, True) From de849d29394e08e480cd5135c2abdc6f7d0bf5bb Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Fri, 2 Sep 2022 11:54:15 -0400 Subject: [PATCH 23/68] feat: add odp event manager (#403) * add odp event manager --- optimizely/helpers/enums.py | 17 +- optimizely/helpers/validator.py | 6 + optimizely/odp/odp_config.py | 23 +- optimizely/odp/odp_event.py | 40 +- optimizely/odp/odp_event_manager.py | 238 +++++++++++ optimizely/odp/zaius_rest_api_manager.py | 4 +- tests/base.py | 13 + tests/test_odp_event_manager.py | 515 +++++++++++++++++++++++ 8 files changed, 843 insertions(+), 13 deletions(-) create mode 100644 optimizely/odp/odp_event_manager.py create mode 100644 tests/test_odp_event_manager.py diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index ab63d1e3..02bc9136 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -120,10 +120,11 @@ class Errors: NONE_VARIABLE_KEY_PARAMETER: Final = '"None" is an invalid value for variable key.' UNSUPPORTED_DATAFILE_VERSION: Final = ( 'This version of the Python SDK does not support the given datafile version: "{}".') - INVALID_SEGMENT_IDENTIFIER = 'Audience segments fetch failed (invalid identifier).' 
- FETCH_SEGMENTS_FAILED = 'Audience segments fetch failed ({}).' - ODP_EVENT_FAILED = 'ODP event send failed ({}).' - ODP_NOT_ENABLED = 'ODP is not enabled. ' + INVALID_SEGMENT_IDENTIFIER: Final = 'Audience segments fetch failed (invalid identifier).' + FETCH_SEGMENTS_FAILED: Final = 'Audience segments fetch failed ({}).' + ODP_EVENT_FAILED: Final = 'ODP event send failed ({}).' + ODP_NOT_ENABLED: Final = 'ODP is not enabled.' + ODP_NOT_INTEGRATED: Final = 'ODP is not integrated.' class ForcedDecisionLogs: @@ -205,3 +206,11 @@ class OdpRestApiConfig: class OdpGraphQLApiConfig: """ODP GraphQL API configs.""" REQUEST_TIMEOUT: Final = 10 + + +class OdpEventManagerConfig: + """ODP Event Manager configs.""" + DEFAULT_QUEUE_CAPACITY: Final = 1000 + DEFAULT_BATCH_SIZE: Final = 10 + DEFAULT_FLUSH_INTERVAL: Final = 1 + DEFAULT_RETRY_COUNT: Final = 3 diff --git a/optimizely/helpers/validator.py b/optimizely/helpers/validator.py index 244337b0..7ffe0422 100644 --- a/optimizely/helpers/validator.py +++ b/optimizely/helpers/validator.py @@ -31,6 +31,7 @@ from optimizely.event.event_processor import BaseEventProcessor from optimizely.helpers.event_tag_utils import EventTags from optimizely.optimizely_user_context import UserAttributes + from optimizely.odp.odp_event import OdpDataDict def is_datafile_valid(datafile: Optional[str | bytes]) -> bool: @@ -306,3 +307,8 @@ def are_values_same_type(first_val: Any, second_val: Any) -> bool: return True return False + + +def are_odp_data_types_valid(data: OdpDataDict) -> bool: + valid_types = (str, int, float, bool, type(None)) + return all(isinstance(v, valid_types) for v in data.values()) diff --git a/optimizely/odp/odp_config.py b/optimizely/odp/odp_config.py index 64809626..17e435dc 100644 --- a/optimizely/odp/odp_config.py +++ b/optimizely/odp/odp_config.py @@ -12,11 +12,19 @@ # limitations under the License. 
from __future__ import annotations +from enum import Enum from typing import Optional from threading import Lock +class OdpConfigState(Enum): + """State of the ODP integration.""" + UNDETERMINED = 1 + INTEGRATED = 2 + NOT_INTEGRATED = 3 + + class OdpConfig: """ Contains configuration used for ODP integration. @@ -37,6 +45,9 @@ def __init__( self._api_host = api_host self._segments_to_check = segments_to_check or [] self.lock = Lock() + self._odp_state = OdpConfigState.UNDETERMINED + if self._api_host and self._api_key: + self._odp_state = OdpConfigState.INTEGRATED def update(self, api_key: Optional[str], api_host: Optional[str], segments_to_check: list[str]) -> bool: """ @@ -51,8 +62,14 @@ def update(self, api_key: Optional[str], api_host: Optional[str], segments_to_ch Returns: True if the provided values were different than the existing values. """ + updated = False with self.lock: + if api_key and api_host: + self._odp_state = OdpConfigState.INTEGRATED + else: + self._odp_state = OdpConfigState.NOT_INTEGRATED + if self._api_key != api_key or self._api_host != api_host or self._segments_to_check != segments_to_check: self._api_key = api_key self._api_host = api_host @@ -73,7 +90,7 @@ def get_segments_to_check(self) -> list[str]: with self.lock: return self._segments_to_check.copy() - def odp_integrated(self) -> bool: - """Returns True if ODP is integrated.""" + def odp_state(self) -> OdpConfigState: + """Returns the state of ODP integration (UNDETERMINED, INTEGRATED, or NOT_INTEGRATED).""" with self.lock: - return self._api_key is not None and self._api_host is not None + return self._odp_state diff --git a/optimizely/odp/odp_event.py b/optimizely/odp/odp_event.py index ac3e5d93..fafaa94f 100644 --- a/optimizely/odp/odp_event.py +++ b/optimizely/odp/odp_event.py @@ -13,15 +13,47 @@ from __future__ import annotations -from typing import Any +from typing import Any, Union, Dict +import uuid +import json +from optimizely import version + +OdpDataDict = Dict[str, 
Union[str, int, float, bool, None]] class OdpEvent: """ Representation of an odp event which can be sent to the Optimizely odp platform. """ - def __init__(self, type: str, action: str, - identifiers: dict[str, str], data: dict[str, Any]) -> None: + def __init__(self, type: str, action: str, identifiers: dict[str, str], data: OdpDataDict) -> None: self.type = type self.action = action self.identifiers = identifiers - self.data = data + self.data = self._add_common_event_data(data) + + def __repr__(self) -> str: + return str(self.__dict__) + + def __eq__(self, other: object) -> bool: + if isinstance(other, OdpEvent): + return self.__dict__ == other.__dict__ + elif isinstance(other, dict): + return self.__dict__ == other + else: + return False + + def _add_common_event_data(self, custom_data: OdpDataDict) -> OdpDataDict: + data: OdpDataDict = { + 'idempotence_id': str(uuid.uuid4()), + 'data_source_type': 'sdk', + 'data_source': 'python-sdk', + 'data_source_version': version.__version__ + } + data.update(custom_data) + return data + + +class OdpEventEncoder(json.JSONEncoder): + def default(self, obj: object) -> Any: + if isinstance(obj, OdpEvent): + return obj.__dict__ + return json.JSONEncoder.default(self, obj) diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py new file mode 100644 index 00000000..df02e3ed --- /dev/null +++ b/optimizely/odp/odp_event_manager.py @@ -0,0 +1,238 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +from enum import Enum +from threading import Thread +from typing import Optional +import time +from queue import Empty, Queue, Full + +from optimizely import logger as _logging +from .odp_event import OdpEvent, OdpDataDict +from .odp_config import OdpConfig, OdpConfigState +from .zaius_rest_api_manager import ZaiusRestApiManager +from optimizely.helpers.enums import OdpEventManagerConfig, Errors + + +class Signal(Enum): + """Enum for sending signals to the event queue.""" + SHUTDOWN = 1 + FLUSH = 2 + + +class OdpEventManager: + """ + Class that sends batches of ODP events. + + The OdpEventManager maintains a single consumer thread that pulls events off of + the queue and buffers them before events are sent to ODP. + Sends events when the batch size is met or when the flush timeout has elapsed. + """ + + def __init__( + self, + odp_config: OdpConfig, + logger: Optional[_logging.Logger] = None, + api_manager: Optional[ZaiusRestApiManager] = None + ): + """OdpEventManager init method to configure event batching. + + Args: + odp_config: ODP integration config. + logger: Optional component which provides a log method to log messages. By default nothing would be logged. + api_manager: Optional component which sends events to ODP. 
+ """ + self.logger = logger or _logging.NoOpLogger() + self.zaius_manager = api_manager or ZaiusRestApiManager(self.logger) + self.odp_config = odp_config + self.event_queue: Queue[OdpEvent | Signal] = Queue(OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY) + self.batch_size = OdpEventManagerConfig.DEFAULT_BATCH_SIZE + self.flush_interval = OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL + self._flush_deadline: float = 0 + self.retry_count = OdpEventManagerConfig.DEFAULT_RETRY_COUNT + self._current_batch: list[OdpEvent] = [] + """_current_batch should only be modified by the processing thread, as it is not thread safe""" + self.thread = Thread(target=self._run, daemon=True) + self.thread_exception = False + """thread_exception will be True if the processing thread did not exit cleanly""" + + @property + def is_running(self) -> bool: + """Property to check if consumer thread is alive or not.""" + return self.thread.is_alive() + + def start(self) -> None: + """Starts the batch processing thread to batch events.""" + if self.is_running: + self.logger.warning('ODP event queue already started.') + return + + self.thread.start() + + def _run(self) -> None: + """Processes the event queue from a child thread. Events are batched until + the batch size is met or until the flush timeout has elapsed. 
+ """ + try: + while True: + timeout = self._get_queue_timeout() + + try: + item = self.event_queue.get(True, timeout) + except Empty: + item = None + + if item == Signal.SHUTDOWN: + self.logger.debug('ODP event queue: received shutdown signal.') + break + + elif item == Signal.FLUSH: + self.logger.debug('ODP event queue: received flush signal.') + self._flush_batch() + self.event_queue.task_done() + continue + + elif isinstance(item, OdpEvent): + self._add_to_batch(item) + self.event_queue.task_done() + + elif len(self._current_batch) > 0: + self.logger.debug('ODP event queue: flushing on interval.') + self._flush_batch() + + except Exception as exception: + self.thread_exception = True + self.logger.error(f'Uncaught exception processing ODP events. Error: {exception}') + + finally: + self.logger.info('Exiting ODP event processing loop. Attempting to flush pending events.') + self._flush_batch() + if item == Signal.SHUTDOWN: + self.event_queue.task_done() + + def flush(self) -> None: + """Adds flush signal to event_queue.""" + try: + self.event_queue.put_nowait(Signal.FLUSH) + except Full: + self.logger.error("Error flushing ODP event queue") + + def _flush_batch(self) -> None: + """Flushes current batch by dispatching event. 
+ Should only be called by the processing thread.""" + batch_len = len(self._current_batch) + if batch_len == 0: + self.logger.debug('ODP event queue: nothing to flush.') + return + + api_key = self.odp_config.get_api_key() + api_host = self.odp_config.get_api_host() + + if not api_key or not api_host: + self.logger.debug(Errors.ODP_NOT_INTEGRATED) + self._current_batch.clear() + return + + self.logger.debug(f'ODP event queue: flushing batch size {batch_len}.') + should_retry = False + + for i in range(1 + self.retry_count): + try: + should_retry = self.zaius_manager.send_odp_events(api_key, api_host, self._current_batch) + except Exception as error: + should_retry = False + self.logger.error(Errors.ODP_EVENT_FAILED.format(f'Error: {error} {self._current_batch}')) + + if not should_retry: + break + if i < self.retry_count: + self.logger.debug('Error dispatching ODP events, scheduled to retry.') + + if should_retry: + self.logger.error(Errors.ODP_EVENT_FAILED.format(f'Failed after {i} retries: {self._current_batch}')) + + self._current_batch.clear() + + def _add_to_batch(self, odp_event: OdpEvent) -> None: + """Appends received ODP event to current batch, flushing if batch is greater than batch size. 
+ Should only be called by the processing thread.""" + if not self._current_batch: + self._set_flush_deadline() + + self._current_batch.append(odp_event) + if len(self._current_batch) >= self.batch_size: + self.logger.debug('ODP event queue: flushing on batch size.') + self._flush_batch() + + def _set_flush_deadline(self) -> None: + """Sets time that next flush will occur.""" + self._flush_deadline = time.time() + self.flush_interval + + def _get_time_till_flush(self) -> float: + """Returns seconds until next flush; no less than 0.""" + return max(0, self._flush_deadline - time.time()) + + def _get_queue_timeout(self) -> Optional[float]: + """Returns seconds until next flush or None if current batch is empty.""" + if len(self._current_batch) == 0: + return None + return self._get_time_till_flush() + + def stop(self) -> None: + """Flushes and then stops ODP event queue.""" + try: + self.event_queue.put_nowait(Signal.SHUTDOWN) + except Full: + self.logger.error('Error stopping ODP event queue.') + return + + self.logger.warning('Stopping ODP event queue.') + + if self.is_running: + self.thread.join() + + if len(self._current_batch) > 0: + self.logger.error(Errors.ODP_EVENT_FAILED.format(self._current_batch)) + + if self.is_running: + self.logger.error('Error stopping ODP event queue.') + + def send_event(self, type: str, action: str, identifiers: dict[str, str], data: OdpDataDict) -> None: + """Create OdpEvent and add it to the event queue.""" + odp_state = self.odp_config.odp_state() + if odp_state == OdpConfigState.UNDETERMINED: + self.logger.debug('ODP event queue: cannot send before the datafile has loaded.') + return + + if odp_state == OdpConfigState.NOT_INTEGRATED: + self.logger.debug(Errors.ODP_NOT_INTEGRATED) + return + + self.dispatch(OdpEvent(type, action, identifiers, data)) + + def dispatch(self, event: OdpEvent) -> None: + """Add OdpEvent to the event queue.""" + if self.thread_exception: + self.logger.error(Errors.ODP_EVENT_FAILED.format('Queue is 
down')) + return + + if not self.is_running: + self.logger.warning('ODP event queue is shutdown, not accepting events.') + return + + try: + self.logger.debug('ODP event queue: adding event.') + self.event_queue.put_nowait(event) + except Full: + self.logger.warning(Errors.ODP_EVENT_FAILED.format("Queue is full")) diff --git a/optimizely/odp/zaius_rest_api_manager.py b/optimizely/odp/zaius_rest_api_manager.py index 9cbe2638..62f7c1c7 100644 --- a/optimizely/odp/zaius_rest_api_manager.py +++ b/optimizely/odp/zaius_rest_api_manager.py @@ -21,7 +21,7 @@ from optimizely import logger as optimizely_logger from optimizely.helpers.enums import Errors, OdpRestApiConfig -from optimizely.odp.odp_event import OdpEvent +from optimizely.odp.odp_event import OdpEvent, OdpEventEncoder """ ODP REST Events API @@ -60,7 +60,7 @@ def send_odp_events(self, api_key: str, api_host: str, events: list[OdpEvent]) - request_headers = {'content-type': 'application/json', 'x-api-key': api_key} try: - payload_dict = json.dumps(events) + payload_dict = json.dumps(events, cls=OdpEventEncoder) except TypeError as err: self.logger.error(Errors.ODP_EVENT_FAILED.format(err)) return should_retry diff --git a/tests/base.py b/tests/base.py index d4aeae8e..6e74e3aa 100644 --- a/tests/base.py +++ b/tests/base.py @@ -14,12 +14,25 @@ import json import unittest from typing import Optional +from copy import deepcopy +from unittest import mock from requests import Response from optimizely import optimizely +class CopyingMock(mock.MagicMock): + """ + Forces mock to make a copy of the args instead of keeping a reference. + Otherwise mutable args (lists, dicts) can change after they're captured. 
+ """ + def __call__(self, *args, **kwargs): + args = deepcopy(args) + kwargs = deepcopy(kwargs) + return super().__call__(*args, **kwargs) + + class BaseTest(unittest.TestCase): def assertStrictTrue(self, to_assert): self.assertIs(to_assert, True) diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py new file mode 100644 index 00000000..ffbab40d --- /dev/null +++ b/tests/test_odp_event_manager.py @@ -0,0 +1,515 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import time +from unittest import mock +from copy import deepcopy +import uuid + +from optimizely.odp.odp_event import OdpEvent +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_config import OdpConfig +from .base import BaseTest, CopyingMock +from optimizely.version import __version__ +from optimizely.helpers import validator +from optimizely.helpers.enums import Errors + + +class MockOdpEventManager(OdpEventManager): + def _add_to_batch(self, *args): + raise Exception("Unexpected error") + + +TEST_UUID = str(uuid.uuid4()) + + +@mock.patch('uuid.uuid4', return_value=TEST_UUID, new=mock.DEFAULT) +class OdpEventManagerTest(BaseTest): + user_key = "vuid" + user_value = "test-user-value" + api_key = "test-api-key" + api_host = "https://test-host.com" + odp_config = OdpConfig(api_key, api_host) + + events = [ + { + "type": "t1", + "action": "a1", + "identifiers": {"id-key-1": "id-value-1"}, + "data": {"key-1": "value1", "key-2": 2, "key-3": 3.0, "key-4": None, 'key-5': True} + }, + { + "type": "t2", + "action": "a2", + "identifiers": {"id-key-2": "id-value-2"}, + "data": {"key-2": "value2"} + } + ] + + processed_events = [ + { + "type": "t1", + "action": "a1", + "identifiers": {"id-key-1": "id-value-1"}, + "data": { + "idempotence_id": TEST_UUID, + "data_source_type": "sdk", + "data_source": "python-sdk", + "data_source_version": __version__, + "key-1": "value1", + "key-2": 2, + "key-3": 3.0, + "key-4": None, + "key-5": True + } + }, + { + "type": "t2", + "action": "a2", + "identifiers": {"id-key-2": "id-value-2"}, + "data": { + "idempotence_id": TEST_UUID, + "data_source_type": "sdk", + "data_source": "python-sdk", + "data_source_version": __version__, + "key-2": "value2" + } + } + ] + + def test_odp_event_init(self, *args): + event = self.events[0] + self.assertStrictTrue(validator.are_odp_data_types_valid(event['data'])) + odp_event = OdpEvent(**event) + self.assertEqual(odp_event, self.processed_events[0]) + + def 
test_invalid_odp_event(self, *args): + event = deepcopy(self.events[0]) + event['data']['invalid-item'] = {} + self.assertStrictFalse(validator.are_odp_data_types_valid(event['data'])) + + def test_odp_event_manager_success(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.stop() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('ODP event queue: flushing batch size 2.') + mock_logger.debug.assert_any_call('ODP event queue: received shutdown signal.') + self.assertStrictFalse(event_manager.is_running) + + def test_odp_event_manager_batch(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + event_manager.batch_size = 2 + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('ODP event queue: flushing on batch size.') + event_manager.stop() + + def test_odp_event_manager_multiple_batches(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + event_manager.batch_size = 2 + batch_count = 4 + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + 
for _ in range(batch_count): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + self.assertEqual(mock_send.call_count, batch_count) + mock_send.assert_has_calls( + [mock.call(self.api_key, self.api_host, self.processed_events)] * batch_count + ) + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: flushing on batch size.'), + mock.call('ODP event queue: flushing batch size 2.') + ] * batch_count, any_order=True) + event_manager.stop() + + def test_odp_event_manager_backlog(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + + event_manager.batch_size = 2 + batch_count = 4 + + # create events before starting processing to simulate backlog + with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): + for _ in range(batch_count - 1): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.start() + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.stop() + event_manager.event_queue.join() + + self.assertEqual(mock_send.call_count, batch_count) + mock_send.assert_has_calls( + [mock.call(self.api_key, self.api_host, self.processed_events)] * batch_count + ) + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: flushing on batch size.'), + mock.call('ODP event queue: flushing batch size 2.') + ] * batch_count, any_order=True) + + def test_odp_event_manager_flush(self, *args): + mock_logger = mock.Mock() + event_manager = 
OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + mock_logger.error.assert_not_called() + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call('ODP event queue: received flush signal.') + event_manager.stop() + + def test_odp_event_manager_multiple_flushes(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + flush_count = 4 + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + for _ in range(flush_count): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + self.assertEqual(mock_send.call_count, flush_count) + for call in mock_send.call_args_list: + self.assertEqual(call, mock.call(self.api_key, self.api_host, self.processed_events)) + mock_logger.error.assert_not_called() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: received flush signal.'), + mock.call('ODP event queue: flushing batch size 2.') + ] * flush_count, any_order=True) + event_manager.stop() + + def test_odp_event_manager_retry_failure(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + number_of_tries = event_manager.retry_count + 1 + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, 
return_value=True + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_has_calls( + [mock.call(self.api_key, self.api_host, self.processed_events)] * number_of_tries + ) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call('Error dispatching ODP events, scheduled to retry.') + mock_logger.error.assert_called_once_with( + f'ODP event send failed (Failed after 3 retries: {self.processed_events}).' + ) + event_manager.stop() + + def test_odp_event_manager_retry_success(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, side_effect=[True, True, False] + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_has_calls([mock.call(self.api_key, self.api_host, self.processed_events)] * 3) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call('Error dispatching ODP events, scheduled to retry.') + mock_logger.error.assert_not_called() + self.assertStrictTrue(event_manager.is_running) + event_manager.stop() + + def test_odp_event_manager_send_failure(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + with mock.patch.object( + event_manager.zaius_manager, + 'send_odp_events', + new_callable=CopyingMock, + side_effect=Exception('Unexpected error') + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_called_once_with(self.api_key, self.api_host, 
self.processed_events) + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_any_call(f"ODP event send failed (Error: Unexpected error {self.processed_events}).") + self.assertStrictTrue(event_manager.is_running) + event_manager.stop() + + def test_odp_event_manager_disabled(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig() + odp_config.update(None, None, None) + event_manager = OdpEventManager(odp_config, mock_logger) + event_manager.start() + + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call(Errors.ODP_NOT_INTEGRATED) + self.assertStrictTrue(event_manager.is_running) + event_manager.stop() + + def test_odp_event_manager_queue_full(self, *args): + mock_logger = mock.Mock() + + with mock.patch('optimizely.helpers.enums.OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY', 1): + event_manager = OdpEventManager(self.odp_config, mock_logger) + + with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + + # warning when adding event to full queue + mock_logger.warning.assert_called_once_with('ODP event send failed (Queue is full).') + # error when trying to flush with full queue + mock_logger.error.assert_called_once_with('Error flushing ODP event queue') + + def test_odp_event_manager_thread_exception(self, *args): + mock_logger = mock.Mock() + event_manager = MockOdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + event_manager.send_event(**self.events[0]) + time.sleep(.1) + event_manager.send_event(**self.events[0]) + + event_manager.thread.join() + mock_logger.error.assert_has_calls([ + mock.call('Uncaught exception processing ODP events. 
Error: Unexpected error'), + mock.call('ODP event send failed (Queue is down).') + ]) + event_manager.stop() + + def test_odp_event_manager_override_default_data(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.start() + + event = deepcopy(self.events[0]) + event['data']['data_source'] = 'my-app' + + processed_event = deepcopy(self.processed_events[0]) + processed_event['data']['data_source'] = 'my-app' + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**event) + event_manager.flush() + event_manager.event_queue.join() + + mock_send.assert_called_once_with(self.api_key, self.api_host, [processed_event]) + event_manager.stop() + + def test_odp_event_manager_flush_timeout(self, *args): + mock_logger = mock.Mock() + event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager.flush_interval = .5 + event_manager.start() + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + time.sleep(1) + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('ODP event queue: flushing on interval.') + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + event_manager.stop() + + def test_odp_event_manager_events_before_odp_ready(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig() + event_manager = OdpEventManager(odp_config, mock_logger) + event_manager.start() + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + 
odp_config.update(self.api_key, self.api_host, []) + event_manager.event_queue.join() + + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.flush() + + event_manager.event_queue.join() + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call('ODP event queue: adding event.'), + mock.call('ODP event queue: adding event.'), + mock.call('ODP event queue: received flush signal.'), + mock.call('ODP event queue: flushing batch size 2.') + ]) + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + event_manager.stop() + + def test_odp_event_manager_events_before_odp_disabled(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig() + event_manager = OdpEventManager(odp_config, mock_logger) + event_manager.start() + + with mock.patch.object(event_manager.zaius_manager, 'send_odp_events') as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + odp_config.update(None, None, []) + event_manager.event_queue.join() + + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + event_manager.event_queue.join() + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call(Errors.ODP_NOT_INTEGRATED), + mock.call(Errors.ODP_NOT_INTEGRATED) + ]) + self.assertEqual(len(event_manager._current_batch), 0) + mock_send.assert_not_called() + event_manager.stop() + + def test_odp_event_manager_disabled_after_init(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig(self.api_key, self.api_host) + event_manager = 
OdpEventManager(odp_config, mock_logger) + event_manager.start() + event_manager.batch_size = 2 + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + odp_config.update(None, None, []) + + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + event_manager.event_queue.join() + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_has_calls([ + mock.call('ODP event queue: flushing batch size 2.'), + mock.call(Errors.ODP_NOT_INTEGRATED), + mock.call(Errors.ODP_NOT_INTEGRATED) + ]) + self.assertEqual(len(event_manager._current_batch), 0) + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) + event_manager.stop() + + def test_odp_event_manager_disabled_after_events_in_queue(self, *args): + mock_logger = mock.Mock() + odp_config = OdpConfig(self.api_key, self.api_host) + + event_manager = OdpEventManager(odp_config, mock_logger) + event_manager.batch_size = 2 + + with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + + with mock.patch.object(event_manager.zaius_manager, 'send_odp_events') as mock_send: + odp_config.update(None, None, []) + event_manager.start() + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + self.assertEqual(len(event_manager._current_batch), 0) + mock_logger.debug.assert_any_call(Errors.ODP_NOT_INTEGRATED) + mock_logger.error.assert_not_called() + mock_send.assert_not_called() + event_manager.stop() From d1b521bd908663e25480c61ae54cb767bfb53437 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Wed, 21 Sep 2022 08:53:16 -0700 Subject: [PATCH 24/68] feat: add odp manager (#405) * bump up py version 
in gitactions to 3.10 * feat: add odp_manager * add update config event manager signal Co-authored-by: Andy Leap --- optimizely/exceptions.py | 18 ++ optimizely/helpers/enums.py | 15 +- optimizely/odp/odp_event_manager.py | 46 ++- optimizely/odp/odp_manager.py | 133 +++++++++ optimizely/odp/odp_segment_manager.py | 18 +- tests/test_odp_event_manager.py | 17 +- tests/test_odp_manager.py | 402 ++++++++++++++++++++++++++ 7 files changed, 627 insertions(+), 22 deletions(-) create mode 100644 optimizely/odp/odp_manager.py create mode 100644 tests/test_odp_manager.py diff --git a/optimizely/exceptions.py b/optimizely/exceptions.py index d6003ab1..e7644064 100644 --- a/optimizely/exceptions.py +++ b/optimizely/exceptions.py @@ -64,3 +64,21 @@ class UnsupportedDatafileVersionException(Exception): """ Raised when provided version in datafile is not supported. """ pass + + +class OdpNotEnabled(Exception): + """ Raised when Optimizely Data Platform (ODP) is not enabled. """ + + pass + + +class OdpNotIntegrated(Exception): + """ Raised when Optimizely Data Platform (ODP) is not integrated. """ + + pass + + +class OdpInvalidData(Exception): + """ Raised when passing invalid ODP data. """ + + pass diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 02bc9136..886d269a 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -123,8 +123,9 @@ class Errors: INVALID_SEGMENT_IDENTIFIER: Final = 'Audience segments fetch failed (invalid identifier).' FETCH_SEGMENTS_FAILED: Final = 'Audience segments fetch failed ({}).' ODP_EVENT_FAILED: Final = 'ODP event send failed ({}).' - ODP_NOT_ENABLED: Final = 'ODP is not enabled.' ODP_NOT_INTEGRATED: Final = 'ODP is not integrated.' + ODP_NOT_ENABLED: Final = 'ODP is not enabled.' + ODP_INVALID_DATA: Final = 'ODP data is not valid.' 
class ForcedDecisionLogs: @@ -214,3 +215,15 @@ class OdpEventManagerConfig: DEFAULT_BATCH_SIZE: Final = 10 DEFAULT_FLUSH_INTERVAL: Final = 1 DEFAULT_RETRY_COUNT: Final = 3 + + +class OdpManagerConfig: + """ODP Manager configs.""" + KEY_FOR_USER_ID: Final = 'fs_user_id' + EVENT_TYPE: Final = 'fullstack' + + +class OdpSegmentsCacheConfig: + """ODP Segment Cache configs.""" + DEFAULT_CAPACITY: Final = 10_000 + DEFAULT_TIMEOUT_SECS: Final = 600 diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py index df02e3ed..f608213e 100644 --- a/optimizely/odp/odp_event_manager.py +++ b/optimizely/odp/odp_event_manager.py @@ -12,23 +12,25 @@ # limitations under the License. from __future__ import annotations + +import time from enum import Enum +from queue import Empty, Queue, Full from threading import Thread from typing import Optional -import time -from queue import Empty, Queue, Full from optimizely import logger as _logging -from .odp_event import OdpEvent, OdpDataDict +from optimizely.helpers.enums import OdpEventManagerConfig, Errors, OdpManagerConfig from .odp_config import OdpConfig, OdpConfigState +from .odp_event import OdpEvent, OdpDataDict from .zaius_rest_api_manager import ZaiusRestApiManager -from optimizely.helpers.enums import OdpEventManagerConfig, Errors class Signal(Enum): """Enum for sending signals to the event queue.""" SHUTDOWN = 1 FLUSH = 2 + UPDATE_CONFIG = 3 class OdpEventManager: @@ -55,7 +57,11 @@ def __init__( """ self.logger = logger or _logging.NoOpLogger() self.zaius_manager = api_manager or ZaiusRestApiManager(self.logger) + self.odp_config = odp_config + self.api_key = odp_config.get_api_key() + self.api_host = odp_config.get_api_host() + self.event_queue: Queue[OdpEvent | Signal] = Queue(OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY) self.batch_size = OdpEventManagerConfig.DEFAULT_BATCH_SIZE self.flush_interval = OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL @@ -101,7 +107,11 @@ def _run(self) -> None: 
self.logger.debug('ODP event queue: received flush signal.') self._flush_batch() self.event_queue.task_done() - continue + + elif item == Signal.UPDATE_CONFIG: + self.logger.debug('ODP event queue: received update config signal.') + self._update_config() + self.event_queue.task_done() elif isinstance(item, OdpEvent): self._add_to_batch(item) @@ -136,10 +146,7 @@ def _flush_batch(self) -> None: self.logger.debug('ODP event queue: nothing to flush.') return - api_key = self.odp_config.get_api_key() - api_host = self.odp_config.get_api_host() - - if not api_key or not api_host: + if not self.api_key or not self.api_host: self.logger.debug(Errors.ODP_NOT_INTEGRATED) self._current_batch.clear() return @@ -149,7 +156,7 @@ def _flush_batch(self) -> None: for i in range(1 + self.retry_count): try: - should_retry = self.zaius_manager.send_odp_events(api_key, api_host, self._current_batch) + should_retry = self.zaius_manager.send_odp_events(self.api_key, self.api_host, self._current_batch) except Exception as error: should_retry = False self.logger.error(Errors.ODP_EVENT_FAILED.format(f'Error: {error} {self._current_batch}')) @@ -236,3 +243,22 @@ def dispatch(self, event: OdpEvent) -> None: self.event_queue.put_nowait(event) except Full: self.logger.warning(Errors.ODP_EVENT_FAILED.format("Queue is full")) + + def identify_user(self, user_id: str) -> None: + self.send_event(OdpManagerConfig.EVENT_TYPE, 'identified', + {OdpManagerConfig.KEY_FOR_USER_ID: user_id}, {}) + + def update_config(self) -> None: + """Adds update config signal to event_queue.""" + try: + self.event_queue.put_nowait(Signal.UPDATE_CONFIG) + except Full: + self.logger.error("Error updating ODP config for the event queue") + + def _update_config(self) -> None: + """Updates the configuration used to send events.""" + if len(self._current_batch) > 0: + self._flush_batch() + + self.api_host = self.odp_config.get_api_host() + self.api_key = self.odp_config.get_api_key() diff --git 
a/optimizely/odp/odp_manager.py b/optimizely/odp/odp_manager.py new file mode 100644 index 00000000..72c61514 --- /dev/null +++ b/optimizely/odp/odp_manager.py @@ -0,0 +1,133 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from typing import Optional, Any + +from optimizely import exceptions as optimizely_exception +from optimizely import logger as optimizely_logger +from optimizely.helpers.enums import Errors, OdpManagerConfig, OdpSegmentsCacheConfig +from optimizely.helpers.validator import are_odp_data_types_valid +from optimizely.odp.lru_cache import OptimizelySegmentsCache, LRUCache +from optimizely.odp.odp_config import OdpConfig, OdpConfigState +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_segment_manager import OdpSegmentManager +from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager + + +class OdpManager: + """Orchestrates segment manager, event manager and odp config.""" + + def __init__( + self, + disable: bool, + segments_cache: Optional[OptimizelySegmentsCache] = None, + segment_manager: Optional[OdpSegmentManager] = None, + event_manager: Optional[OdpEventManager] = None, + logger: Optional[optimizely_logger.Logger] = None + ) -> None: + + self.enabled = not disable + self.odp_config = OdpConfig() + self.logger = logger or optimizely_logger.NoOpLogger() + + self.segment_manager = segment_manager + self.event_manager = 
event_manager + + if not self.enabled: + self.logger.info('ODP is disabled.') + return + + if not self.segment_manager: + if not segments_cache: + segments_cache = LRUCache( + OdpSegmentsCacheConfig.DEFAULT_CAPACITY, + OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS + ) + self.segment_manager = OdpSegmentManager( + self.odp_config, + segments_cache, + ZaiusGraphQLApiManager(logger), logger + ) + else: + self.segment_manager.odp_config = self.odp_config + + if event_manager: + event_manager.odp_config = self.odp_config + self.event_manager = event_manager + else: + self.event_manager = OdpEventManager(self.odp_config, logger) + + self.event_manager.start() + + def fetch_qualified_segments(self, user_id: str, options: list[str]) -> Optional[list[str]]: + if not self.enabled or not self.segment_manager: + self.logger.error(Errors.ODP_NOT_ENABLED) + return None + + user_key = OdpManagerConfig.KEY_FOR_USER_ID + user_value = user_id + + return self.segment_manager.fetch_qualified_segments(user_key, user_value, options) + + def identify_user(self, user_id: str) -> None: + if not self.enabled or not self.event_manager: + self.logger.debug('ODP identify event is not dispatched (ODP disabled).') + return + if self.odp_config.odp_state() == OdpConfigState.NOT_INTEGRATED: + self.logger.debug('ODP identify event is not dispatched (ODP not integrated).') + return + + self.event_manager.identify_user(user_id) + + def send_event(self, type: str, action: str, identifiers: dict[str, str], data: dict[str, Any]) -> None: + """ + Send an event to the ODP server. + + Args: + type: The event type. + action: The event action name. + identifiers: A dictionary for identifiers. + data: A dictionary for associated data. The default event data will be added to this data + before sending to the ODP server. + + Raises custom exception if error is detected. 
+ """ + if not self.enabled or not self.event_manager: + raise optimizely_exception.OdpNotEnabled(Errors.ODP_NOT_ENABLED) + + if self.odp_config.odp_state() == OdpConfigState.NOT_INTEGRATED: + raise optimizely_exception.OdpNotIntegrated(Errors.ODP_NOT_INTEGRATED) + + if not are_odp_data_types_valid(data): + raise optimizely_exception.OdpInvalidData(Errors.ODP_INVALID_DATA) + + self.event_manager.send_event(type, action, identifiers, data) + + def update_odp_config(self, api_key: Optional[str], api_host: Optional[str], + segments_to_check: list[str]) -> None: + if not self.enabled: + return + + config_changed = self.odp_config.update(api_key, api_host, segments_to_check) + if not config_changed: + self.logger.debug('Odp config was not changed.') + return + + # reset segments cache when odp integration or segments to check are changed + if self.segment_manager: + self.segment_manager.reset() + + if self.event_manager: + self.event_manager.update_config() diff --git a/optimizely/odp/odp_segment_manager.py b/optimizely/odp/odp_segment_manager.py index 33c829a1..a5d363fd 100644 --- a/optimizely/odp/odp_segment_manager.py +++ b/optimizely/odp/odp_segment_manager.py @@ -26,17 +26,21 @@ class OdpSegmentManager: """Schedules connections to ODP for audience segmentation and caches the results.""" - def __init__(self, odp_config: OdpConfig, segments_cache: OptimizelySegmentsCache, - zaius_manager: ZaiusGraphQLApiManager, - logger: Optional[optimizely_logger.Logger] = None) -> None: + def __init__( + self, + odp_config: OdpConfig, + segments_cache: OptimizelySegmentsCache, + zaius_manager: ZaiusGraphQLApiManager, + logger: Optional[optimizely_logger.Logger] = None + ) -> None: self.odp_config = odp_config self.segments_cache = segments_cache self.zaius_manager = zaius_manager self.logger = logger or optimizely_logger.NoOpLogger() - def fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str]) -> \ - Optional[list[str]]: + def 
fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str] + ) -> Optional[list[str]]: """ Args: user_key: The key for identifying the id type. @@ -64,7 +68,7 @@ def fetch_qualified_segments(self, user_key: str, user_value: str, options: list reset_cache = OptimizelyOdpOption.RESET_CACHE in options if reset_cache: - self._reset() + self.reset() if not ignore_cache and not reset_cache: segments = self.segments_cache.lookup(cache_key) @@ -83,7 +87,7 @@ def fetch_qualified_segments(self, user_key: str, user_value: str, options: list return segments - def _reset(self) -> None: + def reset(self) -> None: self.segments_cache.reset() def make_cache_key(self, user_key: str, user_value: str) -> str: diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py index ffbab40d..ea90ada5 100644 --- a/tests/test_odp_event_manager.py +++ b/tests/test_odp_event_manager.py @@ -411,6 +411,7 @@ def test_odp_event_manager_events_before_odp_ready(self, *args): event_manager.send_event(**self.events[1]) odp_config.update(self.api_key, self.api_host, []) + event_manager.update_config() event_manager.event_queue.join() event_manager.send_event(**self.events[0]) @@ -423,6 +424,7 @@ def test_odp_event_manager_events_before_odp_ready(self, *args): mock_logger.debug.assert_has_calls([ mock.call('ODP event queue: cannot send before the datafile has loaded.'), mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call('ODP event queue: received update config signal.'), mock.call('ODP event queue: adding event.'), mock.call('ODP event queue: adding event.'), mock.call('ODP event queue: received flush signal.'), @@ -442,6 +444,7 @@ def test_odp_event_manager_events_before_odp_disabled(self, *args): event_manager.send_event(**self.events[1]) odp_config.update(None, None, []) + event_manager.update_config() event_manager.event_queue.join() event_manager.send_event(**self.events[0]) @@ -453,6 +456,7 @@ def 
test_odp_event_manager_events_before_odp_disabled(self, *args): mock_logger.debug.assert_has_calls([ mock.call('ODP event queue: cannot send before the datafile has loaded.'), mock.call('ODP event queue: cannot send before the datafile has loaded.'), + mock.call('ODP event queue: received update config signal.'), mock.call(Errors.ODP_NOT_INTEGRATED), mock.call(Errors.ODP_NOT_INTEGRATED) ]) @@ -496,20 +500,25 @@ def test_odp_event_manager_disabled_after_events_in_queue(self, *args): odp_config = OdpConfig(self.api_key, self.api_host) event_manager = OdpEventManager(odp_config, mock_logger) - event_manager.batch_size = 2 + event_manager.batch_size = 3 with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) - - with mock.patch.object(event_manager.zaius_manager, 'send_odp_events') as mock_send: odp_config.update(None, None, []) + event_manager.update_config() + + with mock.patch.object( + event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: event_manager.start() + event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) + event_manager.send_event(**self.events[0]) event_manager.event_queue.join() self.assertEqual(len(event_manager._current_batch), 0) mock_logger.debug.assert_any_call(Errors.ODP_NOT_INTEGRATED) mock_logger.error.assert_not_called() - mock_send.assert_not_called() + mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) event_manager.stop() diff --git a/tests/test_odp_manager.py b/tests/test_odp_manager.py new file mode 100644 index 00000000..d60d40c9 --- /dev/null +++ b/tests/test_odp_manager.py @@ -0,0 +1,402 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http:#www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations + +from unittest import mock + +from optimizely import exceptions as optimizely_exception +from optimizely import version +from optimizely.helpers.enums import Errors +from optimizely.odp.lru_cache import OptimizelySegmentsCache, LRUCache +from optimizely.odp.odp_config import OdpConfig +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_manager import OdpManager +from optimizely.odp.odp_segment_manager import OdpSegmentManager +from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager +from optimizely.odp.zaius_rest_api_manager import ZaiusRestApiManager +from tests import base + + +class OdpManagerTest(base.BaseTest): + + def test_configurations_disable_odp(self): + mock_logger = mock.MagicMock() + manager = OdpManager(True, OptimizelySegmentsCache, logger=mock_logger) + + mock_logger.info.assert_called_once_with('ODP is disabled.') + manager.update_odp_config('valid', 'host', []) + self.assertIsNone(manager.odp_config.get_api_key()) + self.assertIsNone(manager.odp_config.get_api_host()) + + manager.fetch_qualified_segments('user1', []) + mock_logger.error.assert_called_once_with(Errors.ODP_NOT_ENABLED) + + # these call should be dropped gracefully with None + manager.identify_user('user1') + + self.assertRaisesRegex(optimizely_exception.OdpNotEnabled, Errors.ODP_NOT_ENABLED, + manager.send_event, 't1', 'a1', {}, {}) + + self.assertIsNone(manager.event_manager) + self.assertIsNone(manager.segment_manager) + + def test_fetch_qualified_segments(self): + mock_logger = 
mock.MagicMock() + segment_manager = OdpSegmentManager(OdpConfig(), OptimizelySegmentsCache, + ZaiusGraphQLApiManager(mock_logger), mock_logger) + + manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, logger=mock_logger) + + with mock.patch.object(segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', []) + + with mock.patch.object(segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', ['IGNORE_CACHE']) + + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', ['IGNORE_CACHE']) + + def test_fetch_qualified_segments__disabled(self): + mock_logger = mock.MagicMock() + segment_manager = OdpSegmentManager(OdpConfig(), OptimizelySegmentsCache, + ZaiusGraphQLApiManager(mock_logger), mock_logger) + + manager = OdpManager(True, OptimizelySegmentsCache, segment_manager, logger=mock_logger) + + with mock.patch.object(segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + mock_logger.error.assert_called_once_with(Errors.ODP_NOT_ENABLED) + mock_fetch_qualif_segments.assert_not_called() + + def test_fetch_qualified_segments__segment_mgr_is_none(self): + """ + When segment manager is None, then fetching segment + should take place using the default segment manager. 
+ """ + mock_logger = mock.MagicMock() + manager = OdpManager(False, LRUCache(10, 20), logger=mock_logger) + manager.update_odp_config('api_key', 'api_host', []) + + with mock.patch.object(manager.segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + + mock_logger.error.assert_not_called() + mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', []) + + def test_fetch_qualified_segments__seg_cache_and_seg_mgr_are_none(self): + """ + When segment cache and segment manager are None, then fetching segment + should take place using the default managers. + """ + mock_logger = mock.MagicMock() + manager = OdpManager(False, mock_logger) + manager.update_odp_config('api_key', 'api_host', []) + + with mock.patch.object(manager.segment_manager, 'fetch_qualified_segments') as mock_fetch_qualif_segments: + manager.fetch_qualified_segments('user1', []) + + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + mock_fetch_qualif_segments.assert_called_once_with('fs_user_id', 'user1', []) + + def test_identify_user_datafile_not_ready(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + + with mock.patch.object(event_manager, 'identify_user') as mock_identify_user: + manager.identify_user('user1') + + mock_identify_user.assert_called_once_with('user1') + mock_logger.error.assert_not_called() + + def test_identify_user_odp_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('key1', 'host1', []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.identify_user('user1') + + 
mock_dispatch_event.assert_called_once_with({ + 'type': 'fullstack', + 'action': 'identified', + 'identifiers': {'fs_user_id': 'user1'}, + 'data': { + 'idempotence_id': mock.ANY, + 'data_source_type': 'sdk', + 'data_source': 'python-sdk', + 'data_source_version': version.__version__ + }}) + mock_logger.error.assert_not_called() + + def test_identify_user_odp_not_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.update_odp_config(None, None, []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.identify_user('user1') + + mock_dispatch_event.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('Odp config was not changed.') + mock_logger.debug.assert_any_call('ODP identify event is not dispatched (ODP not integrated).') + + def test_identify_user_odp_disabled(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.enabled = False + + with mock.patch.object(event_manager, 'identify_user') as mock_identify_user: + manager.identify_user('user1') + + mock_identify_user.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP identify event is not dispatched (ODP disabled).') + + def test_send_event_datafile_not_ready(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.send_event('t1', 'a1', {'id-key1': 
'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP event queue: cannot send before the datafile has loaded.') + + def test_send_event_odp_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('key1', 'host1', []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_called_once_with({ + 'type': 't1', + 'action': 'a1', + 'identifiers': {'id-key1': 'id-val-1'}, + 'data': { + 'idempotence_id': mock.ANY, + 'data_source_type': 'sdk', + 'data_source': 'python-sdk', + 'data_source_version': version.__version__, + 'key1': 'val1' + }}) + + def test_send_event_odp_not_integrated(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.update_odp_config(None, None, []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + self.assertRaisesRegex(optimizely_exception.OdpNotIntegrated, Errors.ODP_NOT_INTEGRATED, + manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.debug.assert_any_call('Odp config was not changed.') + mock_logger.error.assert_not_called() + + def test_send_event_odp_disabled(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.enabled = False + + with 
mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + self.assertRaisesRegex(optimizely_exception.OdpNotEnabled, Errors.ODP_NOT_ENABLED, + manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + + def test_send_event_odp_disabled__event_manager_not_available(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.event_manager = False + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + self.assertRaisesRegex(optimizely_exception.OdpNotEnabled, Errors.ODP_NOT_ENABLED, + manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + + mock_dispatch_event.assert_not_called() + mock_logger.debug.assert_not_called() + mock_logger.error.assert_not_called() + + def test_send_event_invalid_data(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('key1', 'host1', []) + + with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: + self.assertRaisesRegex(optimizely_exception.OdpInvalidData, Errors.ODP_INVALID_DATA, + manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'invalid-item': {}}) + + mock_dispatch_event.assert_not_called() + mock_logger.error.assert_not_called() + + def test_config_not_changed(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager.update_odp_config(None, None, []) + 
mock_logger.debug.assert_called_with('Odp config was not changed.') + mock_logger.error.assert_not_called() + + def test_update_odp_config__reset_called(self): + # build segment manager + mock_logger = mock.MagicMock() + segment_manager = OdpSegmentManager(OdpConfig(), OptimizelySegmentsCache, + ZaiusGraphQLApiManager(mock_logger), mock_logger) + # build event manager + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + + manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, event_manager, mock_logger) + + with mock.patch.object(segment_manager, 'reset') as mock_reset: + manager.update_odp_config('key1', 'host1', []) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key1', 'host1', []) + mock_reset.assert_not_called() + + manager.update_odp_config('key2', 'host1', []) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', []) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['a']) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['a', 'b']) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['c']) + mock_reset.assert_called_once() + mock_reset.reset_mock() + + manager.update_odp_config('key2', 'host2', ['c']) + mock_reset.assert_not_called() + + manager.update_odp_config(None, None, []) + mock_reset.assert_called_once() + mock_logger.error.assert_not_called() + + def test_update_odp_config__update_config_called(self): + """ + Test if event_manager.update_config is called when change + to odp_config is made or not in OdpManager. 
+ """ + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + + with mock.patch.object(event_manager, 'update_config') as mock_update: + first_api_key = manager.odp_config.get_api_key() + manager.update_odp_config('key1', 'host1', []) + second_api_key = manager.odp_config.get_api_key() + + mock_update.assert_called_once() + mock_logger.debug.assert_not_called() + self.assertEqual(first_api_key, None) + self.assertEqual(second_api_key, 'key1') + + with mock.patch.object(event_manager, 'update_config') as mock_update: + first_api_key = manager.odp_config.get_api_key() + manager.update_odp_config('key2', 'host1', []) + second_api_key = manager.odp_config.get_api_key() + + mock_update.assert_called_once() + mock_logger.debug.assert_not_called() + self.assertEqual(first_api_key, 'key1') + self.assertEqual(second_api_key, 'key2') + + with mock.patch.object(event_manager, 'update_config') as mock_update: + first_api_key = manager.odp_config.get_api_key() + manager.update_odp_config('key2', 'host1', []) + second_api_key = manager.odp_config.get_api_key() + + # event_manager.update_config not called when no change to odp_config + mock_update.assert_not_called() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('Odp config was not changed.') + self.assertEqual(first_api_key, 'key2') + self.assertEqual(second_api_key, 'key2') + + def test_update_odp_config__odp_config_propagated_properly(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('key1', 'host1', ['a', 'b']) + + self.assertEqual(manager.segment_manager.odp_config.get_api_key(), 'key1') + 
self.assertEqual(manager.segment_manager.odp_config.get_api_host(), 'host1') + self.assertEqual(manager.segment_manager.odp_config.get_segments_to_check(), ['a', 'b']) + self.assertEqual(manager.event_manager.odp_config.get_api_key(), 'key1') + self.assertEqual(manager.event_manager.odp_config.get_api_host(), 'host1') + self.assertEqual(manager.event_manager.odp_config.get_segments_to_check(), ['a', 'b']) + + # odp disabled with invalid apiKey (apiKey/apiHost propagated into submanagers) + manager.update_odp_config(None, None, []) + + self.assertEqual(manager.segment_manager.odp_config.get_api_key(), None) + self.assertEqual(manager.segment_manager.odp_config.get_api_host(), None) + self.assertEqual(manager.segment_manager.odp_config.get_segments_to_check(), []) + self.assertEqual(manager.event_manager.odp_config.get_api_key(), None) + self.assertEqual(manager.event_manager.odp_config.get_api_host(), None) + self.assertEqual(manager.event_manager.odp_config.get_segments_to_check(), []) + + manager.update_odp_config(None, None, ['a', 'b']) + self.assertEqual(manager.segment_manager.odp_config.get_segments_to_check(), ['a', 'b']) + self.assertEqual(manager.event_manager.odp_config.get_segments_to_check(), ['a', 'b']) + mock_logger.error.assert_not_called() + + def test_segments_cache_default_settings(self): + manager = OdpManager(False) + segments_cache = manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 10_000) + self.assertEqual(segments_cache.timeout, 600) From 082f171a43b18d08fed37acd4060f914921b6520 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Mon, 26 Sep 2022 15:16:52 -0700 Subject: [PATCH 25/68] chore: update tests because of custom json encoder (#407) * chore: update tests because of custome json encoder * update github ticket reference check --- .github/workflows/ticket_reference_check.yml | 2 +- tests/test_odp_zaius_rest_api_manager.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git 
a/.github/workflows/ticket_reference_check.yml b/.github/workflows/ticket_reference_check.yml index d2829e0c..3d58f804 100644 --- a/.github/workflows/ticket_reference_check.yml +++ b/.github/workflows/ticket_reference_check.yml @@ -13,4 +13,4 @@ jobs: - name: Check for Jira ticket reference uses: optimizely/github-action-ticket-reference-checker-public@master with: - bodyRegex: 'OASIS-(?\d+)' + bodyRegex: 'FSSDK-(?\d+)' diff --git a/tests/test_odp_zaius_rest_api_manager.py b/tests/test_odp_zaius_rest_api_manager.py index e7327d6f..6e1835d5 100644 --- a/tests/test_odp_zaius_rest_api_manager.py +++ b/tests/test_odp_zaius_rest_api_manager.py @@ -17,6 +17,7 @@ from requests import exceptions as request_exception from optimizely.helpers.enums import OdpRestApiConfig +from optimizely.odp.odp_event import OdpEvent, OdpEventEncoder from optimizely.odp.zaius_rest_api_manager import ZaiusRestApiManager from . import base @@ -26,10 +27,9 @@ class ZaiusRestApiManagerTest(base.BaseTest): user_value = "test-user-value" api_key = "test-api-key" api_host = "test-host" - events = [ - {"type": "t1", "action": "a1", "identifiers": {"id-key-1": "id-value-1"}, "data": {"key-1": "value1"}}, - {"type": "t2", "action": "a2", "identifiers": {"id-key-2": "id-value-2"}, "data": {"key-2": "value2"}}, + OdpEvent('t1', 'a1', {"id-key-1": "id-value-1"}, {"key-1": "value1"}), + OdpEvent('t2', 'a2', {"id-key-2": "id-value-2"}, {"key-2": "value2"}) ] def test_send_odp_events__valid_request(self): @@ -42,7 +42,7 @@ def test_send_odp_events__valid_request(self): request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} mock_request_post.assert_called_once_with(url=self.api_host + "/v3/events", headers=request_headers, - data=json.dumps(self.events), + data=json.dumps(self.events, cls=OdpEventEncoder), timeout=OdpRestApiConfig.REQUEST_TIMEOUT) def test_send_odp_ovents_success(self): @@ -58,7 +58,8 @@ def test_send_odp_ovents_success(self): self.assertFalse(should_retry) def 
test_send_odp_events_invalid_json_no_retry(self): - events = {1, 2, 3} # using a set to trigger JSON-not-serializable error + """Using a set to trigger JSON-not-serializable error.""" + events = {1, 2, 3} with mock.patch('requests.post') as mock_request_post, \ mock.patch('optimizely.logger') as mock_logger: From 92ab102a5bd35d98e3b76d2b02ca82b24e6b0d89 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Tue, 27 Sep 2022 09:12:11 -0400 Subject: [PATCH 26/68] refactor: remove odp config from constructors (#406) * remove odp_config from event_manager init * remove odp_config from segment_manager init --- optimizely/odp/odp_event_manager.py | 23 +++++--- optimizely/odp/odp_manager.py | 15 ++--- optimizely/odp/odp_segment_manager.py | 18 +++--- tests/test_odp_event_manager.py | 85 +++++++++++++++------------ tests/test_odp_manager.py | 32 +++++----- tests/test_odp_segment_manager.py | 32 +++++----- 6 files changed, 109 insertions(+), 96 deletions(-) diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py index f608213e..ae8f4066 100644 --- a/optimizely/odp/odp_event_manager.py +++ b/optimizely/odp/odp_event_manager.py @@ -44,23 +44,21 @@ class OdpEventManager: def __init__( self, - odp_config: OdpConfig, logger: Optional[_logging.Logger] = None, api_manager: Optional[ZaiusRestApiManager] = None ): """OdpEventManager init method to configure event batching. Args: - odp_config: ODP integration config. logger: Optional component which provides a log method to log messages. By default nothing would be logged. api_manager: Optional component which sends events to ODP. 
""" self.logger = logger or _logging.NoOpLogger() self.zaius_manager = api_manager or ZaiusRestApiManager(self.logger) - self.odp_config = odp_config - self.api_key = odp_config.get_api_key() - self.api_host = odp_config.get_api_host() + self.odp_config: Optional[OdpConfig] = None + self.api_key: Optional[str] = None + self.api_host: Optional[str] = None self.event_queue: Queue[OdpEvent | Signal] = Queue(OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY) self.batch_size = OdpEventManagerConfig.DEFAULT_BATCH_SIZE @@ -78,12 +76,16 @@ def is_running(self) -> bool: """Property to check if consumer thread is alive or not.""" return self.thread.is_alive() - def start(self) -> None: + def start(self, odp_config: OdpConfig) -> None: """Starts the batch processing thread to batch events.""" if self.is_running: self.logger.warning('ODP event queue already started.') return + self.odp_config = odp_config + self.api_host = self.odp_config.get_api_host() + self.api_key = self.odp_config.get_api_key() + self.thread.start() def _run(self) -> None: @@ -217,6 +219,10 @@ def stop(self) -> None: def send_event(self, type: str, action: str, identifiers: dict[str, str], data: OdpDataDict) -> None: """Create OdpEvent and add it to the event queue.""" + if not self.odp_config: + self.logger.debug('ODP event queue: cannot send before config has been set.') + return + odp_state = self.odp_config.odp_state() if odp_state == OdpConfigState.UNDETERMINED: self.logger.debug('ODP event queue: cannot send before the datafile has loaded.') @@ -260,5 +266,6 @@ def _update_config(self) -> None: if len(self._current_batch) > 0: self._flush_batch() - self.api_host = self.odp_config.get_api_host() - self.api_key = self.odp_config.get_api_key() + if self.odp_config: + self.api_host = self.odp_config.get_api_host() + self.api_key = self.odp_config.get_api_key() diff --git a/optimizely/odp/odp_manager.py b/optimizely/odp/odp_manager.py index 72c61514..6198cf89 100644 --- a/optimizely/odp/odp_manager.py +++ 
b/optimizely/odp/odp_manager.py @@ -23,7 +23,6 @@ from optimizely.odp.odp_config import OdpConfig, OdpConfigState from optimizely.odp.odp_event_manager import OdpEventManager from optimizely.odp.odp_segment_manager import OdpSegmentManager -from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager class OdpManager: @@ -55,21 +54,15 @@ def __init__( OdpSegmentsCacheConfig.DEFAULT_CAPACITY, OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS ) - self.segment_manager = OdpSegmentManager( - self.odp_config, - segments_cache, - ZaiusGraphQLApiManager(logger), logger - ) - else: - self.segment_manager.odp_config = self.odp_config + self.segment_manager = OdpSegmentManager(segments_cache, logger=self.logger) if event_manager: - event_manager.odp_config = self.odp_config self.event_manager = event_manager else: - self.event_manager = OdpEventManager(self.odp_config, logger) + self.event_manager = OdpEventManager(self.logger) - self.event_manager.start() + self.segment_manager.odp_config = self.odp_config + self.event_manager.start(self.odp_config) def fetch_qualified_segments(self, user_id: str, options: list[str]) -> Optional[list[str]]: if not self.enabled or not self.segment_manager: diff --git a/optimizely/odp/odp_segment_manager.py b/optimizely/odp/odp_segment_manager.py index a5d363fd..d01fede0 100644 --- a/optimizely/odp/odp_segment_manager.py +++ b/optimizely/odp/odp_segment_manager.py @@ -17,9 +17,9 @@ from optimizely import logger as optimizely_logger from optimizely.helpers.enums import Errors +from optimizely.odp.odp_config import OdpConfig from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption from optimizely.odp.lru_cache import OptimizelySegmentsCache -from optimizely.odp.odp_config import OdpConfig from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager @@ -28,16 +28,15 @@ class OdpSegmentManager: def __init__( self, - odp_config: OdpConfig, segments_cache: OptimizelySegmentsCache, - zaius_manager: 
ZaiusGraphQLApiManager, + zaius_manager: Optional[ZaiusGraphQLApiManager] = None, logger: Optional[optimizely_logger.Logger] = None ) -> None: - self.odp_config = odp_config + self.odp_config: Optional[OdpConfig] = None self.segments_cache = segments_cache - self.zaius_manager = zaius_manager self.logger = logger or optimizely_logger.NoOpLogger() + self.zaius_manager = zaius_manager or ZaiusGraphQLApiManager(self.logger) def fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str] ) -> Optional[list[str]]: @@ -50,11 +49,12 @@ def fetch_qualified_segments(self, user_key: str, user_value: str, options: list Returns: Qualified segments for the user from the cache or the ODP server if not in the cache. """ - odp_api_key = self.odp_config.get_api_key() - odp_api_host = self.odp_config.get_api_host() - odp_segments_to_check = self.odp_config.get_segments_to_check() + if self.odp_config: + odp_api_key = self.odp_config.get_api_key() + odp_api_host = self.odp_config.get_api_host() + odp_segments_to_check = self.odp_config.get_segments_to_check() - if not (odp_api_key and odp_api_host): + if not self.odp_config or not (odp_api_key and odp_api_host): self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('api_key/api_host not defined')) return None diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py index ea90ada5..766c8ad1 100644 --- a/tests/test_odp_event_manager.py +++ b/tests/test_odp_event_manager.py @@ -100,8 +100,8 @@ def test_invalid_odp_event(self, *args): def test_odp_event_manager_success(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): event_manager.send_event(**self.events[0]) @@ -116,8 +116,8 @@ def test_odp_event_manager_success(self, *args): def 
test_odp_event_manager_batch(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) event_manager.batch_size = 2 with mock.patch.object( @@ -135,8 +135,8 @@ def test_odp_event_manager_batch(self, *args): def test_odp_event_manager_multiple_batches(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) event_manager.batch_size = 2 batch_count = 4 @@ -164,7 +164,8 @@ def test_odp_event_manager_multiple_batches(self, *args): def test_odp_event_manager_backlog(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager = OdpEventManager(mock_logger) + event_manager.odp_config = self.odp_config event_manager.batch_size = 2 batch_count = 4 @@ -178,7 +179,7 @@ def test_odp_event_manager_backlog(self, *args): with mock.patch.object( event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: - event_manager.start() + event_manager.start(self.odp_config) event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) event_manager.stop() @@ -198,8 +199,8 @@ def test_odp_event_manager_backlog(self, *args): def test_odp_event_manager_flush(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) with mock.patch.object( event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False @@ -217,8 +218,8 @@ def test_odp_event_manager_flush(self, *args): def test_odp_event_manager_multiple_flushes(self, *args): mock_logger = mock.Mock() - event_manager = 
OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) flush_count = 4 with mock.patch.object( @@ -244,8 +245,8 @@ def test_odp_event_manager_multiple_flushes(self, *args): def test_odp_event_manager_retry_failure(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) number_of_tries = event_manager.retry_count + 1 @@ -269,8 +270,8 @@ def test_odp_event_manager_retry_failure(self, *args): def test_odp_event_manager_retry_success(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) with mock.patch.object( event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, side_effect=[True, True, False] @@ -289,8 +290,8 @@ def test_odp_event_manager_retry_success(self, *args): def test_odp_event_manager_send_failure(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) with mock.patch.object( event_manager.zaius_manager, @@ -313,8 +314,8 @@ def test_odp_event_manager_disabled(self, *args): mock_logger = mock.Mock() odp_config = OdpConfig() odp_config.update(None, None, None) - event_manager = OdpEventManager(odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(odp_config) event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -330,7 +331,9 @@ def test_odp_event_manager_queue_full(self, *args): mock_logger = mock.Mock() with 
mock.patch('optimizely.helpers.enums.OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY', 1): - event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager = OdpEventManager(mock_logger) + + event_manager.odp_config = self.odp_config with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): event_manager.send_event(**self.events[0]) @@ -344,8 +347,8 @@ def test_odp_event_manager_queue_full(self, *args): def test_odp_event_manager_thread_exception(self, *args): mock_logger = mock.Mock() - event_manager = MockOdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = MockOdpEventManager(mock_logger) + event_manager.start(self.odp_config) event_manager.send_event(**self.events[0]) time.sleep(.1) @@ -360,8 +363,8 @@ def test_odp_event_manager_thread_exception(self, *args): def test_odp_event_manager_override_default_data(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(self.odp_config) event = deepcopy(self.events[0]) event['data']['data_source'] = 'my-app' @@ -381,9 +384,9 @@ def test_odp_event_manager_override_default_data(self, *args): def test_odp_event_manager_flush_timeout(self, *args): mock_logger = mock.Mock() - event_manager = OdpEventManager(self.odp_config, mock_logger) + event_manager = OdpEventManager(mock_logger) event_manager.flush_interval = .5 - event_manager.start() + event_manager.start(self.odp_config) with mock.patch.object( event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False @@ -401,8 +404,8 @@ def test_odp_event_manager_flush_timeout(self, *args): def test_odp_event_manager_events_before_odp_ready(self, *args): mock_logger = mock.Mock() odp_config = OdpConfig() - event_manager = OdpEventManager(odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + 
event_manager.start(odp_config) with mock.patch.object( event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False @@ -436,8 +439,8 @@ def test_odp_event_manager_events_before_odp_ready(self, *args): def test_odp_event_manager_events_before_odp_disabled(self, *args): mock_logger = mock.Mock() odp_config = OdpConfig() - event_manager = OdpEventManager(odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(odp_config) with mock.patch.object(event_manager.zaius_manager, 'send_odp_events') as mock_send: event_manager.send_event(**self.events[0]) @@ -467,8 +470,8 @@ def test_odp_event_manager_events_before_odp_disabled(self, *args): def test_odp_event_manager_disabled_after_init(self, *args): mock_logger = mock.Mock() odp_config = OdpConfig(self.api_key, self.api_host) - event_manager = OdpEventManager(odp_config, mock_logger) - event_manager.start() + event_manager = OdpEventManager(mock_logger) + event_manager.start(odp_config) event_manager.batch_size = 2 with mock.patch.object( @@ -499,19 +502,20 @@ def test_odp_event_manager_disabled_after_events_in_queue(self, *args): mock_logger = mock.Mock() odp_config = OdpConfig(self.api_key, self.api_host) - event_manager = OdpEventManager(odp_config, mock_logger) + event_manager = OdpEventManager(mock_logger) + event_manager.odp_config = odp_config event_manager.batch_size = 3 with mock.patch('optimizely.odp.odp_event_manager.OdpEventManager.is_running', True): event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) - odp_config.update(None, None, []) - event_manager.update_config() with mock.patch.object( event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: - event_manager.start() + event_manager.start(odp_config) + odp_config.update(None, None, []) + event_manager.update_config() event_manager.send_event(**self.events[0]) 
event_manager.send_event(**self.events[1]) event_manager.send_event(**self.events[0]) @@ -522,3 +526,10 @@ def test_odp_event_manager_disabled_after_events_in_queue(self, *args): mock_logger.error.assert_not_called() mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) event_manager.stop() + + def test_send_event_before_config_set(self, *args): + mock_logger = mock.Mock() + + event_manager = OdpEventManager(mock_logger) + event_manager.send_event(**self.events[0]) + mock_logger.debug.assert_called_with('ODP event queue: cannot send before config has been set.') diff --git a/tests/test_odp_manager.py b/tests/test_odp_manager.py index d60d40c9..bef4cae9 100644 --- a/tests/test_odp_manager.py +++ b/tests/test_odp_manager.py @@ -53,7 +53,7 @@ def test_configurations_disable_odp(self): def test_fetch_qualified_segments(self): mock_logger = mock.MagicMock() - segment_manager = OdpSegmentManager(OdpConfig(), OptimizelySegmentsCache, + segment_manager = OdpSegmentManager(OptimizelySegmentsCache, ZaiusGraphQLApiManager(mock_logger), mock_logger) manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, logger=mock_logger) @@ -74,7 +74,7 @@ def test_fetch_qualified_segments(self): def test_fetch_qualified_segments__disabled(self): mock_logger = mock.MagicMock() - segment_manager = OdpSegmentManager(OdpConfig(), OptimizelySegmentsCache, + segment_manager = OdpSegmentManager(OptimizelySegmentsCache, ZaiusGraphQLApiManager(mock_logger), mock_logger) manager = OdpManager(True, OptimizelySegmentsCache, segment_manager, logger=mock_logger) @@ -129,7 +129,7 @@ def test_identify_user_datafile_not_ready(self): def test_identify_user_odp_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) 
manager.update_odp_config('key1', 'host1', []) @@ -151,7 +151,7 @@ def test_identify_user_odp_integrated(self): def test_identify_user_odp_not_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.update_odp_config(None, None, []) @@ -166,7 +166,7 @@ def test_identify_user_odp_not_integrated(self): def test_identify_user_odp_disabled(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.enabled = False @@ -180,7 +180,7 @@ def test_identify_user_odp_disabled(self): def test_send_event_datafile_not_ready(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) @@ -193,7 +193,7 @@ def test_send_event_datafile_not_ready(self): def test_send_event_odp_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) manager.update_odp_config('key1', 'host1', []) @@ -215,7 +215,7 @@ def test_send_event_odp_integrated(self): def test_send_event_odp_not_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = 
OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.update_odp_config(None, None, []) @@ -230,7 +230,7 @@ def test_send_event_odp_not_integrated(self): def test_send_event_odp_disabled(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.enabled = False @@ -245,7 +245,7 @@ def test_send_event_odp_disabled(self): def test_send_event_odp_disabled__event_manager_not_available(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.event_manager = False @@ -260,7 +260,7 @@ def test_send_event_odp_disabled__event_manager_not_available(self): def test_send_event_invalid_data(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) manager.update_odp_config('key1', 'host1', []) @@ -274,7 +274,7 @@ def test_send_event_invalid_data(self): def test_config_not_changed(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.update_odp_config(None, None, []) @@ -284,10 +284,10 @@ def test_config_not_changed(self): def 
test_update_odp_config__reset_called(self): # build segment manager mock_logger = mock.MagicMock() - segment_manager = OdpSegmentManager(OdpConfig(), OptimizelySegmentsCache, + segment_manager = OdpSegmentManager(OptimizelySegmentsCache, ZaiusGraphQLApiManager(mock_logger), mock_logger) # build event manager - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, event_manager, mock_logger) @@ -332,7 +332,7 @@ def test_update_odp_config__update_config_called(self): to odp_config is made or not in OdpManager. """ mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) with mock.patch.object(event_manager, 'update_config') as mock_update: @@ -369,7 +369,7 @@ def test_update_odp_config__update_config_called(self): def test_update_odp_config__odp_config_propagated_properly(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(OdpConfig(), mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) manager.update_odp_config('key1', 'host1', ['a', 'b']) diff --git a/tests/test_odp_segment_manager.py b/tests/test_odp_segment_manager.py index 1dad6fdd..34d04dac 100644 --- a/tests/test_odp_segment_manager.py +++ b/tests/test_odp_segment_manager.py @@ -36,8 +36,9 @@ def test_empty_list_with_no_segments_to_check(self): odp_config = OdpConfig(self.api_key, self.api_host, []) mock_logger = mock.MagicMock() segments_cache = LRUCache(1000, 1000) - api = ZaiusGraphQLApiManager() - segment_manager = OdpSegmentManager(odp_config, 
segments_cache, api, mock_logger) + api = ZaiusGraphQLApiManager(mock_logger) + segment_manager = OdpSegmentManager(segments_cache, api, mock_logger) + segment_manager.odp_config = odp_config with mock.patch.object(api, 'fetch_segments') as mock_fetch_segments: segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) @@ -56,9 +57,9 @@ def test_fetch_segments_success_cache_miss(self): odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) mock_logger = mock.MagicMock() segments_cache = LRUCache(1000, 1000) - api = ZaiusGraphQLApiManager() - segment_manager = OdpSegmentManager(odp_config, segments_cache, api, mock_logger) + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config cache_key = segment_manager.make_cache_key(self.user_key, '123') segment_manager.segments_cache.save(cache_key, ["d"]) @@ -80,14 +81,14 @@ def test_fetch_segments_success_cache_hit(self): odp_config = OdpConfig() odp_config.update(self.api_key, self.api_host, ['c']) mock_logger = mock.MagicMock() - api = ZaiusGraphQLApiManager() segments_cache = LRUCache(1000, 1000) - segment_manager = OdpSegmentManager(odp_config, segments_cache, None, mock_logger) + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) segment_manager.segments_cache.save(cache_key, ['c']) - with mock.patch.object(api, 'fetch_segments') as mock_fetch_segments: + with mock.patch.object(segment_manager.zaius_manager, 'fetch_segments') as mock_fetch_segments: segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) self.assertEqual(segments, ['c']) @@ -97,7 +98,8 @@ def test_fetch_segments_success_cache_hit(self): def test_fetch_segments_missing_api_host_api_key(self): with mock.patch('optimizely.logger') as mock_logger: - segment_manager = 
OdpSegmentManager(OdpConfig(), LRUCache(1000, 1000), None, mock_logger) + segment_manager = OdpSegmentManager(LRUCache(1000, 1000), logger=mock_logger) + segment_manager.odp_config = OdpConfig() segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) self.assertEqual(segments, None) @@ -114,8 +116,8 @@ def test_fetch_segments_network_error(self): odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) mock_logger = mock.MagicMock() segments_cache = LRUCache(1000, 1000) - api = ZaiusGraphQLApiManager(mock_logger) - segment_manager = OdpSegmentManager(odp_config, segments_cache, api, None) + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config with mock.patch('requests.post', side_effect=request_exception.ConnectionError('Connection error')): @@ -128,9 +130,9 @@ def test_options_ignore_cache(self): odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) mock_logger = mock.MagicMock() segments_cache = LRUCache(1000, 1000) - api = ZaiusGraphQLApiManager() - segment_manager = OdpSegmentManager(odp_config, segments_cache, api, mock_logger) + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) segment_manager.segments_cache.save(cache_key, ['d']) @@ -150,9 +152,9 @@ def test_options_reset_cache(self): odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) mock_logger = mock.MagicMock() segments_cache = LRUCache(1000, 1000) - api = ZaiusGraphQLApiManager() - segment_manager = OdpSegmentManager(odp_config, segments_cache, api, mock_logger) + segment_manager = OdpSegmentManager(segments_cache, logger=mock_logger) + segment_manager.odp_config = odp_config cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) segment_manager.segments_cache.save(cache_key, ['d']) 
segment_manager.segments_cache.save('123', ['c', 'd']) @@ -171,7 +173,7 @@ def test_options_reset_cache(self): mock_logger.error.assert_not_called() def test_make_correct_cache_key(self): - segment_manager = OdpSegmentManager(None, None, None, None) + segment_manager = OdpSegmentManager(None) cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) self.assertEqual(cache_key, 'fs_user_id-$-test-user-value') From 193d3c90c9e4bd02bb934d630b7cde2721f32a43 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Wed, 26 Oct 2022 06:28:18 -0700 Subject: [PATCH 27/68] feat: add odp integration w client and user context (#408) * add main functionality for odp integraton w client and user context Co-authored-by: Andy Leap --- README.md | 6 + optimizely/helpers/enums.py | 8 +- optimizely/helpers/sdk_settings.py | 55 +++ optimizely/helpers/validator.py | 70 +++- ...pi_manager.py => odp_event_api_manager.py} | 6 +- optimizely/odp/odp_event_manager.py | 8 +- optimizely/odp/odp_manager.py | 32 +- ..._manager.py => odp_segment_api_manager.py} | 33 +- optimizely/odp/odp_segment_manager.py | 10 +- optimizely/optimizely.py | 131 ++++++- optimizely/optimizely_factory.py | 8 +- optimizely/optimizely_user_context.py | 61 +++- tests/test_lru_cache.py | 2 +- tests/test_odp_config.py | 2 +- ...nager.py => test_odp_event_api_manager.py} | 24 +- tests/test_odp_event_manager.py | 30 +- tests/test_odp_manager.py | 110 +++--- ...ger.py => test_odp_segment_api_manager.py} | 53 ++- tests/test_odp_segment_manager.py | 10 +- tests/test_optimizely.py | 344 ++++++++++++++++-- tests/test_optimizely_factory.py | 21 ++ tests/test_user_context.py | 271 ++++++++++++++ 22 files changed, 1060 insertions(+), 235 deletions(-) create mode 100644 optimizely/helpers/sdk_settings.py rename optimizely/odp/{zaius_rest_api_manager.py => odp_event_api_manager.py} (95%) rename optimizely/odp/{zaius_graphql_api_manager.py => odp_segment_api_manager.py} (85%) rename 
tests/{test_odp_zaius_rest_api_manager.py => test_odp_event_api_manager.py} (90%) rename tests/{test_odp_zaius_graphql_api_manager.py => test_odp_segment_api_manager.py} (90%) diff --git a/README.md b/README.md index f2013e68..041d87f3 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,12 @@ documentation](https://docs.developers.optimizely.com/rollouts/docs). ## Getting Started +### Requirements + +Version `4.0+`: Python 3.7+, PyPy 3.7+ + +Version `3.0+`: Python 2.7+, PyPy 3.4+ + ### Installing the SDK The SDK is available through [PyPi](https://pypi.python.org/pypi?name=optimizely-sdk&:action=display). diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 886d269a..8ba311a1 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -199,13 +199,13 @@ class EventDispatchConfig: REQUEST_TIMEOUT: Final = 10 -class OdpRestApiConfig: - """ODP Rest API configs.""" +class OdpEventApiConfig: + """ODP Events API configs.""" REQUEST_TIMEOUT: Final = 10 -class OdpGraphQLApiConfig: - """ODP GraphQL API configs.""" +class OdpSegmentApiConfig: + """ODP Segments API configs.""" REQUEST_TIMEOUT: Final = 10 diff --git a/optimizely/helpers/sdk_settings.py b/optimizely/helpers/sdk_settings.py new file mode 100644 index 00000000..c55fd654 --- /dev/null +++ b/optimizely/helpers/sdk_settings.py @@ -0,0 +1,55 @@ +# Copyright 2022, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +from typing import Optional + +from optimizely.helpers import enums +from optimizely.odp.lru_cache import OptimizelySegmentsCache +from optimizely.odp.odp_event_manager import OdpEventManager +from optimizely.odp.odp_segment_manager import OdpSegmentManager + + +class OptimizelySdkSettings: + """Contains configuration used for Optimizely Project initialization.""" + + def __init__( + self, + odp_disabled: bool = False, + segments_cache_size: int = enums.OdpSegmentsCacheConfig.DEFAULT_CAPACITY, + segments_cache_timeout_in_secs: int = enums.OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS, + odp_segments_cache: Optional[OptimizelySegmentsCache] = None, + odp_segment_manager: Optional[OdpSegmentManager] = None, + odp_event_manager: Optional[OdpEventManager] = None + ) -> None: + """ + Args: + odp_disabled: Set this flag to true (default = False) to disable ODP features. + segments_cache_size: The maximum size of audience segments cache (optional. default = 10,000). + Set to zero to disable caching. + segments_cache_timeout_in_secs: The timeout in seconds of audience segments cache (optional. default = 600). + Set to zero to disable timeout. + odp_segments_cache: A custom odp segments cache. Required methods include: + `save(key, value)`, `lookup(key) -> value`, and `reset()` + odp_segment_manager: A custom odp segment manager. Required method is: + `fetch_qualified_segments(user_key, user_value, options)`. + odp_event_manager: A custom odp event manager. 
Required method is: + `send_event(type:, action:, identifiers:, data:)` + """ + + self.odp_disabled = odp_disabled + self.segments_cache_size = segments_cache_size + self.segments_cache_timeout_in_secs = segments_cache_timeout_in_secs + self.segments_cache = odp_segments_cache + self.odp_segment_manager = odp_segment_manager + self.odp_event_manager = odp_event_manager diff --git a/optimizely/helpers/validator.py b/optimizely/helpers/validator.py index 7ffe0422..17cff87c 100644 --- a/optimizely/helpers/validator.py +++ b/optimizely/helpers/validator.py @@ -21,6 +21,9 @@ from optimizely.notification_center import NotificationCenter from optimizely.user_profile import UserProfile from . import constants +from ..odp.lru_cache import OptimizelySegmentsCache +from ..odp.odp_event_manager import OdpEventManager +from ..odp.odp_segment_manager import OdpSegmentManager if TYPE_CHECKING: # prevent circular dependenacy by skipping import at runtime @@ -67,10 +70,10 @@ def _has_method(obj: object, method: str) -> bool: method: Method whose presence needs to be determined. Returns: - Boolean depending upon whether the method is available or not. + Boolean depending upon whether the method is available and callable or not. """ - return getattr(obj, method, None) is not None + return callable(getattr(obj, method, None)) def is_config_manager_valid(config_manager: BaseConfigManager) -> bool: @@ -312,3 +315,66 @@ def are_values_same_type(first_val: Any, second_val: Any) -> bool: def are_odp_data_types_valid(data: OdpDataDict) -> bool: valid_types = (str, int, float, bool, type(None)) return all(isinstance(v, valid_types) for v in data.values()) + + +def is_segments_cache_valid(segments_cache: Optional[OptimizelySegmentsCache]) -> bool: + """ Given a segments_cache determine if it is valid or not i.e. provides a reset, lookup and save methods. + + Args: + segments_cache: Provides cache methods: reset, lookup, save. 
+ + Returns: + Boolean depending upon whether segments_cache is valid or not. + """ + if not _has_method(segments_cache, 'reset'): + return False + + if not _has_method(segments_cache, 'lookup'): + return False + + if not _has_method(segments_cache, 'save'): + return False + + return True + + +def is_segment_manager_valid(segment_manager: Optional[OdpSegmentManager]) -> bool: + """ Given a segments_manager determine if it is valid or not. + + Args: + segment_manager: Provides methods fetch_qualified_segments and reset + + Returns: + Boolean depending upon whether segments_manager is valid or not. + """ + if not _has_method(segment_manager, 'fetch_qualified_segments'): + return False + + if not _has_method(segment_manager, 'reset'): + return False + + return True + + +def is_event_manager_valid(event_manager: Optional[OdpEventManager]) -> bool: + """ Given an event_manager determine if it is valid or not. + + Args: + event_manager: Provides send_event method + + Returns: + Boolean depending upon whether event_manager is valid or not. 
+ """ + if not hasattr(event_manager, 'is_running'): + return False + + if not _has_method(event_manager, 'send_event'): + return False + + if not _has_method(event_manager, 'stop'): + return False + + if not _has_method(event_manager, 'update_config'): + return False + + return True diff --git a/optimizely/odp/zaius_rest_api_manager.py b/optimizely/odp/odp_event_api_manager.py similarity index 95% rename from optimizely/odp/zaius_rest_api_manager.py rename to optimizely/odp/odp_event_api_manager.py index 62f7c1c7..00c8050a 100644 --- a/optimizely/odp/zaius_rest_api_manager.py +++ b/optimizely/odp/odp_event_api_manager.py @@ -20,7 +20,7 @@ from requests.exceptions import RequestException, ConnectionError, Timeout from optimizely import logger as optimizely_logger -from optimizely.helpers.enums import Errors, OdpRestApiConfig +from optimizely.helpers.enums import Errors, OdpEventApiConfig from optimizely.odp.odp_event import OdpEvent, OdpEventEncoder """ @@ -37,7 +37,7 @@ """ -class ZaiusRestApiManager: +class OdpEventApiManager: """Provides an internal service for ODP event REST api access.""" def __init__(self, logger: Optional[optimizely_logger.Logger] = None): @@ -69,7 +69,7 @@ def send_odp_events(self, api_key: str, api_host: str, events: list[OdpEvent]) - response = requests.post(url=url, headers=request_headers, data=payload_dict, - timeout=OdpRestApiConfig.REQUEST_TIMEOUT) + timeout=OdpEventApiConfig.REQUEST_TIMEOUT) response.raise_for_status() diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py index ae8f4066..ec1e3fc9 100644 --- a/optimizely/odp/odp_event_manager.py +++ b/optimizely/odp/odp_event_manager.py @@ -23,7 +23,7 @@ from optimizely.helpers.enums import OdpEventManagerConfig, Errors, OdpManagerConfig from .odp_config import OdpConfig, OdpConfigState from .odp_event import OdpEvent, OdpDataDict -from .zaius_rest_api_manager import ZaiusRestApiManager +from .odp_event_api_manager import OdpEventApiManager class 
Signal(Enum): @@ -45,7 +45,7 @@ class OdpEventManager: def __init__( self, logger: Optional[_logging.Logger] = None, - api_manager: Optional[ZaiusRestApiManager] = None + api_manager: Optional[OdpEventApiManager] = None ): """OdpEventManager init method to configure event batching. @@ -54,7 +54,7 @@ def __init__( api_manager: Optional component which sends events to ODP. """ self.logger = logger or _logging.NoOpLogger() - self.zaius_manager = api_manager or ZaiusRestApiManager(self.logger) + self.api_manager = api_manager or OdpEventApiManager(self.logger) self.odp_config: Optional[OdpConfig] = None self.api_key: Optional[str] = None @@ -158,7 +158,7 @@ def _flush_batch(self) -> None: for i in range(1 + self.retry_count): try: - should_retry = self.zaius_manager.send_odp_events(self.api_key, self.api_host, self._current_batch) + should_retry = self.api_manager.send_odp_events(self.api_key, self.api_host, self._current_batch) except Exception as error: should_retry = False self.logger.error(Errors.ODP_EVENT_FAILED.format(f'Error: {error} {self._current_batch}')) diff --git a/optimizely/odp/odp_manager.py b/optimizely/odp/odp_manager.py index 6198cf89..b07f0c9f 100644 --- a/optimizely/odp/odp_manager.py +++ b/optimizely/odp/odp_manager.py @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ from typing import Optional, Any -from optimizely import exceptions as optimizely_exception from optimizely import logger as optimizely_logger from optimizely.helpers.enums import Errors, OdpManagerConfig, OdpSegmentsCacheConfig from optimizely.helpers.validator import are_odp_data_types_valid @@ -56,13 +55,8 @@ def __init__( ) self.segment_manager = OdpSegmentManager(segments_cache, logger=self.logger) - if event_manager: - self.event_manager = event_manager - else: - self.event_manager = OdpEventManager(self.logger) - + self.event_manager = self.event_manager or OdpEventManager(self.logger) self.segment_manager.odp_config = self.odp_config - self.event_manager.start(self.odp_config) def fetch_qualified_segments(self, user_id: str, options: list[str]) -> Optional[list[str]]: if not self.enabled or not self.segment_manager: @@ -94,17 +88,18 @@ def send_event(self, type: str, action: str, identifiers: dict[str, str], data: identifiers: A dictionary for identifiers. data: A dictionary for associated data. The default event data will be added to this data before sending to the ODP server. - - Raises custom exception if error is detected. 
""" if not self.enabled or not self.event_manager: - raise optimizely_exception.OdpNotEnabled(Errors.ODP_NOT_ENABLED) + self.logger.error(Errors.ODP_NOT_ENABLED) + return if self.odp_config.odp_state() == OdpConfigState.NOT_INTEGRATED: - raise optimizely_exception.OdpNotIntegrated(Errors.ODP_NOT_INTEGRATED) + self.logger.error(Errors.ODP_NOT_INTEGRATED) + return if not are_odp_data_types_valid(data): - raise optimizely_exception.OdpInvalidData(Errors.ODP_INVALID_DATA) + self.logger.error(Errors.ODP_INVALID_DATA) + return self.event_manager.send_event(type, action, identifiers, data) @@ -122,5 +117,14 @@ def update_odp_config(self, api_key: Optional[str], api_host: Optional[str], if self.segment_manager: self.segment_manager.reset() - if self.event_manager: + if not self.event_manager: + return + + if self.event_manager.is_running: self.event_manager.update_config() + elif self.odp_config.odp_state() == OdpConfigState.INTEGRATED: + self.event_manager.start(self.odp_config) + + def close(self) -> None: + if self.enabled and self.event_manager: + self.event_manager.stop() diff --git a/optimizely/odp/zaius_graphql_api_manager.py b/optimizely/odp/odp_segment_api_manager.py similarity index 85% rename from optimizely/odp/zaius_graphql_api_manager.py rename to optimizely/odp/odp_segment_api_manager.py index 4f2ae38a..dc51c6f6 100644 --- a/optimizely/odp/zaius_graphql_api_manager.py +++ b/optimizely/odp/odp_segment_api_manager.py @@ -20,7 +20,7 @@ from requests.exceptions import RequestException, ConnectionError, Timeout, JSONDecodeError from optimizely import logger as optimizely_logger -from optimizely.helpers.enums import Errors, OdpGraphQLApiConfig +from optimizely.helpers.enums import Errors, OdpSegmentApiConfig """ ODP GraphQL API @@ -105,7 +105,7 @@ """ -class ZaiusGraphQLApiManager: +class OdpSegmentApiManager: """Interface for manging the fetching of audience segments.""" def __init__(self, logger: Optional[optimizely_logger.Logger] = None): @@ -130,10 +130,15 @@ 
def fetch_segments(self, api_key: str, api_host: str, user_key: str, request_headers = {'content-type': 'application/json', 'x-api-key': str(api_key)} - segments_filter = self.make_subset_filter(segments_to_check) query = { - 'query': 'query {customer(' + str(user_key) + ': "' + str(user_value) + '") ' - '{audiences' + segments_filter + ' {edges {node {name state}}}}}' + 'query': + 'query($userId: String, $audiences: [String]) {' + f'customer({user_key}: $userId) ' + '{audiences(subset: $audiences) {edges {node {name state}}}}}', + 'variables': { + 'userId': str(user_value), + 'audiences': segments_to_check + } } try: @@ -146,7 +151,7 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, response = requests.post(url=url, headers=request_headers, data=payload_dict, - timeout=OdpGraphQLApiConfig.REQUEST_TIMEOUT) + timeout=OdpSegmentApiConfig.REQUEST_TIMEOUT) response.raise_for_status() response_dict = response.json() @@ -185,19 +190,3 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, except KeyError: self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('decode error')) return None - - @staticmethod - def make_subset_filter(segments: list[str]) -> str: - """ - segments = []: (fetch none) - --> subsetFilter = "(subset:[])" - segments = ["a"]: (fetch one segment) - --> subsetFilter = '(subset:["a"])' - - Purposely using .join() method to deal with special cases of - any words with apostrophes (i.e. don't). .join() method enquotes - correctly without conflicting with the apostrophe. 
- """ - if segments == []: - return '(subset:[])' - return '(subset:["' + '", "'.join(segments) + '"]' + ')' diff --git a/optimizely/odp/odp_segment_manager.py b/optimizely/odp/odp_segment_manager.py index d01fede0..a9dd8dfb 100644 --- a/optimizely/odp/odp_segment_manager.py +++ b/optimizely/odp/odp_segment_manager.py @@ -20,7 +20,7 @@ from optimizely.odp.odp_config import OdpConfig from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption from optimizely.odp.lru_cache import OptimizelySegmentsCache -from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager class OdpSegmentManager: @@ -29,14 +29,14 @@ class OdpSegmentManager: def __init__( self, segments_cache: OptimizelySegmentsCache, - zaius_manager: Optional[ZaiusGraphQLApiManager] = None, + api_manager: Optional[OdpSegmentApiManager] = None, logger: Optional[optimizely_logger.Logger] = None ) -> None: self.odp_config: Optional[OdpConfig] = None self.segments_cache = segments_cache self.logger = logger or optimizely_logger.NoOpLogger() - self.zaius_manager = zaius_manager or ZaiusGraphQLApiManager(self.logger) + self.api_manager = api_manager or OdpSegmentApiManager(self.logger) def fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str] ) -> Optional[list[str]]: @@ -79,8 +79,8 @@ def fetch_qualified_segments(self, user_key: str, user_value: str, options: list self.logger.debug('Making a call to ODP server.') - segments = self.zaius_manager.fetch_segments(odp_api_key, odp_api_host, user_key, user_value, - odp_segments_to_check) + segments = self.api_manager.fetch_segments(odp_api_key, odp_api_host, user_key, user_value, + odp_segments_to_check) if segments and not ignore_cache: self.segments_cache.save(cache_key, segments) diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 7edbe6e3..5bdda3e1 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -3,7 
+3,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -13,14 +13,16 @@ from __future__ import annotations -from . import project_config +from typing import TYPE_CHECKING, Any, Optional + from . import decision_service from . import entities from . import event_builder from . import exceptions from . import logger as _logging -from .config_manager import BaseConfigManager +from . import project_config from .config_manager import AuthDatafilePollingConfigManager +from .config_manager import BaseConfigManager from .config_manager import PollingConfigManager from .config_manager import StaticConfigManager from .decision.optimizely_decide_option import OptimizelyDecideOption @@ -31,17 +33,17 @@ from .event import event_factory, user_event_factory from .event.event_processor import BatchEventProcessor, BaseEventProcessor from .event_dispatcher import EventDispatcher, CustomEventDispatcher - from .helpers import enums, validator +from .helpers.sdk_settings import OptimizelySdkSettings from .helpers.enums import DecisionSources from .notification_center import NotificationCenter +from .odp.lru_cache import LRUCache +from .odp.odp_manager import OdpManager from .optimizely_config import OptimizelyConfig, OptimizelyConfigService from .optimizely_user_context import OptimizelyUserContext, UserAttributes -from typing import TYPE_CHECKING, Any, Optional - if TYPE_CHECKING: - # prevent circular dependenacy by skipping import at runtime + # prevent circular dependency by skipping import at runtime from .user_profile import UserProfileService from .helpers.event_tag_utils import EventTags @@ -63,7 +65,8 @@ def __init__( event_processor: Optional[BaseEventProcessor] = None, 
datafile_access_token: Optional[str] = None, default_decide_options: Optional[list[str]] = None, - event_processor_options: Optional[dict[str, Any]] = None + event_processor_options: Optional[dict[str, Any]] = None, + settings: Optional[OptimizelySdkSettings] = None ) -> None: """ Optimizely init method for managing Custom projects. @@ -92,6 +95,7 @@ def __init__( datafile_access_token: Optional string used to fetch authenticated datafile for a secure project environment. default_decide_options: Optional list of decide options used with the decide APIs. event_processor_options: Optional dict of options to be passed to the default batch event processor. + settings: Optional instance of OptimizelySdkSettings for sdk configuration. """ self.logger_name = '.'.join([__name__, self.__class__.__name__]) self.is_valid = True @@ -128,6 +132,8 @@ def __init__( self.logger.debug('Provided default decide options is not a list.') self.default_decide_options = [] + self.sdk_settings: OptimizelySdkSettings = settings # type: ignore[assignment] + try: self._validate_instantiation_options() except exceptions.InvalidInputException as error: @@ -138,6 +144,16 @@ def __init__( self.logger.exception(str(error)) return + self.setup_odp() + + self.odp_manager = OdpManager( + self.sdk_settings.odp_disabled, + self.sdk_settings.segments_cache, + self.sdk_settings.odp_segment_manager, + self.sdk_settings.odp_event_manager, + self.logger + ) + config_manager_options: dict[str, Any] = { 'datafile': datafile, 'logger': self.logger, @@ -157,6 +173,9 @@ def __init__( else: self.config_manager = StaticConfigManager(**config_manager_options) + if not self.sdk_settings.odp_disabled: + self._update_odp_config_on_datafile_update() + self.event_builder = event_builder.EventBuilder() self.decision_service = decision_service.DecisionService(self.logger, user_profile_service) @@ -184,6 +203,23 @@ def _validate_instantiation_options(self) -> None: if not 
validator.is_event_processor_valid(self.event_processor): raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('event_processor')) + if not isinstance(self.sdk_settings, OptimizelySdkSettings): + if self.sdk_settings is not None: + self.logger.debug('Provided sdk_settings is not an OptimizelySdkSettings instance.') + self.sdk_settings = OptimizelySdkSettings() + + if self.sdk_settings.segments_cache: + if not validator.is_segments_cache_valid(self.sdk_settings.segments_cache): + raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('segments_cache')) + + if self.sdk_settings.odp_segment_manager: + if not validator.is_segment_manager_valid(self.sdk_settings.odp_segment_manager): + raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('segment_manager')) + + if self.sdk_settings.odp_event_manager: + if not validator.is_event_manager_valid(self.sdk_settings.odp_event_manager): + raise exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('event_manager')) + def _validate_user_inputs( self, attributes: Optional[UserAttributes] = None, event_tags: Optional[EventTags] = None ) -> bool: @@ -252,8 +288,8 @@ def _send_impression_event( ) def _get_feature_variable_for_type( - self, project_config: project_config.ProjectConfig, feature_key: str, variable_key: str, - variable_type: Optional[str], user_id: str, attributes: Optional[UserAttributes] + self, project_config: project_config.ProjectConfig, feature_key: str, variable_key: str, + variable_type: Optional[str], user_id: str, attributes: Optional[UserAttributes] ) -> Any: """ Helper method to determine value for a certain variable attached to a feature flag based on type of variable. 
@@ -364,8 +400,8 @@ def _get_feature_variable_for_type( return actual_value def _get_all_feature_variables_for_type( - self, project_config: project_config.ProjectConfig, feature_key: str, - user_id: str, attributes: Optional[UserAttributes], + self, project_config: project_config.ProjectConfig, feature_key: str, + user_id: str, attributes: Optional[UserAttributes], ) -> Optional[dict[str, Any]]: """ Helper method to determine value for all variables attached to a feature flag. @@ -1274,3 +1310,74 @@ def _decide_for_keys( continue decisions[key] = decision return decisions + + def setup_odp(self) -> None: + """ + - Make sure cache is instantiated with provided parameters or defaults. + - Set up listener to update odp_config when datafile is updated. + """ + if self.sdk_settings.odp_disabled: + return + + self.notification_center.add_notification_listener( + enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE, + self._update_odp_config_on_datafile_update + ) + + if self.sdk_settings.odp_segment_manager: + return + + if not self.sdk_settings.segments_cache: + self.sdk_settings.segments_cache = LRUCache( + self.sdk_settings.segments_cache_size or enums.OdpSegmentsCacheConfig.DEFAULT_CAPACITY, + self.sdk_settings.segments_cache_timeout_in_secs or enums.OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS + ) + + def _update_odp_config_on_datafile_update(self) -> None: + config = None + + if isinstance(self.config_manager, PollingConfigManager): + # can not use get_config here because callback is fired before _config_ready event is set + # and that would be a deadlock + config = self.config_manager._config + elif self.config_manager: + config = self.config_manager.get_config() + + if not config: + return + + self.odp_manager.update_odp_config( + config.public_key_for_odp, + config.host_for_odp, + config.all_segments + ) + + def identify_user(self, user_id: str) -> None: + self.odp_manager.identify_user(user_id) + + def fetch_qualified_segments(self, user_id: str, options: 
Optional[list[str]] = None) -> Optional[list[str]]: + return self.odp_manager.fetch_qualified_segments(user_id, options or []) + + def send_odp_event( + self, + action: str, + type: str = enums.OdpManagerConfig.EVENT_TYPE, + identifiers: Optional[dict[str, str]] = None, + data: Optional[dict[str, str | int | float | bool | None]] = None + ) -> None: + """ + Send an event to the ODP server. + + Args: + action: The event action name. + type: The event type. Default 'fullstack'. + identifiers: An optional dictionary for identifiers. + data: An optional dictionary for associated data. The default event data will be added to this data + before sending to the ODP server. + """ + self.odp_manager.send_event(type, action, identifiers or {}, data or {}) + + def close(self) -> None: + if callable(getattr(self.event_processor, 'stop', None)): + self.event_processor.stop() # type: ignore[attr-defined] + self.odp_manager.close() diff --git a/optimizely/optimizely_factory.py b/optimizely/optimizely_factory.py index 5060780e..ae466979 100644 --- a/optimizely/optimizely_factory.py +++ b/optimizely/optimizely_factory.py @@ -13,6 +13,8 @@ from __future__ import annotations from typing import TYPE_CHECKING, Optional +from optimizely.helpers.sdk_settings import OptimizelySdkSettings + from . import logger as optimizely_logger from .config_manager import BaseConfigManager, PollingConfigManager from .error_handler import BaseErrorHandler, NoOpErrorHandler @@ -124,7 +126,8 @@ def custom_instance( skip_json_validation: Optional[bool] = None, user_profile_service: Optional[UserProfileService] = None, config_manager: Optional[BaseConfigManager] = None, - notification_center: Optional[NotificationCenter] = None + notification_center: Optional[NotificationCenter] = None, + settings: Optional[OptimizelySdkSettings] = None ) -> Optimizely: """ Returns a new optimizely instance. 
if max_event_batch_size and max_event_flush_interval are None then default batch_size and flush_interval @@ -144,6 +147,7 @@ def custom_instance( user profiles. config_manager: Optional ConfigManager interface responds to 'config' method. notification_center: Optional Instance of NotificationCenter. + settings: Optional Instance of OptimizelySdkSettings. """ error_handler = error_handler or NoOpErrorHandler() @@ -172,5 +176,5 @@ def custom_instance( return Optimizely( datafile, event_dispatcher, logger, error_handler, skip_json_validation, user_profile_service, - sdk_key, config_manager, notification_center, event_processor + sdk_key, config_manager, notification_center, event_processor, settings=settings ) diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index 11b8af9d..fd03ec6d 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -13,14 +13,15 @@ # limitations under the License. # from __future__ import annotations + import copy import threading -from typing import TYPE_CHECKING, Any, Optional, NewType, Dict +from typing import TYPE_CHECKING, Any, Callable, Optional, NewType, Dict from optimizely.decision import optimizely_decision if TYPE_CHECKING: - # prevent circular dependenacy by skipping import at runtime + # prevent circular dependency by skipping import at runtime from . 
import optimizely from optimizely.helpers.event_tag_utils import EventTags from .logger import Logger @@ -54,7 +55,7 @@ def __init__( self.client = optimizely_client self.logger = logger self.user_id = user_id - self._qualified_segments: list[str] = [] + self._qualified_segments: Optional[list[str]] = None if not isinstance(user_attributes, dict): user_attributes = UserAttributes({}) @@ -66,7 +67,9 @@ def __init__( OptimizelyUserContext.OptimizelyForcedDecision ] = {} - # decision context + if self.client: + self.client.identify_user(user_id) + class OptimizelyDecisionContext: """ Using class with attributes here instead of namedtuple because class is extensible, it's easy to add another attribute if we wanted @@ -216,7 +219,7 @@ def remove_forced_decision(self, decision_context: OptimizelyDecisionContext) -> decision_context: a decision context. Returns: - Returns: true if the forced decision has been removed successfully. + True if the forced decision has been removed successfully. """ with self.lock: if decision_context in self.forced_decisions_map: @@ -265,9 +268,11 @@ def is_qualified_for(self, segment: str) -> bool: Returns: true if the segment is in the qualified segments list. """ with self.lock: - return segment in self._qualified_segments + if self._qualified_segments is not None: + return segment in self._qualified_segments + return False - def get_qualified_segments(self) -> list[str]: + def get_qualified_segments(self) -> Optional[list[str]]: """ Gets the qualified segments. @@ -275,9 +280,11 @@ def get_qualified_segments(self) -> list[str]: A list of qualified segment names. """ with self.lock: - return self._qualified_segments.copy() + if self._qualified_segments is not None: + return self._qualified_segments.copy() + return None - def set_qualified_segments(self, segments: list[str]) -> None: + def set_qualified_segments(self, segments: Optional[list[str]]) -> None: """ Replaces any qualified segments with the provided list of segments. 
@@ -288,4 +295,38 @@ def set_qualified_segments(self, segments: list[str]) -> None:
             None.
         """
         with self.lock:
-            self._qualified_segments = segments.copy()
+            self._qualified_segments = None if segments is None else segments.copy()
+
+    def fetch_qualified_segments(
+        self,
+        callback: Optional[Callable[[bool], None]] = None,
+        options: Optional[list[str]] = None
+    ) -> bool | threading.Thread:
+        """
+        Fetch all qualified segments for the user context.
+        The fetched segments will be saved and can be accessed using get/set_qualified_segment methods.
+
+        Args:
+            callback: An optional function to run after the fetch has completed. The function will be provided
+                a boolean value indicating if the fetch was successful. If a callback is provided, the fetch
+                will be run in a separate thread, otherwise it will be run synchronously.
+            options: An array of OptimizelySegmentOptions used to ignore and/or reset the cache (optional).
+
+        Returns:
+            A boolean value indicating if the fetch was successful.
+        """
+        def _fetch_qualified_segments() -> bool:
+            segments = self.client.fetch_qualified_segments(self.user_id, options or []) if self.client else None
+            self.set_qualified_segments(segments)
+            success = segments is not None
+
+            if callable(callback):
+                callback(success)
+            return success
+
+        if callback:
+            fetch_thread = threading.Thread(target=_fetch_qualified_segments)
+            fetch_thread.start()
+            return fetch_thread
+        else:
+            return _fetch_qualified_segments()
diff --git a/tests/test_lru_cache.py b/tests/test_lru_cache.py
index acaf07cc..cc4dfdb1 100644
--- a/tests/test_lru_cache.py
+++ b/tests/test_lru_cache.py
@@ -3,7 +3,7 @@
 # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # -# http:#www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/test_odp_config.py b/tests/test_odp_config.py index d72a7321..b7a48e84 100644 --- a/tests/test_odp_config.py +++ b/tests/test_odp_config.py @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http:#www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, diff --git a/tests/test_odp_zaius_rest_api_manager.py b/tests/test_odp_event_api_manager.py similarity index 90% rename from tests/test_odp_zaius_rest_api_manager.py rename to tests/test_odp_event_api_manager.py index 6e1835d5..47438bd2 100644 --- a/tests/test_odp_zaius_rest_api_manager.py +++ b/tests/test_odp_event_api_manager.py @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http:#www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +16,13 @@ from requests import exceptions as request_exception -from optimizely.helpers.enums import OdpRestApiConfig +from optimizely.helpers.enums import OdpEventApiConfig from optimizely.odp.odp_event import OdpEvent, OdpEventEncoder -from optimizely.odp.zaius_rest_api_manager import ZaiusRestApiManager +from optimizely.odp.odp_event_api_manager import OdpEventApiManager from . 
import base -class ZaiusRestApiManagerTest(base.BaseTest): +class OdpEventApiManagerTest(base.BaseTest): user_key = "vuid" user_value = "test-user-value" api_key = "test-api-key" @@ -34,7 +34,7 @@ class ZaiusRestApiManagerTest(base.BaseTest): def test_send_odp_events__valid_request(self): with mock.patch('requests.post') as mock_request_post: - api = ZaiusRestApiManager() + api = OdpEventApiManager() api.send_odp_events(api_key=self.api_key, api_host=self.api_host, events=self.events) @@ -43,14 +43,14 @@ def test_send_odp_events__valid_request(self): mock_request_post.assert_called_once_with(url=self.api_host + "/v3/events", headers=request_headers, data=json.dumps(self.events, cls=OdpEventEncoder), - timeout=OdpRestApiConfig.REQUEST_TIMEOUT) + timeout=OdpEventApiConfig.REQUEST_TIMEOUT) def test_send_odp_ovents_success(self): with mock.patch('requests.post') as mock_request_post: # no need to mock url and content because we're not returning the response mock_request_post.return_value = self.fake_server_response(status_code=200) - api = ZaiusRestApiManager() + api = OdpEventApiManager() should_retry = api.send_odp_events(api_key=self.api_key, api_host=self.api_host, events=self.events) # content of events doesn't matter for the test @@ -63,7 +63,7 @@ def test_send_odp_events_invalid_json_no_retry(self): with mock.patch('requests.post') as mock_request_post, \ mock.patch('optimizely.logger') as mock_logger: - api = ZaiusRestApiManager(logger=mock_logger) + api = OdpEventApiManager(logger=mock_logger) should_retry = api.send_odp_events(api_key=self.api_key, api_host=self.api_host, events=events) @@ -79,7 +79,7 @@ def test_send_odp_events_invalid_url_no_retry(self): with mock.patch('requests.post', side_effect=request_exception.InvalidURL('Invalid URL')) as mock_request_post, \ mock.patch('optimizely.logger') as mock_logger: - api = ZaiusRestApiManager(logger=mock_logger) + api = OdpEventApiManager(logger=mock_logger) should_retry = 
api.send_odp_events(api_key=self.api_key, api_host=invalid_url, events=self.events) @@ -92,7 +92,7 @@ def test_send_odp_events_network_error_retry(self): with mock.patch('requests.post', side_effect=request_exception.ConnectionError('Connection error')) as mock_request_post, \ mock.patch('optimizely.logger') as mock_logger: - api = ZaiusRestApiManager(logger=mock_logger) + api = OdpEventApiManager(logger=mock_logger) should_retry = api.send_odp_events(api_key=self.api_key, api_host=self.api_host, events=self.events) @@ -108,7 +108,7 @@ def test_send_odp_events_400_no_retry(self): url=self.api_host, content=self.failure_response_data) - api = ZaiusRestApiManager(logger=mock_logger) + api = OdpEventApiManager(logger=mock_logger) should_retry = api.send_odp_events(api_key=self.api_key, api_host=self.api_host, events=self.events) @@ -124,7 +124,7 @@ def test_send_odp_events_500_retry(self): mock.patch('optimizely.logger') as mock_logger: mock_request_post.return_value = self.fake_server_response(status_code=500, url=self.api_host) - api = ZaiusRestApiManager(logger=mock_logger) + api = OdpEventApiManager(logger=mock_logger) should_retry = api.send_odp_events(api_key=self.api_key, api_host=self.api_host, events=self.events) diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py index 766c8ad1..a2963ec9 100644 --- a/tests/test_odp_event_manager.py +++ b/tests/test_odp_event_manager.py @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http:#www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -121,7 +121,7 @@ def test_odp_event_manager_batch(self, *args): event_manager.batch_size = 2 with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -142,7 +142,7 @@ def test_odp_event_manager_multiple_batches(self, *args): batch_count = 4 with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: for _ in range(batch_count): event_manager.send_event(**self.events[0]) @@ -177,7 +177,7 @@ def test_odp_event_manager_backlog(self, *args): event_manager.send_event(**self.events[1]) with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: event_manager.start(self.odp_config) event_manager.send_event(**self.events[0]) @@ -203,7 +203,7 @@ def test_odp_event_manager_flush(self, *args): event_manager.start(self.odp_config) with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -223,7 +223,7 @@ def test_odp_event_manager_multiple_flushes(self, *args): 
flush_count = 4 with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: for _ in range(flush_count): event_manager.send_event(**self.events[0]) @@ -251,7 +251,7 @@ def test_odp_event_manager_retry_failure(self, *args): number_of_tries = event_manager.retry_count + 1 with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=True + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=True ) as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -274,7 +274,7 @@ def test_odp_event_manager_retry_success(self, *args): event_manager.start(self.odp_config) with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, side_effect=[True, True, False] + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, side_effect=[True, True, False] ) as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -294,7 +294,7 @@ def test_odp_event_manager_send_failure(self, *args): event_manager.start(self.odp_config) with mock.patch.object( - event_manager.zaius_manager, + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, side_effect=Exception('Unexpected error') @@ -373,7 +373,7 @@ def test_odp_event_manager_override_default_data(self, *args): processed_event['data']['data_source'] = 'my-app' with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: event_manager.send_event(**event) event_manager.flush() @@ -389,7 +389,7 @@ def test_odp_event_manager_flush_timeout(self, *args): 
event_manager.start(self.odp_config) with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -408,7 +408,7 @@ def test_odp_event_manager_events_before_odp_ready(self, *args): event_manager.start(odp_config) with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -442,7 +442,7 @@ def test_odp_event_manager_events_before_odp_disabled(self, *args): event_manager = OdpEventManager(mock_logger) event_manager.start(odp_config) - with mock.patch.object(event_manager.zaius_manager, 'send_odp_events') as mock_send: + with mock.patch.object(event_manager.api_manager, 'send_odp_events') as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -475,7 +475,7 @@ def test_odp_event_manager_disabled_after_init(self, *args): event_manager.batch_size = 2 with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) @@ -511,7 +511,7 @@ def test_odp_event_manager_disabled_after_events_in_queue(self, *args): event_manager.send_event(**self.events[1]) with mock.patch.object( - event_manager.zaius_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False + event_manager.api_manager, 'send_odp_events', new_callable=CopyingMock, return_value=False ) as mock_send: 
event_manager.start(odp_config) odp_config.update(None, None, []) diff --git a/tests/test_odp_manager.py b/tests/test_odp_manager.py index bef4cae9..ae0e4a1a 100644 --- a/tests/test_odp_manager.py +++ b/tests/test_odp_manager.py @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http:#www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ from unittest import mock -from optimizely import exceptions as optimizely_exception from optimizely import version from optimizely.helpers.enums import Errors from optimizely.odp.lru_cache import OptimizelySegmentsCache, LRUCache @@ -23,11 +22,16 @@ from optimizely.odp.odp_event_manager import OdpEventManager from optimizely.odp.odp_manager import OdpManager from optimizely.odp.odp_segment_manager import OdpSegmentManager -from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager -from optimizely.odp.zaius_rest_api_manager import ZaiusRestApiManager +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager +from optimizely.odp.odp_event_api_manager import OdpEventApiManager from tests import base +class CustomCache: + def reset(self) -> None: + pass + + class OdpManagerTest(base.BaseTest): def test_configurations_disable_odp(self): @@ -41,12 +45,13 @@ def test_configurations_disable_odp(self): manager.fetch_qualified_segments('user1', []) mock_logger.error.assert_called_once_with(Errors.ODP_NOT_ENABLED) + mock_logger.reset_mock() # these call should be dropped gracefully with None manager.identify_user('user1') - self.assertRaisesRegex(optimizely_exception.OdpNotEnabled, Errors.ODP_NOT_ENABLED, - manager.send_event, 't1', 'a1', {}, {}) + manager.send_event('t1', 'a1', {}, {}) + mock_logger.error.assert_called_once_with('ODP is not 
enabled.') self.assertIsNone(manager.event_manager) self.assertIsNone(manager.segment_manager) @@ -54,7 +59,7 @@ def test_configurations_disable_odp(self): def test_fetch_qualified_segments(self): mock_logger = mock.MagicMock() segment_manager = OdpSegmentManager(OptimizelySegmentsCache, - ZaiusGraphQLApiManager(mock_logger), mock_logger) + OdpSegmentApiManager(mock_logger), mock_logger) manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, logger=mock_logger) @@ -75,7 +80,7 @@ def test_fetch_qualified_segments(self): def test_fetch_qualified_segments__disabled(self): mock_logger = mock.MagicMock() segment_manager = OdpSegmentManager(OptimizelySegmentsCache, - ZaiusGraphQLApiManager(mock_logger), mock_logger) + OdpSegmentApiManager(mock_logger), mock_logger) manager = OdpManager(True, OptimizelySegmentsCache, segment_manager, logger=mock_logger) @@ -129,7 +134,7 @@ def test_identify_user_datafile_not_ready(self): def test_identify_user_odp_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) manager.update_odp_config('key1', 'host1', []) @@ -151,9 +156,9 @@ def test_identify_user_odp_integrated(self): def test_identify_user_odp_not_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) - manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager = OdpManager(False, CustomCache(), event_manager=event_manager, logger=mock_logger) manager.update_odp_config(None, None, []) with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: @@ -161,12 +166,11 @@ def test_identify_user_odp_not_integrated(self): 
mock_dispatch_event.assert_not_called() mock_logger.error.assert_not_called() - mock_logger.debug.assert_any_call('Odp config was not changed.') mock_logger.debug.assert_any_call('ODP identify event is not dispatched (ODP not integrated).') def test_identify_user_odp_disabled(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.enabled = False @@ -180,7 +184,7 @@ def test_identify_user_odp_disabled(self): def test_send_event_datafile_not_ready(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) @@ -189,11 +193,11 @@ def test_send_event_datafile_not_ready(self): mock_dispatch_event.assert_not_called() mock_logger.error.assert_not_called() - mock_logger.debug.assert_called_with('ODP event queue: cannot send before the datafile has loaded.') + mock_logger.debug.assert_called_with('ODP event queue: cannot send before config has been set.') def test_send_event_odp_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) manager.update_odp_config('key1', 'host1', []) @@ -215,79 +219,62 @@ def test_send_event_odp_integrated(self): def test_send_event_odp_not_integrated(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) - manager = OdpManager(False, OptimizelySegmentsCache, 
event_manager=event_manager, logger=mock_logger) + manager = OdpManager(False, CustomCache(), event_manager=event_manager, logger=mock_logger) + manager.update_odp_config('api_key', 'api_host', []) manager.update_odp_config(None, None, []) with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: - self.assertRaisesRegex(optimizely_exception.OdpNotIntegrated, Errors.ODP_NOT_INTEGRATED, - manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) mock_dispatch_event.assert_not_called() - mock_logger.debug.assert_any_call('Odp config was not changed.') - mock_logger.error.assert_not_called() + mock_logger.error.assert_called_once_with('ODP is not integrated.') def test_send_event_odp_disabled(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) - manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) - manager.enabled = False + manager = OdpManager(True, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: - self.assertRaisesRegex(optimizely_exception.OdpNotEnabled, Errors.ODP_NOT_ENABLED, - manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) mock_dispatch_event.assert_not_called() - mock_logger.debug.assert_not_called() - mock_logger.error.assert_not_called() + mock_logger.error.assert_called_once_with('ODP is not enabled.') def test_send_event_odp_disabled__event_manager_not_available(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, 
OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) manager.event_manager = False with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: - self.assertRaisesRegex(optimizely_exception.OdpNotEnabled, Errors.ODP_NOT_ENABLED, - manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) - - mock_dispatch_event.assert_not_called() - mock_logger.debug.assert_not_called() - mock_logger.error.assert_not_called() - - def test_send_event_invalid_data(self): - mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) - - manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) - manager.update_odp_config('key1', 'host1', []) - - with mock.patch.object(event_manager, 'dispatch') as mock_dispatch_event: - self.assertRaisesRegex(optimizely_exception.OdpInvalidData, Errors.ODP_INVALID_DATA, - manager.send_event, 't1', 'a1', {'id-key1': 'id-val-1'}, {'invalid-item': {}}) + manager.send_event('t1', 'a1', {'id-key1': 'id-val-1'}, {'key1': 'val1'}) mock_dispatch_event.assert_not_called() - mock_logger.error.assert_not_called() + mock_logger.error.assert_called_once_with('ODP is not enabled.') def test_config_not_changed(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) - manager = OdpManager(False, OptimizelySegmentsCache, event_manager=event_manager, logger=mock_logger) + manager = OdpManager(False, CustomCache(), event_manager=event_manager, logger=mock_logger) + # finish initialization manager.update_odp_config(None, None, []) - mock_logger.debug.assert_called_with('Odp config was not changed.') + # update without change + manager.update_odp_config(None, None, []) + mock_logger.debug.assert_any_call('Odp config was not changed.') mock_logger.error.assert_not_called() def test_update_odp_config__reset_called(self): # build 
segment manager mock_logger = mock.MagicMock() segment_manager = OdpSegmentManager(OptimizelySegmentsCache, - ZaiusGraphQLApiManager(mock_logger), mock_logger) + OdpSegmentApiManager(mock_logger), mock_logger) # build event manager - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, OptimizelySegmentsCache, segment_manager, event_manager, mock_logger) @@ -332,8 +319,9 @@ def test_update_odp_config__update_config_called(self): to odp_config is made or not in OdpManager. """ mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) + event_manager.start(manager.odp_config) with mock.patch.object(event_manager, 'update_config') as mock_update: first_api_key = manager.odp_config.get_api_key() @@ -369,7 +357,7 @@ def test_update_odp_config__update_config_called(self): def test_update_odp_config__odp_config_propagated_properly(self): mock_logger = mock.MagicMock() - event_manager = OdpEventManager(mock_logger, ZaiusRestApiManager()) + event_manager = OdpEventManager(mock_logger, OdpEventApiManager()) manager = OdpManager(False, LRUCache(10, 20), event_manager=event_manager, logger=mock_logger) manager.update_odp_config('key1', 'host1', ['a', 'b']) @@ -395,6 +383,18 @@ def test_update_odp_config__odp_config_propagated_properly(self): self.assertEqual(manager.event_manager.odp_config.get_segments_to_check(), ['a', 'b']) mock_logger.error.assert_not_called() + def test_update_odp_config__odp_config_starts_event_manager(self): + mock_logger = mock.MagicMock() + event_manager = OdpEventManager(mock_logger) + manager = OdpManager(False, event_manager=event_manager, logger=mock_logger) + self.assertFalse(event_manager.is_running) + + 
manager.update_odp_config('key1', 'host1', ['a', 'b']) + self.assertTrue(event_manager.is_running) + + mock_logger.error.assert_not_called() + manager.close() + def test_segments_cache_default_settings(self): manager = OdpManager(False) segments_cache = manager.segment_manager.segments_cache diff --git a/tests/test_odp_zaius_graphql_api_manager.py b/tests/test_odp_segment_api_manager.py similarity index 90% rename from tests/test_odp_zaius_graphql_api_manager.py rename to tests/test_odp_segment_api_manager.py index e4ec76c4..0f909f24 100644 --- a/tests/test_odp_zaius_graphql_api_manager.py +++ b/tests/test_odp_segment_api_manager.py @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # -# http:#www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -16,12 +16,12 @@ from requests import exceptions as request_exception -from optimizely.helpers.enums import OdpGraphQLApiConfig -from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager +from optimizely.helpers.enums import OdpSegmentApiConfig +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager from . 
import base -class ZaiusGraphQLApiManagerTest(base.BaseTest): +class OdpSegmentApiManagerTest(base.BaseTest): user_key = "vuid" user_value = "test-user-value" api_key = "test-api-key" @@ -29,7 +29,7 @@ class ZaiusGraphQLApiManagerTest(base.BaseTest): def test_fetch_qualified_segments__valid_request(self): with mock.patch('requests.post') as mock_request_post: - api = ZaiusGraphQLApiManager() + api = OdpSegmentApiManager() api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -37,21 +37,23 @@ def test_fetch_qualified_segments__valid_request(self): segments_to_check=["a", "b", "c"]) test_payload = { - 'query': 'query {customer(' + self.user_key + ': "' + self.user_value + '") ' - '{audiences(subset:["a", "b", "c"]) {edges {node {name state}}}}}' + 'query': 'query($userId: String, $audiences: [String]) {' + 'customer(vuid: $userId) ' + '{audiences(subset: $audiences) {edges {node {name state}}}}}', + 'variables': {'userId': self.user_value, 'audiences': ["a", "b", "c"]} } request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} mock_request_post.assert_called_once_with(url=self.api_host + "/v3/graphql", headers=request_headers, data=json.dumps(test_payload), - timeout=OdpGraphQLApiConfig.REQUEST_TIMEOUT) + timeout=OdpSegmentApiConfig.REQUEST_TIMEOUT) def test_fetch_qualified_segments__success(self): with mock.patch('requests.post') as mock_request_post: mock_request_post.return_value = \ self.fake_server_response(status_code=200, content=self.good_response_data) - api = ZaiusGraphQLApiManager() + api = OdpSegmentApiManager() response = api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -66,7 +68,7 @@ def test_fetch_qualified_segments__node_missing(self): mock_request_post.return_value = \ self.fake_server_response(status_code=200, content=self.node_missing_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = 
OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -83,7 +85,7 @@ def test_fetch_qualified_segments__mixed_missing_keys(self): self.fake_server_response(status_code=200, content=self.mixed_missing_keys_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -98,7 +100,7 @@ def test_fetch_qualified_segments__success_with_empty_segments(self): mock_request_post.return_value = \ self.fake_server_response(status_code=200, content=self.good_empty_response_data) - api = ZaiusGraphQLApiManager() + api = OdpSegmentApiManager() response = api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -114,7 +116,7 @@ def test_fetch_qualified_segments__invalid_identifier(self): self.fake_server_response(status_code=200, content=self.invalid_identifier_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -130,7 +132,7 @@ def test_fetch_qualified_segments__other_exception(self): mock_request_post.return_value = \ self.fake_server_response(status_code=200, content=self.other_exception_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -146,7 +148,7 @@ def test_fetch_qualified_segments__bad_response(self): mock_request_post.return_value = \ self.fake_server_response(status_code=200, content=self.bad_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -162,7 +164,7 
@@ def test_fetch_qualified_segments__name_invalid(self): mock_request_post.return_value = \ self.fake_server_response(status_code=200, content=self.name_invalid_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -178,7 +180,7 @@ def test_fetch_qualified_segments__invalid_key(self): mock_request_post.return_value = self.fake_server_response(status_code=200, content=self.invalid_edges_key_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -194,7 +196,7 @@ def test_fetch_qualified_segments__invalid_key_in_error_body(self): mock_request_post.return_value = self.fake_server_response(status_code=200, content=self.invalid_key_for_error_response_data) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -208,7 +210,7 @@ def test_fetch_qualified_segments__network_error(self): with mock.patch('requests.post', side_effect=request_exception.ConnectionError('Connection error')) as mock_request_post, \ mock.patch('optimizely.logger') as mock_logger: - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -224,7 +226,7 @@ def test_fetch_qualified_segments__400(self): mock.patch('optimizely.logger') as mock_logger: mock_request_post.return_value = self.fake_server_response(status_code=403, url=self.api_host) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -244,7 
+246,7 @@ def test_fetch_qualified_segments__500(self): mock.patch('optimizely.logger') as mock_logger: mock_request_post.return_value = self.fake_server_response(status_code=500, url=self.api_host) - api = ZaiusGraphQLApiManager(logger=mock_logger) + api = OdpSegmentApiManager(logger=mock_logger) api.fetch_segments(api_key=self.api_key, api_host=self.api_host, user_key=self.user_key, @@ -257,15 +259,6 @@ def test_fetch_qualified_segments__500(self): mock_logger.error.assert_called_once_with('Audience segments fetch failed ' f'(500 Server Error: None for url: {self.api_host}).') - def test_make_subset_filter(self): - api = ZaiusGraphQLApiManager() - - self.assertEqual("(subset:[])", api.make_subset_filter([])) - self.assertEqual("(subset:[\"a\"])", api.make_subset_filter(["a"])) - self.assertEqual("(subset:[\"a\", \"b\", \"c\"])", api.make_subset_filter(['a', 'b', 'c'])) - self.assertEqual("(subset:[\"a\", \"b\", \"c\"])", api.make_subset_filter(["a", "b", "c"])) - self.assertEqual("(subset:[\"a\", \"b\", \"don't\"])", api.make_subset_filter(["a", "b", "don't"])) - # test json responses good_response_data = """ diff --git a/tests/test_odp_segment_manager.py b/tests/test_odp_segment_manager.py index 34d04dac..50794746 100644 --- a/tests/test_odp_segment_manager.py +++ b/tests/test_odp_segment_manager.py @@ -3,7 +3,7 @@ # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # -# http:#www.apache.org/licenses/LICENSE-2.0 +# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +22,7 @@ from optimizely.odp.odp_config import OdpConfig from optimizely.odp.optimizely_odp_option import OptimizelyOdpOption from optimizely.odp.odp_segment_manager import OdpSegmentManager -from optimizely.odp.zaius_graphql_api_manager import ZaiusGraphQLApiManager +from optimizely.odp.odp_segment_api_manager import OdpSegmentApiManager from tests import base @@ -36,7 +36,7 @@ def test_empty_list_with_no_segments_to_check(self): odp_config = OdpConfig(self.api_key, self.api_host, []) mock_logger = mock.MagicMock() segments_cache = LRUCache(1000, 1000) - api = ZaiusGraphQLApiManager(mock_logger) + api = OdpSegmentApiManager(mock_logger) segment_manager = OdpSegmentManager(segments_cache, api, mock_logger) segment_manager.odp_config = odp_config @@ -88,7 +88,7 @@ def test_fetch_segments_success_cache_hit(self): cache_key = segment_manager.make_cache_key(self.user_key, self.user_value) segment_manager.segments_cache.save(cache_key, ['c']) - with mock.patch.object(segment_manager.zaius_manager, 'fetch_segments') as mock_fetch_segments: + with mock.patch.object(segment_manager.api_manager, 'fetch_segments') as mock_fetch_segments: segments = segment_manager.fetch_qualified_segments(self.user_key, self.user_value, []) self.assertEqual(segments, ['c']) @@ -111,7 +111,7 @@ def test_fetch_segments_network_error(self): have a status code for connection error, that's why we need to trigger the exception instead of returning a fake server response with status code 500. The error log should come form the GraphQL API manager, not from ODP Segment Manager. - The active mock logger should be placed as parameter in ZaiusGraphQLApiManager object. 
+ The active mock logger should be placed as parameter in OdpSegmentApiManager object. """ odp_config = OdpConfig(self.api_key, self.api_host, ["a", "b", "c"]) mock_logger = mock.MagicMock() diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index d356b3d7..c6132598 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -29,6 +29,7 @@ from optimizely import version from optimizely.event.event_factory import EventFactory from optimizely.helpers import enums +from optimizely.helpers.sdk_settings import OptimizelySdkSettings from . import base @@ -540,7 +541,7 @@ def test_decision_listener__user_not_in_experiment(self): ) as mock_broadcast_decision: self.assertEqual(None, self.optimizely.activate('test_experiment', 'test_user')) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'ab-test', 'test_user', @@ -1787,7 +1788,7 @@ def test_get_variation(self): self.assertEqual(mock_broadcast.call_count, 1) - mock_broadcast.assert_called_once_with( + mock_broadcast.assert_any_call( enums.NotificationTypes.DECISION, 'ab-test', 'test_user', @@ -2673,7 +2674,7 @@ def test_get_feature_variable_boolean(self): 'Got variable value "true" for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2711,7 +2712,7 @@ def test_get_feature_variable_double(self): 'Got variable value "10.02" for variable "cost" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2749,7 +2750,7 @@ def test_get_feature_variable_integer(self): 'Got variable value "4243" for variable "count" of feature flag "test_feature_in_experiment".' 
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2788,7 +2789,7 @@ def test_get_feature_variable_string(self): 'Got variable value "staging" for variable "environment" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2827,7 +2828,7 @@ def test_get_feature_variable_json(self): 'Got variable value "{"test": 123}" for variable "object" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2891,7 +2892,7 @@ def test_get_all_feature_variables(self): ], any_order=True ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'all-feature-variables', 'test_user', @@ -2928,7 +2929,7 @@ def test_get_feature_variable(self): 'Got variable value "true" for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2959,7 +2960,7 @@ def test_get_feature_variable(self): 'Got variable value "10.02" for variable "cost" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -2990,7 +2991,7 @@ def test_get_feature_variable(self): 'Got variable value "4243" for variable "count" of feature flag "test_feature_in_experiment".' 
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3053,7 +3054,7 @@ def test_get_feature_variable(self): 'Got variable value "{"test": 123}" for variable "object" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3095,7 +3096,7 @@ def test_get_feature_variable_boolean_for_feature_in_rollout(self): 'Got variable value "true" for variable "is_running" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3137,7 +3138,7 @@ def test_get_feature_variable_double_for_feature_in_rollout(self): 'Got variable value "39.99" for variable "price" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3179,7 +3180,7 @@ def test_get_feature_variable_integer_for_feature_in_rollout(self): 'Got variable value "399" for variable "count" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3221,7 +3222,7 @@ def test_get_feature_variable_string_for_feature_in_rollout(self): 'Got variable value "Hello audience" for variable "message" of feature flag "test_feature_in_rollout".' 
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3263,7 +3264,7 @@ def test_get_feature_variable_json_for_feature_in_rollout(self): 'Got variable value "{"field": 12}" for variable "object" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3318,7 +3319,7 @@ def test_get_all_feature_variables_for_feature_in_rollout(self): ], any_order=True ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'all-feature-variables', 'test_user', @@ -3363,7 +3364,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "true" for variable "is_running" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3396,7 +3397,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "39.99" for variable "price" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3429,7 +3430,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "399" for variable "count" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3462,7 +3463,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "Hello audience" for variable "message" of feature flag "test_feature_in_rollout".' 
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3496,7 +3497,7 @@ def test_get_feature_variable_for_feature_in_rollout(self): 'Got variable value "{"field": 12}" for variable "object" of feature flag "test_feature_in_rollout".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3629,7 +3630,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3663,7 +3664,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "cost" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3732,7 +3733,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "environment" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3766,7 +3767,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "object" of feature flag "test_feature_in_experiment".' 
) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3798,7 +3799,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "is_working" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3831,7 +3832,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "cost" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3864,7 +3865,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "count" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -3897,7 +3898,7 @@ def test_get_feature_variable__returns_default_value_if_no_variation(self): 'Returning default value for variable "environment" of feature flag "test_feature_in_experiment".' ) - mock_broadcast_decision.assert_called_once_with( + mock_broadcast_decision.assert_any_call( enums.NotificationTypes.DECISION, 'feature-variable', 'test_user', @@ -5060,9 +5061,7 @@ def test_get_forced_variation__invalid_user_id(self): mock_client_logging.error.assert_called_once_with('Provided "user_id" is in an invalid format.') def test_user_context_invalid_user_id(self): - """ - Tests user context. 
- """ + """Tests user context.""" user_ids = [5, 5.5, None, True, [], {}] for u in user_ids: @@ -5070,8 +5069,277 @@ def test_user_context_invalid_user_id(self): self.assertIsNone(uc, "invalid user id should return none") def test_invalid_flag_key(self): - """ - Tests invalid flag key in function get_flag_variation_by_key(). - """ - # TODO mock function get_flag_variation_by_key + """Tests invalid flag key in function get_flag_variation_by_key().""" pass + + def test_send_identify_event_when_called_with_odp_enabled(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client, 'identify_user') as identify: + client.create_user_context('user-id') + + identify.assert_called_once_with('user-id') + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_info_when_disabled(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_disabled=True) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + self.assertIsNone(client.odp_manager.event_manager) + self.assertIsNone(client.odp_manager.segment_manager) + mock_logger.info.assert_called_once_with('ODP is disabled.') + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_cache_size(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_size=5) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 5) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_cache_timeout(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_timeout_in_secs=5) + 
client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.timeout, 5) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_cache_size_and_cache_timeout(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_size=10, segments_cache_timeout_in_secs=5) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 10) + self.assertEqual(segments_cache.timeout, 5) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_valid_custom_cache(self): + class CustomCache: + def reset(self): + pass + + def lookup(self): + pass + + def save(self): + pass + + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segments_cache=CustomCache()) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertIsInstance(segments_cache, CustomCache) + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_error_when_custom_cache_is_invalid(self): + class InvalidCache: + pass + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segments_cache=InvalidCache()) + with mock.patch('optimizely.logger.reset_logger', return_value=mock_logger): + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + settings=sdk_settings + ) + mock_logger.exception.assert_called_once_with('Provided "segments_cache" is in an invalid format.') + + def 
test_sdk_settings__accept_custom_segment_manager(self): + class CustomSegmentManager: + def reset(self): + pass + + def fetch_qualified_segments(self): + pass + + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segment_manager=CustomSegmentManager()) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segment_manager = client.odp_manager.segment_manager + self.assertIsInstance(segment_manager, CustomSegmentManager) + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_error_when_custom_segment_manager_is_invalid(self): + class InvalidSegmentManager: + pass + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_segment_manager=InvalidSegmentManager()) + with mock.patch('optimizely.logger.reset_logger', return_value=mock_logger): + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + settings=sdk_settings + ) + mock_logger.exception.assert_called_once_with('Provided "segment_manager" is in an invalid format.') + + def test_sdk_settings__accept_valid_custom_event_manager(self): + class CustomEventManager: + is_running = True + + def send_event(self): + pass + + def update_config(self): + pass + + def stop(self): + pass + + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_manager=CustomEventManager()) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + event_manager = client.odp_manager.event_manager + self.assertIsInstance(event_manager, CustomEventManager) + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__log_error_when_custom_event_manager_is_invalid(self): + class InvalidEventManager: + pass + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_manager=InvalidEventManager()) + with 
mock.patch('optimizely.logger.reset_logger', return_value=mock_logger): + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + settings=sdk_settings + ) + mock_logger.exception.assert_called_once_with('Provided "event_manager" is in an invalid format.') + + def test_sdk_settings__log_error_when_sdk_settings_isnt_correct(self): + mock_logger = mock.Mock() + optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings={} + ) + mock_logger.debug.assert_any_call('Provided sdk_settings is not an OptimizelySdkSettings instance.') + + def test_send_odp_event__send_event_with_static_config_manager(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + ) + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.close() + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP event queue: flushing batch size 1.') + + def test_send_odp_event__send_event_with_polling_config_manager(self): + mock_logger = mock.Mock() + with mock.patch( + 'requests.get', + return_value=self.fake_server_response( + status_code=200, + content=json.dumps(self.config_dict_with_audience_segments) + ) + ): + client = optimizely.Optimizely(sdk_key='test', logger=mock_logger) + # wait for config + client.config_manager.get_config() + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.close() + + mock_logger.error.assert_not_called() + mock_logger.debug.assert_called_with('ODP event queue: flushing batch size 1.') + + def test_send_odp_event__log_error_when_odp_disabled(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely( + 
json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=OptimizelySdkSettings(odp_disabled=True) + ) + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.close() + mock_logger.error.assert_called_with('ODP is not enabled.') + + def test_send_odp_event__log_debug_if_datafile_not_ready(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(sdk_key='test', logger=mock_logger) + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + + mock_logger.debug.assert_called_with('ODP event queue: cannot send before config has been set.') + client.close() + + def test_send_odp_event__log_error_if_odp_not_enabled_with_polling_config_manager(self): + mock_logger = mock.Mock() + with mock.patch( + 'requests.get', + return_value=self.fake_server_response( + status_code=200, + content=json.dumps(self.config_dict_with_audience_segments) + ) + ): + client = optimizely.Optimizely( + sdk_key='test', + logger=mock_logger, + settings=OptimizelySdkSettings(odp_disabled=True) + ) + # wait for config + client.config_manager.get_config() + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.close() + + mock_logger.error.assert_called_with('ODP is not enabled.') + + def test_send_odp_event__log_error_with_invalid_data(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action='great', identifiers={}, data={'test': {}}) + client.close() + + mock_logger.error.assert_called_with('ODP data is not valid.') + + def test_send_odp_event__log_error_with_missing_integrations_data(self): + mock_logger = mock.Mock() + client = 
optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences), logger=mock_logger) + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + + mock_logger.error.assert_called_with('ODP is not integrated.') + client.close() diff --git a/tests/test_optimizely_factory.py b/tests/test_optimizely_factory.py index 7bed42af..1792f80f 100644 --- a/tests/test_optimizely_factory.py +++ b/tests/test_optimizely_factory.py @@ -11,6 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json from unittest import mock from optimizely.config_manager import PollingConfigManager @@ -19,12 +20,14 @@ from optimizely.notification_center import NotificationCenter from optimizely.optimizely_factory import OptimizelyFactory from optimizely.user_profile import UserProfileService + from . import base @mock.patch('requests.get') class OptimizelyFactoryTest(base.BaseTest): def setUp(self): + super().setUp() self.datafile = '{ revision: "42" }' self.error_handler = NoOpErrorHandler() self.mock_client_logger = mock.MagicMock() @@ -160,3 +163,21 @@ def test_set_batch_size_and_set_flush_interval___should_set_values_valid_or_inva optimizely_instance = OptimizelyFactory.custom_instance('sdk_key') self.assertEqual(optimizely_instance.event_processor.flush_interval.seconds, 30) self.assertEqual(optimizely_instance.event_processor.batch_size, 10) + + def test_update_odp_config_correctly(self, _): + with mock.patch('requests.get') as mock_request_post: + mock_request_post.return_value = self.fake_server_response( + status_code=200, + content=json.dumps(self.config_dict_with_audience_segments) + ) + client = OptimizelyFactory.custom_instance('instance-test') + + # wait for config to be ready + client.config_manager.get_config() + + odp_config = client.odp_manager.odp_config + odp_settings = self.config_dict_with_audience_segments['integrations'][0] + self.assertEqual(odp_config.get_api_key(), 
odp_settings['publicKey']) + self.assertEqual(odp_config.get_api_host(), odp_settings['host']) + + client.close() diff --git a/tests/test_user_context.py b/tests/test_user_context.py index f61c5420..a4860765 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -27,6 +27,37 @@ class UserContextTest(base.BaseTest): def setUp(self): base.BaseTest.setUp(self, 'config_dict_with_multiple_experiments') + self.good_response_data = { + "data": { + "customer": { + "audiences": { + "edges": [ + { + "node": { + "name": "a", + "state": "qualified", + "description": "qualifed sample 1" + } + }, + { + "node": { + "name": "b", + "state": "qualified", + "description": "qualifed sample 2" + } + }, + { + "node": { + "name": "c", + "state": "not_qualified", + "description": "not-qualified sample" + } + } + ] + } + } + } + } def compare_opt_decisions(self, expected, actual): self.assertEqual(expected.variation_key, actual.variation_key) @@ -1975,3 +2006,243 @@ def test_decide_with_qualified_segments__default(self): decision = user.decide('flag-segment', ['IGNORE_USER_PROFILE_SERVICE']) self.assertEqual(decision.variation_key, "rollout-variation-off") + + def test_none_client_should_not_fail(self): + uc = OptimizelyUserContext(None, None, 'test-user', None) + self.assertIsInstance(uc, OptimizelyUserContext) + + def test_send_identify_event_when_user_context_created(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client, 'identify_user') as identify: + OptimizelyUserContext(client, mock_logger, 'user-id') + + identify.assert_called_once_with('user-id') + mock_logger.error.assert_not_called() + client.close() + + # fetch qualified segments + def test_fetch_segments(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, 
mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() + + def test_return_empty_array_when_not_qualified_for_any_segments(self): + for edge in self.good_response_data['data']['customer']['audiences']['edges']: + edge['node']['state'] = 'unqualified' + + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), []) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_and_reset_cache(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segments_cache = client.odp_manager.segment_manager.segments_cache + segments_cache.save('wow', 'great') + self.assertEqual(segments_cache.lookup('wow'), 'great') + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments(options=['RESET_CACHE']) + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + self.assertIsNone(segments_cache.lookup('wow')) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_from_cache(self): + mock_logger = mock.Mock() + client = 
optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segment_manager = client.odp_manager.segment_manager + cache_key = segment_manager.make_cache_key(enums.OdpManagerConfig.KEY_FOR_USER_ID, 'user-id') + segments_cache = segment_manager.segments_cache + segments_cache.save(cache_key, ['great']) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['great']) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_and_ignore_cache(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segment_manager = client.odp_manager.segment_manager + cache_key = segment_manager.make_cache_key(enums.OdpManagerConfig.KEY_FOR_USER_ID, 'user-id') + segments_cache = segment_manager.segments_cache + segments_cache.save(cache_key, ['great']) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments(options=['IGNORE_CACHE']) + + self.assertTrue(success) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() + + def test_return_false_on_error(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = 
OptimizelyUserContext(client, mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=500)): + success = user.fetch_qualified_segments() + + self.assertFalse(success) + self.assertIsNone(user.get_qualified_segments()) + mock_logger.error.assert_called_once_with( + 'Audience segments fetch failed (500 Server Error: None for url: None).' + ) + client.close() + + def test_no_error_when_client_is_none(self): + mock_logger = mock.Mock() + user = OptimizelyUserContext(None, mock_logger, 'user-id') + success = user.fetch_qualified_segments() + + self.assertFalse(success) + self.assertIsNone(user.get_qualified_segments()) + mock_logger.error.assert_not_called() + + def test_fetch_segments_when_non_blocking(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=True) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_with_callback(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + result = [] + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=lambda x: result.append(x)) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + self.assertTrue(result.pop()) + mock_logger.error.assert_not_called() + client.close() + + def 
test_pass_false_to_callback_when_failed_and_non_blocking(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + result = [] + + with mock.patch('requests.post', return_value=self.fake_server_response(status_code=500)): + thread = user.fetch_qualified_segments(callback=lambda x: result.append(x)) + thread.join() + + self.assertIsNone(user.get_qualified_segments()) + self.assertFalse(result.pop()) + mock_logger.error.assert_called_once_with( + 'Audience segments fetch failed (500 Server Error: None for url: None).' + ) + client.close() + + def test_fetch_segments_from_cache_with_non_blocking(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + segment_manager = client.odp_manager.segment_manager + cache_key = segment_manager.make_cache_key(enums.OdpManagerConfig.KEY_FOR_USER_ID, 'user-id') + segments_cache = segment_manager.segments_cache + segments_cache.save(cache_key, ['great']) + self.assertEqual(segments_cache.lookup(cache_key), ['great']) + + user = OptimizelyUserContext(client, mock_logger, 'user-id') + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=True) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['great']) + mock_logger.error.assert_not_called() + client.close() + + def test_decide_correctly_with_non_blocking(self): + self.good_response_data['data']['customer']['audiences']['edges'][0]['node']['name'] = 'odp-segment-2' + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user-id') + results = [] + + def callback(success): + 
results.append(success) + decision = user.decide('flag-segment') + results.append(decision.variation_key) + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + thread = user.fetch_qualified_segments(callback=callback) + thread.join() + + self.assertEqual(user.get_qualified_segments(), ['odp-segment-2', 'b']) + self.assertEqual(results.pop(), 'rollout-variation-on') + self.assertStrictTrue(results.pop()) + mock_logger.error.assert_not_called() + client.close() + + def test_fetch_segments_error(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + user = OptimizelyUserContext(client, mock_logger, 'user"id') + + with mock.patch('requests.post', return_value=self.fake_server_response( + status_code=200, content=json.dumps(self.good_response_data) + )): + success = user.fetch_qualified_segments() + + self.assertTrue(success) + self.assertEqual(user.get_qualified_segments(), ['a', 'b']) + mock_logger.error.assert_not_called() + client.close() From 9f46ddfecd59ef2682d2600ac4aec9c6fc2a9272 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Mon, 7 Nov 2022 17:24:37 -0500 Subject: [PATCH 28/68] fix: skip identify on user_context clone (#409) --- optimizely/optimizely_user_context.py | 19 +++++++++++++++---- tests/test_user_context.py | 18 ++++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index fd03ec6d..e2674be1 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -37,8 +37,12 @@ class OptimizelyUserContext: """ def __init__( - self, optimizely_client: optimizely.Optimizely, logger: Logger, - user_id: str, user_attributes: Optional[UserAttributes] = None + self, + optimizely_client: 
optimizely.Optimizely, + logger: Logger, + user_id: str, + user_attributes: Optional[UserAttributes] = None, + identify: bool = True ): """ Create an instance of the Optimizely User Context. @@ -47,6 +51,7 @@ def __init__( logger: logger for logging user_id: user id of this user context user_attributes: user attributes to use for this user context + identify: True to send identify event to ODP. Returns: UserContext instance @@ -67,7 +72,7 @@ def __init__( OptimizelyUserContext.OptimizelyForcedDecision ] = {} - if self.client: + if self.client and identify: self.client.identify_user(user_id) class OptimizelyDecisionContext: @@ -94,7 +99,13 @@ def _clone(self) -> Optional[OptimizelyUserContext]: if not self.client: return None - user_context = OptimizelyUserContext(self.client, self.logger, self.user_id, self.get_user_attributes()) + user_context = OptimizelyUserContext( + self.client, + self.logger, + self.user_id, + self.get_user_attributes(), + identify=False + ) with self.lock: if self.forced_decisions_map: diff --git a/tests/test_user_context.py b/tests/test_user_context.py index a4860765..15499792 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -2021,6 +2021,24 @@ def test_send_identify_event_when_user_context_created(self): mock_logger.error.assert_not_called() client.close() + def test_identify_is_skipped_with_decisions(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_features), logger=mock_logger) + with mock.patch.object(client, 'identify_user') as identify: + user_context = OptimizelyUserContext(client, mock_logger, 'user-id') + + identify.assert_called_once_with('user-id') + mock_logger.error.assert_not_called() + + with mock.patch.object(client, 'identify_user') as identify: + user_context.decide('test_feature_in_rollout') + user_context.decide_all() + user_context.decide_for_keys(['test_feature_in_rollout']) + + identify.assert_not_called() + 
mock_logger.error.assert_not_called() + client.close() + # fetch qualified segments def test_fetch_segments(self): mock_logger = mock.Mock() From f673a32123268661fb8747ee0f5ee43ea5f7b20a Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Mon, 14 Nov 2022 19:18:47 -0500 Subject: [PATCH 29/68] add stop method to polling config manager (#410) --- optimizely/config_manager.py | 22 +++-- optimizely/optimizely.py | 9 +- requirements/typing.txt | 2 +- tests/test_config_manager.py | 170 ++++++++--------------------------- 4 files changed, 59 insertions(+), 144 deletions(-) diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index 68a04b26..c5cf8bca 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -17,7 +17,6 @@ from typing import TYPE_CHECKING, Any, Optional import requests import threading -import time from requests import codes as http_status_codes from requests import exceptions as requests_exceptions @@ -216,8 +215,8 @@ def __init__( self.set_update_interval(update_interval) self.set_blocking_timeout(blocking_timeout) self.last_modified: Optional[str] = None - self._polling_thread = threading.Thread(target=self._run) - self._polling_thread.daemon = True + self.stopped = threading.Event() + self._initialize_thread() self._polling_thread.start() @staticmethod @@ -375,15 +374,23 @@ def is_running(self) -> bool: """ Check if polling thread is alive or not. """ return self._polling_thread.is_alive() + def stop(self) -> None: + """ Stop the polling thread and wait for it to exit. """ + if self.is_running: + self.stopped.set() + self._polling_thread.join() + def _run(self) -> None: """ Triggered as part of the thread which fetches the datafile and sleeps until next update interval. 
""" try: - while self.is_running: + while True: self.fetch_datafile() - time.sleep(self.update_interval) + if self.stopped.wait(self.update_interval): + self.stopped.clear() + break except (OSError, OverflowError) as err: self.logger.error( - f'Error in time.sleep. Provided update_interval value may be too big. Error: {err}' + f'Provided update_interval value may be too big. Error: {err}' ) raise @@ -392,6 +399,9 @@ def start(self) -> None: if not self.is_running: self._polling_thread.start() + def _initialize_thread(self) -> None: + self._polling_thread = threading.Thread(target=self._run, daemon=True) + class AuthDatafilePollingConfigManager(PollingConfigManager): """ Config manager that polls for authenticated datafile using access token. """ diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 5bdda3e1..7a46f927 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -88,10 +88,9 @@ def __init__( config_manager.BaseConfigManager implementation which can be using the same NotificationCenter instance. event_processor: Optional component which processes the given event(s). - By default optimizely.event.event_processor.ForwardingEventProcessor is used - which simply forwards events to the event dispatcher. - To enable event batching configure and use - optimizely.event.event_processor.BatchEventProcessor. + By default optimizely.event.event_processor.BatchEventProcessor is used + which batches events. To simply forward events to the event dispatcher + configure and use optimizely.event.event_processor.ForwardingEventProcessor. datafile_access_token: Optional string used to fetch authenticated datafile for a secure project environment. default_decide_options: Optional list of decide options used with the decide APIs. event_processor_options: Optional dict of options to be passed to the default batch event processor. 
@@ -1381,3 +1380,5 @@ def close(self) -> None: if callable(getattr(self.event_processor, 'stop', None)): self.event_processor.stop() # type: ignore[attr-defined] self.odp_manager.close() + if callable(getattr(self.config_manager, 'stop', None)): + self.config_manager.stop() # type: ignore[attr-defined] diff --git a/requirements/typing.txt b/requirements/typing.txt index ba65f536..67aac34a 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,4 +1,4 @@ -mypy +mypy==0.982 types-jsonschema types-requests types-Flask \ No newline at end of file diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py index 75b5aaf7..38dcfa33 100644 --- a/tests/test_config_manager.py +++ b/tests/test_config_manager.py @@ -218,38 +218,6 @@ def test_get_config_blocks(self): self.assertEqual(1, round(end_time - start_time)) -class MockPollingConfigManager(config_manager.PollingConfigManager): - ''' Wrapper class to allow manual call of fetch_datafile in the polling thread by - overriding the _run method.''' - def __init__(self, *args, **kwargs): - self.run = False - self.stop = False - super().__init__(*args, **kwargs) - - def _run(self): - '''Parent thread can use self.run to start fetch_datafile in polling thread and wait for it to complete.''' - while self.is_running and not self.stop: - if self.run: - self.fetch_datafile() - self.run = False - - -class MockAuthDatafilePollingConfigManager(config_manager.AuthDatafilePollingConfigManager): - ''' Wrapper class to allow manual call of fetch_datafile in the polling thread by - overriding the _run method.''' - def __init__(self, *args, **kwargs): - self.run = False - self.stop = False - super().__init__(*args, **kwargs) - - def _run(self): - '''Parent thread can use self.run to start fetch_datafile and wait for it to complete.''' - while self.is_running and not self.stop: - if self.run: - self.fetch_datafile() - self.run = False - - @mock.patch('requests.get') class PollingConfigManagerTest(base.BaseTest): 
def test_init__no_sdk_key_no_url__fails(self, _): @@ -327,12 +295,8 @@ def test_get_datafile_url__sdk_key_and_url_and_template_provided(self, _): def test_set_update_interval(self, _): """ Test set_update_interval with different inputs. """ - # prevent polling thread from starting in PollingConfigManager.__init__ - # otherwise it can outlive this test and get out of sync with pytest - with mock.patch('threading.Thread.start') as mock_thread: - project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') - mock_thread.assert_called_once() # Assert that if invalid update_interval is set, then exception is raised. with self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, 'Invalid update_interval "invalid interval" provided.', @@ -355,15 +319,13 @@ def test_set_update_interval(self, _): project_config_manager.set_update_interval(42) self.assertEqual(42, project_config_manager.update_interval) + project_config_manager.stop() + def test_set_blocking_timeout(self, _): """ Test set_blocking_timeout with different inputs. """ - # prevent polling thread from starting in PollingConfigManager.__init__ - # otherwise it can outlive this test and get out of sync with pytest - with mock.patch('threading.Thread.start') as mock_thread: - project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') - mock_thread.assert_called_once() # Assert that if invalid blocking_timeout is set, then exception is raised. 
with self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, 'Invalid blocking timeout "invalid timeout" provided.', @@ -390,15 +352,13 @@ def test_set_blocking_timeout(self, _): project_config_manager.set_blocking_timeout(5) self.assertEqual(5, project_config_manager.blocking_timeout) + project_config_manager.stop() + def test_set_last_modified(self, _): """ Test that set_last_modified sets last_modified field based on header. """ - # prevent polling thread from starting in PollingConfigManager.__init__ - # otherwise it can outlive this test and get out of sync with pytest - with mock.patch('threading.Thread.start') as mock_thread: - project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') + project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') - mock_thread.assert_called_once() last_modified_time = 'Test Last Modified Time' test_response_headers = { 'Last-Modified': last_modified_time, @@ -406,15 +366,12 @@ def test_set_last_modified(self, _): } project_config_manager.set_last_modified(test_response_headers) self.assertEqual(last_modified_time, project_config_manager.last_modified) + project_config_manager.stop() def test_fetch_datafile(self, _): """ Test that fetch_datafile sets config and last_modified based on response. 
""" sdk_key = 'some_key' - # use wrapper class to control start and stop of fetch_datafile - # this prevents the polling thread from outliving the test - # and getting out of sync with pytest - project_config_manager = MockPollingConfigManager(sdk_key=sdk_key) expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -423,11 +380,8 @@ def test_fetch_datafile(self, _): test_response.headers = test_headers test_response._content = test_datafile with mock.patch('requests.get', return_value=test_response) as mock_request: - # manually trigger fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key) + project_config_manager.stop() mock_request.assert_called_once_with( expected_datafile_url, @@ -439,11 +393,9 @@ def test_fetch_datafile(self, _): # Call fetch_datafile again and assert that request to URL is with If-Modified-Since header. 
with mock.patch('requests.get', return_value=test_response) as mock_requests: - # manually trigger fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, @@ -452,10 +404,6 @@ def test_fetch_datafile(self, _): ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - self.assertTrue(project_config_manager.is_running) - - # Shut down the polling thread - project_config_manager.stop = True def test_fetch_datafile__status_exception_raised(self, _): """ Test that config_manager keeps running if status code exception is raised when fetching datafile. """ @@ -473,16 +421,9 @@ def raise_for_status(self): test_response.headers = test_headers test_response._content = test_datafile - # use wrapper class to control start and stop of fetch_datafile - # this prevents the polling thread from outliving the test - # and getting out of sync with pytest - project_config_manager = MockPollingConfigManager(sdk_key=sdk_key, logger=mock_logger) with mock.patch('requests.get', return_value=test_response) as mock_request: - # manually trigger fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) + project_config_manager.stop() mock_request.assert_called_once_with( expected_datafile_url, @@ -494,11 +435,9 @@ def raise_for_status(self): # Call fetch_datafile again, but raise exception this time with mock.patch('requests.get', return_value=MockExceptionResponse()) as mock_requests: - # manually trigger 
fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, @@ -510,21 +449,12 @@ def raise_for_status(self): ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - # Confirm that config manager keeps running - self.assertTrue(project_config_manager.is_running) - - # Shut down the polling thread - project_config_manager.stop = True def test_fetch_datafile__request_exception_raised(self, _): """ Test that config_manager keeps running if a request exception is raised when fetching datafile. """ sdk_key = 'some_key' mock_logger = mock.Mock() - # use wrapper class to control start and stop of fetch_datafile - # this prevents the polling thread from outliving the test - # and getting out of sync with pytest - project_config_manager = MockPollingConfigManager(sdk_key=sdk_key, logger=mock_logger) expected_datafile_url = enums.ConfigManager.DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -533,11 +463,8 @@ def test_fetch_datafile__request_exception_raised(self, _): test_response.headers = test_headers test_response._content = test_datafile with mock.patch('requests.get', return_value=test_response) as mock_request: - # manually trigger fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) + project_config_manager.stop() mock_request.assert_called_once_with( expected_datafile_url, @@ -552,11 
+479,9 @@ def test_fetch_datafile__request_exception_raised(self, _): 'requests.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: - # manually trigger fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, @@ -568,11 +493,6 @@ def test_fetch_datafile__request_exception_raised(self, _): ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - # Confirm that config manager keeps running - self.assertTrue(project_config_manager.is_running) - - # Shut down the polling thread - project_config_manager.stop = True def test_is_running(self, _): """ Test that polling thread is running after instance of PollingConfigManager is created. 
""" @@ -580,8 +500,7 @@ def test_is_running(self, _): project_config_manager = config_manager.PollingConfigManager(sdk_key='some_key') self.assertTrue(project_config_manager.is_running) - # Prevent the polling thread from running fetch_datafile if it hasn't already - project_config_manager._polling_thread._is_stopped = True + project_config_manager.stop() @mock.patch('requests.get') @@ -600,14 +519,11 @@ def test_set_datafile_access_token(self, _): datafile_access_token = 'some_token' sdk_key = 'some_key' - # prevent polling thread from starting in PollingConfigManager.__init__ - # otherwise it can outlive this test and get out of sync with pytest - with mock.patch('threading.Thread.start') as mock_thread: - project_config_manager = config_manager.AuthDatafilePollingConfigManager( - datafile_access_token=datafile_access_token, sdk_key=sdk_key) + project_config_manager = config_manager.AuthDatafilePollingConfigManager( + datafile_access_token=datafile_access_token, sdk_key=sdk_key) - mock_thread.assert_called_once() self.assertEqual(datafile_access_token, project_config_manager.datafile_access_token) + project_config_manager.stop() def test_fetch_datafile(self, _): """ Test that fetch_datafile sets authorization header in request header and sets config based on response. 
""" @@ -645,11 +561,6 @@ def test_fetch_datafile__request_exception_raised(self, _): sdk_key = 'some_key' mock_logger = mock.Mock() - # use wrapper class to control start and stop of fetch_datafile - # this prevents the polling thread from outliving the test - # and getting out of sync with pytest - project_config_manager = MockAuthDatafilePollingConfigManager(datafile_access_token=datafile_access_token, - sdk_key=sdk_key, logger=mock_logger) expected_datafile_url = enums.ConfigManager.AUTHENTICATED_DATAFILE_URL_TEMPLATE.format(sdk_key=sdk_key) test_headers = {'Last-Modified': 'New Time'} test_datafile = json.dumps(self.config_dict_with_features) @@ -659,13 +570,13 @@ def test_fetch_datafile__request_exception_raised(self, _): test_response._content = test_datafile # Call fetch_datafile and assert that request was sent with correct authorization header - with mock.patch('requests.get', - return_value=test_response) as mock_request: - # manually trigger fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + with mock.patch('requests.get', return_value=test_response) as mock_request: + project_config_manager = config_manager.AuthDatafilePollingConfigManager( + datafile_access_token=datafile_access_token, + sdk_key=sdk_key, + logger=mock_logger + ) + project_config_manager.stop() mock_request.assert_called_once_with( expected_datafile_url, @@ -680,11 +591,9 @@ def test_fetch_datafile__request_exception_raised(self, _): 'requests.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: - # manually trigger fetch_datafile in the polling thread - project_config_manager.run = True - # Wait for polling thread to finish - while project_config_manager.run: - pass + project_config_manager._initialize_thread() + project_config_manager.start() + project_config_manager.stop() mock_requests.assert_called_once_with( expected_datafile_url, @@ 
-699,8 +608,3 @@ def test_fetch_datafile__request_exception_raised(self, _): ) self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) - # Confirm that config manager keeps running - self.assertTrue(project_config_manager.is_running) - - # Shut down the polling thread - project_config_manager.stop = True From f67a0ccae2e124d3a54baed2cd53774f76b5d9ed Mon Sep 17 00:00:00 2001 From: Ozayr <54209343+ozayr-zaviar@users.noreply.github.com> Date: Mon, 19 Dec 2022 22:05:51 -0800 Subject: [PATCH 30/68] feat: fetch timeout made configurable (#411) Fetch segment and send opd event timeout made configurable through sdk_settings option --- optimizely/helpers/sdk_settings.py | 8 +++++++- optimizely/odp/odp_event_api_manager.py | 10 +++++++--- optimizely/odp/odp_event_manager.py | 10 +++++++--- optimizely/odp/odp_manager.py | 7 +++++-- optimizely/odp/odp_segment_api_manager.py | 5 +++-- optimizely/odp/odp_segment_manager.py | 8 ++++---- optimizely/optimizely.py | 2 ++ tests/test_odp_event_api_manager.py | 13 +++++++++++++ tests/test_odp_event_manager.py | 2 +- tests/test_odp_segment_api_manager.py | 21 +++++++++++++++++++++ 10 files changed, 70 insertions(+), 16 deletions(-) diff --git a/optimizely/helpers/sdk_settings.py b/optimizely/helpers/sdk_settings.py index c55fd654..00142e54 100644 --- a/optimizely/helpers/sdk_settings.py +++ b/optimizely/helpers/sdk_settings.py @@ -30,7 +30,9 @@ def __init__( segments_cache_timeout_in_secs: int = enums.OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS, odp_segments_cache: Optional[OptimizelySegmentsCache] = None, odp_segment_manager: Optional[OdpSegmentManager] = None, - odp_event_manager: Optional[OdpEventManager] = None + odp_event_manager: Optional[OdpEventManager] = None, + fetch_segments_timeout: Optional[int] = None, + odp_event_timeout: Optional[int] = None ) -> None: """ Args: @@ -45,6 +47,8 @@ def __init__( 
`fetch_qualified_segments(user_key, user_value, options)`. odp_event_manager: A custom odp event manager. Required method is: `send_event(type:, action:, identifiers:, data:)` + fetch_segments_timeout: A fetch segment timeout in seconds (optional). + odp_event_timeout: A send odp event timeout in seconds (optional). """ self.odp_disabled = odp_disabled @@ -53,3 +57,5 @@ def __init__( self.segments_cache = odp_segments_cache self.odp_segment_manager = odp_segment_manager self.odp_event_manager = odp_event_manager + self.fetch_segments_timeout = fetch_segments_timeout + self.odp_event_timeout = odp_event_timeout diff --git a/optimizely/odp/odp_event_api_manager.py b/optimizely/odp/odp_event_api_manager.py index 00c8050a..85967415 100644 --- a/optimizely/odp/odp_event_api_manager.py +++ b/optimizely/odp/odp_event_api_manager.py @@ -40,10 +40,14 @@ class OdpEventApiManager: """Provides an internal service for ODP event REST api access.""" - def __init__(self, logger: Optional[optimizely_logger.Logger] = None): + def __init__(self, logger: Optional[optimizely_logger.Logger] = None, timeout: Optional[int] = None): self.logger = logger or optimizely_logger.NoOpLogger() + self.timeout = timeout or OdpEventApiConfig.REQUEST_TIMEOUT - def send_odp_events(self, api_key: str, api_host: str, events: list[OdpEvent]) -> bool: + def send_odp_events(self, + api_key: str, + api_host: str, + events: list[OdpEvent]) -> bool: """ Dispatch the event being represented by the OdpEvent object. 
@@ -69,7 +73,7 @@ def send_odp_events(self, api_key: str, api_host: str, events: list[OdpEvent]) - response = requests.post(url=url, headers=request_headers, data=payload_dict, - timeout=OdpEventApiConfig.REQUEST_TIMEOUT) + timeout=self.timeout) response.raise_for_status() diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py index ec1e3fc9..2c4a6cda 100644 --- a/optimizely/odp/odp_event_manager.py +++ b/optimizely/odp/odp_event_manager.py @@ -45,16 +45,18 @@ class OdpEventManager: def __init__( self, logger: Optional[_logging.Logger] = None, - api_manager: Optional[OdpEventApiManager] = None + api_manager: Optional[OdpEventApiManager] = None, + timeout: Optional[int] = None ): """OdpEventManager init method to configure event batching. Args: logger: Optional component which provides a log method to log messages. By default nothing would be logged. api_manager: Optional component which sends events to ODP. + timeout: Optional event timeout in seconds. """ self.logger = logger or _logging.NoOpLogger() - self.api_manager = api_manager or OdpEventApiManager(self.logger) + self.api_manager = api_manager or OdpEventApiManager(self.logger, timeout) self.odp_config: Optional[OdpConfig] = None self.api_key: Optional[str] = None @@ -158,7 +160,9 @@ def _flush_batch(self) -> None: for i in range(1 + self.retry_count): try: - should_retry = self.api_manager.send_odp_events(self.api_key, self.api_host, self._current_batch) + should_retry = self.api_manager.send_odp_events(self.api_key, + self.api_host, + self._current_batch) except Exception as error: should_retry = False self.logger.error(Errors.ODP_EVENT_FAILED.format(f'Error: {error} {self._current_batch}')) diff --git a/optimizely/odp/odp_manager.py b/optimizely/odp/odp_manager.py index b07f0c9f..f122523a 100644 --- a/optimizely/odp/odp_manager.py +++ b/optimizely/odp/odp_manager.py @@ -33,6 +33,8 @@ def __init__( segments_cache: Optional[OptimizelySegmentsCache] = None, segment_manager: 
Optional[OdpSegmentManager] = None, event_manager: Optional[OdpEventManager] = None, + fetch_segments_timeout: Optional[int] = None, + odp_event_timeout: Optional[int] = None, logger: Optional[optimizely_logger.Logger] = None ) -> None: @@ -42,6 +44,7 @@ def __init__( self.segment_manager = segment_manager self.event_manager = event_manager + self.fetch_segments_timeout = fetch_segments_timeout if not self.enabled: self.logger.info('ODP is disabled.') @@ -53,9 +56,9 @@ def __init__( OdpSegmentsCacheConfig.DEFAULT_CAPACITY, OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS ) - self.segment_manager = OdpSegmentManager(segments_cache, logger=self.logger) + self.segment_manager = OdpSegmentManager(segments_cache, logger=self.logger, timeout=fetch_segments_timeout) - self.event_manager = self.event_manager or OdpEventManager(self.logger) + self.event_manager = self.event_manager or OdpEventManager(self.logger, timeout=odp_event_timeout) self.segment_manager.odp_config = self.odp_config def fetch_qualified_segments(self, user_id: str, options: list[str]) -> Optional[list[str]]: diff --git a/optimizely/odp/odp_segment_api_manager.py b/optimizely/odp/odp_segment_api_manager.py index dc51c6f6..d422bfad 100644 --- a/optimizely/odp/odp_segment_api_manager.py +++ b/optimizely/odp/odp_segment_api_manager.py @@ -108,8 +108,9 @@ class OdpSegmentApiManager: """Interface for manging the fetching of audience segments.""" - def __init__(self, logger: Optional[optimizely_logger.Logger] = None): + def __init__(self, logger: Optional[optimizely_logger.Logger] = None, timeout: Optional[int] = None): self.logger = logger or optimizely_logger.NoOpLogger() + self.timeout = timeout or OdpSegmentApiConfig.REQUEST_TIMEOUT def fetch_segments(self, api_key: str, api_host: str, user_key: str, user_value: str, segments_to_check: list[str]) -> Optional[list[str]]: @@ -151,7 +152,7 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, response = requests.post(url=url, 
headers=request_headers, data=payload_dict, - timeout=OdpSegmentApiConfig.REQUEST_TIMEOUT) + timeout=self.timeout) response.raise_for_status() response_dict = response.json() diff --git a/optimizely/odp/odp_segment_manager.py b/optimizely/odp/odp_segment_manager.py index a9dd8dfb..b0f04b73 100644 --- a/optimizely/odp/odp_segment_manager.py +++ b/optimizely/odp/odp_segment_manager.py @@ -30,16 +30,16 @@ def __init__( self, segments_cache: OptimizelySegmentsCache, api_manager: Optional[OdpSegmentApiManager] = None, - logger: Optional[optimizely_logger.Logger] = None + logger: Optional[optimizely_logger.Logger] = None, + timeout: Optional[int] = None ) -> None: self.odp_config: Optional[OdpConfig] = None self.segments_cache = segments_cache self.logger = logger or optimizely_logger.NoOpLogger() - self.api_manager = api_manager or OdpSegmentApiManager(self.logger) + self.api_manager = api_manager or OdpSegmentApiManager(self.logger, timeout) - def fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str] - ) -> Optional[list[str]]: + def fetch_qualified_segments(self, user_key: str, user_value: str, options: list[str]) -> Optional[list[str]]: """ Args: user_key: The key for identifying the id type. 
diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 7a46f927..595513a8 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -150,6 +150,8 @@ def __init__( self.sdk_settings.segments_cache, self.sdk_settings.odp_segment_manager, self.sdk_settings.odp_event_manager, + self.sdk_settings.fetch_segments_timeout, + self.sdk_settings.odp_event_timeout, self.logger ) diff --git a/tests/test_odp_event_api_manager.py b/tests/test_odp_event_api_manager.py index 47438bd2..0e7c50d8 100644 --- a/tests/test_odp_event_api_manager.py +++ b/tests/test_odp_event_api_manager.py @@ -45,6 +45,19 @@ def test_send_odp_events__valid_request(self): data=json.dumps(self.events, cls=OdpEventEncoder), timeout=OdpEventApiConfig.REQUEST_TIMEOUT) + def test_send_odp_events__custom_timeout(self): + with mock.patch('requests.post') as mock_request_post: + api = OdpEventApiManager(timeout=14) + api.send_odp_events(api_key=self.api_key, + api_host=self.api_host, + events=self.events) + + request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} + mock_request_post.assert_called_once_with(url=self.api_host + "/v3/events", + headers=request_headers, + data=json.dumps(self.events, cls=OdpEventEncoder), + timeout=14) + def test_send_odp_ovents_success(self): with mock.patch('requests.post') as mock_request_post: # no need to mock url and content because we're not returning the response diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py index a2963ec9..20456997 100644 --- a/tests/test_odp_event_manager.py +++ b/tests/test_odp_event_manager.py @@ -71,7 +71,7 @@ class OdpEventManagerTest(BaseTest): "key-3": 3.0, "key-4": None, "key-5": True - } + }, }, { "type": "t2", diff --git a/tests/test_odp_segment_api_manager.py b/tests/test_odp_segment_api_manager.py index 0f909f24..47913973 100644 --- a/tests/test_odp_segment_api_manager.py +++ b/tests/test_odp_segment_api_manager.py @@ -48,6 +48,27 @@ def 
test_fetch_qualified_segments__valid_request(self): data=json.dumps(test_payload), timeout=OdpSegmentApiConfig.REQUEST_TIMEOUT) + def test_fetch_qualified_segments__custom_timeout(self): + with mock.patch('requests.post') as mock_request_post: + api = OdpSegmentApiManager(timeout=12) + api.fetch_segments(api_key=self.api_key, + api_host=self.api_host, + user_key=self.user_key, + user_value=self.user_value, + segments_to_check=["a", "b", "c"]) + + test_payload = { + 'query': 'query($userId: String, $audiences: [String]) {' + 'customer(vuid: $userId) ' + '{audiences(subset: $audiences) {edges {node {name state}}}}}', + 'variables': {'userId': self.user_value, 'audiences': ["a", "b", "c"]} + } + request_headers = {'content-type': 'application/json', 'x-api-key': self.api_key} + mock_request_post.assert_called_once_with(url=self.api_host + "/v3/graphql", + headers=request_headers, + data=json.dumps(test_payload), + timeout=12) + def test_fetch_qualified_segments__success(self): with mock.patch('requests.post') as mock_request_post: mock_request_post.return_value = \ From 6be3cbd5eda6e7d9ef4aa766dc67cad5f1ce2da7 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Thu, 26 Jan 2023 14:49:37 -0500 Subject: [PATCH 31/68] fix: odp issues identified by FSC (#412) * check if opti instance is valid on odp methods * fix variable missing error * fix extrenous identify calls * change integrations to default to first with key * fix cache_size bug * add timeout to pollingconfig stop * Update python.yml * revert branch to master * fix create_user_context * remove unnecessary checks Co-authored-by: Matjaz Pirnovar --- .github/workflows/python.yml | 2 +- optimizely/config_manager.py | 5 ++-- optimizely/entities.py | 2 +- optimizely/optimizely.py | 44 ++++++++++++++++++--------------- optimizely/optimizely_config.py | 6 +++-- optimizely/project_config.py | 11 ++++++--- 6 files changed, 41 insertions(+), 29 deletions(-) diff --git 
a/.github/workflows/python.yml b/.github/workflows/python.yml index 2df01f72..7cf83362 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -5,7 +5,7 @@ name: build on: push: - branches: [ master ] + branches: [ master ] pull_request: branches: [ master ] diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index c5cf8bca..9d26fa3a 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -375,10 +375,11 @@ def is_running(self) -> bool: return self._polling_thread.is_alive() def stop(self) -> None: - """ Stop the polling thread and wait for it to exit. """ + """ Stop the polling thread and briefly wait for it to exit. """ if self.is_running: self.stopped.set() - self._polling_thread.join() + # no need to wait too long as this exists to avoid interfering with tests + self._polling_thread.join(timeout=0.2) def _run(self) -> None: """ Triggered as part of the thread which fetches the datafile and sleeps until next update interval. 
""" diff --git a/optimizely/entities.py b/optimizely/entities.py index 63b54f68..fed1a49a 100644 --- a/optimizely/entities.py +++ b/optimizely/entities.py @@ -188,7 +188,7 @@ def __str__(self) -> str: class Integration(BaseEntity): - def __init__(self, key: str, host: Optional[str] = None, publicKey: Optional[str] = None): + def __init__(self, key: str, host: Optional[str] = None, publicKey: Optional[str] = None, **kwargs: Any): self.key = key self.host = host self.publicKey = publicKey diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 595513a8..8408cbcc 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -345,10 +345,8 @@ def _get_feature_variable_for_type( source_info = {} variable_value = variable.defaultValue - user_context = self.create_user_context(user_id, attributes) - # error is logged in create_user_context - if user_context is None: - return None + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) + decision, _ = self.decision_service.get_variation_for_feature(project_config, feature_flag, user_context) if decision.variation: @@ -434,10 +432,8 @@ def _get_all_feature_variables_for_type( feature_enabled = False source_info = {} - user_context = self.create_user_context(user_id, attributes) - # error is logged in create_user_context - if user_context is None: - return None + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) + decision, _ = self.decision_service.get_variation_for_feature(project_config, feature_flag, user_context) if decision.variation: @@ -643,10 +639,7 @@ def get_variation( if not self._validate_user_inputs(attributes): return None - user_context = self.create_user_context(user_id, attributes) - # error is logged in create_user_context - if not user_context: - return None + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) variation, _ = self.decision_service.get_variation(project_config, 
experiment, user_context) if variation: @@ -705,10 +698,8 @@ def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optiona feature_enabled = False source_info = {} - user_context = self.create_user_context(user_id, attributes) - # error is logged in create_user_context - if not user_context: - return False + + user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) decision, _ = self.decision_service.get_variation_for_feature(project_config, feature, user_context) is_source_experiment = decision.source == enums.DecisionSources.FEATURE_TEST @@ -1083,7 +1074,7 @@ def create_user_context( self.logger.error(enums.Errors.INVALID_INPUT.format('attributes')) return None - return OptimizelyUserContext(self, self.logger, user_id, attributes) + return OptimizelyUserContext(self, self.logger, user_id, attributes, True) def _decide( self, user_context: Optional[OptimizelyUserContext], key: str, @@ -1330,8 +1321,8 @@ def setup_odp(self) -> None: if not self.sdk_settings.segments_cache: self.sdk_settings.segments_cache = LRUCache( - self.sdk_settings.segments_cache_size or enums.OdpSegmentsCacheConfig.DEFAULT_CAPACITY, - self.sdk_settings.segments_cache_timeout_in_secs or enums.OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS + self.sdk_settings.segments_cache_size, + self.sdk_settings.segments_cache_timeout_in_secs ) def _update_odp_config_on_datafile_update(self) -> None: @@ -1354,9 +1345,17 @@ def _update_odp_config_on_datafile_update(self) -> None: ) def identify_user(self, user_id: str) -> None: + if not self.is_valid: + self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('identify_user')) + return + self.odp_manager.identify_user(user_id) def fetch_qualified_segments(self, user_id: str, options: Optional[list[str]] = None) -> Optional[list[str]]: + if not self.is_valid: + self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('fetch_qualified_segments')) + return None + return 
self.odp_manager.fetch_qualified_segments(user_id, options or []) def send_odp_event( @@ -1376,11 +1375,16 @@ def send_odp_event( data: An optional dictionary for associated data. The default event data will be added to this data before sending to the ODP server. """ + if not self.is_valid: + self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('send_odp_event')) + return + self.odp_manager.send_event(type, action, identifiers or {}, data or {}) def close(self) -> None: if callable(getattr(self.event_processor, 'stop', None)): self.event_processor.stop() # type: ignore[attr-defined] - self.odp_manager.close() + if self.is_valid: + self.odp_manager.close() if callable(getattr(self.config_manager, 'stop', None)): self.config_manager.stop() # type: ignore[attr-defined] diff --git a/optimizely/optimizely_config.py b/optimizely/optimizely_config.py index 397ddba5..c4f55d86 100644 --- a/optimizely/optimizely_config.py +++ b/optimizely/optimizely_config.py @@ -343,9 +343,11 @@ def _get_variables_map( # set variation specific variable value if any if variation.get('featureEnabled'): + feature_variables_map = self.feature_key_variable_id_to_variable_map[feature_flag['key']] for variable in variation.get('variables', []): - feature_variable = self.feature_key_variable_id_to_variable_map[feature_flag['key']][variable['id']] - variables_map[feature_variable.key].value = variable['value'] + feature_variable = feature_variables_map.get(variable['id']) + if feature_variable: + variables_map[feature_variable.key].value = variable['value'] return variables_map diff --git a/optimizely/project_config.py b/optimizely/project_config.py index 9490e735..adfeee41 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -112,7 +112,9 @@ def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): self.experiment_id_map[experiment_dict['id']] = entities.Experiment(**experiment_dict) if self.integrations: - self.integration_key_map = 
self._generate_key_map(self.integrations, 'key', entities.Integration) + self.integration_key_map = self._generate_key_map( + self.integrations, 'key', entities.Integration, first_value=True + ) odp_integration = self.integration_key_map.get('odp') if odp_integration: self.public_key_for_odp = odp_integration.publicKey @@ -191,7 +193,7 @@ def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): @staticmethod def _generate_key_map( - entity_list: Iterable[Any], key: str, entity_class: Type[EntityClass] + entity_list: Iterable[Any], key: str, entity_class: Type[EntityClass], first_value: bool = False ) -> dict[str, EntityClass]: """ Helper method to generate map from key to entity object for given list of dicts. @@ -199,13 +201,16 @@ def _generate_key_map( entity_list: List consisting of dict. key: Key in each dict which will be key in the map. entity_class: Class representing the entity. + first_value: If True, only save the first value found for each key. Returns: Map mapping key to entity object. 
""" - key_map = {} + key_map: dict[str, EntityClass] = {} for obj in entity_list: + if first_value and key_map.get(obj[key]): + continue key_map[obj[key]] = entity_class(**obj) return key_map From 3fe4935b3d6ec0640ad6542a28565e5a82c209fa Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Fri, 3 Feb 2023 16:56:39 -0500 Subject: [PATCH 32/68] fix: add notification center registry (#413) * add notification center registry * add abstractmethod get_sdk_key to BaseConfigManager * make sdk_key or datafile required in PollingConfigManager --- optimizely/config_manager.py | 38 ++++++++-- optimizely/helpers/enums.py | 1 + optimizely/notification_center_registry.py | 64 ++++++++++++++++ optimizely/optimizely.py | 61 ++++++++------- tests/base.py | 18 ++++- tests/test_config.py | 1 + tests/test_config_manager.py | 8 +- tests/test_notification_center_registry.py | 84 +++++++++++++++++++++ tests/test_optimizely.py | 88 ++++++++++++++++++++-- tests/test_optimizely_config.py | 2 +- tests/test_optimizely_factory.py | 85 +++++++++++++++++++++ 11 files changed, 401 insertions(+), 49 deletions(-) create mode 100644 optimizely/notification_center_registry.py create mode 100644 tests/test_notification_center_registry.py diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index 9d26fa3a..247f5ce5 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -1,4 +1,4 @@ -# Copyright 2019-2020, 2022, Optimizely +# Copyright 2019-2020, 2022-2023, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -25,6 +25,7 @@ from . 
import project_config from .error_handler import NoOpErrorHandler, BaseErrorHandler from .notification_center import NotificationCenter +from .notification_center_registry import _NotificationCenterRegistry from .helpers import enums from .helpers import validator from .optimizely_config import OptimizelyConfig, OptimizelyConfigService @@ -78,6 +79,13 @@ def get_config(self) -> Optional[project_config.ProjectConfig]: The config should be an instance of project_config.ProjectConfig.""" pass + @abstractmethod + def get_sdk_key(self) -> Optional[str]: + """ Get sdk_key for use by optimizely.Optimizely. + The sdk_key should uniquely identify the datafile for a project and environment combination. + """ + pass + class StaticConfigManager(BaseConfigManager): """ Config manager that returns ProjectConfig based on provided datafile. """ @@ -106,9 +114,13 @@ def __init__( ) self._config: project_config.ProjectConfig = None # type: ignore[assignment] self.optimizely_config: Optional[OptimizelyConfig] = None + self._sdk_key: Optional[str] = None self.validate_schema = not skip_json_validation self._set_config(datafile) + def get_sdk_key(self) -> Optional[str]: + return self._sdk_key + def _set_config(self, datafile: Optional[str | bytes]) -> None: """ Looks up and sets datafile and config based on response body. @@ -146,8 +158,16 @@ def _set_config(self, datafile: Optional[str | bytes]) -> None: return self._config = config + self._sdk_key = self._sdk_key or config.sdk_key self.optimizely_config = OptimizelyConfigService(config).get_config() self.notification_center.send_notifications(enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE) + + internal_notification_center = _NotificationCenterRegistry.get_notification_center( + self._sdk_key, self.logger + ) + if internal_notification_center: + internal_notification_center.send_notifications(enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE) + self.logger.debug( 'Received new datafile and updated config. 
' f'Old revision number: {previous_revision}. New revision number: {config.get_revision()}.' @@ -181,11 +201,12 @@ def __init__( notification_center: Optional[NotificationCenter] = None, skip_json_validation: Optional[bool] = False, ): - """ Initialize config manager. One of sdk_key or url has to be set to be able to use. + """ Initialize config manager. One of sdk_key or datafile has to be set to be able to use. Args: - sdk_key: Optional string uniquely identifying the datafile. - datafile: Optional JSON string representing the project. + sdk_key: Optional string uniquely identifying the datafile. If not provided, datafile must + contain a sdk_key. + datafile: Optional JSON string representing the project. If not provided, sdk_key is required. update_interval: Optional floating point number representing time interval in seconds at which to request datafile and set ProjectConfig. blocking_timeout: Optional Time in seconds to block the get_config call until config object @@ -209,8 +230,13 @@ def __init__( notification_center=notification_center, skip_json_validation=skip_json_validation, ) + self._sdk_key = sdk_key or self._sdk_key + + if self._sdk_key is None: + raise optimizely_exceptions.InvalidInputException(enums.Errors.MISSING_SDK_KEY) + self.datafile_url = self.get_datafile_url( - sdk_key, url, url_template or self.DATAFILE_URL_TEMPLATE + self._sdk_key, url, url_template or self.DATAFILE_URL_TEMPLATE ) self.set_update_interval(update_interval) self.set_blocking_timeout(blocking_timeout) @@ -415,7 +441,7 @@ def __init__( *args: Any, **kwargs: Any ): - """ Initialize config manager. One of sdk_key or url has to be set to be able to use. + """ Initialize config manager. One of sdk_key or datafile has to be set to be able to use. Args: datafile_access_token: String to be attached to the request header to fetch the authenticated datafile. 
diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 8ba311a1..56fb4946 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -126,6 +126,7 @@ class Errors: ODP_NOT_INTEGRATED: Final = 'ODP is not integrated.' ODP_NOT_ENABLED: Final = 'ODP is not enabled.' ODP_INVALID_DATA: Final = 'ODP data is not valid.' + MISSING_SDK_KEY: Final = 'SDK key not provided/cannot be found in the datafile.' class ForcedDecisionLogs: diff --git a/optimizely/notification_center_registry.py b/optimizely/notification_center_registry.py new file mode 100644 index 00000000..b07702ab --- /dev/null +++ b/optimizely/notification_center_registry.py @@ -0,0 +1,64 @@ +# Copyright 2023, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +from threading import Lock +from typing import Optional +from .logger import Logger as OptimizelyLogger +from .notification_center import NotificationCenter +from .helpers.enums import Errors + + +class _NotificationCenterRegistry: + """ Class managing internal notification centers.""" + _notification_centers: dict[str, NotificationCenter] = {} + _lock = Lock() + + @classmethod + def get_notification_center(cls, sdk_key: Optional[str], logger: OptimizelyLogger) -> Optional[NotificationCenter]: + """Returns an internal notification center for the given sdk_key, creating one + if none exists yet. + + Args: + sdk_key: A string sdk key to uniquely identify the notification center. 
+ logger: Optional logger. + + Returns: + None or NotificationCenter + """ + + if not sdk_key: + logger.error(f'{Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + return None + + with cls._lock: + if sdk_key in cls._notification_centers: + notification_center = cls._notification_centers[sdk_key] + else: + notification_center = NotificationCenter(logger) + cls._notification_centers[sdk_key] = notification_center + + return notification_center + + @classmethod + def remove_notification_center(cls, sdk_key: str) -> None: + """Remove a previously added notification center and clear all its listeners. + + Args: + sdk_key: The sdk_key of the notification center to remove. + """ + + with cls._lock: + notification_center = cls._notification_centers.pop(sdk_key, None) + if notification_center: + notification_center.clear_all_notification_listeners() diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 8408cbcc..00451175 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1,4 +1,4 @@ -# Copyright 2016-2022, Optimizely +# Copyright 2016-2023, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -37,6 +37,7 @@ from .helpers.sdk_settings import OptimizelySdkSettings from .helpers.enums import DecisionSources from .notification_center import NotificationCenter +from .notification_center_registry import _NotificationCenterRegistry from .odp.lru_cache import LRUCache from .odp.odp_manager import OdpManager from .optimizely_config import OptimizelyConfig, OptimizelyConfigService @@ -143,18 +144,6 @@ def __init__( self.logger.exception(str(error)) return - self.setup_odp() - - self.odp_manager = OdpManager( - self.sdk_settings.odp_disabled, - self.sdk_settings.segments_cache, - self.sdk_settings.odp_segment_manager, - self.sdk_settings.odp_event_manager, - self.sdk_settings.fetch_segments_timeout, - self.sdk_settings.odp_event_timeout, - self.logger - ) - config_manager_options: dict[str, Any] = { 'datafile': datafile, 'logger': self.logger, @@ -174,8 +163,8 @@ def __init__( else: self.config_manager = StaticConfigManager(**config_manager_options) - if not self.sdk_settings.odp_disabled: - self._update_odp_config_on_datafile_update() + self.odp_manager: OdpManager + self.setup_odp(self.config_manager.get_sdk_key()) self.event_builder = event_builder.EventBuilder() self.decision_service = decision_service.DecisionService(self.logger, user_profile_service) @@ -1303,28 +1292,46 @@ def _decide_for_keys( decisions[key] = decision return decisions - def setup_odp(self) -> None: + def setup_odp(self, sdk_key: Optional[str]) -> None: """ - - Make sure cache is instantiated with provided parameters or defaults. + - Make sure odp manager is instantiated with provided parameters or defaults. - Set up listener to update odp_config when datafile is updated. + - Manually call callback in case datafile was received before the listener was registered. 
""" - if self.sdk_settings.odp_disabled: - return - self.notification_center.add_notification_listener( - enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE, - self._update_odp_config_on_datafile_update + # no need to instantiate a cache if a custom cache or segment manager is provided. + if ( + not self.sdk_settings.odp_disabled and + not self.sdk_settings.odp_segment_manager and + not self.sdk_settings.segments_cache + ): + self.sdk_settings.segments_cache = LRUCache( + self.sdk_settings.segments_cache_size, + self.sdk_settings.segments_cache_timeout_in_secs + ) + + self.odp_manager = OdpManager( + self.sdk_settings.odp_disabled, + self.sdk_settings.segments_cache, + self.sdk_settings.odp_segment_manager, + self.sdk_settings.odp_event_manager, + self.sdk_settings.fetch_segments_timeout, + self.sdk_settings.odp_event_timeout, + self.logger ) - if self.sdk_settings.odp_segment_manager: + if self.sdk_settings.odp_disabled: return - if not self.sdk_settings.segments_cache: - self.sdk_settings.segments_cache = LRUCache( - self.sdk_settings.segments_cache_size, - self.sdk_settings.segments_cache_timeout_in_secs + internal_notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, self.logger) + if internal_notification_center: + internal_notification_center.add_notification_listener( + enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE, + self._update_odp_config_on_datafile_update ) + self._update_odp_config_on_datafile_update() + def _update_odp_config_on_datafile_update(self) -> None: config = None diff --git a/tests/base.py b/tests/base.py index 6e74e3aa..875a26e6 100644 --- a/tests/base.py +++ b/tests/base.py @@ -1,4 +1,4 @@ -# Copyright 2016-2021, Optimizely +# Copyright 2016-2023 Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -58,6 +58,7 @@ def fake_server_response(self, status_code: Optional[int] = None, def setUp(self, config_dict='config_dict'): self.config_dict = { 'revision': '42', + 'sdkKey': 'basic-test', 'version': '2', 'events': [ {'key': 'test_event', 'experimentIds': ['111127'], 'id': '111095'}, @@ -150,6 +151,7 @@ def setUp(self, config_dict='config_dict'): # datafile version 4 self.config_dict_with_features = { 'revision': '1', + 'sdkKey': 'features-test', 'accountId': '12001', 'projectId': '111111', 'version': '4', @@ -552,6 +554,7 @@ def setUp(self, config_dict='config_dict'): self.config_dict_with_multiple_experiments = { 'revision': '42', + 'sdkKey': 'multiple-experiments', 'version': '2', 'events': [ {'key': 'test_event', 'experimentIds': ['111127', '111130'], 'id': '111095'}, @@ -657,6 +660,7 @@ def setUp(self, config_dict='config_dict'): self.config_dict_with_unsupported_version = { 'version': '5', + 'sdkKey': 'unsupported-version', 'rollouts': [], 'projectId': '10431130345', 'variables': [], @@ -1073,6 +1077,7 @@ def setUp(self, config_dict='config_dict'): {'key': 'user_signed_up', 'id': '594090', 'experimentIds': ['1323241598', '1323241599']}, ], 'revision': '3', + 'sdkKey': 'typed-audiences', } self.config_dict_with_audience_segments = { @@ -1261,8 +1266,15 @@ def setUp(self, config_dict='config_dict'): } ], 'accountId': '10367498574', - 'events': [], - 'revision': '101' + 'events': [ + { + "experimentIds": ["10420810910"], + "id": "10404198134", + "key": "event1" + } + ], + 'revision': '101', + 'sdkKey': 'segments-test' } config = getattr(self, config_dict) diff --git a/tests/test_config.py b/tests/test_config.py index 3b95b02e..9a16035d 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -160,6 +160,7 @@ def test_init__with_v4_datafile(self): # Adding some additional fields like live variables and IP anonymization config_dict = { 'revision': '42', + 'sdkKey': 'test', 'version': '4', 'anonymizeIP': False, 
'botFiltering': True, diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py index 38dcfa33..6f4038cb 100644 --- a/tests/test_config_manager.py +++ b/tests/test_config_manager.py @@ -220,14 +220,14 @@ def test_get_config_blocks(self): @mock.patch('requests.get') class PollingConfigManagerTest(base.BaseTest): - def test_init__no_sdk_key_no_url__fails(self, _): - """ Test that initialization fails if there is no sdk_key or url provided. """ + def test_init__no_sdk_key_no_datafile__fails(self, _): + """ Test that initialization fails if there is no sdk_key or datafile provided. """ self.assertRaisesRegex( optimizely_exceptions.InvalidInputException, - 'Must provide at least one of sdk_key or url.', + enums.Errors.MISSING_SDK_KEY, config_manager.PollingConfigManager, sdk_key=None, - url=None, + datafile=None, ) def test_get_datafile_url__no_sdk_key_no_url_raises(self, _): diff --git a/tests/test_notification_center_registry.py b/tests/test_notification_center_registry.py new file mode 100644 index 00000000..9159d01a --- /dev/null +++ b/tests/test_notification_center_registry.py @@ -0,0 +1,84 @@ +# Copyright 2023, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +from unittest import mock +import copy + +from optimizely.notification_center_registry import _NotificationCenterRegistry +from optimizely.notification_center import NotificationCenter +from optimizely.optimizely import Optimizely +from optimizely.helpers.enums import NotificationTypes, Errors +from .base import BaseTest + + +class NotificationCenterRegistryTest(BaseTest): + def test_get_notification_center(self): + logger = mock.MagicMock() + sdk_key = 'test' + client = Optimizely(sdk_key=sdk_key, logger=logger) + notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, logger) + self.assertIsInstance(notification_center, NotificationCenter) + config_notifications = notification_center.notification_listeners[NotificationTypes.OPTIMIZELY_CONFIG_UPDATE] + + self.assertIn((mock.ANY, client._update_odp_config_on_datafile_update), config_notifications) + + logger.error.assert_not_called() + + _NotificationCenterRegistry.get_notification_center(None, logger) + + logger.error.assert_called_once_with(f'{Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + + client.close() + + def test_only_one_notification_center_created(self): + logger = mock.MagicMock() + sdk_key = 'single' + notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, logger) + client = Optimizely(sdk_key=sdk_key, logger=logger) + + self.assertIs(notification_center, _NotificationCenterRegistry.get_notification_center(sdk_key, logger)) + + logger.error.assert_not_called() + + client.close() + + def test_remove_notification_center(self): + logger = mock.MagicMock() + sdk_key = 'segments-test' + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, logger) + + with mock.patch('requests.get', return_value=test_response), \ + 
mock.patch.object(notification_center, 'send_notifications') as mock_send: + + client = Optimizely(sdk_key=sdk_key, logger=logger) + client.config_manager.get_config() + + mock_send.assert_called_once() + mock_send.reset_mock() + + _NotificationCenterRegistry.remove_notification_center(sdk_key) + self.assertNotIn(notification_center, _NotificationCenterRegistry._notification_centers) + + revised_datafile = copy.deepcopy(self.config_dict_with_audience_segments) + revised_datafile['revision'] = str(int(revised_datafile['revision']) + 1) + + # trigger notification + client.config_manager._set_config(json.dumps(revised_datafile)) + mock_send.assert_not_called() + + logger.error.assert_not_called() + + client.close() diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index c6132598..c0a69cf1 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -12,6 +12,7 @@ # limitations under the License. import json +import time from operator import itemgetter from unittest import mock @@ -25,6 +26,7 @@ from optimizely import logger from optimizely import optimizely from optimizely import optimizely_config +from optimizely.odp.odp_config import OdpConfigState from optimizely import project_config from optimizely import version from optimizely.event.event_factory import EventFactory @@ -92,7 +94,10 @@ def test_init__invalid_datafile__logs_error(self): with mock.patch('optimizely.logger.reset_logger', return_value=mock_client_logger): opt_obj = optimizely.Optimizely('invalid_datafile') - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) self.assertIsNone(opt_obj.config_manager.get_config()) def test_init__null_datafile__logs_error(self): @@ -102,7 +107,10 @@ def 
test_init__null_datafile__logs_error(self): with mock.patch('optimizely.logger.reset_logger', return_value=mock_client_logger): opt_obj = optimizely.Optimizely(None) - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) self.assertIsNone(opt_obj.config_manager.get_config()) def test_init__empty_datafile__logs_error(self): @@ -112,7 +120,10 @@ def test_init__empty_datafile__logs_error(self): with mock.patch('optimizely.logger.reset_logger', return_value=mock_client_logger): opt_obj = optimizely.Optimizely("") - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) self.assertIsNone(opt_obj.config_manager.get_config()) def test_init__invalid_config_manager__logs_error(self): @@ -204,9 +215,10 @@ def test_init__unsupported_datafile_version__logs_error(self): ) as mock_error_handler: opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_unsupported_version)) - mock_client_logger.error.assert_called_once_with( - 'This version of the Python SDK does not support the given datafile version: "5".' 
- ) + mock_client_logger.error.assert_has_calls([ + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.'), + mock.call('This version of the Python SDK does not support the given datafile version: "5".') + ], any_order=True) args, kwargs = mock_error_handler.call_args self.assertIsInstance(args[0], exceptions.UnsupportedDatafileVersionException) @@ -276,7 +288,10 @@ def test_invalid_json_raises_schema_validation_off(self): ) as mock_error_handler: opt_obj = optimizely.Optimizely('invalid_json', skip_json_validation=True) - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) args, kwargs = mock_error_handler.call_args self.assertIsInstance(args[0], exceptions.InvalidInputException) self.assertEqual(args[0].args[0], 'Provided "datafile" is in an invalid format.') @@ -293,7 +308,10 @@ def test_invalid_json_raises_schema_validation_off(self): {'version': '2', 'events': 'invalid_value', 'experiments': 'invalid_value'}, skip_json_validation=True, ) - mock_client_logger.error.assert_called_once_with('Provided "datafile" is in an invalid format.') + mock_client_logger.error.assert_has_calls([ + mock.call('Provided "datafile" is in an invalid format.'), + mock.call(f'{enums.Errors.MISSING_SDK_KEY} ODP may not work properly without it.') + ], any_order=True) args, kwargs = mock_error_handler.call_args self.assertIsInstance(args[0], exceptions.InvalidInputException) self.assertEqual(args[0].args[0], 'Provided "datafile" is in an invalid format.') @@ -4616,6 +4634,9 @@ def test_get_optimizely_config_with_custom_config_manager(self): return_config = some_obj.config_manager.get_config() class SomeConfigManager: + def get_sdk_key(self): + return return_config.sdk_key + def get_config(self): return 
return_config @@ -4631,6 +4652,57 @@ def get_config(self): self.assertEqual(1, mock_opt_service.call_count) + def test_odp_updated_with_custom_polling_config(self): + logger = mock.MagicMock() + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + def delay(*args, **kwargs): + time.sleep(.5) + return mock.DEFAULT + + with mock.patch('requests.get', return_value=test_response, side_effect=delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + custom_config_manager = config_manager.PollingConfigManager(sdk_key='segments-test', logger=logger) + client = optimizely.Optimizely(config_manager=custom_config_manager) + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + custom_config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() + + def test_odp_events_not_sent_with_legacy_apis(self): + logger = mock.MagicMock() + experiment_key = 'experiment-segment' + feature_key = 'flag-segment' + user_id = 'test_user' + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + client = optimizely.Optimizely(test_datafile, logger=logger) + + with mock.patch.object(client.odp_manager.event_manager, 'send_event') as send_event_mock: + client.activate(experiment_key, user_id) + client.track('event1', user_id) + client.get_variation(experiment_key, user_id) + client.get_all_feature_variables(feature_key, user_id) + client.is_feature_enabled(feature_key, user_id) + + send_event_mock.assert_not_called() + + client.close() + class OptimizelyWithExceptionTest(base.BaseTest): def 
setUp(self): diff --git a/tests/test_optimizely_config.py b/tests/test_optimizely_config.py index 640100d7..e33c1272 100644 --- a/tests/test_optimizely_config.py +++ b/tests/test_optimizely_config.py @@ -26,7 +26,7 @@ def setUp(self): self.opt_config_service = optimizely_config.OptimizelyConfigService(self.project_config) self.expected_config = { - 'sdk_key': '', + 'sdk_key': 'features-test', 'environment_key': '', 'attributes': [{'key': 'test_attribute', 'id': '111094'}], 'events': [{'key': 'test_event', 'experiment_ids': ['111127'], 'id': '111095'}], diff --git a/tests/test_optimizely_factory.py b/tests/test_optimizely_factory.py index 1792f80f..be41755a 100644 --- a/tests/test_optimizely_factory.py +++ b/tests/test_optimizely_factory.py @@ -12,9 +12,11 @@ # limitations under the License. import json +import time from unittest import mock from optimizely.config_manager import PollingConfigManager +from optimizely.odp.odp_config import OdpConfigState from optimizely.error_handler import NoOpErrorHandler from optimizely.event_dispatcher import EventDispatcher from optimizely.notification_center import NotificationCenter @@ -26,6 +28,10 @@ @mock.patch('requests.get') class OptimizelyFactoryTest(base.BaseTest): + def delay(*args, **kwargs): + time.sleep(.5) + return mock.DEFAULT + def setUp(self): super().setUp() self.datafile = '{ revision: "42" }' @@ -181,3 +187,82 @@ def test_update_odp_config_correctly(self, _): self.assertEqual(odp_config.get_api_host(), odp_settings['host']) client.close() + + def test_update_odp_config_correctly_with_custom_config_manager_and_delay(self, _): + logger = mock.MagicMock() + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + with mock.patch('requests.get', return_value=test_response, side_effect=self.delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + config_manager 
= PollingConfigManager(sdk_key='test', logger=logger) + client = OptimizelyFactory.default_instance_with_config_manager(config_manager=config_manager) + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + client.config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() + + def test_update_odp_config_correctly_with_delay(self, _): + logger = mock.MagicMock() + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + with mock.patch('requests.get', return_value=test_response, side_effect=self.delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + client = OptimizelyFactory.default_instance(sdk_key='test') + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + client.config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() + + def test_odp_updated_with_custom_instance(self, _): + logger = mock.MagicMock() + + test_datafile = json.dumps(self.config_dict_with_audience_segments) + test_response = self.fake_server_response(status_code=200, content=test_datafile) + + with mock.patch('requests.get', return_value=test_response, side_effect=self.delay): + # initialize config_manager with delay, so it will receive the datafile after client initialization + 
client = OptimizelyFactory.custom_instance(sdk_key='test') + odp_manager = client.odp_manager + + # confirm odp config has not yet been updated + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.UNDETERMINED) + + # wait for datafile + client.config_manager.get_config() + + # wait for odp config to be updated + odp_manager.event_manager.event_queue.join() + + self.assertEqual(odp_manager.odp_config.odp_state(), OdpConfigState.INTEGRATED) + + logger.error.assert_not_called() + + client.close() From b2f1cc9e2e9770fdc683521fe91cb748637503c1 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Mon, 6 Feb 2023 09:39:46 -0500 Subject: [PATCH 33/68] update changelog with pollingconfig change (#415) --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aafa1f33..ff77ec70 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Optimizely Python SDK Changelog +## Unreleased + +### Breaking Changes: +* `PollingConfigManager` now requires `sdk_key` even when providing a url. 
([#413](https://github.com/optimizely/python-sdk/pull/413)) + ## 4.1.0 July 7th, 2022 From e13482f4cda4f072191174c37e444dff16b786d9 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Tue, 7 Feb 2023 11:07:20 -0800 Subject: [PATCH 34/68] feat: add odp event flush interval (#414) * expose odp flush interval to the client * add odp flush interval and tests * tests fix * Delete z_matjaz_play directory * pr fixes * add a new test * cleanup * rename docstring --- optimizely/helpers/sdk_settings.py | 16 ++++--- optimizely/odp/odp_event_manager.py | 16 ++++--- optimizely/odp/odp_manager.py | 4 +- optimizely/optimizely.py | 3 +- tests/test_odp_event_manager.py | 29 +++++++++++-- tests/test_optimizely.py | 65 ++++++++++++++++++++++++++--- 6 files changed, 111 insertions(+), 22 deletions(-) diff --git a/optimizely/helpers/sdk_settings.py b/optimizely/helpers/sdk_settings.py index 00142e54..6b31ee9c 100644 --- a/optimizely/helpers/sdk_settings.py +++ b/optimizely/helpers/sdk_settings.py @@ -31,8 +31,9 @@ def __init__( odp_segments_cache: Optional[OptimizelySegmentsCache] = None, odp_segment_manager: Optional[OdpSegmentManager] = None, odp_event_manager: Optional[OdpEventManager] = None, - fetch_segments_timeout: Optional[int] = None, - odp_event_timeout: Optional[int] = None + odp_segment_request_timeout: Optional[int] = None, + odp_event_request_timeout: Optional[int] = None, + odp_event_flush_interval: Optional[int] = None ) -> None: """ Args: @@ -47,8 +48,10 @@ def __init__( `fetch_qualified_segments(user_key, user_value, options)`. odp_event_manager: A custom odp event manager. Required method is: `send_event(type:, action:, identifiers:, data:)` - fetch_segments_timeout: A fetch segment timeout in seconds (optional). - odp_event_timeout: A send odp event timeout in seconds (optional). + odp_segment_request_timeout: Time to wait in seconds for fetch_qualified_segments request to + send successfully (optional). 
+ odp_event_request_timeout: Time to wait in seconds for send_odp_events request to send successfully. + odp_event_flush_interval: Time to wait for events to accumulate before sending a batch in seconds (optional). """ self.odp_disabled = odp_disabled @@ -57,5 +60,6 @@ def __init__( self.segments_cache = odp_segments_cache self.odp_segment_manager = odp_segment_manager self.odp_event_manager = odp_event_manager - self.fetch_segments_timeout = fetch_segments_timeout - self.odp_event_timeout = odp_event_timeout + self.fetch_segments_timeout = odp_segment_request_timeout + self.odp_event_timeout = odp_event_request_timeout + self.odp_flush_interval = odp_event_flush_interval diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py index 2c4a6cda..67f1dd7d 100644 --- a/optimizely/odp/odp_event_manager.py +++ b/optimizely/odp/odp_event_manager.py @@ -40,31 +40,37 @@ class OdpEventManager: The OdpEventManager maintains a single consumer thread that pulls events off of the queue and buffers them before events are sent to ODP. Sends events when the batch size is met or when the flush timeout has elapsed. + Flushes the event queue after specified time (seconds). """ def __init__( self, logger: Optional[_logging.Logger] = None, api_manager: Optional[OdpEventApiManager] = None, - timeout: Optional[int] = None + request_timeout: Optional[int] = None, + flush_interval: Optional[int] = None ): """OdpEventManager init method to configure event batching. Args: logger: Optional component which provides a log method to log messages. By default nothing would be logged. api_manager: Optional component which sends events to ODP. - timeout: Optional event timeout in seconds. + request_timeout: Optional event timeout in seconds - wait time for odp platform to respond before failing. + flush_interval: Optional time to wait for events to accumulate before sending the batch in seconds. 
""" self.logger = logger or _logging.NoOpLogger() - self.api_manager = api_manager or OdpEventApiManager(self.logger, timeout) + self.api_manager = api_manager or OdpEventApiManager(self.logger, request_timeout) self.odp_config: Optional[OdpConfig] = None self.api_key: Optional[str] = None self.api_host: Optional[str] = None self.event_queue: Queue[OdpEvent | Signal] = Queue(OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY) - self.batch_size = OdpEventManagerConfig.DEFAULT_BATCH_SIZE - self.flush_interval = OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL + self.batch_size = 0 if flush_interval == 0 else OdpEventManagerConfig.DEFAULT_BATCH_SIZE + + self.flush_interval = OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL if flush_interval is None \ + else flush_interval + self._flush_deadline: float = 0 self.retry_count = OdpEventManagerConfig.DEFAULT_RETRY_COUNT self._current_batch: list[OdpEvent] = [] diff --git a/optimizely/odp/odp_manager.py b/optimizely/odp/odp_manager.py index f122523a..a6e26253 100644 --- a/optimizely/odp/odp_manager.py +++ b/optimizely/odp/odp_manager.py @@ -35,6 +35,7 @@ def __init__( event_manager: Optional[OdpEventManager] = None, fetch_segments_timeout: Optional[int] = None, odp_event_timeout: Optional[int] = None, + odp_flush_interval: Optional[int] = None, logger: Optional[optimizely_logger.Logger] = None ) -> None: @@ -58,7 +59,8 @@ def __init__( ) self.segment_manager = OdpSegmentManager(segments_cache, logger=self.logger, timeout=fetch_segments_timeout) - self.event_manager = self.event_manager or OdpEventManager(self.logger, timeout=odp_event_timeout) + self.event_manager = self.event_manager or OdpEventManager(self.logger, request_timeout=odp_event_timeout, + flush_interval=odp_flush_interval) self.segment_manager.odp_config = self.odp_config def fetch_qualified_segments(self, user_id: str, options: list[str]) -> Optional[list[str]]: diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 00451175..dd6a8954 100644 --- 
a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1317,7 +1317,8 @@ def setup_odp(self, sdk_key: Optional[str]) -> None: self.sdk_settings.odp_event_manager, self.sdk_settings.fetch_segments_timeout, self.sdk_settings.odp_event_timeout, - self.logger + self.sdk_settings.odp_flush_interval, + self.logger, ) if self.sdk_settings.odp_disabled: diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py index 20456997..0642f393 100644 --- a/tests/test_odp_event_manager.py +++ b/tests/test_odp_event_manager.py @@ -382,10 +382,10 @@ def test_odp_event_manager_override_default_data(self, *args): mock_send.assert_called_once_with(self.api_key, self.api_host, [processed_event]) event_manager.stop() - def test_odp_event_manager_flush_timeout(self, *args): + def test_odp_event_manager_flush_interval(self, *args): + """Verify that both events have been sent together after they have been batched.""" mock_logger = mock.Mock() - event_manager = OdpEventManager(mock_logger) - event_manager.flush_interval = .5 + event_manager = OdpEventManager(mock_logger, flush_interval=.5) event_manager.start(self.odp_config) with mock.patch.object( @@ -394,13 +394,34 @@ def test_odp_event_manager_flush_timeout(self, *args): event_manager.send_event(**self.events[0]) event_manager.send_event(**self.events[1]) event_manager.event_queue.join() - time.sleep(1) + time.sleep(1) # ensures that the flush interval time has passed mock_logger.error.assert_not_called() mock_logger.debug.assert_any_call('ODP event queue: flushing on interval.') mock_send.assert_called_once_with(self.api_key, self.api_host, self.processed_events) event_manager.stop() + def test_odp_event_manager_flush_interval_is_zero(self, *args): + """Verify that event is immediately if flush interval is zero.""" + mock_logger = mock.Mock() + event_manager = OdpEventManager(mock_logger, flush_interval=0) + event_manager.start(self.odp_config) + + with mock.patch.object( + event_manager.api_manager, 
'send_odp_events', new_callable=CopyingMock, return_value=False + ) as mock_send: + event_manager.send_event(**self.events[0]) + event_manager.send_event(**self.events[1]) + event_manager.event_queue.join() + + mock_send.assert_has_calls( + [mock.call(self.api_key, self.api_host, [self.processed_events[0]]), + mock.call(self.api_key, self.api_host, [self.processed_events[1]])] + ) + mock_logger.error.assert_not_called() + mock_logger.debug.assert_any_call('ODP event queue: flushing batch size 1.') + event_manager.stop() + def test_odp_event_manager_events_before_odp_ready(self, *args): mock_logger = mock.Mock() odp_config = OdpConfig() diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index c0a69cf1..4c2eee54 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -5140,11 +5140,7 @@ def test_user_context_invalid_user_id(self): uc = self.optimizely.create_user_context(u) self.assertIsNone(uc, "invalid user id should return none") - def test_invalid_flag_key(self): - """Tests invalid flag key in function get_flag_variation_by_key().""" - pass - - def test_send_identify_event_when_called_with_odp_enabled(self): + def test_send_identify_event__when_called_with_odp_enabled(self): mock_logger = mock.Mock() client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) with mock.patch.object(client, 'identify_user') as identify: @@ -5154,6 +5150,34 @@ def test_send_identify_event_when_called_with_odp_enabled(self): mock_logger.error.assert_not_called() client.close() + def test_sdk_settings__accept_zero_for_flush_interval(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_flush_interval=0) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + flush_interval = client.odp_manager.event_manager.flush_interval + + self.assertEqual(flush_interval, 0) + 
mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__should_use_default_when_odp_flush_interval_none(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(odp_event_flush_interval=None) + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + flush_interval = client.odp_manager.event_manager.flush_interval + self.assertEqual(flush_interval, enums.OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL) + + mock_logger.error.assert_not_called() + client.close() + def test_sdk_settings__log_info_when_disabled(self): mock_logger = mock.Mock() sdk_settings = OptimizelySdkSettings(odp_disabled=True) @@ -5162,6 +5186,7 @@ def test_sdk_settings__log_info_when_disabled(self): logger=mock_logger, settings=sdk_settings ) + self.assertIsNone(client.odp_manager.event_manager) self.assertIsNone(client.odp_manager.segment_manager) mock_logger.info.assert_called_once_with('ODP is disabled.') @@ -5211,6 +5236,36 @@ def test_sdk_settings__accept_cache_size_and_cache_timeout(self): mock_logger.error.assert_not_called() client.close() + def test_sdk_settings__use_default_cache_size_and_timeout_when_odp_flush_interval_none(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings() + client = optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.timeout, enums.OdpSegmentsCacheConfig.DEFAULT_TIMEOUT_SECS) + self.assertEqual(segments_cache.capacity, enums.OdpSegmentsCacheConfig.DEFAULT_CAPACITY) + + mock_logger.error.assert_not_called() + client.close() + + def test_sdk_settings__accept_zero_cache_size_timeout_and_cache_size(self): + mock_logger = mock.Mock() + sdk_settings = OptimizelySdkSettings(segments_cache_size=0, segments_cache_timeout_in_secs=0) + client = 
optimizely.Optimizely( + json.dumps(self.config_dict_with_audience_segments), + logger=mock_logger, + settings=sdk_settings + ) + segments_cache = client.odp_manager.segment_manager.segments_cache + self.assertEqual(segments_cache.capacity, 0) + self.assertEqual(segments_cache.timeout, 0) + + mock_logger.error.assert_not_called() + client.close() + def test_sdk_settings__accept_valid_custom_cache(self): class CustomCache: def reset(self): From c8c80f0a92644adabf913ebcdb975155ff8cd76f Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Wed, 8 Feb 2023 19:04:21 -0500 Subject: [PATCH 35/68] fix: make client odp methods private (#416) * notification registry test fix * make client methods private --- optimizely/optimizely.py | 8 ++++---- optimizely/optimizely_user_context.py | 4 ++-- tests/test_notification_center_registry.py | 3 ++- tests/test_optimizely.py | 2 +- tests/test_user_context.py | 6 +++--- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index dd6a8954..7eeab834 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -164,7 +164,7 @@ def __init__( self.config_manager = StaticConfigManager(**config_manager_options) self.odp_manager: OdpManager - self.setup_odp(self.config_manager.get_sdk_key()) + self._setup_odp(self.config_manager.get_sdk_key()) self.event_builder = event_builder.EventBuilder() self.decision_service = decision_service.DecisionService(self.logger, user_profile_service) @@ -1292,7 +1292,7 @@ def _decide_for_keys( decisions[key] = decision return decisions - def setup_odp(self, sdk_key: Optional[str]) -> None: + def _setup_odp(self, sdk_key: Optional[str]) -> None: """ - Make sure odp manager is instantiated with provided parameters or defaults. - Set up listener to update odp_config when datafile is updated. 
@@ -1352,14 +1352,14 @@ def _update_odp_config_on_datafile_update(self) -> None: config.all_segments ) - def identify_user(self, user_id: str) -> None: + def _identify_user(self, user_id: str) -> None: if not self.is_valid: self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('identify_user')) return self.odp_manager.identify_user(user_id) - def fetch_qualified_segments(self, user_id: str, options: Optional[list[str]] = None) -> Optional[list[str]]: + def _fetch_qualified_segments(self, user_id: str, options: Optional[list[str]] = None) -> Optional[list[str]]: if not self.is_valid: self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('fetch_qualified_segments')) return None diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index e2674be1..fb674f93 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -73,7 +73,7 @@ def __init__( ] = {} if self.client and identify: - self.client.identify_user(user_id) + self.client._identify_user(user_id) class OptimizelyDecisionContext: """ Using class with attributes here instead of namedtuple because @@ -327,7 +327,7 @@ def fetch_qualified_segments( A boolean value indicating if the fetch was successful. 
""" def _fetch_qualified_segments() -> bool: - segments = self.client.fetch_qualified_segments(self.user_id, options or []) if self.client else None + segments = self.client._fetch_qualified_segments(self.user_id, options or []) if self.client else None self.set_qualified_segments(segments) success = segments is not None diff --git a/tests/test_notification_center_registry.py b/tests/test_notification_center_registry.py index 9159d01a..0f800cfd 100644 --- a/tests/test_notification_center_registry.py +++ b/tests/test_notification_center_registry.py @@ -69,8 +69,9 @@ def test_remove_notification_center(self): mock_send.assert_called_once() mock_send.reset_mock() + self.assertIn(notification_center, _NotificationCenterRegistry._notification_centers.values()) _NotificationCenterRegistry.remove_notification_center(sdk_key) - self.assertNotIn(notification_center, _NotificationCenterRegistry._notification_centers) + self.assertNotIn(notification_center, _NotificationCenterRegistry._notification_centers.values()) revised_datafile = copy.deepcopy(self.config_dict_with_audience_segments) revised_datafile['revision'] = str(int(revised_datafile['revision']) + 1) diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index 4c2eee54..9d37a133 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -5143,7 +5143,7 @@ def test_user_context_invalid_user_id(self): def test_send_identify_event__when_called_with_odp_enabled(self): mock_logger = mock.Mock() client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) - with mock.patch.object(client, 'identify_user') as identify: + with mock.patch.object(client, '_identify_user') as identify: client.create_user_context('user-id') identify.assert_called_once_with('user-id') diff --git a/tests/test_user_context.py b/tests/test_user_context.py index 15499792..48f08885 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -2014,7 +2014,7 @@ def 
test_none_client_should_not_fail(self): def test_send_identify_event_when_user_context_created(self): mock_logger = mock.Mock() client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) - with mock.patch.object(client, 'identify_user') as identify: + with mock.patch.object(client, '_identify_user') as identify: OptimizelyUserContext(client, mock_logger, 'user-id') identify.assert_called_once_with('user-id') @@ -2024,13 +2024,13 @@ def test_send_identify_event_when_user_context_created(self): def test_identify_is_skipped_with_decisions(self): mock_logger = mock.Mock() client = optimizely.Optimizely(json.dumps(self.config_dict_with_features), logger=mock_logger) - with mock.patch.object(client, 'identify_user') as identify: + with mock.patch.object(client, '_identify_user') as identify: user_context = OptimizelyUserContext(client, mock_logger, 'user-id') identify.assert_called_once_with('user-id') mock_logger.error.assert_not_called() - with mock.patch.object(client, 'identify_user') as identify: + with mock.patch.object(client, '_identify_user') as identify: user_context.decide('test_feature_in_rollout') user_context.decide_all() user_context.decide_for_keys(['test_feature_in_rollout']) From 8363350e20d146be51e55dfdea05163188b5b349 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Wed, 22 Feb 2023 16:41:52 -0500 Subject: [PATCH 36/68] change batch_size disabled 0 to 1 (#417) --- optimizely/odp/odp_event_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py index 67f1dd7d..18b08eb0 100644 --- a/optimizely/odp/odp_event_manager.py +++ b/optimizely/odp/odp_event_manager.py @@ -66,7 +66,7 @@ def __init__( self.api_host: Optional[str] = None self.event_queue: Queue[OdpEvent | Signal] = Queue(OdpEventManagerConfig.DEFAULT_QUEUE_CAPACITY) - self.batch_size = 0 if flush_interval 
== 0 else OdpEventManagerConfig.DEFAULT_BATCH_SIZE + self.batch_size = 1 if flush_interval == 0 else OdpEventManagerConfig.DEFAULT_BATCH_SIZE self.flush_interval = OdpEventManagerConfig.DEFAULT_FLUSH_INTERVAL if flush_interval is None \ else flush_interval From f000c6e98d9d21ae6ce338e687f5a42f506cbdd3 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Mon, 27 Feb 2023 13:56:41 -0500 Subject: [PATCH 37/68] [FSSDK-8946] fix: make odp event identifiers required (#418) * make odp event identifiers required --- optimizely/optimizely.py | 10 +++++++--- tests/test_optimizely.py | 32 +++++++++++++++++++++++++------- 2 files changed, 32 insertions(+), 10 deletions(-) diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 7eeab834..e7a594f2 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1369,8 +1369,8 @@ def _fetch_qualified_segments(self, user_id: str, options: Optional[list[str]] = def send_odp_event( self, action: str, + identifiers: dict[str, str], type: str = enums.OdpManagerConfig.EVENT_TYPE, - identifiers: Optional[dict[str, str]] = None, data: Optional[dict[str, str | int | float | bool | None]] = None ) -> None: """ @@ -1378,8 +1378,8 @@ def send_odp_event( Args: action: The event action name. + identifiers: A dictionary for identifiers. The caller must provide at least one key-value pair. type: The event type. Default 'fullstack'. - identifiers: An optional dictionary for identifiers. data: An optional dictionary for associated data. The default event data will be added to this data before sending to the ODP server. 
""" @@ -1387,7 +1387,11 @@ def send_odp_event( self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('send_odp_event')) return - self.odp_manager.send_event(type, action, identifiers or {}, data or {}) + if not identifiers or not isinstance(identifiers, dict): + self.logger.error('ODP events must have at least one key-value pair in identifiers.') + return + + self.odp_manager.send_event(type, action, identifiers, data or {}) def close(self) -> None: if callable(getattr(self.event_processor, 'stop', None)): diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index 9d37a133..19529b39 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -5386,7 +5386,7 @@ def test_send_odp_event__send_event_with_static_config_manager(self): logger=mock_logger, ) with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): - client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) client.close() mock_logger.error.assert_not_called() mock_logger.debug.assert_called_with('ODP event queue: flushing batch size 1.') @@ -5405,7 +5405,7 @@ def test_send_odp_event__send_event_with_polling_config_manager(self): client.config_manager.get_config() with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): - client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) client.close() mock_logger.error.assert_not_called() @@ -5419,14 +5419,14 @@ def test_send_odp_event__log_error_when_odp_disabled(self): settings=OptimizelySdkSettings(odp_disabled=True) ) with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): - client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.send_odp_event(type='wow', action='great', 
identifiers={'amazing': 'fantastic'}, data={}) client.close() mock_logger.error.assert_called_with('ODP is not enabled.') def test_send_odp_event__log_debug_if_datafile_not_ready(self): mock_logger = mock.Mock() client = optimizely.Optimizely(sdk_key='test', logger=mock_logger) - client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) mock_logger.debug.assert_called_with('ODP event queue: cannot send before config has been set.') client.close() @@ -5449,7 +5449,7 @@ def test_send_odp_event__log_error_if_odp_not_enabled_with_polling_config_manage client.config_manager.get_config() with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): - client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) client.close() mock_logger.error.assert_called_with('ODP is not enabled.') @@ -5458,15 +5458,33 @@ def test_send_odp_event__log_error_with_invalid_data(self): mock_logger = mock.Mock() client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) - client.send_odp_event(type='wow', action='great', identifiers={}, data={'test': {}}) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={'test': {}}) client.close() mock_logger.error.assert_called_with('ODP data is not valid.') + def test_send_odp_event__log_error_with_empty_identifiers(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.close() + + mock_logger.error.assert_called_with('ODP events must have at least one key-value pair in identifiers.') + + def 
test_send_odp_event__log_error_with_no_identifiers(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action='great', identifiers=None, data={}) + client.close() + + mock_logger.error.assert_called_with('ODP events must have at least one key-value pair in identifiers.') + def test_send_odp_event__log_error_with_missing_integrations_data(self): mock_logger = mock.Mock() client = optimizely.Optimizely(json.dumps(self.config_dict_with_typed_audiences), logger=mock_logger) - client.send_odp_event(type='wow', action='great', identifiers={}, data={}) + client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) mock_logger.error.assert_called_with('ODP is not integrated.') client.close() From f52e50d059395f287c041d557252d4ca41b12c5e Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Fri, 10 Mar 2023 13:53:13 -0500 Subject: [PATCH 38/68] [FSSDK-8954] docs: change full stack to feature experimentation (#420) * change full stack to feature experimentation --- README.md | 84 +++++++++++++++++++++++++++++++++++-------------------- setup.py | 9 +++--- 2 files changed, 59 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 041d87f3..24d4116c 100644 --- a/README.md +++ b/README.md @@ -3,26 +3,17 @@ [![PyPI version](https://badge.fury.io/py/optimizely-sdk.svg)](https://pypi.org/project/optimizely-sdk) [![Build Status](https://github.com/optimizely/python-sdk/actions/workflows/python.yml/badge.svg?branch=master)](https://github.com/optimizely/python-sdk/actions/workflows/python.yml?query=branch%3Amaster) [![Coverage Status](https://coveralls.io/repos/github/optimizely/python-sdk/badge.svg)](https://coveralls.io/github/optimizely/python-sdk) -[![Documentation 
Status](https://readthedocs.org/projects/optimizely-python-sdk/badge/?version=latest)](https://optimizely-python-sdk.readthedocs.io/en/latest/?badge=latest) [![Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](http://www.apache.org/licenses/LICENSE-2.0) -This repository houses the official Python SDK for use with Optimizely -Full Stack and Optimizely Rollouts. +This repository houses the Python SDK for use with Optimizely Feature Experimentation and Optimizely Full Stack (legacy). -Optimizely Full Stack is A/B testing and feature flag management for -product development teams. Experiment in any application. Make every -feature on your roadmap an opportunity to learn. Learn more at -, or see the [Full -Stack -documentation](https://docs.developers.optimizely.com/full-stack/docs). +Optimizely Feature Experimentation is an A/B testing and feature management tool for product development teams that enables you to experiment at every step. Using Optimizely Feature Experimentation allows for every feature on your roadmap to be an opportunity to discover hidden insights. Learn more at [Optimizely.com](https://www.optimizely.com/products/experiment/feature-experimentation/), or see the [developer documentation](https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/welcome). -Optimizely Rollouts is free feature flags for development teams. Easily -roll out and roll back features in any application without code deploys. -Mitigate risk for every feature on your roadmap. Learn more at -, or see the [Rollouts -documentation](https://docs.developers.optimizely.com/rollouts/docs). +Optimizely Rollouts is [free feature flags](https://www.optimizely.com/free-feature-flagging/) for development teams. You can easily roll out and roll back features in any application without code deploys, mitigating risk for every feature on your roadmap. 
-## Getting Started +## Get Started + +Refer to the [Python SDK's developer documentation](https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/python-sdk) for detailed instructions on getting started with using the SDK. ### Requirements @@ -30,7 +21,7 @@ Version `4.0+`: Python 3.7+, PyPy 3.7+ Version `3.0+`: Python 2.7+, PyPy 3.4+ -### Installing the SDK +### Install the SDK The SDK is available through [PyPi](https://pypi.python.org/pypi?name=optimizely-sdk&:action=display). @@ -41,9 +32,11 @@ To install: ### Feature Management Access To access the Feature Management configuration in the Optimizely -dashboard, please contact your Optimizely account executive. +dashboard, please contact your Optimizely customer success manager. + +## Use the Python SDK -### Using the SDK +### Initialization You can initialize the Optimizely instance in three ways: with a datafile, by providing an sdk_key, or by providing an implementation of [BaseConfigManager](https://github.com/optimizely/python-sdk/tree/master/optimizely/config_manager.py#L32). @@ -85,7 +78,7 @@ Each method is described below. config_manager=custom_config_manager ) -#### PollingConfigManager +### PollingConfigManager The [PollingConfigManager](https://github.com/optimizely/python-sdk/blob/master/optimizely/config_manager.py#L150) asynchronously polls for datafiles from a specified URL at regular intervals by making HTTP requests. @@ -126,7 +119,7 @@ used to form the target URL. You may also provide your own logger, error_handler, or notification_center. -#### AuthDatafilePollingConfigManager +### AuthDatafilePollingConfigManager The [AuthDatafilePollingConfigManager](https://github.com/optimizely/python-sdk/blob/master/optimizely/config_manager.py#L375) implements `PollingConfigManager` and asynchronously polls for authenticated datafiles from a specified URL at regular intervals @@ -143,7 +136,7 @@ your project and generate an access token for your datafile. 
**datafile_access_token** The datafile_access_token is attached to the outbound HTTP request header to authorize the request and fetch the datafile. -#### Advanced configuration +### Advanced configuration The following properties can be set to override the default configurations for [PollingConfigManager](#pollingconfigmanager) and [AuthDatafilePollingConfigManager](#authdatafilepollingconfigmanager). @@ -164,10 +157,10 @@ notifications, use: notification_center.add_notification_listener(NotificationTypes.OPTIMIZELY_CONFIG_UPDATE, update_callback) ``` -For Further details see the Optimizely [Full Stack documentation](https://docs.developers.optimizely.com/full-stack/docs) +For Further details see the Optimizely [Feature Experimentation documentation](https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/welcome) to learn how to set up your first Python project and use the SDK. -## Development +## SDK Development ### Building the SDK @@ -175,7 +168,7 @@ Build and install the SDK with pip, using the following command: pip install -e . -### Unit tests +### Unit Tests #### Running all tests @@ -226,9 +219,40 @@ would be: Please see [CONTRIBUTING](https://github.com/optimizely/python-sdk/blob/master/CONTRIBUTING.md). 
-### Additional Code -This software incorporates code from the following open source repos: -requests (Apache-2.0 License: https://github.com/psf/requests/blob/master/LICENSE) -pyOpenSSL (Apache-2.0 License https://github.com/pyca/pyopenssl/blob/main/LICENSE) -cryptography (Apache-2.0 https://github.com/pyca/cryptography/blob/main/LICENSE.APACHE) -idna (BSD 3-Clause License https://github.com/kjd/idna/blob/master/LICENSE.md) +### Credits + +This software incorporates code from the following open source projects: + +requests (Apache-2.0 License: https://github.com/psf/requests/blob/master/LICENSE) + +pyOpenSSL (Apache-2.0 License https://github.com/pyca/pyopenssl/blob/main/LICENSE) + +cryptography (Apache-2.0 https://github.com/pyca/cryptography/blob/main/LICENSE.APACHE) + +idna (BSD 3-Clause License https://github.com/kjd/idna/blob/master/LICENSE.md) + +### Other Optimizely SDKs + +- Agent - https://github.com/optimizely/agent + +- Android - https://github.com/optimizely/android-sdk + +- C# - https://github.com/optimizely/csharp-sdk + +- Flutter - https://github.com/optimizely/optimizely-flutter-sdk + +- Go - https://github.com/optimizely/go-sdk + +- Java - https://github.com/optimizely/java-sdk + +- JavaScript - https://github.com/optimizely/javascript-sdk + +- PHP - https://github.com/optimizely/php-sdk + +- Python - https://github.com/optimizely/python-sdk + +- React - https://github.com/optimizely/react-sdk + +- Ruby - https://github.com/optimizely/ruby-sdk + +- Swift - https://github.com/optimizely/swift-sdk diff --git a/setup.py b/setup.py index d40a23b6..5e2ccc2e 100644 --- a/setup.py +++ b/setup.py @@ -24,16 +24,17 @@ CHANGELOG = _file.read() about_text = ( - 'Optimizely X Full Stack is A/B testing and feature management for product development teams. ' + 'Optimizely Feature Experimentation is A/B testing and feature management for product development teams. ' 'Experiment in any application. Make every feature on your roadmap an opportunity to learn. 
' - 'Learn more at https://www.optimizely.com/products/full-stack/ or see our documentation at ' - 'https://docs.developers.optimizely.com/full-stack/docs. ' + 'Learn more at https://www.optimizely.com/products/experiment/feature-experimentation/ or see our documentation at ' + 'https://docs.developers.optimizely.com/experimentation/v4.0.0-full-stack/docs/welcome. ' ) setup( name='optimizely-sdk', version=__version__, - description='Python SDK for Optimizely X Full Stack.', + description='Python SDK for Optimizely Feature Experimentation, Optimizely Full Stack (legacy), ' + 'and Optimizely Rollouts.', long_description=about_text + README + CHANGELOG, long_description_content_type='text/markdown', author='Optimizely', From 60ab8079cf78a65b6bd18f70f86eb72dca27dc1f Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Fri, 10 Mar 2023 17:06:46 -0500 Subject: [PATCH 39/68] [FSSDK-8954] chore: prep for 4.1.1 release (#421) * prep for 4.1.1 release --- CHANGELOG.md | 5 +++++ optimizely/version.py | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff77ec70..9873cd09 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,11 @@ ### Breaking Changes: * `PollingConfigManager` now requires `sdk_key` even when providing a url. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +## 4.1.1 +March 10th, 2023 + +We updated our README.md and other non-functional code to reflect that this SDK supports both Optimizely Feature Experimentation and Optimizely Full Stack. 
([#420](https://github.com/optimizely/python-sdk/pull/420)) + ## 4.1.0 July 7th, 2022 diff --git a/optimizely/version.py b/optimizely/version.py index f3265ea2..1e0f67fc 100644 --- a/optimizely/version.py +++ b/optimizely/version.py @@ -1,4 +1,4 @@ -# Copyright 2016-2020, 2022, Optimizely +# Copyright 2016-2020, 2022-2023, Optimizely # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -11,5 +11,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -version_info = (4, 1, 0) +version_info = (4, 1, 1) __version__ = '.'.join(str(v) for v in version_info) From 7b1c3f120f984b7c397d78a22a5f1907b6d7815c Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Mon, 20 Mar 2023 13:20:59 -0400 Subject: [PATCH 40/68] fix: block odp methods on datafile (#419) * add blocking call to odp methods * git ignore mypy cache --- .gitignore | 1 + optimizely/optimizely.py | 15 +++++++++++++++ tests/test_optimizely.py | 17 ++++++----------- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/.gitignore b/.gitignore index 961aa6ad..cff402c4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ MANIFEST .idea/* .*virtualenv/* +.mypy_cache # Output of building package *.egg-info diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index e7a594f2..95ce2d07 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1357,6 +1357,11 @@ def _identify_user(self, user_id: str) -> None: self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('identify_user')) return + config = self.config_manager.get_config() + if not config: + self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('identify_user')) + return + self.odp_manager.identify_user(user_id) def _fetch_qualified_segments(self, user_id: str, options: Optional[list[str]] 
= None) -> Optional[list[str]]: @@ -1364,6 +1369,11 @@ def _fetch_qualified_segments(self, user_id: str, options: Optional[list[str]] = self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('fetch_qualified_segments')) return None + config = self.config_manager.get_config() + if not config: + self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('fetch_qualified_segments')) + return None + return self.odp_manager.fetch_qualified_segments(user_id, options or []) def send_odp_event( @@ -1391,6 +1401,11 @@ def send_odp_event( self.logger.error('ODP events must have at least one key-value pair in identifiers.') return + config = self.config_manager.get_config() + if not config: + self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('send_odp_event')) + return + self.odp_manager.send_event(type, action, identifiers, data or {}) def close(self) -> None: diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index 19529b39..e0907c5c 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -5399,12 +5399,8 @@ def test_send_odp_event__send_event_with_polling_config_manager(self): status_code=200, content=json.dumps(self.config_dict_with_audience_segments) ) - ): + ), mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): client = optimizely.Optimizely(sdk_key='test', logger=mock_logger) - # wait for config - client.config_manager.get_config() - - with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) client.close() @@ -5426,9 +5422,12 @@ def test_send_odp_event__log_error_when_odp_disabled(self): def test_send_odp_event__log_debug_if_datafile_not_ready(self): mock_logger = mock.Mock() client = optimizely.Optimizely(sdk_key='test', logger=mock_logger) + client.config_manager.set_blocking_timeout(0) client.send_odp_event(type='wow', action='great', identifiers={'amazing': 
'fantastic'}, data={}) - mock_logger.debug.assert_called_with('ODP event queue: cannot send before config has been set.') + mock_logger.error.assert_called_with( + 'Invalid config. Optimizely instance is not valid. Failing "send_odp_event".' + ) client.close() def test_send_odp_event__log_error_if_odp_not_enabled_with_polling_config_manager(self): @@ -5439,16 +5438,12 @@ def test_send_odp_event__log_error_if_odp_not_enabled_with_polling_config_manage status_code=200, content=json.dumps(self.config_dict_with_audience_segments) ) - ): + ), mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): client = optimizely.Optimizely( sdk_key='test', logger=mock_logger, settings=OptimizelySdkSettings(odp_disabled=True) ) - # wait for config - client.config_manager.get_config() - - with mock.patch('requests.post', return_value=self.fake_server_response(status_code=200)): client.send_odp_event(type='wow', action='great', identifiers={'amazing': 'fantastic'}, data={}) client.close() From a2dba602b4ff31200fca0e48a70daa56fa2a0e2f Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Mon, 10 Apr 2023 14:48:36 -0400 Subject: [PATCH 41/68] [FSSDK-9069] fix: odp event validation (#423) * fix odp send event validation * add unit tests * update action missing error --- optimizely/helpers/enums.py | 1 + optimizely/odp/odp_event.py | 17 +++++++++++++- optimizely/optimizely.py | 9 +++++++- tests/test_odp_event_manager.py | 13 +++++++++++ tests/test_optimizely.py | 40 +++++++++++++++++++++++++++++++++ 5 files changed, 78 insertions(+), 2 deletions(-) diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 56fb4946..2588ac39 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -126,6 +126,7 @@ class Errors: ODP_NOT_INTEGRATED: Final = 'ODP is not integrated.' ODP_NOT_ENABLED: Final = 'ODP is not enabled.' ODP_INVALID_DATA: Final = 'ODP data is not valid.' 
+ ODP_INVALID_ACTION: Final = 'ODP action is not valid (cannot be empty).' MISSING_SDK_KEY: Final = 'SDK key not provided/cannot be found in the datafile.' diff --git a/optimizely/odp/odp_event.py b/optimizely/odp/odp_event.py index fafaa94f..640b0dc3 100644 --- a/optimizely/odp/odp_event.py +++ b/optimizely/odp/odp_event.py @@ -17,6 +17,7 @@ import uuid import json from optimizely import version +from optimizely.helpers.enums import OdpManagerConfig OdpDataDict = Dict[str, Union[str, int, float, bool, None]] @@ -27,7 +28,7 @@ class OdpEvent: def __init__(self, type: str, action: str, identifiers: dict[str, str], data: OdpDataDict) -> None: self.type = type self.action = action - self.identifiers = identifiers + self.identifiers = self._convert_identifers(identifiers) self.data = self._add_common_event_data(data) def __repr__(self) -> str: @@ -51,6 +52,20 @@ def _add_common_event_data(self, custom_data: OdpDataDict) -> OdpDataDict: data.update(custom_data) return data + def _convert_identifers(self, identifiers: dict[str, str]) -> dict[str, str]: + """ + Convert incorrect case/separator of identifier key `fs_user_id` + (ie. `fs-user-id`, `FS_USER_ID`). + """ + for key in list(identifiers): + if key == OdpManagerConfig.KEY_FOR_USER_ID: + break + elif key.lower() in ("fs-user-id", OdpManagerConfig.KEY_FOR_USER_ID): + identifiers[OdpManagerConfig.KEY_FOR_USER_ID] = identifiers.pop(key) + break + + return identifiers + class OdpEventEncoder(json.JSONEncoder): def default(self, obj: object) -> Any: diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 95ce2d07..7904f551 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1387,7 +1387,7 @@ def send_odp_event( Send an event to the ODP server. Args: - action: The event action name. + action: The event action name. Cannot be None or empty string. identifiers: A dictionary for identifiers. The caller must provide at least one key-value pair. type: The event type. Default 'fullstack'. 
data: An optional dictionary for associated data. The default event data will be added to this data @@ -1397,10 +1397,17 @@ def send_odp_event( self.logger.error(enums.Errors.INVALID_OPTIMIZELY.format('send_odp_event')) return + if action is None or action == "": + self.logger.error(enums.Errors.ODP_INVALID_ACTION) + return + if not identifiers or not isinstance(identifiers, dict): self.logger.error('ODP events must have at least one key-value pair in identifiers.') return + if type is None or type == "": + type = enums.OdpManagerConfig.EVENT_TYPE + config = self.config_manager.get_config() if not config: self.logger.error(enums.Errors.INVALID_PROJECT_CONFIG.format('send_odp_event')) diff --git a/tests/test_odp_event_manager.py b/tests/test_odp_event_manager.py index 0642f393..d9d29eab 100644 --- a/tests/test_odp_event_manager.py +++ b/tests/test_odp_event_manager.py @@ -98,6 +98,19 @@ def test_invalid_odp_event(self, *args): event['data']['invalid-item'] = {} self.assertStrictFalse(validator.are_odp_data_types_valid(event['data'])) + def test_odp_event_identifier_conversion(self, *args): + event = OdpEvent('type', 'action', {'fs-user-id': 'great'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great'}) + + event = OdpEvent('type', 'action', {'FS-user-ID': 'great'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great'}) + + event = OdpEvent('type', 'action', {'FS_USER_ID': 'great', 'fs.user.id': 'wow'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great', 'fs.user.id': 'wow'}) + + event = OdpEvent('type', 'action', {'fs_user_id': 'great', 'fsuserid': 'wow'}, {}) + self.assertDictEqual(event.identifiers, {'fs_user_id': 'great', 'fsuserid': 'wow'}) + def test_odp_event_manager_success(self, *args): mock_logger = mock.Mock() event_manager = OdpEventManager(mock_logger) diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index e0907c5c..f1d1db89 100644 --- a/tests/test_optimizely.py +++ 
b/tests/test_optimizely.py @@ -5483,3 +5483,43 @@ def test_send_odp_event__log_error_with_missing_integrations_data(self): mock_logger.error.assert_called_with('ODP is not integrated.') client.close() + + def test_send_odp_event__log_error_with_action_none(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action=None, identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_logger.error.assert_called_once_with('ODP action is not valid (cannot be empty).') + + def test_send_odp_event__log_error_with_action_empty_string(self): + mock_logger = mock.Mock() + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + + client.send_odp_event(type='wow', action="", identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_logger.error.assert_called_once_with('ODP action is not valid (cannot be empty).') + + def test_send_odp_event__default_type_when_none(self): + mock_logger = mock.Mock() + + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client.odp_manager, 'send_event') as mock_send_event: + client.send_odp_event(type=None, action="great", identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_send_event.assert_called_with('fullstack', 'great', {'amazing': 'fantastic'}, {}) + mock_logger.error.assert_not_called() + + def test_send_odp_event__default_type_when_empty_string(self): + mock_logger = mock.Mock() + + client = optimizely.Optimizely(json.dumps(self.config_dict_with_audience_segments), logger=mock_logger) + with mock.patch.object(client.odp_manager, 'send_event') as mock_send_event: + client.send_odp_event(type="", action="great", identifiers={'amazing': 'fantastic'}, data={}) + client.close() + + mock_send_event.assert_called_with('fullstack', 'great', 
{'amazing': 'fantastic'}, {}) + mock_logger.error.assert_not_called() From 48347c541e7e28f88f358661302871cce71f0351 Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Tue, 25 Apr 2023 13:13:10 -0400 Subject: [PATCH 42/68] fix invalid identifiers error code (#424) --- optimizely/helpers/enums.py | 1 - optimizely/odp/odp_segment_api_manager.py | 12 +++++++----- tests/test_odp_segment_api_manager.py | 3 ++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 2588ac39..1c7a8e1c 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -120,7 +120,6 @@ class Errors: NONE_VARIABLE_KEY_PARAMETER: Final = '"None" is an invalid value for variable key.' UNSUPPORTED_DATAFILE_VERSION: Final = ( 'This version of the Python SDK does not support the given datafile version: "{}".') - INVALID_SEGMENT_IDENTIFIER: Final = 'Audience segments fetch failed (invalid identifier).' FETCH_SEGMENTS_FAILED: Final = 'Audience segments fetch failed ({}).' ODP_EVENT_FAILED: Final = 'ODP event send failed ({}).' ODP_NOT_INTEGRATED: Final = 'ODP is not integrated.' 
diff --git a/optimizely/odp/odp_segment_api_manager.py b/optimizely/odp/odp_segment_api_manager.py index d422bfad..8e5d8bc5 100644 --- a/optimizely/odp/odp_segment_api_manager.py +++ b/optimizely/odp/odp_segment_api_manager.py @@ -172,13 +172,15 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, if response_dict and 'errors' in response_dict: try: - error_class = response_dict['errors'][0]['extensions']['classification'] - except (KeyError, IndexError): + extensions = response_dict['errors'][0]['extensions'] + error_class = extensions['classification'] + error_code = extensions.get('code') + except (KeyError, IndexError, TypeError): self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('decode error')) return None - if error_class == 'InvalidIdentifierException': - self.logger.warning(Errors.INVALID_SEGMENT_IDENTIFIER) + if error_code == 'INVALID_IDENTIFIER_EXCEPTION': + self.logger.warning(Errors.FETCH_SEGMENTS_FAILED.format('invalid identifier')) return None else: self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format(error_class)) @@ -188,6 +190,6 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, audiences = response_dict['data']['customer']['audiences']['edges'] segments = [edge['node']['name'] for edge in audiences if edge['node']['state'] == 'qualified'] return segments - except KeyError: + except (KeyError, TypeError): self.logger.error(Errors.FETCH_SEGMENTS_FAILED.format('decode error')) return None diff --git a/tests/test_odp_segment_api_manager.py b/tests/test_odp_segment_api_manager.py index 47913973..f45af4d2 100644 --- a/tests/test_odp_segment_api_manager.py +++ b/tests/test_odp_segment_api_manager.py @@ -344,7 +344,8 @@ def test_fetch_qualified_segments__500(self): "customer" ], "extensions": { - "classification": "InvalidIdentifierException" + "classification": "DataFetchingException", + "code": "INVALID_IDENTIFIER_EXCEPTION" } } ], From 6bc3454bcf4713d4fc4607ac79746537567c1d1a Mon Sep 17 00:00:00 2001 From: 
Matjaz Pirnovar Date: Thu, 27 Apr 2023 15:12:27 -0700 Subject: [PATCH 43/68] [FSSDK-9107] Update changelog and version for advanced audience targeting (#425) * Update changelog and version for advanced audience targeting * PR fixes * PR fixe for version * update date --- CHANGELOG.md | 39 ++++++++++++++++++++++++++++++++++++--- optimizely/version.py | 2 +- 2 files changed, 37 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9873cd09..6ed00ab5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,42 @@ # Optimizely Python SDK Changelog -## Unreleased +## 5.0.0-beta +Apr 28th, 2023 -### Breaking Changes: -* `PollingConfigManager` now requires `sdk_key` even when providing a url. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +### New Features + +The 5.0.0-beta release introduces a new primary feature, [Advanced Audience Targeting]( https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) enabled through integration with [Optimizely Data Platform (ODP)](https://docs.developers.optimizely.com/optimizely-data-platform/docs) ([#395](https://github.com/optimizely/python-sdk/pull/395), [#398](https://github.com/optimizely/python-sdk/pull/398), [#402](https://github.com/optimizely/python-sdk/pull/402), [#403](https://github.com/optimizely/python-sdk/pull/403), [#405](https://github.com/optimizely/python-sdk/pull/405)). + +You can use ODP, a high-performance [Customer Data Platform (CDP)]( https://www.optimizely.com/optimization-glossary/customer-data-platform/), to easily create complex real-time segments (RTS) using first-party and 50+ third-party data sources out of the box. You can create custom schemas that support the user attributes important for your business, and stitch together user behavior done on different devices to better understand and target your customers for personalized user experiences. 
ODP can be used as a single source of truth for these segments in any Optimizely or 3rd party tool. + +With ODP accounts integrated into Optimizely projects, you can build audiences using segments pre-defined in ODP. The SDK will fetch the segments for given users and make decisions using the segments. For access to ODP audience targeting in your Feature Experimentation account, please contact your Optimizely Customer Success Manager. + +This version includes the following changes: + +* New API added to `OptimizelyUserContext`: + + * `fetchQualifiedSegments()`: this API will retrieve user segments from the ODP server. The fetched segments will be used for audience evaluation. The fetched data will be stored in the local cache to avoid repeated network delays. + * When an `OptimizelyUserContext` is created, the SDK will automatically send an identify request to the ODP server to facilitate observing user activities. + +* New APIs added to `OptimizelyClient`: + + * `sendOdpEvent()`: customers can build/send arbitrary ODP events that will bind user identifiers and data to user profiles in ODP. 
+ +For details, refer to our documentation pages: + +* [Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) +* [Server SDK Support](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-for-server-side-sdks) +* [Initialize Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/initialize-sdk-python) +* [OptimizelyUserContext Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/wip-fsodp-optimizelyusercontext-python) +* [Advanced Audience Targeting segment qualification methods](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-segment-qualification-methods-python) +* [Send Optimizely Data Platform data using Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/send-odp-data-using-advanced-audience-targeting-python) + +### Breaking Changes + +* `ODPManager` in the SDK is enabled by default. Unless an ODP account is integrated into the Optimizely projects, most `ODPManager` functions will be ignored. If needed, `ODPManager` can be disabled when `OptimizelyClient` is instantiated. +* `BaseConfigManager` abstract class now requires a get_sdk_key method. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* `PollingConfigManager` requires either the sdk_key parameter or datafile containing an sdkKey. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* Asynchronous `BatchEventProcessor` is now the default event processor. 
([#378](https://github.com/optimizely/python-sdk/pull/378)) ## 4.1.1 March 10th, 2023 diff --git a/optimizely/version.py b/optimizely/version.py index 1e0f67fc..44b3134d 100644 --- a/optimizely/version.py +++ b/optimizely/version.py @@ -11,5 +11,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -version_info = (4, 1, 1) +version_info = (5, 0, '0-beta') __version__ = '.'.join(str(v) for v in version_info) From 7e158748706ebf1299f90de3526b5de7dcdfa61c Mon Sep 17 00:00:00 2001 From: Yasir Folio3 <39988750+yasirfolio3@users.noreply.github.com> Date: Mon, 26 Jun 2023 19:11:59 -0400 Subject: [PATCH 44/68] [FSSDK-9098]: Updates minimum python version for CI Tests to 3.8 (#426) * updating minimum python version for CI * Adding pypy3.10 --- .github/workflows/python.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 7cf83362..cadcc77c 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -29,10 +29,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Set up Python 3.10 - uses: actions/setup-python@v3 + - name: Set up Python 3.11 + uses: actions/setup-python@v4 with: - python-version: '3.10' + python-version: '3.11' # flake8 version should be same as the version in requirements/test.txt # to avoid lint errors on CI - name: pip install flak8 @@ -64,11 +64,11 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["pypy-3.7-v7.3.5", "3.7", "3.8", "3.9", "3.10"] + python-version: ["pypy-3.10-v7.3.12", "3.8", "3.9", "3.10", "3.11"] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies @@ -84,11 +84,11 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.7", "3.8", "3.9", "3.10"] + 
python-version: ["3.8", "3.9", "3.10", "3.11"] steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies From c44f7121343fb427c8fe8d7f7353d813d4313627 Mon Sep 17 00:00:00 2001 From: Yasir Folio3 <39988750+yasirfolio3@users.noreply.github.com> Date: Wed, 28 Jun 2023 17:15:15 -0400 Subject: [PATCH 45/68] [FSSDK-9100]: Updating old dependencies. (#427) * Updating dependencies. --- .github/workflows/python.yml | 2 +- requirements/docs.txt | 6 +++--- requirements/typing.txt | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index cadcc77c..27f15835 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -97,5 +97,5 @@ jobs: pip install -r requirements/typing.txt - name: Type check with mypy run: | - mypy . + mypy . --exclude "tests/testapp" mypy . --exclude "tests/" --strict diff --git a/requirements/docs.txt b/requirements/docs.txt index 51d4bf0e..91542e7a 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,3 +1,3 @@ -sphinx==2.4.4 -sphinx-rtd-theme==0.4.3 -m2r==0.2.1 +sphinx==4.4.0 +sphinx-rtd-theme==1.2.2 +m2r==0.3.1 diff --git a/requirements/typing.txt b/requirements/typing.txt index 67aac34a..ba65f536 100644 --- a/requirements/typing.txt +++ b/requirements/typing.txt @@ -1,4 +1,4 @@ -mypy==0.982 +mypy types-jsonschema types-requests types-Flask \ No newline at end of file From bf000e737f391270f9adec97606646ce4761ecd8 Mon Sep 17 00:00:00 2001 From: Yasir Folio3 <39988750+yasirfolio3@users.noreply.github.com> Date: Tue, 18 Jul 2023 13:24:17 -0400 Subject: [PATCH 46/68] [FSSDK-9510]: Implements a warning log for polling interval below 30s (#428) * Implements a warning log for polling interval below 30s * cleanup. 
--- optimizely/config_manager.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index 247f5ce5..0e4008b7 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -323,6 +323,11 @@ def set_update_interval(self, update_interval: Optional[int | float]) -> None: ) update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL + if update_interval < 30: + self.logger.warning( + 'Polling intervals below 30 seconds are not recommended.' + ) + self.update_interval = update_interval def set_blocking_timeout(self, blocking_timeout: Optional[int | float]) -> None: From d2ed4be3469da41f92a0213deaf899e5db0d06ed Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Wed, 6 Dec 2023 14:05:25 -0500 Subject: [PATCH 47/68] [FSSDK-8320] fix type hints (#429) * Create py.typed --- optimizely/event/event_processor.py | 2 +- optimizely/optimizely_config.py | 2 +- optimizely/py.typed | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 optimizely/py.typed diff --git a/optimizely/event/event_processor.py b/optimizely/event/event_processor.py index 0341c1e4..9445ffc6 100644 --- a/optimizely/event/event_processor.py +++ b/optimizely/event/event_processor.py @@ -351,7 +351,7 @@ class ForwardingEventProcessor(BaseEventProcessor): def __init__( self, - event_dispatcher: type[EventDispatcher] | CustomEventDispatcher, + event_dispatcher: Optional[type[EventDispatcher] | CustomEventDispatcher], logger: Optional[_logging.Logger] = None, notification_center: Optional[_notification_center.NotificationCenter] = None ): diff --git a/optimizely/optimizely_config.py b/optimizely/optimizely_config.py index c4f55d86..37969fb4 100644 --- a/optimizely/optimizely_config.py +++ b/optimizely/optimizely_config.py @@ -243,7 +243,7 @@ def stringify_conditions(self, conditions: str | list[Any], audiences_map: dict[ operand = conditions[i].upper() else: # 
Check if element is a list or not - if type(conditions[i]) == list: + if isinstance(conditions[i], list): # Check if at the end or not to determine where to add the operand # Recursive call to call stringify on embedded list if i + 1 < length: diff --git a/optimizely/py.typed b/optimizely/py.typed new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/optimizely/py.typed @@ -0,0 +1 @@ + From f77898993e1c31a8d4bab9b0a49ecd5214a91202 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Wed, 6 Dec 2023 14:36:32 -0800 Subject: [PATCH 48/68] [FSSDK-9780] Return Latest Experiment When Duplicate Keys in Config (#430) * firt run to add guard againsts duplicate key * cleanup * fix logger * cleanup comments * linting * fix logger --- optimizely/config_manager.py | 2 +- optimizely/optimizely.py | 2 +- optimizely/optimizely_config.py | 10 +++- tests/test_optimizely_config.py | 91 ++++++++++++++++++++++++++++++--- 4 files changed, 95 insertions(+), 10 deletions(-) diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index 0e4008b7..032189e9 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -159,7 +159,7 @@ def _set_config(self, datafile: Optional[str | bytes]) -> None: self._config = config self._sdk_key = self._sdk_key or config.sdk_key - self.optimizely_config = OptimizelyConfigService(config).get_config() + self.optimizely_config = OptimizelyConfigService(config, self.logger).get_config() self.notification_center.send_notifications(enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE) internal_notification_center = _NotificationCenterRegistry.get_notification_center( diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 7904f551..c50bfcb3 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1039,7 +1039,7 @@ def get_optimizely_config(self) -> Optional[OptimizelyConfig]: if hasattr(self.config_manager, 'optimizely_config'): return self.config_manager.optimizely_config - return 
OptimizelyConfigService(project_config).get_config() + return OptimizelyConfigService(project_config, self.logger).get_config() def create_user_context( self, user_id: str, attributes: Optional[UserAttributes] = None diff --git a/optimizely/optimizely_config.py b/optimizely/optimizely_config.py index 37969fb4..cf443896 100644 --- a/optimizely/optimizely_config.py +++ b/optimizely/optimizely_config.py @@ -19,6 +19,8 @@ from .helpers.types import VariationDict, ExperimentDict, RolloutDict, AttributeDict, EventDict from .project_config import ProjectConfig +from .logger import Logger + class OptimizelyConfig: def __init__( @@ -126,11 +128,12 @@ def __init__(self, id: Optional[str], name: Optional[str], conditions: Optional[ class OptimizelyConfigService: """ Class encapsulating methods to be used in creating instance of OptimizelyConfig. """ - def __init__(self, project_config: ProjectConfig): + def __init__(self, project_config: ProjectConfig, logger: Logger): """ Args: project_config ProjectConfig """ + self.logger = logger self.is_valid = True if not isinstance(project_config, ProjectConfig): @@ -411,7 +414,12 @@ def _get_experiments_maps(self) -> tuple[dict[str, OptimizelyExperiment], dict[s audiences_map[audience_id] = audience_name if audience_name is not None else '' all_experiments = self._get_all_experiments() + for exp in all_experiments: + # check if experiment key already exists + if exp["key"] in experiments_key_map: + self.logger.warning(f"Duplicate experiment keys found in datafile: {exp['key']}") + optly_exp = OptimizelyExperiment( exp['id'], exp['key'], self._get_variations_map(exp) ) diff --git a/tests/test_optimizely_config.py b/tests/test_optimizely_config.py index e33c1272..b6b60adf 100644 --- a/tests/test_optimizely_config.py +++ b/tests/test_optimizely_config.py @@ -4,7 +4,6 @@ # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 - # Unless required by applicable law or agreed to in writing, software # 
distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -12,9 +11,11 @@ # limitations under the License. import json +from unittest.mock import patch from optimizely import optimizely, project_config from optimizely import optimizely_config +from optimizely import logger from . import base @@ -23,7 +24,8 @@ def setUp(self): base.BaseTest.setUp(self) opt_instance = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) self.project_config = opt_instance.config_manager.get_config() - self.opt_config_service = optimizely_config.OptimizelyConfigService(self.project_config) + self.opt_config_service = optimizely_config.OptimizelyConfigService(self.project_config, + logger=logger.SimpleLogger()) self.expected_config = { 'sdk_key': 'features-test', @@ -1452,7 +1454,7 @@ def test__get_config(self): def test__get_config__invalid_project_config(self): """ Test that get_config returns None when invalid project config supplied. """ - opt_service = optimizely_config.OptimizelyConfigService({"key": "invalid"}) + opt_service = optimizely_config.OptimizelyConfigService({"key": "invalid"}, None) self.assertIsNone(opt_service.get_config()) def test__get_experiments_maps(self): @@ -1473,6 +1475,81 @@ def test__get_experiments_maps(self): self.assertEqual(expected_id_map, self.to_dict(actual_id_map)) + def test__duplicate_experiment_keys(self): + """ Test that multiple features don't have the same experiment key. 
""" + + # update the test datafile with an additional feature flag with the same experiment rule key + new_experiment = { + 'key': 'test_experiment', # added duplicate "test_experiment" + 'status': 'Running', + 'layerId': '8', + "audienceConditions": [ + "or", + "11160" + ], + 'audienceIds': ['11160'], + 'id': '111137', + 'forcedVariations': {}, + 'trafficAllocation': [ + {'entityId': '222242', 'endOfRange': 8000}, + {'entityId': '', 'endOfRange': 10000} + ], + 'variations': [ + { + 'id': '222242', + 'key': 'control', + 'variables': [], + } + ], + } + + new_feature = { + 'id': '91117', + 'key': 'new_feature', + 'experimentIds': ['111137'], + 'rolloutId': '', + 'variables': [ + {'id': '127', 'key': 'is_working', 'defaultValue': 'true', 'type': 'boolean'}, + {'id': '128', 'key': 'environment', 'defaultValue': 'devel', 'type': 'string'}, + {'id': '129', 'key': 'cost', 'defaultValue': '10.99', 'type': 'double'}, + {'id': '130', 'key': 'count', 'defaultValue': '999', 'type': 'integer'}, + {'id': '131', 'key': 'variable_without_usage', 'defaultValue': '45', 'type': 'integer'}, + {'id': '132', 'key': 'object', 'defaultValue': '{"test": 12}', 'type': 'string', + 'subType': 'json'}, + {'id': '133', 'key': 'true_object', 'defaultValue': '{"true_test": 23.54}', 'type': 'json'}, + ], + } + + # add new experiment rule with the same key and a new feature with the same rule key + self.config_dict_with_features['experiments'].append(new_experiment) + self.config_dict_with_features['featureFlags'].append(new_feature) + + config_with_duplicate_key = self.config_dict_with_features + opt_instance = optimizely.Optimizely(json.dumps(config_with_duplicate_key)) + self.project_config = opt_instance.config_manager.get_config() + + with patch('optimizely.logger.SimpleLogger.warning') as mock_logger: + self.opt_config_service = optimizely_config.OptimizelyConfigService(self.project_config, + logger=logger.SimpleLogger()) + + actual_key_map, actual_id_map = 
self.opt_config_service._get_experiments_maps() + + self.assertIsInstance(actual_key_map, dict) + for exp in actual_key_map.values(): + self.assertIsInstance(exp, optimizely_config.OptimizelyExperiment) + + # Assert that the warning method of the mock logger was called with the expected message + expected_warning_message = f"Duplicate experiment keys found in datafile: {new_experiment['key']}" + mock_logger.assert_called_with(expected_warning_message) + + # assert we get ID of the duplicated experiment + assert actual_key_map.get('test_experiment').id == "111137" + + # assert we get one duplicated experiment + keys_list = list(actual_key_map.keys()) + assert "test_experiment" in keys_list, "Key 'test_experiment' not found in actual key map" + assert keys_list.count("test_experiment") == 1, "Key 'test_experiment' found more than once in actual key map" + def test__get_features_map(self): """ Test that get_features_map returns expected features map. """ @@ -1674,7 +1751,7 @@ def test_get_audiences(self): error_handler=None ) - config_service = optimizely_config.OptimizelyConfigService(proj_conf) + config_service = optimizely_config.OptimizelyConfigService(proj_conf, logger=logger.SimpleLogger()) for audience in config_service.audiences: self.assertIsInstance(audience, optimizely_config.OptimizelyAudience) @@ -1742,7 +1819,7 @@ def test_stringify_audience_conditions_all_cases(self): '("us" OR ("female" AND "adult")) AND ("fr" AND ("male" OR "adult"))' ] - config_service = optimizely_config.OptimizelyConfigService(config) + config_service = optimizely_config.OptimizelyConfigService(config, None) for i in range(len(audiences_input)): result = config_service.stringify_conditions(audiences_input[i], audiences_map) @@ -1760,7 +1837,7 @@ def test_optimizely_audience_conversion(self): error_handler=None ) - config_service = optimizely_config.OptimizelyConfigService(proj_conf) + config_service = optimizely_config.OptimizelyConfigService(proj_conf, None) for audience in 
config_service.audiences: self.assertIsInstance(audience, optimizely_config.OptimizelyAudience) @@ -1776,7 +1853,7 @@ def test_get_variations_from_experiments_map(self): error_handler=None ) - config_service = optimizely_config.OptimizelyConfigService(proj_conf) + config_service = optimizely_config.OptimizelyConfigService(proj_conf, None) experiments_key_map, experiments_id_map = config_service._get_experiments_maps() From 3bad4a605dda13c0f172d19e1292c9cd942e25bd Mon Sep 17 00:00:00 2001 From: Andy Leap <104936100+andrewleap-optimizely@users.noreply.github.com> Date: Thu, 18 Jan 2024 14:37:00 -0500 Subject: [PATCH 49/68] [FSSDK-8583] chore: prepare for 5.0 (#431) * bump version * update supported version * run unit tests on 3.12, pypy 3.8 and 3.9 * fix license --- .github/workflows/python.yml | 24 +++++-- CHANGELOG.md | 80 ++++++++++++++++++----- LICENSE | 2 +- README.md | 2 + optimizely/odp/odp_segment_api_manager.py | 3 +- optimizely/version.py | 2 +- setup.py | 3 +- 7 files changed, 90 insertions(+), 26 deletions(-) diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 27f15835..0699f84c 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -29,10 +29,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - name: Set up Python 3.11 + - name: Set up Python 3.12 uses: actions/setup-python@v4 with: - python-version: '3.11' + python-version: '3.12' # flake8 version should be same as the version in requirements/test.txt # to avoid lint errors on CI - name: pip install flak8 @@ -64,7 +64,15 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["pypy-3.10-v7.3.12", "3.8", "3.9", "3.10", "3.11"] + python-version: + - "pypy-3.8" + - "pypy-3.9" + - "pypy-3.10" + - "3.8" + - "3.9" + - "3.10" + - "3.11" + - "3.12" steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} @@ -84,7 +92,15 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.8", "3.9", "3.10", 
"3.11"] + python-version: + - "pypy-3.8" + - "pypy-3.9" + - "pypy-3.10" + - "3.8" + - "3.9" + - "3.10" + - "3.11" + - "3.12" steps: - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ed00ab5..94e3bbd3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,39 +1,85 @@ # Optimizely Python SDK Changelog +## 5.0.0 +January 18th, 2024 + +### New Features + +The 5.0.0 release introduces a new primary feature, [Advanced Audience Targeting]( https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) enabled through integration with [Optimizely Data Platform (ODP)](https://docs.developers.optimizely.com/optimizely-data-platform/docs) ([#395](https://github.com/optimizely/python-sdk/pull/395), [#398](https://github.com/optimizely/python-sdk/pull/398), [#402](https://github.com/optimizely/python-sdk/pull/402), [#403](https://github.com/optimizely/python-sdk/pull/403), [#405](https://github.com/optimizely/python-sdk/pull/405)). + +You can use ODP, a high-performance [Customer Data Platform (CDP)]( https://www.optimizely.com/optimization-glossary/customer-data-platform/), to easily create complex real-time segments (RTS) using first-party and 50+ third-party data sources out of the box. You can create custom schemas that support the user attributes important for your business, and stitch together user behavior done on different devices to better understand and target your customers for personalized user experiences. ODP can be used as a single source of truth for these segments in any Optimizely or 3rd party tool. + +With ODP accounts integrated into Optimizely projects, you can build audiences using segments pre-defined in ODP. The SDK will fetch the segments for given users and make decisions using the segments. 
For access to ODP audience targeting in your Feature Experimentation account, please contact your Optimizely Customer Success Manager. + +This version includes the following changes: + +* New API added to `OptimizelyUserContext`: + + * `fetchQualifiedSegments()`: this API will retrieve user segments from the ODP server. The fetched segments will be used for audience evaluation. The fetched data will be stored in the local cache to avoid repeated network delays. + * When an `OptimizelyUserContext` is created, the SDK will automatically send an identify request to the ODP server to facilitate observing user activities. + +* New APIs added to `OptimizelyClient`: + + * `sendOdpEvent()`: customers can build/send arbitrary ODP events that will bind user identifiers and data to user profiles in ODP. + +For details, refer to our documentation pages: + +* [Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) +* [Server SDK Support](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-for-server-side-sdks) +* [Initialize Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/initialize-sdk-python) +* [OptimizelyUserContext Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/wip-fsodp-optimizelyusercontext-python) +* [Advanced Audience Targeting segment qualification methods](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-segment-qualification-methods-python) +* [Send Optimizely Data Platform data using Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/send-odp-data-using-advanced-audience-targeting-python) + +### Logging + +* Add warning to polling intervals below 30 seconds ([#428](https://github.com/optimizely/python-sdk/pull/428)) +* Add warning to duplicate experiment 
keys ([#430](https://github.com/optimizely/python-sdk/pull/430)) + +### Enhancements +* Added `py.typed` to enable external usage of mypy type annotations. + +### Breaking Changes +* Updated minimum supported Python version from 3.7 -> 3.8 +* `ODPManager` in the SDK is enabled by default. Unless an ODP account is integrated into the Optimizely projects, most `ODPManager` functions will be ignored. If needed, `ODPManager` can be disabled when `OptimizelyClient` is instantiated. +* `BaseConfigManager` abstract class now requires a get_sdk_key method. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* `PollingConfigManager` requires either the sdk_key parameter or datafile containing an sdkKey. ([#413](https://github.com/optimizely/python-sdk/pull/413)) +* Asynchronous `BatchEventProcessor` is now the default event processor. ([#378](https://github.com/optimizely/python-sdk/pull/378)) + ## 5.0.0-beta Apr 28th, 2023 -### New Features +### New Features -The 5.0.0-beta release introduces a new primary feature, [Advanced Audience Targeting]( https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) enabled through integration with [Optimizely Data Platform (ODP)](https://docs.developers.optimizely.com/optimizely-data-platform/docs) ([#395](https://github.com/optimizely/python-sdk/pull/395), [#398](https://github.com/optimizely/python-sdk/pull/398), [#402](https://github.com/optimizely/python-sdk/pull/402), [#403](https://github.com/optimizely/python-sdk/pull/403), [#405](https://github.com/optimizely/python-sdk/pull/405)). 
+The 5.0.0-beta release introduces a new primary feature, [Advanced Audience Targeting]( https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) enabled through integration with [Optimizely Data Platform (ODP)](https://docs.developers.optimizely.com/optimizely-data-platform/docs) ([#395](https://github.com/optimizely/python-sdk/pull/395), [#398](https://github.com/optimizely/python-sdk/pull/398), [#402](https://github.com/optimizely/python-sdk/pull/402), [#403](https://github.com/optimizely/python-sdk/pull/403), [#405](https://github.com/optimizely/python-sdk/pull/405)). -You can use ODP, a high-performance [Customer Data Platform (CDP)]( https://www.optimizely.com/optimization-glossary/customer-data-platform/), to easily create complex real-time segments (RTS) using first-party and 50+ third-party data sources out of the box. You can create custom schemas that support the user attributes important for your business, and stitch together user behavior done on different devices to better understand and target your customers for personalized user experiences. ODP can be used as a single source of truth for these segments in any Optimizely or 3rd party tool. +You can use ODP, a high-performance [Customer Data Platform (CDP)]( https://www.optimizely.com/optimization-glossary/customer-data-platform/), to easily create complex real-time segments (RTS) using first-party and 50+ third-party data sources out of the box. You can create custom schemas that support the user attributes important for your business, and stitch together user behavior done on different devices to better understand and target your customers for personalized user experiences. ODP can be used as a single source of truth for these segments in any Optimizely or 3rd party tool. -With ODP accounts integrated into Optimizely projects, you can build audiences using segments pre-defined in ODP. 
The SDK will fetch the segments for given users and make decisions using the segments. For access to ODP audience targeting in your Feature Experimentation account, please contact your Optimizely Customer Success Manager. +With ODP accounts integrated into Optimizely projects, you can build audiences using segments pre-defined in ODP. The SDK will fetch the segments for given users and make decisions using the segments. For access to ODP audience targeting in your Feature Experimentation account, please contact your Optimizely Customer Success Manager. -This version includes the following changes: +This version includes the following changes: -* New API added to `OptimizelyUserContext`: +* New API added to `OptimizelyUserContext`: * `fetchQualifiedSegments()`: this API will retrieve user segments from the ODP server. The fetched segments will be used for audience evaluation. The fetched data will be stored in the local cache to avoid repeated network delays. - * When an `OptimizelyUserContext` is created, the SDK will automatically send an identify request to the ODP server to facilitate observing user activities. + * When an `OptimizelyUserContext` is created, the SDK will automatically send an identify request to the ODP server to facilitate observing user activities. -* New APIs added to `OptimizelyClient`: +* New APIs added to `OptimizelyClient`: * `sendOdpEvent()`: customers can build/send arbitrary ODP events that will bind user identifiers and data to user profiles in ODP. 
-For details, refer to our documentation pages: +For details, refer to our documentation pages: -* [Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) -* [Server SDK Support](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-for-server-side-sdks) -* [Initialize Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/initialize-sdk-python) -* [OptimizelyUserContext Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/wip-fsodp-optimizelyusercontext-python) -* [Advanced Audience Targeting segment qualification methods](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-segment-qualification-methods-python) -* [Send Optimizely Data Platform data using Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/send-odp-data-using-advanced-audience-targeting-python) +* [Advanced Audience Targeting](https://docs.developers.optimizely.com/feature-experimentation/docs/optimizely-data-platform-advanced-audience-targeting) +* [Server SDK Support](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-for-server-side-sdks) +* [Initialize Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/initialize-sdk-python) +* [OptimizelyUserContext Python SDK](https://docs.developers.optimizely.com/feature-experimentation/docs/wip-fsodp-optimizelyusercontext-python) +* [Advanced Audience Targeting segment qualification methods](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/advanced-audience-targeting-segment-qualification-methods-python) +* [Send Optimizely Data Platform data using Advanced Audience 
Targeting](https://docs.developers.optimizely.com/feature-experimentation/v1.0/docs/send-odp-data-using-advanced-audience-targeting-python) -### Breaking Changes +### Breaking Changes -* `ODPManager` in the SDK is enabled by default. Unless an ODP account is integrated into the Optimizely projects, most `ODPManager` functions will be ignored. If needed, `ODPManager` can be disabled when `OptimizelyClient` is instantiated. +* `ODPManager` in the SDK is enabled by default. Unless an ODP account is integrated into the Optimizely projects, most `ODPManager` functions will be ignored. If needed, `ODPManager` can be disabled when `OptimizelyClient` is instantiated. * `BaseConfigManager` abstract class now requires a get_sdk_key method. ([#413](https://github.com/optimizely/python-sdk/pull/413)) * `PollingConfigManager` requires either the sdk_key parameter or datafile containing an sdkKey. ([#413](https://github.com/optimizely/python-sdk/pull/413)) * Asynchronous `BatchEventProcessor` is now the default event processor. ([#378](https://github.com/optimizely/python-sdk/pull/378)) diff --git a/LICENSE b/LICENSE index 532cbad9..1b91d409 100644 --- a/LICENSE +++ b/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016 Optimizely + © Optimizely 2016 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/README.md b/README.md index 24d4116c..7a6456c1 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,8 @@ Refer to the [Python SDK's developer documentation](https://docs.developers.opti ### Requirements +Version `5.0+`: Python 3.8+, PyPy 3.8+ + Version `4.0+`: Python 3.7+, PyPy 3.7+ Version `3.0+`: Python 2.7+, PyPy 3.4+ diff --git a/optimizely/odp/odp_segment_api_manager.py b/optimizely/odp/odp_segment_api_manager.py index 8e5d8bc5..1ea191eb 100644 --- a/optimizely/odp/odp_segment_api_manager.py +++ b/optimizely/odp/odp_segment_api_manager.py @@ -138,8 +138,7 @@ def fetch_segments(self, api_key: str, api_host: str, user_key: str, '{audiences(subset: $audiences) {edges {node {name state}}}}}', 'variables': { 'userId': str(user_value), - 'audiences': segments_to_check - } + 'audiences': segments_to_check} } try: diff --git a/optimizely/version.py b/optimizely/version.py index 44b3134d..de16cae8 100644 --- a/optimizely/version.py +++ b/optimizely/version.py @@ -11,5 +11,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-version_info = (5, 0, '0-beta') +version_info = (5, 0, 0) __version__ = '.'.join(str(v) for v in version_info) diff --git a/setup.py b/setup.py index 5e2ccc2e..1954aa48 100644 --- a/setup.py +++ b/setup.py @@ -47,10 +47,11 @@ 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python', - 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', ], packages=find_packages(exclude=['docs', 'tests']), extras_require={'test': TEST_REQUIREMENTS}, From 3d1a21c8f729a6bf14115755d7dc6d88d091b288 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 21:09:36 -0800 Subject: [PATCH 50/68] build(deps): bump flask from 1.1.2 to 2.2.5 in /tests/testapp (#432) * build(deps): bump flask from 1.1.2 to 2.2.5 in /tests/testapp Bumps [flask](https://github.com/pallets/flask) from 1.1.2 to 2.2.5. - [Release notes](https://github.com/pallets/flask/releases) - [Changelog](https://github.com/pallets/flask/blob/main/CHANGES.rst) - [Commits](https://github.com/pallets/flask/compare/1.1.2...2.2.5) --- updated-dependencies: - dependency-name: flask dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * update to py 3 in dockerfile --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Matjaz Pirnovar --- tests/testapp/Dockerfile | 2 +- tests/testapp/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/testapp/Dockerfile b/tests/testapp/Dockerfile index 3a146d7b..1042c462 100644 --- a/tests/testapp/Dockerfile +++ b/tests/testapp/Dockerfile @@ -1,4 +1,4 @@ -FROM python:2.7.10 +FROM python:3.11 LABEL maintainer="developers@optimizely.com" diff --git a/tests/testapp/requirements.txt b/tests/testapp/requirements.txt index 46a48dd9..4b70123b 100644 --- a/tests/testapp/requirements.txt +++ b/tests/testapp/requirements.txt @@ -1 +1 @@ -Flask==1.1.2 +Flask==2.2.5 From 2f00b4de7010a056bd367101c8080b80809f356b Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Tue, 12 Mar 2024 09:01:00 -0700 Subject: [PATCH 51/68] Mpirnovar update error (#433) * updare error log message * test for the log message * upate to generic exception --- optimizely/config_manager.py | 4 ++-- tests/test_config_manager.py | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index 032189e9..755c6b9c 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -420,9 +420,9 @@ def _run(self) -> None: if self.stopped.wait(self.update_interval): self.stopped.clear() break - except (OSError, OverflowError) as err: + except Exception as err: self.logger.error( - f'Provided update_interval value may be too big. Error: {err}' + f'Thread for background datafile polling failed. 
Error: {err}' ) raise diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py index 6f4038cb..1c3fbe89 100644 --- a/tests/test_config_manager.py +++ b/tests/test_config_manager.py @@ -494,6 +494,32 @@ def test_fetch_datafile__request_exception_raised(self, _): self.assertEqual(test_headers['Last-Modified'], project_config_manager.last_modified) self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) + def test_fetch_datafile__exception_polling_thread_failed(self, _): + """ Test that exception is raised when polling thread stops. """ + sdk_key = 'some_key' + mock_logger = mock.Mock() + + test_headers = {'Last-Modified': 'New Time'} + test_datafile = json.dumps(self.config_dict_with_features) + test_response = requests.Response() + test_response.status_code = 200 + test_response.headers = test_headers + test_response._content = test_datafile + + with mock.patch('requests.get', return_value=test_response): + project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, + logger=mock_logger, + update_interval=12345678912345) + + project_config_manager.stop() + + # verify the error log message + log_messages = [args[0] for args, _ in mock_logger.error.call_args_list] + for message in log_messages: + if "Thread for background datafile polling failed. " \ + "Error: timestamp too large to convert to C _PyTime_t" not in message: + assert False + def test_is_running(self, _): """ Test that polling thread is running after instance of PollingConfigManager is created. 
""" with mock.patch('optimizely.config_manager.PollingConfigManager.fetch_datafile'): From 5caf9a56fdc28f0e92d2654bd52c07177a88a594 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Tue, 25 Jun 2024 12:44:28 -0700 Subject: [PATCH 52/68] remove two modules from core requirements (#435) --- requirements/core.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/requirements/core.txt b/requirements/core.txt index 45db2ece..7cbfe29f 100644 --- a/requirements/core.txt +++ b/requirements/core.txt @@ -1,6 +1,4 @@ jsonschema>=3.2.0 pyrsistent>=0.16.0 requests>=2.21 -pyOpenSSL>=19.1.0 -cryptography>=2.8.0 idna>=2.10 From 144e41f5a6adf67befd2e8a21c2158481c586c25 Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Wed, 26 Jun 2024 12:52:06 -0700 Subject: [PATCH 53/68] remove two dependencies from readme (#436) --- README.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/README.md b/README.md index 7a6456c1..e0aeafb6 100644 --- a/README.md +++ b/README.md @@ -227,10 +227,6 @@ This software incorporates code from the following open source projects: requests (Apache-2.0 License: https://github.com/psf/requests/blob/master/LICENSE) -pyOpenSSL (Apache-2.0 License https://github.com/pyca/pyopenssl/blob/main/LICENSE) - -cryptography (Apache-2.0 https://github.com/pyca/cryptography/blob/main/LICENSE.APACHE) - idna (BSD 3-Clause License https://github.com/kjd/idna/blob/master/LICENSE.md) ### Other Optimizely SDKs From 986e615c989f79135a12be20902533a300e78dcb Mon Sep 17 00:00:00 2001 From: Matjaz Pirnovar Date: Wed, 26 Jun 2024 13:13:23 -0700 Subject: [PATCH 54/68] changelog, version (#437) --- CHANGELOG.md | 5 +++++ optimizely/version.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94e3bbd3..3db4a7f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Optimizely Python SDK Changelog +## 5.0.1 +June 26th, 2024 + +We removed redundant dependencies pyOpenSSL and cryptography 
([#435](https://github.com/optimizely/python-sdk/pull/435), [#436](https://github.com/optimizely/python-sdk/pull/436)). + ## 5.0.0 January 18th, 2024 diff --git a/optimizely/version.py b/optimizely/version.py index de16cae8..da021f94 100644 --- a/optimizely/version.py +++ b/optimizely/version.py @@ -11,5 +11,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -version_info = (5, 0, 0) +version_info = (5, 0, 1) __version__ = '.'.join(str(v) for v in version_info) From 40880ffad7403ef96c7b11b02a110fb42adf39c2 Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Wed, 25 Sep 2024 22:50:47 +0600 Subject: [PATCH 55/68] [FSSDK-10665] fix: Github Actions YAML files vulnerable to script injections corrected (#438) --- .github/workflows/integration_test.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/integration_test.yml b/.github/workflows/integration_test.yml index 9a4e5eb1..7619ca51 100644 --- a/.github/workflows/integration_test.yml +++ b/.github/workflows/integration_test.yml @@ -23,14 +23,18 @@ jobs: path: 'home/runner/travisci-tools' ref: 'master' - name: set SDK Branch if PR + env: + HEAD_REF: ${{ github.head_ref }} if: ${{ github.event_name == 'pull_request' }} run: | - echo "SDK_BRANCH=${{ github.head_ref }}" >> $GITHUB_ENV + echo "SDK_BRANCH=$HEAD_REF" >> $GITHUB_ENV - name: set SDK Branch if not pull request + env: + REF_NAME: ${{ github.ref_name }} if: ${{ github.event_name != 'pull_request' }} run: | - echo "SDK_BRANCH=${{ github.ref_name }}" >> $GITHUB_ENV - echo "TRAVIS_BRANCH=${{ github.ref_name }}" >> $GITHUB_ENV + echo "SDK_BRANCH=${REF_NAME}" >> $GITHUB_ENV + echo "TRAVIS_BRANCH=${REF_NAME}" >> $GITHUB_ENV - name: Trigger build env: SDK: python From 22c74ee2bb1482a5945d6728aca4b28a3998b5ef Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Wed, 27 Nov 2024 22:27:05 +0600 Subject: [PATCH 56/68] [FSSDK-10763] Implement UPS request batching for 
decideForKeys (#440) * update: UserProfile class created, changes in decision_service, decide_for_keys * update: get_variation function changed * update: new function in decision_service * update: everything implemented from java. tests are failing * update: minor changes * update: user_profile_tracker added to tests * update: some tests fixed * optimizely/decision_service.py -> Added check for `ignore_user_profile` in decision logic. optimizely/user_profile.py -> Improved user profile loading with missing key checks. tests/test_decision_service.py -> Updated tests to include user profile tracker. * tests/test_decision_service.py -> Added expected decision object. tests/test_decision_service.py -> Updated experiment bucket map call. tests/test_decision_service.py -> Introduced user_profile_tracker usage. tests/test_decision_service.py -> Modified method calls with user_profile_tracker. * optimizely/decision_service.py -> fixed get_variations_for_feature_list * optimizely/decision_service.py -> Fixed how rollout reasons are added tests/test_decision_service.py -> Added user profile tracker object * tests/test_user_context.py -> fixed some tests * optimizely/user_profile.py -> Updated type for `experiment_bucket_map`. 
tests/test_decision_service.py -> Fixed tests * all unit tests passing * lint check * fix: typechecks added * more types updated * all typechecks passing * gha typechecks fixed * all typecheck should pass * lint check should pass * removed unnecessary comments * removed comments from test * optimizely/decision_service.py -> Removed user profile save logic optimizely/optimizely.py -> Added loading and saving profile logic * optimizely/user_profile.py -> Updated experiment_bucket_map type optimizely/user_profile.py -> Testing user profile update logic * optimizely/decision_service.py -> Commented out profile loading optimizely/user_profile.py -> Removed unused import statement * optimizely/decision_service.py -> Removed unused profile loading optimizely/user_profile.py -> Fixed handling of reasons list optimizely/user_profile.py -> Improved profile retrieval error logging tests/test_decision_service.py -> Updated mock checks to simplify tests tests/test_user_profile.py -> Added tests for user profile handling tests/test_optimizely.py -> New test for variation lookup and save * optimizely/user_profile.py -> Reverted back to variation ID retrieval logic. 
* optimizely/user_profile.py -> Added error handling logic --- optimizely/decision_service.py | 169 +++++++++++------ optimizely/optimizely.py | 181 ++++++++++++------ optimizely/user_profile.py | 71 ++++++- tests/test_decision_service.py | 327 +++++---------------------------- tests/test_optimizely.py | 42 ++++- tests/test_user_context.py | 214 ++++++++++++++------- tests/test_user_profile.py | 74 ++++++++ 7 files changed, 601 insertions(+), 477 deletions(-) diff --git a/optimizely/decision_service.py b/optimizely/decision_service.py index 72254ce9..df85464e 100644 --- a/optimizely/decision_service.py +++ b/optimizely/decision_service.py @@ -22,7 +22,7 @@ from .helpers import experiment as experiment_helper from .helpers import validator from .optimizely_user_context import OptimizelyUserContext, UserAttributes -from .user_profile import UserProfile, UserProfileService +from .user_profile import UserProfile, UserProfileService, UserProfileTracker if TYPE_CHECKING: # prevent circular dependenacy by skipping import at runtime @@ -35,7 +35,7 @@ class Decision(NamedTuple): None if no experiment/variation was selected.""" experiment: Optional[entities.Experiment] variation: Optional[entities.Variation] - source: str + source: Optional[str] class DecisionService: @@ -247,6 +247,8 @@ def get_variation( project_config: ProjectConfig, experiment: entities.Experiment, user_context: OptimizelyUserContext, + user_profile_tracker: Optional[UserProfileTracker], + reasons: list[str] = [], options: Optional[Sequence[str]] = None ) -> tuple[Optional[entities.Variation], list[str]]: """ Top-level function to help determine variation user should be put in. @@ -260,7 +262,9 @@ def get_variation( Args: project_config: Instance of ProjectConfig. experiment: Experiment for which user variation needs to be determined. - user_context: contains user id and attributes + user_context: contains user id and attributes. 
+ user_profile_tracker: tracker for reading and updating user profile of the user. + reasons: Decision reasons. options: Decide options. Returns: @@ -275,6 +279,8 @@ def get_variation( ignore_user_profile = False decide_reasons = [] + if reasons is not None: + decide_reasons += reasons # Check if experiment is running if not experiment_helper.is_experiment_running(experiment): message = f'Experiment "{experiment.key}" is not running.' @@ -296,23 +302,14 @@ def get_variation( return variation, decide_reasons # Check to see if user has a decision available for the given experiment - user_profile = UserProfile(user_id) - if not ignore_user_profile and self.user_profile_service: - try: - retrieved_profile = self.user_profile_service.lookup(user_id) - except: - self.logger.exception(f'Unable to retrieve user profile for user "{user_id}" as lookup failed.') - retrieved_profile = None - - if retrieved_profile and validator.is_user_profile_valid(retrieved_profile): - user_profile = UserProfile(**retrieved_profile) - variation = self.get_stored_variation(project_config, experiment, user_profile) - if variation: - message = f'Returning previously activated variation ID "{variation}" of experiment ' \ - f'"{experiment}" for user "{user_id}" from user profile.' - self.logger.info(message) - decide_reasons.append(message) - return variation, decide_reasons + if user_profile_tracker is not None and not ignore_user_profile: + variation = self.get_stored_variation(project_config, experiment, user_profile_tracker.get_user_profile()) + if variation: + message = f'Returning previously activated variation ID "{variation}" of experiment ' \ + f'"{experiment}" for user "{user_id}" from user profile.' 
+ self.logger.info(message) + decide_reasons.append(message) + return variation, decide_reasons else: self.logger.warning('User profile has invalid format.') @@ -340,10 +337,9 @@ def get_variation( self.logger.info(message) decide_reasons.append(message) # Store this new decision and return the variation for the user - if not ignore_user_profile and self.user_profile_service: + if user_profile_tracker is not None and not ignore_user_profile: try: - user_profile.save_variation_for_experiment(experiment.id, variation.id) - self.user_profile_service.save(user_profile.__dict__) + user_profile_tracker.update_user_profile(experiment, variation) except: self.logger.exception(f'Unable to save user profile for user "{user_id}".') return variation, decide_reasons @@ -479,44 +475,7 @@ def get_variation_for_feature( Returns: Decision namedtuple consisting of experiment and variation for the user. """ - decide_reasons = [] - - # Check if the feature flag is under an experiment and the the user is bucketed into one of these experiments - if feature.experimentIds: - # Evaluate each experiment ID and return the first bucketed experiment variation - for experiment_id in feature.experimentIds: - experiment = project_config.get_experiment_from_id(experiment_id) - decision_variation = None - - if experiment: - optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(feature.key, - experiment.key) - - forced_decision_variation, reasons_received = self.validated_forced_decision( - project_config, optimizely_decision_context, user_context) - decide_reasons += reasons_received - - if forced_decision_variation: - decision_variation = forced_decision_variation - else: - decision_variation, variation_reasons = self.get_variation(project_config, - experiment, user_context, options) - decide_reasons += variation_reasons - - if decision_variation: - message = f'User "{user_context.user_id}" bucketed into a ' \ - f'experiment "{experiment.key}" of feature "{feature.key}".' 
- self.logger.debug(message) - return Decision(experiment, decision_variation, - enums.DecisionSources.FEATURE_TEST), decide_reasons - - message = f'User "{user_context.user_id}" is not bucketed into any of the ' \ - f'experiments on the feature "{feature.key}".' - self.logger.debug(message) - variation, rollout_variation_reasons = self.get_variation_for_rollout(project_config, feature, user_context) - if rollout_variation_reasons: - decide_reasons += rollout_variation_reasons - return variation, decide_reasons + return self.get_variations_for_feature_list(project_config, [feature], user_context, options)[0] def validated_forced_decision( self, @@ -580,3 +539,91 @@ def validated_forced_decision( user_context.logger.info(user_has_forced_decision_but_invalid) return None, reasons + + def get_variations_for_feature_list( + self, + project_config: ProjectConfig, + features: list[entities.FeatureFlag], + user_context: OptimizelyUserContext, + options: Optional[Sequence[str]] = None + ) -> list[tuple[Decision, list[str]]]: + """ + Returns the list of experiment/variation the user is bucketed in for the given list of features. + Args: + project_config: Instance of ProjectConfig. + features: List of features for which we are determining if it is enabled or not for the given user. + user_context: user context for user. + options: Decide options. + + Returns: + List of Decision namedtuple consisting of experiment and variation for the user. 
+ """ + decide_reasons: list[str] = [] + + if options: + ignore_ups = OptimizelyDecideOption.IGNORE_USER_PROFILE_SERVICE in options + else: + ignore_ups = False + + user_profile_tracker: Optional[UserProfileTracker] = None + if self.user_profile_service is not None and not ignore_ups: + user_profile_tracker = UserProfileTracker(user_context.user_id, self.user_profile_service, self.logger) + user_profile_tracker.load_user_profile(decide_reasons, None) + + decisions = [] + + for feature in features: + feature_reasons = decide_reasons.copy() + experiment_decision_found = False # Track if an experiment decision was made for the feature + + # Check if the feature flag is under an experiment + if feature.experimentIds: + for experiment_id in feature.experimentIds: + experiment = project_config.get_experiment_from_id(experiment_id) + decision_variation = None + + if experiment: + optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext( + feature.key, experiment.key) + forced_decision_variation, reasons_received = self.validated_forced_decision( + project_config, optimizely_decision_context, user_context) + feature_reasons.extend(reasons_received) + + if forced_decision_variation: + decision_variation = forced_decision_variation + else: + decision_variation, variation_reasons = self.get_variation( + project_config, experiment, user_context, user_profile_tracker, feature_reasons, options + ) + feature_reasons.extend(variation_reasons) + + if decision_variation: + self.logger.debug( + f'User "{user_context.user_id}" ' + f'bucketed into experiment "{experiment.key}" of feature "{feature.key}".' 
+ ) + decision = Decision(experiment, decision_variation, enums.DecisionSources.FEATURE_TEST) + decisions.append((decision, feature_reasons)) + experiment_decision_found = True # Mark that a decision was found + break # Stop after the first successful experiment decision + + # Only process rollout if no experiment decision was found + if not experiment_decision_found: + rollout_decision, rollout_reasons = self.get_variation_for_rollout(project_config, + feature, + user_context) + if rollout_reasons: + feature_reasons.extend(rollout_reasons) + if rollout_decision: + self.logger.debug(f'User "{user_context.user_id}" ' + f'bucketed into rollout for feature "{feature.key}".') + else: + self.logger.debug(f'User "{user_context.user_id}" ' + f'not bucketed into any rollout for feature "{feature.key}".') + + decisions.append((rollout_decision, feature_reasons)) + + if self.user_profile_service is not None and user_profile_tracker is not None and ignore_ups is False: + user_profile_tracker.save_user_profile() + + return decisions diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index c50bfcb3..1b25bec6 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -21,6 +21,7 @@ from . import exceptions from . import logger as _logging from . import project_config +from . 
import user_profile from .config_manager import AuthDatafilePollingConfigManager from .config_manager import BaseConfigManager from .config_manager import PollingConfigManager @@ -42,6 +43,7 @@ from .odp.odp_manager import OdpManager from .optimizely_config import OptimizelyConfig, OptimizelyConfigService from .optimizely_user_context import OptimizelyUserContext, UserAttributes +from .project_config import ProjectConfig if TYPE_CHECKING: # prevent circular dependency by skipping import at runtime @@ -168,6 +170,7 @@ def __init__( self.event_builder = event_builder.EventBuilder() self.decision_service = decision_service.DecisionService(self.logger, user_profile_service) + self.user_profile_service = user_profile_service def _validate_instantiation_options(self) -> None: """ Helper method to validate all instantiation parameters. @@ -629,8 +632,13 @@ def get_variation( return None user_context = OptimizelyUserContext(self, self.logger, user_id, attributes, False) - - variation, _ = self.decision_service.get_variation(project_config, experiment, user_context) + user_profile_tracker = user_profile.UserProfileTracker(user_id, self.user_profile_service, self.logger) + user_profile_tracker.load_user_profile() + variation, _ = self.decision_service.get_variation(project_config, + experiment, + user_context, + user_profile_tracker) + user_profile_tracker.save_user_profile() if variation: variation_key = variation.key @@ -701,7 +709,7 @@ def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optiona if (is_source_rollout or not decision.variation) and project_config.get_send_flag_decisions_value(): self._send_impression_event( project_config, decision.experiment, decision.variation, feature.key, decision.experiment.key if - decision.experiment else '', decision.source, feature_enabled, user_id, attributes + decision.experiment else '', str(decision.source), feature_enabled, user_id, attributes ) # Send event if Decision came from an experiment. 
@@ -712,7 +720,7 @@ def is_feature_enabled(self, feature_key: str, user_id: str, attributes: Optiona } self._send_impression_event( project_config, decision.experiment, decision.variation, feature.key, decision.experiment.key, - decision.source, feature_enabled, user_id, attributes + str(decision.source), feature_enabled, user_id, attributes ) if feature_enabled: @@ -1118,73 +1126,70 @@ def _decide( self.logger.debug('Provided decide options is not an array. Using default decide options.') decide_options = self.default_decide_options - # Create Optimizely Decision Result. + if OptimizelyDecideOption.ENABLED_FLAGS_ONLY in decide_options: + decide_options.remove(OptimizelyDecideOption.ENABLED_FLAGS_ONLY) + + decision = self._decide_for_keys( + user_context, + [key], + decide_options, + True + )[key] + + return decision + + def _create_optimizely_decision( + self, + user_context: OptimizelyUserContext, + flag_key: str, + flag_decision: Decision, + decision_reasons: Optional[list[str]], + decide_options: list[str], + project_config: ProjectConfig + ) -> OptimizelyDecision: user_id = user_context.user_id - attributes = user_context.get_user_attributes() - variation_key = None - variation = None feature_enabled = False - rule_key = None - flag_key = key + if flag_decision.variation is not None: + if flag_decision.variation.featureEnabled: + feature_enabled = True + + self.logger.info(f'Feature {flag_key} is enabled for user {user_id} {feature_enabled}"') + + # Create Optimizely Decision Result. 
+ attributes = user_context.get_user_attributes() + rule_key = flag_decision.experiment.key if flag_decision.experiment else None all_variables = {} - experiment = None - decision_source = DecisionSources.ROLLOUT - source_info: dict[str, Any] = {} + decision_source = flag_decision.source decision_event_dispatched = False - # Check forced decisions first - optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(flag_key=key, rule_key=rule_key) - forced_decision_response = self.decision_service.validated_forced_decision(config, - optimizely_decision_context, - user_context) - variation, decision_reasons = forced_decision_response - reasons += decision_reasons - - if variation: - decision = Decision(None, variation, enums.DecisionSources.FEATURE_TEST) - else: - # Regular decision - decision, decision_reasons = self.decision_service.get_variation_for_feature(config, - feature_flag, - user_context, decide_options) - - reasons += decision_reasons - - # Fill in experiment and variation if returned (rollouts can have featureEnabled variables as well.) 
- if decision.experiment is not None: - experiment = decision.experiment - source_info["experiment"] = experiment - rule_key = experiment.key if experiment else None - if decision.variation is not None: - variation = decision.variation - variation_key = variation.key - feature_enabled = variation.featureEnabled - decision_source = decision.source - source_info["variation"] = variation + feature_flag = project_config.feature_key_map.get(flag_key) # Send impression event if Decision came from a feature # test and decide options doesn't include disableDecisionEvent if OptimizelyDecideOption.DISABLE_DECISION_EVENT not in decide_options: - if decision_source == DecisionSources.FEATURE_TEST or config.send_flag_decisions: - self._send_impression_event(config, experiment, variation, flag_key, rule_key or '', - decision_source, feature_enabled, + if decision_source == DecisionSources.FEATURE_TEST or project_config.send_flag_decisions: + self._send_impression_event(project_config, + flag_decision.experiment, + flag_decision.variation, + flag_key, rule_key or '', + str(decision_source), feature_enabled, user_id, attributes) decision_event_dispatched = True # Generate all variables map if decide options doesn't include excludeVariables - if OptimizelyDecideOption.EXCLUDE_VARIABLES not in decide_options: + if OptimizelyDecideOption.EXCLUDE_VARIABLES not in decide_options and feature_flag: for variable_key, variable in feature_flag.variables.items(): variable_value = variable.defaultValue if feature_enabled: - variable_value = config.get_variable_value_for_variation(variable, decision.variation) + variable_value = project_config.get_variable_value_for_variation(variable, flag_decision.variation) self.logger.debug( f'Got variable value "{variable_value}" for ' f'variable "{variable_key}" of feature flag "{flag_key}".' 
) try: - actual_value = config.get_typecast_value(variable_value, variable.type) + actual_value = project_config.get_typecast_value(variable_value, variable.type) except: self.logger.error('Unable to cast value. Returning None.') actual_value = None @@ -1192,7 +1197,11 @@ def _decide( all_variables[variable_key] = actual_value should_include_reasons = OptimizelyDecideOption.INCLUDE_REASONS in decide_options - + variation_key = ( + flag_decision.variation.key + if flag_decision is not None and flag_decision.variation is not None + else None + ) # Send notification self.notification_center.send_notifications( enums.NotificationTypes.DECISION, @@ -1205,7 +1214,7 @@ def _decide( 'variables': all_variables, 'variation_key': variation_key, 'rule_key': rule_key, - 'reasons': reasons if should_include_reasons else [], + 'reasons': decision_reasons if should_include_reasons else [], 'decision_event_dispatched': decision_event_dispatched }, @@ -1213,7 +1222,7 @@ def _decide( return OptimizelyDecision(variation_key=variation_key, enabled=feature_enabled, variables=all_variables, rule_key=rule_key, flag_key=flag_key, - user_context=user_context, reasons=reasons if should_include_reasons else [] + user_context=user_context, reasons=decision_reasons if should_include_reasons else [] ) def _decide_all( @@ -1253,7 +1262,8 @@ def _decide_for_keys( self, user_context: Optional[OptimizelyUserContext], keys: list[str], - decide_options: Optional[list[str]] = None + decide_options: Optional[list[str]] = None, + ignore_default_options: bool = False ) -> dict[str, OptimizelyDecision]: """ Args: @@ -1277,19 +1287,74 @@ def _decide_for_keys( merged_decide_options: list[str] = [] if isinstance(decide_options, list): merged_decide_options = decide_options[:] - merged_decide_options += self.default_decide_options + if not ignore_default_options: + merged_decide_options += self.default_decide_options else: self.logger.debug('Provided decide options is not an array. 
Using default decide options.') merged_decide_options = self.default_decide_options - enabled_flags_only = OptimizelyDecideOption.ENABLED_FLAGS_ONLY in merged_decide_options + decisions: dict[str, OptimizelyDecision] = {} + valid_keys = [] + decision_reasons_dict = {} + + project_config = self.config_manager.get_config() + flags_without_forced_decision: list[entities.FeatureFlag] = [] + flag_decisions: dict[str, Decision] = {} - decisions = {} + if project_config is None: + return decisions for key in keys: - decision = self._decide(user_context, key, decide_options) - if enabled_flags_only and not decision.enabled: + feature_flag = project_config.feature_key_map.get(key) + if feature_flag is None: + decisions[key] = OptimizelyDecision(None, False, None, None, key, user_context, []) continue - decisions[key] = decision + valid_keys.append(key) + decision_reasons: list[str] = [] + decision_reasons_dict[key] = decision_reasons + + optimizely_decision_context = OptimizelyUserContext.OptimizelyDecisionContext(flag_key=key, rule_key=None) + forced_decision_response = self.decision_service.validated_forced_decision(project_config, + optimizely_decision_context, + user_context) + variation, decision_reasons = forced_decision_response + decision_reasons_dict[key] += decision_reasons + + if variation: + decision = Decision(None, variation, enums.DecisionSources.FEATURE_TEST) + flag_decisions[key] = decision + else: + flags_without_forced_decision.append(feature_flag) + + decision_list = self.decision_service.get_variations_for_feature_list( + project_config, + flags_without_forced_decision, + user_context, + merged_decide_options + ) + + for i in range(0, len(flags_without_forced_decision)): + decision = decision_list[i][0] + reasons = decision_list[i][1] + flag_key = flags_without_forced_decision[i].key + flag_decisions[flag_key] = decision + decision_reasons_dict[flag_key] += reasons + + for key in valid_keys: + flag_decision = flag_decisions[key] + decision_reasons = 
decision_reasons_dict[key] + optimizely_decision = self._create_optimizely_decision( + user_context, + key, + flag_decision, + decision_reasons, + merged_decide_options, + project_config + ) + enabled_flags_only_missing = OptimizelyDecideOption.ENABLED_FLAGS_ONLY not in merged_decide_options + is_enabled = optimizely_decision.enabled + if enabled_flags_only_missing or is_enabled: + decisions[key] = optimizely_decision + return decisions def _setup_odp(self, sdk_key: Optional[str]) -> None: diff --git a/optimizely/user_profile.py b/optimizely/user_profile.py index 0410bcf7..f5ded013 100644 --- a/optimizely/user_profile.py +++ b/optimizely/user_profile.py @@ -14,11 +14,17 @@ from __future__ import annotations from typing import Any, Optional from sys import version_info +from . import logger as _logging if version_info < (3, 8): from typing_extensions import Final else: - from typing import Final # type: ignore + from typing import Final, TYPE_CHECKING # type: ignore + + if TYPE_CHECKING: + # prevent circular dependenacy by skipping import at runtime + from .entities import Experiment, Variation + from optimizely.error_handler import BaseErrorHandler class UserProfile: @@ -54,7 +60,6 @@ def get_variation_for_experiment(self, experiment_id: str) -> Optional[str]: Returns: Variation ID corresponding to the experiment. None if no decision available. """ - return self.experiment_bucket_map.get(experiment_id, {self.VARIATION_ID_KEY: None}).get(self.VARIATION_ID_KEY) def save_variation_for_experiment(self, experiment_id: str, variation_id: str) -> None: @@ -64,7 +69,6 @@ def save_variation_for_experiment(self, experiment_id: str, variation_id: str) - experiment_id: ID for experiment for which the decision is to be stored. variation_id: ID for variation that the user saw. 
""" - self.experiment_bucket_map.update({experiment_id: {self.VARIATION_ID_KEY: variation_id}}) @@ -90,3 +94,64 @@ def save(self, user_profile: dict[str, Any]) -> None: user_profile: Dict representing the user's profile. """ pass + + +class UserProfileTracker: + def __init__(self, + user_id: str, + user_profile_service: Optional[UserProfileService], + logger: Optional[_logging.Logger] = None): + self.user_id = user_id + self.user_profile_service = user_profile_service + self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) + self.profile_updated = False + self.user_profile = UserProfile(user_id, {}) + + def get_user_profile(self) -> UserProfile: + return self.user_profile + + def load_user_profile(self, reasons: Optional[list[str]] = [], + error_handler: Optional[BaseErrorHandler] = None) -> None: + if reasons is None: + reasons = [] + try: + user_profile = self.user_profile_service.lookup(self.user_id) if self.user_profile_service else None + if user_profile is None: + message = "Unable to get a user profile from the UserProfileService." 
+ reasons.append(message) + else: + if 'user_id' in user_profile and 'experiment_bucket_map' in user_profile: + self.user_profile = UserProfile( + user_profile['user_id'], + user_profile['experiment_bucket_map'] + ) + self.logger.info("User profile loaded successfully.") + else: + missing_keys = [key for key in ['user_id', 'experiment_bucket_map'] if key not in user_profile] + message = f"User profile is missing keys: {', '.join(missing_keys)}" + reasons.append(message) + except Exception as exception: + message = str(exception) + reasons.append(message) + self.logger.exception(f'Unable to retrieve user profile for user "{self.user_id}" as lookup failed.') + if error_handler: + error_handler.handle_error(exception) + + def update_user_profile(self, experiment: Experiment, variation: Variation) -> None: + variation_id = variation.id + experiment_id = experiment.id + self.user_profile.save_variation_for_experiment(experiment_id, variation_id) + self.profile_updated = True + + def save_user_profile(self, error_handler: Optional[BaseErrorHandler] = None) -> None: + if not self.profile_updated: + return + try: + if self.user_profile_service: + self.user_profile_service.save(self.user_profile.__dict__) + self.logger.info(f'Saved user profile of user "{self.user_profile.user_id}".') + except Exception as exception: + self.logger.warning(f'Failed to save user profile of user "{self.user_profile.user_id}" ' + f'for exception:{exception}".') + if error_handler: + error_handler.handle_error(exception) diff --git a/tests/test_decision_service.py b/tests/test_decision_service.py index 4d755de5..6c5862a5 100644 --- a/tests/test_decision_service.py +++ b/tests/test_decision_service.py @@ -485,6 +485,8 @@ def test_get_variation__bucketing_id_provided(self): "random_key": "random_value", "$opt_bucketing_id": "user_bucket_value", }) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) 
experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch( "optimizely.decision_service.DecisionService.get_forced_variation", @@ -501,7 +503,8 @@ def test_get_variation__bucketing_id_provided(self): variation, _ = self.decision_service.get_variation( self.project_config, experiment, - user + user, + user_profile_tracker ) # Assert that bucket is called with appropriate bucketing ID @@ -515,6 +518,8 @@ def test_get_variation__user_whitelisted_for_variation(self): user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch( "optimizely.decision_service.DecisionService.get_whitelisted_variation", @@ -531,7 +536,7 @@ def test_get_variation__user_whitelisted_for_variation(self): "optimizely.user_profile.UserProfileService.save" ) as mock_save: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user + self.project_config, experiment, user, user_profile_tracker ) self.assertEqual( entities.Variation("111128", "control"), @@ -554,6 +559,8 @@ def test_get_variation__user_has_stored_decision(self): user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch( "optimizely.decision_service.DecisionService.get_whitelisted_variation", @@ -565,49 +572,38 @@ def test_get_variation__user_has_stored_decision(self): "optimizely.helpers.audience.does_user_meet_audience_conditions" ) as 
mock_audience_check, mock.patch( "optimizely.bucketer.Bucketer.bucket" - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value={ - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111128"}}, - }, - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: + ) as mock_bucket: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None + self.project_config, experiment, user, user_profile_tracker ) self.assertEqual( entities.Variation("111128", "control"), variation, ) - # Assert that stored variation is returned and bucketing service is not involved mock_get_whitelisted_variation.assert_called_once_with( self.project_config, experiment, "test_user" ) - mock_lookup.assert_called_once_with("test_user") mock_get_stored_variation.assert_called_once_with( self.project_config, experiment, - user_profile.UserProfile( - "test_user", {"111127": {"variation_id": "111128"}} - ), + user_profile_tracker.user_profile ) self.assertEqual(0, mock_audience_check.call_count) self.assertEqual(0, mock_bucket.call_count) - self.assertEqual(0, mock_save.call_count) - def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_available( + def test_get_variation__user_bucketed_for_new_experiment__user_profile_tracker_available( self, ): """ Test that get_variation buckets and returns variation if no forced variation or decision available. - Also, stores decision if user profile service is available. 
""" + """ user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch.object( self.decision_service, "logger" @@ -622,14 +618,9 @@ def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_a ) as mock_audience_check, mock.patch( "optimizely.bucketer.Bucketer.bucket", return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value={"user_id": "test_user", "experiment_bucket_map": {}}, - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: + ) as mock_bucket: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None + self.project_config, experiment, user, user_profile_tracker ) self.assertEqual( entities.Variation("111129", "variation"), @@ -640,71 +631,8 @@ def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_a mock_get_whitelisted_variation.assert_called_once_with( self.project_config, experiment, user.user_id ) - mock_lookup.assert_called_once_with("test_user") - self.assertEqual(1, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user, - mock_decision_service_logging - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - - def 
test_get_variation__user_bucketed_for_new_experiment__user_profile_service_not_available( - self, - ): - """ Test that get_variation buckets and returns variation if - no forced variation and no user profile service available. """ - - # Unset user profile service - self.decision_service.user_profile_service = None - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup" - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - # Assert that user is bucketed and new decision is not stored as user profile service is not available - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, "test_user" - ) - self.assertEqual(0, mock_lookup.call_count) - self.assertEqual(0, mock_get_stored_variation.call_count) + self.assertEqual(1, mock_get_stored_variation.call_count) mock_audience_check.assert_called_once_with( self.project_config, 
experiment.get_audience_conditions_or_ids(), @@ -716,7 +644,6 @@ def test_get_variation__user_bucketed_for_new_experiment__user_profile_service_n mock_bucket.assert_called_once_with( self.project_config, experiment, "test_user", "test_user" ) - self.assertEqual(0, mock_save.call_count) def test_get_variation__user_does_not_meet_audience_conditions(self): """ Test that get_variation returns None if user is not in experiment. """ @@ -725,6 +652,7 @@ def test_get_variation__user_does_not_meet_audience_conditions(self): logger=None, user_id="test_user", user_attributes={}) + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, self.decision_service.user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch.object( self.decision_service, "logger" @@ -739,13 +667,10 @@ def test_get_variation__user_does_not_meet_audience_conditions(self): ) as mock_audience_check, mock.patch( "optimizely.bucketer.Bucketer.bucket" ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value={"user_id": "test_user", "experiment_bucket_map": {}}, - ) as mock_lookup, mock.patch( "optimizely.user_profile.UserProfileService.save" ) as mock_save: variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None + self.project_config, experiment, user, user_profile_tracker ) self.assertIsNone( variation @@ -755,9 +680,8 @@ def test_get_variation__user_does_not_meet_audience_conditions(self): mock_get_whitelisted_variation.assert_called_once_with( self.project_config, experiment, "test_user" ) - mock_lookup.assert_called_once_with("test_user") mock_get_stored_variation.assert_called_once_with( - self.project_config, experiment, user_profile.UserProfile("test_user") + self.project_config, experiment, user_profile_tracker.get_user_profile() ) mock_audience_check.assert_called_once_with( self.project_config, @@ -770,192 +694,6 @@ def 
test_get_variation__user_does_not_meet_audience_conditions(self): self.assertEqual(0, mock_bucket.call_count) self.assertEqual(0, mock_save.call_count) - def test_get_variation__user_profile_in_invalid_format(self): - """ Test that get_variation handles invalid user profile gracefully. """ - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - return_value="invalid_profile", - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - - # Assert that user is bucketed and new decision is stored - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, "test_user" - ) - mock_lookup.assert_called_once_with("test_user") - # Stored decision is not consulted as user profile is invalid - self.assertEqual(0, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - 
enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user, - mock_decision_service_logging - ) - mock_decision_service_logging.warning.assert_called_once_with( - "User profile has invalid format." - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - - def test_get_variation__user_profile_lookup_fails(self): - """ Test that get_variation acts gracefully when lookup fails. """ - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", - side_effect=Exception("major problem"), - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save" - ) as mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - - # Assert that user is bucketed and new decision is stored - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, 
"test_user" - ) - mock_lookup.assert_called_once_with("test_user") - # Stored decision is not consulted as lookup failed - self.assertEqual(0, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user, - mock_decision_service_logging - ) - mock_decision_service_logging.exception.assert_called_once_with( - 'Unable to retrieve user profile for user "test_user" as lookup failed.' - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - - def test_get_variation__user_profile_save_fails(self): - """ Test that get_variation acts gracefully when save fails. """ - - user = optimizely_user_context.OptimizelyUserContext(optimizely_client=None, - logger=None, - user_id="test_user", - user_attributes={}) - experiment = self.project_config.get_experiment_from_key("test_experiment") - with mock.patch.object( - self.decision_service, "logger" - ) as mock_decision_service_logging, mock.patch( - "optimizely.decision_service.DecisionService.get_whitelisted_variation", - return_value=[None, []], - ) as mock_get_whitelisted_variation, mock.patch( - "optimizely.decision_service.DecisionService.get_stored_variation" - ) as mock_get_stored_variation, mock.patch( - "optimizely.helpers.audience.does_user_meet_audience_conditions", return_value=[True, []] - ) as mock_audience_check, mock.patch( - "optimizely.bucketer.Bucketer.bucket", - return_value=[entities.Variation("111129", "variation"), []], - ) as mock_bucket, mock.patch( - "optimizely.user_profile.UserProfileService.lookup", return_value=None - ) as mock_lookup, mock.patch( - "optimizely.user_profile.UserProfileService.save", - side_effect=Exception("major problem"), - ) as 
mock_save: - variation, _ = self.decision_service.get_variation( - self.project_config, experiment, user, None - ) - self.assertEqual( - entities.Variation("111129", "variation"), - variation, - ) - - # Assert that user is bucketed and new decision is stored - mock_get_whitelisted_variation.assert_called_once_with( - self.project_config, experiment, "test_user" - ) - mock_lookup.assert_called_once_with("test_user") - self.assertEqual(0, mock_get_stored_variation.call_count) - mock_audience_check.assert_called_once_with( - self.project_config, - experiment.get_audience_conditions_or_ids(), - enums.ExperimentAudienceEvaluationLogs, - "test_experiment", - user, - mock_decision_service_logging - ) - - mock_decision_service_logging.exception.assert_called_once_with( - 'Unable to save user profile for user "test_user".' - ) - mock_bucket.assert_called_once_with( - self.project_config, experiment, "test_user", "test_user" - ) - mock_save.assert_called_once_with( - { - "user_id": "test_user", - "experiment_bucket_map": {"111127": {"variation_id": "111129"}}, - } - ) - def test_get_variation__ignore_user_profile_when_specified(self): """ Test that we ignore the user profile service if specified. 
""" @@ -963,6 +701,8 @@ def test_get_variation__ignore_user_profile_when_specified(self): logger=None, user_id="test_user", user_attributes={}) + user_profile_service = user_profile.UserProfileService() + user_profile_tracker = user_profile.UserProfileTracker(user.user_id, user_profile_service) experiment = self.project_config.get_experiment_from_key("test_experiment") with mock.patch.object( self.decision_service, "logger" @@ -983,6 +723,8 @@ def test_get_variation__ignore_user_profile_when_specified(self): self.project_config, experiment, user, + user_profile_tracker, + [], options=['IGNORE_USER_PROFILE_SERVICE'], ) self.assertEqual( @@ -1290,6 +1032,8 @@ def test_get_variation_for_feature__returns_variation_for_feature_in_experiment( self.project_config, self.project_config.get_experiment_from_key("test_experiment"), user, + None, + [], None ) @@ -1417,6 +1161,8 @@ def test_get_variation_for_feature__returns_variation_for_feature_in_group(self) self.project_config, self.project_config.get_experiment_from_key("group_exp_1"), user, + None, + [], None ) @@ -1445,6 +1191,8 @@ def test_get_variation_for_feature__returns_none_for_user_not_in_experiment(self self.project_config, self.project_config.get_experiment_from_key("test_experiment"), user, + None, + [], None ) @@ -1472,7 +1220,7 @@ def test_get_variation_for_feature__returns_none_for_user_in_group_experiment_no ) mock_decision.assert_called_once_with( - self.project_config, self.project_config.get_experiment_from_id("32222"), user, False + self.project_config, self.project_config.get_experiment_from_id("32222"), user, None, [], False ) def test_get_variation_for_feature__returns_variation_for_feature_in_mutex_group_bucket_less_than_2500( @@ -1560,6 +1308,7 @@ def test_get_variation_for_feature__returns_variation_for_feature_in_mutex_group with mock.patch( 'optimizely.bucketer.Bucketer._generate_bucket_value', return_value=6500) as mock_generate_bucket_value, \ mock.patch.object(self.project_config, 'logger') as 
mock_config_logging: + variation_received, _ = self.decision_service.get_variation_for_feature( self.project_config, feature, user ) @@ -1789,6 +1538,13 @@ def test_get_variation_for_feature_returns_rollout_in_experiment_bucket_range_25 variation_received, _ = self.decision_service.get_variation_for_feature( self.project_config, feature, user ) + print(f"variation received is: {variation_received}") + x = decision_service.Decision( + expected_experiment, + expected_variation, + enums.DecisionSources.ROLLOUT, + ) + print(f"need to be:{x}") self.assertEqual( decision_service.Decision( expected_experiment, @@ -1797,6 +1553,7 @@ def test_get_variation_for_feature_returns_rollout_in_experiment_bucket_range_25 ), variation_received, ) + mock_config_logging.debug.assert_called_with( 'Assigned bucket 4000 to user with bucketing ID "test_user".') mock_generate_bucket_value.assert_called_with("test_user211147") diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index f1d1db89..8d36b830 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -369,9 +369,11 @@ def test_activate(self): log_event = EventFactory.create_log_event(mock_process.call_args[0][0], self.optimizely.logger) user_context = mock_decision.call_args[0][2] + user_profile_tracker = mock_decision.call_args[0][3] mock_decision.assert_called_once_with( - self.project_config, self.project_config.get_experiment_from_key('test_experiment'), user_context + self.project_config, self.project_config.get_experiment_from_key('test_experiment'), + user_context, user_profile_tracker ) self.assertEqual(1, mock_process.call_count) @@ -766,11 +768,13 @@ def test_activate__with_attributes__audience_match(self): log_event = EventFactory.create_log_event(mock_process.call_args[0][0], self.optimizely.logger) user_context = mock_get_variation.call_args[0][2] + user_profile_tracker = mock_get_variation.call_args[0][3] mock_get_variation.assert_called_once_with( self.project_config, 
self.project_config.get_experiment_from_key('test_experiment'), - user_context + user_context, + user_profile_tracker ) self.assertEqual(1, mock_process.call_count) self._validate_event_object( @@ -1120,11 +1124,12 @@ def test_activate__with_attributes__audience_match__bucketing_id_provided(self): log_event = EventFactory.create_log_event(mock_process.call_args[0][0], self.optimizely.logger) user_context = mock_get_variation.call_args[0][2] - + user_profile_tracker = mock_get_variation.call_args[0][3] mock_get_variation.assert_called_once_with( self.project_config, self.project_config.get_experiment_from_key('test_experiment'), - user_context + user_context, + user_profile_tracker ) self.assertEqual(1, mock_process.call_count) self._validate_event_object( @@ -1814,6 +1819,35 @@ def test_get_variation(self): {'experiment_key': 'test_experiment', 'variation_key': variation}, ) + def test_get_variation_lookup_and_save_is_called(self): + """ Test that lookup is called, get_variation returns valid variation and then save is called""" + + with mock.patch( + 'optimizely.decision_service.DecisionService.get_variation', + return_value=(self.project_config.get_variation_from_id('test_experiment', '111129'), []), + ), mock.patch( + 'optimizely.notification_center.NotificationCenter.send_notifications' + ) as mock_broadcast, mock.patch( + 'optimizely.user_profile.UserProfileTracker.load_user_profile' + ) as mock_load_user_profile, mock.patch( + 'optimizely.user_profile.UserProfileTracker.save_user_profile' + ) as mock_save_user_profile: + variation = self.optimizely.get_variation('test_experiment', 'test_user') + self.assertEqual( + 'variation', variation, + ) + self.assertEqual(mock_load_user_profile.call_count, 1) + self.assertEqual(mock_save_user_profile.call_count, 1) + self.assertEqual(mock_broadcast.call_count, 1) + + mock_broadcast.assert_any_call( + enums.NotificationTypes.DECISION, + 'ab-test', + 'test_user', + {}, + {'experiment_key': 'test_experiment', 
'variation_key': variation}, + ) + def test_get_variation_with_experiment_in_feature(self): """ Test that get_variation returns valid variation and broadcasts decision listener with type feature-test when get_variation returns feature experiment variation.""" diff --git a/tests/test_user_context.py b/tests/test_user_context.py index 48f08885..0c35e230 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -228,9 +228,17 @@ def test_decide__feature_test(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -303,9 +311,17 @@ def test_decide__feature_test__send_flag_decision_false(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -478,9 +494,17 @@ def test_decide_feature_null_variation(self): mock_variation = None with mock.patch( - 
'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.ROLLOUT), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.ROLLOUT + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -553,9 +577,17 @@ def test_decide_feature_null_variation__send_flag_decision_false(self): mock_variation = None with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.ROLLOUT), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.ROLLOUT + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -614,9 +646,17 @@ def test_decide__option__disable_decision_event(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -678,9 +718,17 @@ def 
test_decide__default_option__disable_decision_event(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -739,9 +787,17 @@ def test_decide__option__exclude_variables(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -835,9 +891,17 @@ def test_decide__option__enabled_flags_only(self): expected_var = project_config.get_variation_from_key('211127', '211229') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(expected_experiment, expected_var, - enums.DecisionSources.ROLLOUT), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + expected_experiment, + expected_var, + enums.DecisionSources.ROLLOUT + ), + [] + ) + ] ), 
mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -914,9 +978,17 @@ def test_decide__default_options__with__options(self): mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, mock_variation, - enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ) + ] ), mock.patch( 'optimizely.notification_center.NotificationCenter.send_notifications' ) as mock_broadcast_decision, mock.patch( @@ -968,14 +1040,17 @@ def test_decide_for_keys(self): mocked_decision_2 = OptimizelyDecision(flag_key='test_feature_in_rollout', enabled=False) def side_effect(*args, **kwargs): - flag = args[1] - if flag == 'test_feature_in_experiment': - return mocked_decision_1 - else: - return mocked_decision_2 + flags = args[1] + res = {} + for flag in flags: + if flag == 'test_feature_in_experiment': + res[flag] = mocked_decision_1 + else: + res[flag] = mocked_decision_2 + return res with mock.patch( - 'optimizely.optimizely.Optimizely._decide', side_effect=side_effect + 'optimizely.optimizely.Optimizely._decide_for_keys', side_effect=side_effect ) as mock_decide, mock.patch( 'optimizely.optimizely_user_context.OptimizelyUserContext._clone', return_value=user_context @@ -984,18 +1059,10 @@ def side_effect(*args, **kwargs): flags = ['test_feature_in_rollout', 'test_feature_in_experiment'] options = [] decisions = user_context.decide_for_keys(flags, options) - self.assertEqual(2, len(decisions)) - - mock_decide.assert_any_call( - user_context, - 'test_feature_in_experiment', - options - ) - mock_decide.assert_any_call( user_context, - 
'test_feature_in_rollout', + ['test_feature_in_rollout', 'test_feature_in_experiment'], options ) @@ -1011,14 +1078,17 @@ def test_decide_for_keys__option__enabled_flags_only(self): mocked_decision_2 = OptimizelyDecision(flag_key='test_feature_in_rollout', enabled=False) def side_effect(*args, **kwargs): - flag = args[1] - if flag == 'test_feature_in_experiment': - return mocked_decision_1 - else: - return mocked_decision_2 + flags = args[1] + res = {} + for flag in flags: + if flag == 'test_feature_in_experiment': + res[flag] = mocked_decision_1 + else: + res[flag] = mocked_decision_2 + return res with mock.patch( - 'optimizely.optimizely.Optimizely._decide', side_effect=side_effect + 'optimizely.optimizely.Optimizely._decide_for_keys', side_effect=side_effect ) as mock_decide, mock.patch( 'optimizely.optimizely_user_context.OptimizelyUserContext._clone', return_value=user_context @@ -1028,20 +1098,13 @@ def side_effect(*args, **kwargs): options = ['ENABLED_FLAGS_ONLY'] decisions = user_context.decide_for_keys(flags, options) - self.assertEqual(1, len(decisions)) - - mock_decide.assert_any_call( - user_context, - 'test_feature_in_experiment', - options - ) + self.assertEqual(2, len(decisions)) mock_decide.assert_any_call( user_context, - 'test_feature_in_rollout', + ['test_feature_in_rollout', 'test_feature_in_experiment'], options ) - self.assertEqual(mocked_decision_1, decisions['test_feature_in_experiment']) def test_decide_for_keys__default_options__with__options(self): @@ -1053,20 +1116,29 @@ def test_decide_for_keys__default_options__with__options(self): user_context = opt_obj.create_user_context('test_user') with mock.patch( - 'optimizely.optimizely.Optimizely._decide' - ) as mock_decide, mock.patch( + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list' + ) as mock_get_variations, mock.patch( 'optimizely.optimizely_user_context.OptimizelyUserContext._clone', return_value=user_context ): flags = ['test_feature_in_experiment'] 
options = ['EXCLUDE_VARIABLES'] + + mock_decision = mock.MagicMock() + mock_decision.experiment = mock.MagicMock(key='test_experiment') + mock_decision.variation = mock.MagicMock(key='variation') + mock_decision.source = enums.DecisionSources.FEATURE_TEST + + mock_get_variations.return_value = [(mock_decision, [])] + user_context.decide_for_keys(flags, options) - mock_decide.assert_called_with( - user_context, - 'test_feature_in_experiment', - ['EXCLUDE_VARIABLES'] + mock_get_variations.assert_called_with( + mock.ANY, # ProjectConfig + mock.ANY, # FeatureFlag list + user_context, # UserContext object + ['EXCLUDE_VARIABLES', 'ENABLED_FLAGS_ONLY'] ) def test_decide_for_all(self): @@ -1323,9 +1395,17 @@ def test_decide_experiment(self): mock_experiment = project_config.get_experiment_from_key('test_experiment') mock_variation = project_config.get_variation_from_id('test_experiment', '111129') with mock.patch( - 'optimizely.decision_service.DecisionService.get_variation_for_feature', - return_value=(decision_service.Decision(mock_experiment, - mock_variation, enums.DecisionSources.FEATURE_TEST), []), + 'optimizely.decision_service.DecisionService.get_variations_for_feature_list', + return_value=[ + ( + decision_service.Decision( + mock_experiment, + mock_variation, + enums.DecisionSources.FEATURE_TEST + ), + [] + ), + ] ): user_context = opt_obj.create_user_context('test_user') decision = user_context.decide('test_feature_in_experiment', [DecideOption.DISABLE_DECISION_EVENT]) @@ -1631,6 +1711,8 @@ def test_should_return_valid_decision_after_setting_invalid_experiment_rule_vari self.assertEqual(decide_decision.user_context.get_user_attributes(), {}) expected_reasons = [ + 'Invalid variation is mapped to flag (test_feature_in_experiment), rule (test_experiment) ' + 'and user (test_user) in the forced decision map.', 'Invalid variation is mapped to flag (test_feature_in_experiment), rule (test_experiment) ' 'and user (test_user) in the forced decision map.', 'Evaluating 
audiences for experiment "test_experiment": [].', diff --git a/tests/test_user_profile.py b/tests/test_user_profile.py index ffeb3e34..84aacd05 100644 --- a/tests/test_user_profile.py +++ b/tests/test_user_profile.py @@ -14,6 +14,7 @@ import unittest from optimizely import user_profile +from unittest import mock class UserProfileTest(unittest.TestCase): @@ -63,3 +64,76 @@ def test_save(self): user_profile_service = user_profile.UserProfileService() self.assertIsNone(user_profile_service.save({'user_id': 'test_user', 'experiment_bucket_map': {}})) + + +class UserProfileTrackerTest(unittest.TestCase): + def test_load_user_profile_failure(self): + """Test that load_user_profile handles exceptions gracefully.""" + mock_user_profile_service = mock.MagicMock() + mock_logger = mock.MagicMock() + + user_profile_tracker = user_profile.UserProfileTracker( + user_id="test_user", + user_profile_service=mock_user_profile_service, + logger=mock_logger + ) + mock_user_profile_service.lookup.side_effect = Exception("Lookup failure") + + user_profile_tracker.load_user_profile() + + # Verify that the logger recorded the exception + mock_logger.exception.assert_called_once_with( + 'Unable to retrieve user profile for user "test_user" as lookup failed.' 
+ ) + + # Verify that the user profile is reset to an empty profile + self.assertEqual(user_profile_tracker.user_profile.user_id, "test_user") + self.assertEqual(user_profile_tracker.user_profile.experiment_bucket_map, {}) + + def test_load_user_profile__user_profile_invalid(self): + """Test that load_user_profile handles an invalid user profile format.""" + mock_user_profile_service = mock.MagicMock() + mock_logger = mock.MagicMock() + + user_profile_tracker = user_profile.UserProfileTracker( + user_id="test_user", + user_profile_service=mock_user_profile_service, + logger=mock_logger + ) + + mock_user_profile_service.lookup.return_value = {"invalid_key": "value"} + + reasons = [] + user_profile_tracker.load_user_profile(reasons=reasons) + + # Verify that the logger recorded a warning for the missing keys + missing_keys_message = "User profile is missing keys: user_id, experiment_bucket_map" + self.assertIn(missing_keys_message, reasons) + + # Ensure the logger logs the invalid format + mock_logger.info.assert_not_called() + self.assertEqual(user_profile_tracker.user_profile.user_id, "test_user") + self.assertEqual(user_profile_tracker.user_profile.experiment_bucket_map, {}) + + # Verify the reasons list was updated + self.assertIn(missing_keys_message, reasons) + + def test_save_user_profile_failure(self): + """Test that save_user_profile handles exceptions gracefully.""" + mock_user_profile_service = mock.MagicMock() + mock_logger = mock.MagicMock() + + user_profile_tracker = user_profile.UserProfileTracker( + user_id="test_user", + user_profile_service=mock_user_profile_service, + logger=mock_logger + ) + + user_profile_tracker.profile_updated = True + mock_user_profile_service.save.side_effect = Exception("Save failure") + + user_profile_tracker.save_user_profile() + + mock_logger.warning.assert_called_once_with( + 'Failed to save user profile of user "test_user" for exception:Save failure".' 
+ ) From 7fa6153d898687bc616e8f1d6920106f75ca19d0 Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Fri, 29 Nov 2024 10:58:15 +0600 Subject: [PATCH 57/68] CHANGELOG.md -> Added section for version 5.1.0 version.py -> Updated version to 5.1.0 (#441) --- CHANGELOG.md | 5 +++++ optimizely/version.py | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3db4a7f9..7f3bc3cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,10 @@ # Optimizely Python SDK Changelog +## 5.1.0 +November 27th, 2024 + +Added support for batch processing in DecideAll and DecideForKeys, enabling more efficient handling of multiple decisions in the User Profile Service.([#440](https://github.com/optimizely/python-sdk/pull/440)) + ## 5.0.1 June 26th, 2024 diff --git a/optimizely/version.py b/optimizely/version.py index da021f94..941e5e68 100644 --- a/optimizely/version.py +++ b/optimizely/version.py @@ -11,5 +11,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-version_info = (5, 0, 1) +version_info = (5, 1, 0) __version__ = '.'.join(str(v) for v in version_info) From 45e73bb97fc87fc884fc6e05ab7f17998e4486f5 Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Thu, 12 Dec 2024 00:06:01 +0600 Subject: [PATCH 58/68] All threads have been named (#443) --- .gitignore | 2 ++ optimizely/config_manager.py | 2 +- optimizely/event/event_processor.py | 3 +-- optimizely/odp/odp_event_manager.py | 2 +- optimizely/optimizely_user_context.py | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index cff402c4..00ad86a4 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ MANIFEST .idea/* .*virtualenv/* .mypy_cache +.vscode/* # Output of building package *.egg-info @@ -26,3 +27,4 @@ datafile.json # Sphinx documentation docs/build/ + diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index 755c6b9c..c959914e 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -432,7 +432,7 @@ def start(self) -> None: self._polling_thread.start() def _initialize_thread(self) -> None: - self._polling_thread = threading.Thread(target=self._run, daemon=True) + self._polling_thread = threading.Thread(target=self._run, name="PollThread", daemon=True) class AuthDatafilePollingConfigManager(PollingConfigManager): diff --git a/optimizely/event/event_processor.py b/optimizely/event/event_processor.py index 9445ffc6..05f5e078 100644 --- a/optimizely/event/event_processor.py +++ b/optimizely/event/event_processor.py @@ -186,8 +186,7 @@ def start(self) -> None: return self.flushing_interval_deadline = self._get_time() + self._get_time(self.flush_interval.total_seconds()) - self.executor = threading.Thread(target=self._run) - self.executor.daemon = True + self.executor = threading.Thread(target=self._run, name="EventThread", daemon=True) self.executor.start() def _run(self) -> None: diff --git a/optimizely/odp/odp_event_manager.py b/optimizely/odp/odp_event_manager.py index 
18b08eb0..85512e90 100644 --- a/optimizely/odp/odp_event_manager.py +++ b/optimizely/odp/odp_event_manager.py @@ -75,7 +75,7 @@ def __init__( self.retry_count = OdpEventManagerConfig.DEFAULT_RETRY_COUNT self._current_batch: list[OdpEvent] = [] """_current_batch should only be modified by the processing thread, as it is not thread safe""" - self.thread = Thread(target=self._run, daemon=True) + self.thread = Thread(target=self._run, name="OdpThread", daemon=True) self.thread_exception = False """thread_exception will be True if the processing thread did not exit cleanly""" diff --git a/optimizely/optimizely_user_context.py b/optimizely/optimizely_user_context.py index fb674f93..e88c0f52 100644 --- a/optimizely/optimizely_user_context.py +++ b/optimizely/optimizely_user_context.py @@ -336,7 +336,7 @@ def _fetch_qualified_segments() -> bool: return success if callback: - fetch_thread = threading.Thread(target=_fetch_qualified_segments) + fetch_thread = threading.Thread(target=_fetch_qualified_segments, name="FetchQualifiedSegmentsThread") fetch_thread.start() return fetch_thread else: From d098f9ab45c6dece44419085e7fef0da3a27c590 Mon Sep 17 00:00:00 2001 From: Paul V Craven Date: Wed, 26 Feb 2025 12:21:35 -0600 Subject: [PATCH 59/68] [FSSDK-11212] Update code to retry web API calls for fetching datafile and pushing events (#445) * Update code to retry web API calls for fetching datafile and pushing events * Fix linting issues * Remove print statements * Fix up 'retries' member * Stub out requests.Session.get instead of requests.get * Update tests * Fix mypy error and linting error * Update for tests * Update * Update optimizely/event_dispatcher.py Co-authored-by: Jae Kim <45045038+jaeopt@users.noreply.github.com> * Update event dispatch to try three times to send events * Update changelog and version number * Update version number * Remove changelog and version update --------- Co-authored-by: Paul V Craven Co-authored-by: Jae Kim 
<45045038+jaeopt@users.noreply.github.com> --- optimizely/config_manager.py | 34 ++++++++++++++++++---- optimizely/event_dispatcher.py | 18 ++++++++++-- optimizely/helpers/enums.py | 1 + optimizely/helpers/validator.py | 5 ++-- tests/test_config_manager.py | 27 ++++++++--------- tests/test_event_dispatcher.py | 6 ++-- tests/test_notification_center_registry.py | 2 +- tests/test_optimizely.py | 6 ++-- tests/test_optimizely_factory.py | 10 +++---- 9 files changed, 73 insertions(+), 36 deletions(-) diff --git a/optimizely/config_manager.py b/optimizely/config_manager.py index c959914e..3dce2741 100644 --- a/optimizely/config_manager.py +++ b/optimizely/config_manager.py @@ -19,6 +19,8 @@ import threading from requests import codes as http_status_codes from requests import exceptions as requests_exceptions +from requests.adapters import HTTPAdapter +from urllib3.util.retry import Retry from . import exceptions as optimizely_exceptions from . import logger as optimizely_logger @@ -200,6 +202,7 @@ def __init__( error_handler: Optional[BaseErrorHandler] = None, notification_center: Optional[NotificationCenter] = None, skip_json_validation: Optional[bool] = False, + retries: Optional[int] = 3, ): """ Initialize config manager. One of sdk_key or datafile has to be set to be able to use. @@ -222,6 +225,7 @@ def __init__( JSON schema validation will be performed. 
""" + self.retries = retries self._config_ready_event = threading.Event() super().__init__( datafile=datafile, @@ -391,9 +395,18 @@ def fetch_datafile(self) -> None: request_headers[enums.HTTPHeaders.IF_MODIFIED_SINCE] = self.last_modified try: - response = requests.get( - self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT, - ) + session = requests.Session() + + retries = Retry(total=self.retries, + backoff_factor=0.1, + status_forcelist=[500, 502, 503, 504]) + adapter = HTTPAdapter(max_retries=retries) + + session.mount('http://', adapter) + session.mount("https://", adapter) + response = session.get(self.datafile_url, + headers=request_headers, + timeout=enums.ConfigManager.REQUEST_TIMEOUT) except requests_exceptions.RequestException as err: self.logger.error(f'Fetching datafile from {self.datafile_url} failed. Error: {err}') return @@ -475,9 +488,18 @@ def fetch_datafile(self) -> None: request_headers[enums.HTTPHeaders.IF_MODIFIED_SINCE] = self.last_modified try: - response = requests.get( - self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT, - ) + session = requests.Session() + + retries = Retry(total=self.retries, + backoff_factor=0.1, + status_forcelist=[500, 502, 503, 504]) + adapter = HTTPAdapter(max_retries=retries) + + session.mount('http://', adapter) + session.mount("https://", adapter) + response = session.get(self.datafile_url, + headers=request_headers, + timeout=enums.ConfigManager.REQUEST_TIMEOUT) except requests_exceptions.RequestException as err: self.logger.error(f'Fetching datafile from {self.datafile_url} failed. 
Error: {err}') return diff --git a/optimizely/event_dispatcher.py b/optimizely/event_dispatcher.py index e2ca54f0..767fbb7d 100644 --- a/optimizely/event_dispatcher.py +++ b/optimizely/event_dispatcher.py @@ -17,6 +17,8 @@ import requests from requests import exceptions as request_exception +from requests.adapters import HTTPAdapter +from urllib3.util.retry import Retry from . import event_builder from .helpers.enums import HTTPVerbs, EventDispatchConfig @@ -44,11 +46,21 @@ def dispatch_event(event: event_builder.Event) -> None: event: Object holding information about the request to be dispatched to the Optimizely backend. """ try: + session = requests.Session() + + retries = Retry(total=EventDispatchConfig.RETRIES, + backoff_factor=0.1, + status_forcelist=[500, 502, 503, 504]) + adapter = HTTPAdapter(max_retries=retries) + + session.mount('http://', adapter) + session.mount("https://", adapter) + if event.http_verb == HTTPVerbs.GET: - requests.get(event.url, params=event.params, - timeout=EventDispatchConfig.REQUEST_TIMEOUT).raise_for_status() + session.get(event.url, params=event.params, + timeout=EventDispatchConfig.REQUEST_TIMEOUT).raise_for_status() elif event.http_verb == HTTPVerbs.POST: - requests.post( + session.post( event.url, data=json.dumps(event.params), headers=event.headers, timeout=EventDispatchConfig.REQUEST_TIMEOUT, ).raise_for_status() diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py index 1c7a8e1c..fe90946e 100644 --- a/optimizely/helpers/enums.py +++ b/optimizely/helpers/enums.py @@ -198,6 +198,7 @@ class VersionType: class EventDispatchConfig: """Event dispatching configs.""" REQUEST_TIMEOUT: Final = 10 + RETRIES: Final = 3 class OdpEventApiConfig: diff --git a/optimizely/helpers/validator.py b/optimizely/helpers/validator.py index 17cff87c..b9e4fcc5 100644 --- a/optimizely/helpers/validator.py +++ b/optimizely/helpers/validator.py @@ -276,8 +276,9 @@ def is_finite_number(value: Any) -> bool: if math.isnan(value) or 
math.isinf(value): return False - if abs(value) > (2 ** 53): - return False + if isinstance(value, (int, float)): + if abs(value) > (2 ** 53): + return False return True diff --git a/tests/test_config_manager.py b/tests/test_config_manager.py index 1c3fbe89..56674381 100644 --- a/tests/test_config_manager.py +++ b/tests/test_config_manager.py @@ -218,7 +218,7 @@ def test_get_config_blocks(self): self.assertEqual(1, round(end_time - start_time)) -@mock.patch('requests.get') +@mock.patch('requests.Session.get') class PollingConfigManagerTest(base.BaseTest): def test_init__no_sdk_key_no_datafile__fails(self, _): """ Test that initialization fails if there is no sdk_key or datafile provided. """ @@ -379,7 +379,7 @@ def test_fetch_datafile(self, _): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response) as mock_request: + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key) project_config_manager.stop() @@ -392,7 +392,7 @@ def test_fetch_datafile(self, _): self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Call fetch_datafile again and assert that request to URL is with If-Modified-Since header. 
- with mock.patch('requests.get', return_value=test_response) as mock_requests: + with mock.patch('requests.Session.get', return_value=test_response) as mock_requests: project_config_manager._initialize_thread() project_config_manager.start() project_config_manager.stop() @@ -421,7 +421,7 @@ def raise_for_status(self): test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response) as mock_request: + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) project_config_manager.stop() @@ -434,7 +434,7 @@ def raise_for_status(self): self.assertIsInstance(project_config_manager.get_config(), project_config.ProjectConfig) # Call fetch_datafile again, but raise exception this time - with mock.patch('requests.get', return_value=MockExceptionResponse()) as mock_requests: + with mock.patch('requests.Session.get', return_value=MockExceptionResponse()) as mock_requests: project_config_manager._initialize_thread() project_config_manager.start() project_config_manager.stop() @@ -462,7 +462,7 @@ def test_fetch_datafile__request_exception_raised(self, _): test_response.status_code = 200 test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response) as mock_request: + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger) project_config_manager.stop() @@ -476,7 +476,7 @@ def test_fetch_datafile__request_exception_raised(self, _): # Call fetch_datafile again, but raise exception this time with mock.patch( - 'requests.get', + 'requests.Session.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: project_config_manager._initialize_thread() @@ 
-506,7 +506,7 @@ def test_fetch_datafile__exception_polling_thread_failed(self, _): test_response.headers = test_headers test_response._content = test_datafile - with mock.patch('requests.get', return_value=test_response): + with mock.patch('requests.Session.get', return_value=test_response): project_config_manager = config_manager.PollingConfigManager(sdk_key=sdk_key, logger=mock_logger, update_interval=12345678912345) @@ -516,8 +516,9 @@ def test_fetch_datafile__exception_polling_thread_failed(self, _): # verify the error log message log_messages = [args[0] for args, _ in mock_logger.error.call_args_list] for message in log_messages: + print(message) if "Thread for background datafile polling failed. " \ - "Error: timestamp too large to convert to C _PyTime_t" not in message: + "Error: timestamp too large to convert to C PyTime_t" not in message: assert False def test_is_running(self, _): @@ -529,7 +530,7 @@ def test_is_running(self, _): project_config_manager.stop() -@mock.patch('requests.get') +@mock.patch('requests.Session.get') class AuthDatafilePollingConfigManagerTest(base.BaseTest): def test_init__datafile_access_token_none__fails(self, _): """ Test that initialization fails if datafile_access_token is None. 
""" @@ -569,7 +570,7 @@ def test_fetch_datafile(self, _): test_response._content = test_datafile # Call fetch_datafile and assert that request was sent with correct authorization header - with mock.patch('requests.get', + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: project_config_manager.fetch_datafile() @@ -596,7 +597,7 @@ def test_fetch_datafile__request_exception_raised(self, _): test_response._content = test_datafile # Call fetch_datafile and assert that request was sent with correct authorization header - with mock.patch('requests.get', return_value=test_response) as mock_request: + with mock.patch('requests.Session.get', return_value=test_response) as mock_request: project_config_manager = config_manager.AuthDatafilePollingConfigManager( datafile_access_token=datafile_access_token, sdk_key=sdk_key, @@ -614,7 +615,7 @@ def test_fetch_datafile__request_exception_raised(self, _): # Call fetch_datafile again, but raise exception this time with mock.patch( - 'requests.get', + 'requests.Session.get', side_effect=requests.exceptions.RequestException('Error Error !!'), ) as mock_requests: project_config_manager._initialize_thread() diff --git a/tests/test_event_dispatcher.py b/tests/test_event_dispatcher.py index 7e075f47..30311e35 100644 --- a/tests/test_event_dispatcher.py +++ b/tests/test_event_dispatcher.py @@ -29,7 +29,7 @@ def test_dispatch_event__get_request(self): params = {'a': '111001', 'n': 'test_event', 'g': '111028', 'u': 'oeutest_user'} event = event_builder.Event(url, params) - with mock.patch('requests.get') as mock_request_get: + with mock.patch('requests.Session.get') as mock_request_get: event_dispatcher.EventDispatcher.dispatch_event(event) mock_request_get.assert_called_once_with(url, params=params, timeout=EventDispatchConfig.REQUEST_TIMEOUT) @@ -46,7 +46,7 @@ def test_dispatch_event__post_request(self): } event = event_builder.Event(url, params, http_verb='POST', headers={'Content-Type': 
'application/json'}) - with mock.patch('requests.post') as mock_request_post: + with mock.patch('requests.Session.post') as mock_request_post: event_dispatcher.EventDispatcher.dispatch_event(event) mock_request_post.assert_called_once_with( @@ -69,7 +69,7 @@ def test_dispatch_event__handle_request_exception(self): event = event_builder.Event(url, params, http_verb='POST', headers={'Content-Type': 'application/json'}) with mock.patch( - 'requests.post', side_effect=request_exception.RequestException('Failed Request'), + 'requests.Session.post', side_effect=request_exception.RequestException('Failed Request'), ) as mock_request_post, mock.patch('logging.error') as mock_log_error: event_dispatcher.EventDispatcher.dispatch_event(event) diff --git a/tests/test_notification_center_registry.py b/tests/test_notification_center_registry.py index 0f800cfd..81984059 100644 --- a/tests/test_notification_center_registry.py +++ b/tests/test_notification_center_registry.py @@ -60,7 +60,7 @@ def test_remove_notification_center(self): test_response = self.fake_server_response(status_code=200, content=test_datafile) notification_center = _NotificationCenterRegistry.get_notification_center(sdk_key, logger) - with mock.patch('requests.get', return_value=test_response), \ + with mock.patch('requests.Session.get', return_value=test_response), \ mock.patch.object(notification_center, 'send_notifications') as mock_send: client = Optimizely(sdk_key=sdk_key, logger=logger) diff --git a/tests/test_optimizely.py b/tests/test_optimizely.py index 8d36b830..1f4293cd 100644 --- a/tests/test_optimizely.py +++ b/tests/test_optimizely.py @@ -4696,7 +4696,7 @@ def delay(*args, **kwargs): time.sleep(.5) return mock.DEFAULT - with mock.patch('requests.get', return_value=test_response, side_effect=delay): + with mock.patch('requests.Session.get', return_value=test_response, side_effect=delay): # initialize config_manager with delay, so it will receive the datafile after client initialization 
custom_config_manager = config_manager.PollingConfigManager(sdk_key='segments-test', logger=logger) client = optimizely.Optimizely(config_manager=custom_config_manager) @@ -5428,7 +5428,7 @@ def test_send_odp_event__send_event_with_static_config_manager(self): def test_send_odp_event__send_event_with_polling_config_manager(self): mock_logger = mock.Mock() with mock.patch( - 'requests.get', + 'requests.Session.get', return_value=self.fake_server_response( status_code=200, content=json.dumps(self.config_dict_with_audience_segments) @@ -5467,7 +5467,7 @@ def test_send_odp_event__log_debug_if_datafile_not_ready(self): def test_send_odp_event__log_error_if_odp_not_enabled_with_polling_config_manager(self): mock_logger = mock.Mock() with mock.patch( - 'requests.get', + 'requests.Session.get', return_value=self.fake_server_response( status_code=200, content=json.dumps(self.config_dict_with_audience_segments) diff --git a/tests/test_optimizely_factory.py b/tests/test_optimizely_factory.py index be41755a..989d960c 100644 --- a/tests/test_optimizely_factory.py +++ b/tests/test_optimizely_factory.py @@ -26,7 +26,7 @@ from . 
import base -@mock.patch('requests.get') +@mock.patch('requests.Session.get') class OptimizelyFactoryTest(base.BaseTest): def delay(*args, **kwargs): time.sleep(.5) @@ -171,7 +171,7 @@ def test_set_batch_size_and_set_flush_interval___should_set_values_valid_or_inva self.assertEqual(optimizely_instance.event_processor.batch_size, 10) def test_update_odp_config_correctly(self, _): - with mock.patch('requests.get') as mock_request_post: + with mock.patch('requests.Session.get') as mock_request_post: mock_request_post.return_value = self.fake_server_response( status_code=200, content=json.dumps(self.config_dict_with_audience_segments) @@ -194,7 +194,7 @@ def test_update_odp_config_correctly_with_custom_config_manager_and_delay(self, test_datafile = json.dumps(self.config_dict_with_audience_segments) test_response = self.fake_server_response(status_code=200, content=test_datafile) - with mock.patch('requests.get', return_value=test_response, side_effect=self.delay): + with mock.patch('requests.Session.get', return_value=test_response, side_effect=self.delay): # initialize config_manager with delay, so it will receive the datafile after client initialization config_manager = PollingConfigManager(sdk_key='test', logger=logger) client = OptimizelyFactory.default_instance_with_config_manager(config_manager=config_manager) @@ -221,7 +221,7 @@ def test_update_odp_config_correctly_with_delay(self, _): test_datafile = json.dumps(self.config_dict_with_audience_segments) test_response = self.fake_server_response(status_code=200, content=test_datafile) - with mock.patch('requests.get', return_value=test_response, side_effect=self.delay): + with mock.patch('requests.Session.get', return_value=test_response, side_effect=self.delay): # initialize config_manager with delay, so it will receive the datafile after client initialization client = OptimizelyFactory.default_instance(sdk_key='test') odp_manager = client.odp_manager @@ -247,7 +247,7 @@ def 
test_odp_updated_with_custom_instance(self, _): test_datafile = json.dumps(self.config_dict_with_audience_segments) test_response = self.fake_server_response(status_code=200, content=test_datafile) - with mock.patch('requests.get', return_value=test_response, side_effect=self.delay): + with mock.patch('requests.Session.get', return_value=test_response, side_effect=self.delay): # initialize config_manager with delay, so it will receive the datafile after client initialization client = OptimizelyFactory.custom_instance(sdk_key='test') odp_manager = client.odp_manager From 55bc00832dd5a14a695c7960b9914f9664a2614c Mon Sep 17 00:00:00 2001 From: Paul V Craven Date: Wed, 26 Feb 2025 12:32:13 -0600 Subject: [PATCH 60/68] Add changelog and update version number (#446) Co-authored-by: Paul V Craven --- CHANGELOG.md | 7 +++++++ optimizely/version.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f3bc3cb..d0cd8b71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Optimizely Python SDK Changelog +## 5.2.0 +February 26, 2025 + +Python threads have been named. + +`PollingConfigManager` now has another optional parameter `retries` that will control how many times the SDK will attempt to get the datafile if the connection fails. Previously, the SDK would only try once. Now it defaults to maximum of three attempts. When sending event data, the SDK will attempt to send event data up to three times, where as before it would only attempt once. + ## 5.1.0 November 27th, 2024 diff --git a/optimizely/version.py b/optimizely/version.py index 941e5e68..4f0f20c6 100644 --- a/optimizely/version.py +++ b/optimizely/version.py @@ -11,5 +11,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-version_info = (5, 1, 0) +version_info = (5, 2, 0) __version__ = '.'.join(str(v) for v in version_info) From 8062f542a17ada93e27ff39e04e849afc6b32502 Mon Sep 17 00:00:00 2001 From: Paul V Craven Date: Thu, 24 Apr 2025 09:32:02 -0500 Subject: [PATCH 61/68] [FSSDK-11362] Fix CSRF security warning (#448) * Fix CSRF security warning * Ignore linting error * Ignore flake8 warning --------- Co-authored-by: Paul V Craven --- tests/testapp/application.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/testapp/application.py b/tests/testapp/application.py index 7b2a81ee..116efc66 100644 --- a/tests/testapp/application.py +++ b/tests/testapp/application.py @@ -16,15 +16,15 @@ import types from os import environ -from flask import Flask -from flask import request - import user_profile_service -from optimizely import logger -from optimizely import optimizely +from flask import CSRFProtect, Flask, request + +from optimizely import logger, optimizely from optimizely.helpers import enums app = Flask(__name__) +# Initialize CSRF protection +csrf = CSRFProtect(app) datafile = open('datafile.json', 'r') datafile_content = datafile.read() @@ -118,7 +118,7 @@ def before_request(): @app.after_request def after_request(response): - global optimizely_instance + global optimizely_instance # noqa: F824 global listener_return_maps optimizely_instance.notification_center.clear_all_notifications() From f8da2618c604d32bf0c7c4340139a371bed78171 Mon Sep 17 00:00:00 2001 From: Paul V Craven Date: Fri, 25 Apr 2025 14:43:09 -0500 Subject: [PATCH 62/68] Import CSRFProtect from a better spot so prisma picks it up (#450) Co-authored-by: Paul V Craven --- tests/testapp/application.py | 3 ++- tests/testapp/requirements.txt | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/testapp/application.py b/tests/testapp/application.py index 116efc66..af5f5b33 100644 --- a/tests/testapp/application.py +++ b/tests/testapp/application.py @@ -17,7 +17,8 
@@ from os import environ import user_profile_service -from flask import CSRFProtect, Flask, request +from flask import Flask, request +from flask_wtf.csrf import CSRFProtect from optimizely import logger, optimizely from optimizely.helpers import enums diff --git a/tests/testapp/requirements.txt b/tests/testapp/requirements.txt index 4b70123b..dae26c1f 100644 --- a/tests/testapp/requirements.txt +++ b/tests/testapp/requirements.txt @@ -1 +1,2 @@ -Flask==2.2.5 +Flask==3.1.0 +flask-wtf==1.2.2 \ No newline at end of file From 5f719225cbd79d67d34655f28c18a80cadeb5e2a Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Mon, 5 May 2025 21:20:46 +0600 Subject: [PATCH 63/68] [FSSDK-11139] update: enable project config to track CMAB properties (#451) * Add CmabDict type and update Experiment class to include cmab field * Refactor ProjectConfig to add attribute ID to key mapping and implement retrieval methods; update test for cmab field population --- optimizely/entities.py | 4 +++- optimizely/helpers/types.py | 6 ++++++ optimizely/project_config.py | 32 +++++++++++++++++++++++++++++++- tests/test_config.py | 17 +++++++++++++++++ 4 files changed, 57 insertions(+), 2 deletions(-) diff --git a/optimizely/entities.py b/optimizely/entities.py index fed1a49a..7d257656 100644 --- a/optimizely/entities.py +++ b/optimizely/entities.py @@ -22,7 +22,7 @@ if TYPE_CHECKING: # prevent circular dependenacy by skipping import at runtime - from .helpers.types import ExperimentDict, TrafficAllocation, VariableDict, VariationDict + from .helpers.types import ExperimentDict, TrafficAllocation, VariableDict, VariationDict, CmabDict class BaseEntity: @@ -84,6 +84,7 @@ def __init__( audienceConditions: Optional[Sequence[str | list[str]]] = None, groupId: Optional[str] = None, groupPolicy: Optional[str] = None, + cmab: Optional[CmabDict] = None, **kwargs: Any ): self.id = id @@ -97,6 +98,7 @@ def __init__( self.layerId = layerId self.groupId = groupId self.groupPolicy = groupPolicy + self.cmab = 
cmab def get_audience_conditions_or_ids(self) -> Sequence[str | list[str]]: """ Returns audienceConditions if present, otherwise audienceIds. """ diff --git a/optimizely/helpers/types.py b/optimizely/helpers/types.py index a28aca67..3cca45de 100644 --- a/optimizely/helpers/types.py +++ b/optimizely/helpers/types.py @@ -109,3 +109,9 @@ class IntegrationDict(BaseEntity): key: str host: str publicKey: str + + +class CmabDict(BaseEntity): + """Cmab dict from parsed datafile json.""" + attributeIds: list[str] + trafficAllocation: int diff --git a/optimizely/project_config.py b/optimizely/project_config.py index adfeee41..f2b1467b 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -94,7 +94,9 @@ def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): self.attribute_key_map: dict[str, entities.Attribute] = self._generate_key_map( self.attributes, 'key', entities.Attribute ) - + self.attribute_id_to_key_map: dict[str, str] = {} + for attribute in self.attributes: + self.attribute_id_to_key_map[attribute['id']] = attribute['key'] self.audience_id_map: dict[str, entities.Audience] = self._generate_key_map( self.audiences, 'id', entities.Audience ) @@ -510,6 +512,34 @@ def get_attribute_id(self, attribute_key: str) -> Optional[str]: self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE)) return None + def get_attribute_by_key(self, key: str) -> Optional[entities.Attribute]: + """ Get attribute for the provided attribute key. + + Args: + key: Attribute key for which attribute is to be fetched. + + Returns: + Attribute corresponding to the provided attribute key. + """ + if key in self.attribute_key_map: + return self.attribute_key_map[key] + self.logger.error(f'Attribute with key:"{key}" is not in datafile.') + return None + + def get_attribute_key_by_id(self, id: str) -> Optional[str]: + """ Get attribute key for the provided attribute id. 
+ + Args: + id: Attribute id for which attribute is to be fetched. + + Returns: + Attribute key corresponding to the provided attribute id. + """ + if id in self.attribute_id_to_key_map: + return self.attribute_id_to_key_map[id] + self.logger.error(f'Attribute with id:"{id}" is not in datafile.') + return None + def get_feature_from_key(self, feature_key: str) -> Optional[entities.FeatureFlag]: """ Get feature for the provided feature key. diff --git a/tests/test_config.py b/tests/test_config.py index 9a16035d..9ec5c761 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -154,6 +154,23 @@ def test_init(self): self.assertEqual(expected_variation_key_map, self.project_config.variation_key_map) self.assertEqual(expected_variation_id_map, self.project_config.variation_id_map) + def test_cmab_field_population(self): + """ Test that the cmab field is populated correctly in experiments.""" + + # Deep copy existing datafile and add cmab config to the first experiment + config_dict = copy.deepcopy(self.config_dict_with_multiple_experiments) + config_dict['experiments'][0]['cmab'] = {'attributeIds': ['808797688', '808797689'], 'trafficAllocation': 4000} + config_dict['experiments'][0]['trafficAllocation'] = [] + + opt_obj = optimizely.Optimizely(json.dumps(config_dict)) + project_config = opt_obj.config_manager.get_config() + + experiment = project_config.get_experiment_from_key('test_experiment') + self.assertEqual(experiment.cmab, {'attributeIds': ['808797688', '808797689'], 'trafficAllocation': 4000}) + + experiment_2 = project_config.get_experiment_from_key('test_experiment_2') + self.assertIsNone(experiment_2.cmab) + def test_init__with_v4_datafile(self): """ Test that on creating object, properties are initiated correctly for version 4 datafile. 
""" From fd0930c9edaae8bb58a8cf1f04448ce3564a4bd2 Mon Sep 17 00:00:00 2001 From: Paul V Craven Date: Wed, 7 May 2025 08:20:36 -0500 Subject: [PATCH 64/68] Try 3 to fix csrf scan issue (#452) Co-authored-by: Paul V Craven --- tests/testapp/application.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/testapp/application.py b/tests/testapp/application.py index af5f5b33..5848cfd1 100644 --- a/tests/testapp/application.py +++ b/tests/testapp/application.py @@ -23,10 +23,14 @@ from optimizely import logger, optimizely from optimizely.helpers import enums +# Create the flask app app = Flask(__name__) -# Initialize CSRF protection + +# Set up CSRF protection +app.config["SECRET_KEY"] = environ.get("CSRF_SECRET_KEY", "default_csrf_secret_key") csrf = CSRFProtect(app) +# Read in the datafile datafile = open('datafile.json', 'r') datafile_content = datafile.read() datafile.close() From 47e7b4f47ece216fed2bceb2db310bba3b960a8a Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Fri, 16 May 2025 20:39:08 +0600 Subject: [PATCH 65/68] [FSSDK-11017] update: experiment_id and variation_id added to payloads (#447) * experiment_id and variation_id added to payloads * optimizely/optimizely.py -> Removed experiment_id and variation_id from legacy apis. optimizely/project_config.py -> Enhanced comments for clarity. tests/test_user_context.py -> Updated test assertions for experiments. 
* .flake8 -> redundant checks being performed in tests/testapp/application.py so added it to exclusions * reverting to previous code * change in logic to get experiment_id by key or rollout_id * update project_config.py * fetching experiment_id and variation_id from flag_decision * -updated experiment_id and variation_id fetching logic -removed redundant function from project_config.py * chore: trigger workflow --- .flake8 | 2 +- optimizely/optimizely.py | 20 ++++++++++- tests/test_user_context.py | 70 +++++++++++++++++++++++++------------- 3 files changed, 67 insertions(+), 25 deletions(-) diff --git a/.flake8 b/.flake8 index f5990a83..0fc0cadc 100644 --- a/.flake8 +++ b/.flake8 @@ -4,5 +4,5 @@ # Line break before operand needs to be ignored for line lengths # greater than max-line-length. Best practice shows W504 ignore = E722, W504 -exclude = optimizely/lib/pymmh3.py,*virtualenv* +exclude = optimizely/lib/pymmh3.py,*virtualenv*,tests/testapp/application.py max-line-length = 120 diff --git a/optimizely/optimizely.py b/optimizely/optimizely.py index 1b25bec6..af442224 100644 --- a/optimizely/optimizely.py +++ b/optimizely/optimizely.py @@ -1202,6 +1202,22 @@ def _create_optimizely_decision( if flag_decision is not None and flag_decision.variation is not None else None ) + + experiment_id = None + variation_id = None + + try: + if flag_decision.experiment is not None: + experiment_id = flag_decision.experiment.id + except AttributeError: + self.logger.warning("flag_decision.experiment has no attribute 'id'") + + try: + if flag_decision.variation is not None: + variation_id = flag_decision.variation.id + except AttributeError: + self.logger.warning("flag_decision.variation has no attribute 'id'") + # Send notification self.notification_center.send_notifications( enums.NotificationTypes.DECISION, @@ -1215,7 +1231,9 @@ def _create_optimizely_decision( 'variation_key': variation_key, 'rule_key': rule_key, 'reasons': decision_reasons if should_include_reasons else [], - 
'decision_event_dispatched': decision_event_dispatched + 'decision_event_dispatched': decision_event_dispatched, + 'experiment_id': experiment_id, + 'variation_id': variation_id }, ) diff --git a/tests/test_user_context.py b/tests/test_user_context.py index 0c35e230..6705e414 100644 --- a/tests/test_user_context.py +++ b/tests/test_user_context.py @@ -283,6 +283,8 @@ def test_decide__feature_test(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id }, ) @@ -391,6 +393,24 @@ def test_decide_feature_rollout(self): self.compare_opt_decisions(expected, actual) + # assert event count + self.assertEqual(1, mock_send_event.call_count) + + # assert event payload + expected_experiment = project_config.get_experiment_from_key(expected.rule_key) + expected_var = project_config.get_variation_from_key(expected.rule_key, expected.variation_key) + mock_send_event.assert_called_with( + project_config, + expected_experiment, + expected_var, + expected.flag_key, + expected.rule_key, + 'rollout', + expected.enabled, + 'test_user', + user_attributes + ) + # assert notification count self.assertEqual(1, mock_broadcast_decision.call_count) @@ -408,27 +428,11 @@ def test_decide_feature_rollout(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id }, ) - # assert event count - self.assertEqual(1, mock_send_event.call_count) - - # assert event payload - expected_experiment = project_config.get_experiment_from_key(expected.rule_key) - expected_var = project_config.get_variation_from_key(expected.rule_key, expected.variation_key) - mock_send_event.assert_called_with( - project_config, - expected_experiment, - expected_var, - expected.flag_key, - expected.rule_key, - 'rollout', - expected.enabled, - 'test_user', - user_attributes - ) - def 
test_decide_feature_rollout__send_flag_decision_false(self): opt_obj = optimizely.Optimizely(json.dumps(self.config_dict_with_features)) project_config = opt_obj.config_manager.get_config() @@ -467,6 +471,8 @@ def test_decide_feature_rollout__send_flag_decision_false(self): self.assertEqual(1, mock_broadcast_decision.call_count) # assert notification + expected_experiment = project_config.get_experiment_from_key(expected.rule_key) + expected_var = project_config.get_variation_from_key(expected.rule_key, expected.variation_key) mock_broadcast_decision.assert_called_with( enums.NotificationTypes.DECISION, 'flag', @@ -480,6 +486,8 @@ def test_decide_feature_rollout__send_flag_decision_false(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id }, ) @@ -549,7 +557,9 @@ def test_decide_feature_null_variation(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, - }, + 'experiment_id': None, + 'variation_id': None + } ) # assert event count @@ -632,6 +642,8 @@ def test_decide_feature_null_variation__send_flag_decision_false(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': None, + 'variation_id': None }, ) @@ -701,6 +713,8 @@ def test_decide__option__disable_decision_event(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id, }, ) @@ -773,6 +787,8 @@ def test_decide__default_option__disable_decision_event(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id }, ) @@ -834,6 +850,8 @@ def test_decide__option__exclude_variables(self): 'reasons': expected.reasons, 
'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id, }, ) @@ -948,6 +966,8 @@ def test_decide__option__enabled_flags_only(self): 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id, }, ) @@ -1006,7 +1026,7 @@ def test_decide__default_options__with__options(self): enabled=True, variables=expected_variables, flag_key='test_feature_in_experiment', - user_context=user_context + user_context=user_context, ) self.compare_opt_decisions(expected, actual) @@ -1025,6 +1045,8 @@ def test_decide__default_options__with__options(self): 'reasons': expected.reasons, 'decision_event_dispatched': False, 'variables': expected.variables, + 'experiment_id': mock_experiment.id, + 'variation_id': mock_variation.id }, ) @@ -1490,6 +1512,9 @@ def test_should_return_valid_decision_after_setting_and_removing_forced_decision 'User "test_user" is in variation "control" of experiment test_experiment.'] ) + expected_experiment = project_config.get_experiment_from_key(expected.rule_key) + expected_var = project_config.get_variation_from_key('test_experiment', expected.variation_key) + # assert notification count self.assertEqual(1, mock_broadcast_decision.call_count) @@ -1507,12 +1532,11 @@ def test_should_return_valid_decision_after_setting_and_removing_forced_decision 'reasons': expected.reasons, 'decision_event_dispatched': True, 'variables': expected.variables, + 'experiment_id': expected_experiment.id, + 'variation_id': expected_var.id }, ) - expected_experiment = project_config.get_experiment_from_key(expected.rule_key) - expected_var = project_config.get_variation_from_key('test_experiment', expected.variation_key) - mock_send_event.assert_called_with( project_config, expected_experiment, From 72048b697d5254cacab02fd4ca742d52ec8ed292 Mon Sep 17 00:00:00 2001 From: Farhan Anjum 
Date: Tue, 20 May 2025 20:19:14 +0600 Subject: [PATCH 66/68] [FSSDK-11157] update: added remove method in LRU Cache for CMAB service (#454) * Add remove method and tests in LRUCache for cmab service * refactor: simplify remove method in LRUCache and update related tests * refactor: remove redundant assertion in test_remove_existing_key --- optimizely/odp/lru_cache.py | 5 +++ tests/test_lru_cache.py | 76 +++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/optimizely/odp/lru_cache.py b/optimizely/odp/lru_cache.py index e7fc32af..073973e6 100644 --- a/optimizely/odp/lru_cache.py +++ b/optimizely/odp/lru_cache.py @@ -91,6 +91,11 @@ def peek(self, key: K) -> Optional[V]: element = self.map.get(key) return element.value if element is not None else None + def remove(self, key: K) -> None: + """Remove the element associated with the provided key from the cache.""" + with self.lock: + self.map.pop(key, None) + @dataclass class CacheElement(Generic[V]): diff --git a/tests/test_lru_cache.py b/tests/test_lru_cache.py index cc4dfdb1..b30617b3 100644 --- a/tests/test_lru_cache.py +++ b/tests/test_lru_cache.py @@ -130,6 +130,82 @@ def test_reset(self): cache.save('cow', 'crate') self.assertEqual(cache.lookup('cow'), 'crate') + def test_remove_non_existent_key(self): + cache = LRUCache(3, 1000) + cache.save("1", 100) + cache.save("2", 200) + + cache.remove("3") # Doesn't exist + + self.assertEqual(cache.lookup("1"), 100) + self.assertEqual(cache.lookup("2"), 200) + + def test_remove_existing_key(self): + cache = LRUCache(3, 1000) + + cache.save("1", 100) + cache.save("2", 200) + cache.save("3", 300) + + self.assertEqual(cache.lookup("1"), 100) + self.assertEqual(cache.lookup("2"), 200) + self.assertEqual(cache.lookup("3"), 300) + + cache.remove("2") + + self.assertEqual(cache.lookup("1"), 100) + self.assertIsNone(cache.lookup("2")) + self.assertEqual(cache.lookup("3"), 300) + + def test_remove_from_zero_sized_cache(self): + cache = LRUCache(0, 
1000) + cache.save("1", 100) + cache.remove("1") + + self.assertIsNone(cache.lookup("1")) + + def test_remove_and_add_back(self): + cache = LRUCache(3, 1000) + cache.save("1", 100) + cache.save("2", 200) + cache.save("3", 300) + + cache.remove("2") + cache.save("2", 201) + + self.assertEqual(cache.lookup("1"), 100) + self.assertEqual(cache.lookup("2"), 201) + self.assertEqual(cache.lookup("3"), 300) + + def test_thread_safety(self): + import threading + + max_size = 100 + cache = LRUCache(max_size, 1000) + + for i in range(1, max_size + 1): + cache.save(str(i), i * 100) + + def remove_key(k): + cache.remove(str(k)) + + threads = [] + for i in range(1, (max_size // 2) + 1): + thread = threading.Thread(target=remove_key, args=(i,)) + threads.append(thread) + thread.start() + + for thread in threads: + thread.join() + + for i in range(1, max_size + 1): + if i <= max_size // 2: + self.assertIsNone(cache.lookup(str(i))) + else: + self.assertEqual(cache.lookup(str(i)), i * 100) + + self.assertEqual(len(cache.map), max_size // 2) + # type checker test # confirm that LRUCache matches OptimizelySegmentsCache protocol _: OptimizelySegmentsCache = LRUCache(0, 0) From 046d457efce00c8478e09ba022dd83d9924ff253 Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Fri, 23 May 2025 08:35:24 +0600 Subject: [PATCH 67/68] [FSSDK-11148] update: Implement CMAB Client (#453) * Implement CMAB client with retry logic for fetching predictions * Enhance CMAB client error handling and logging; add unit tests for fetch methods * Refactor CMAB client: enhance docstrings for classes and methods, improve formatting, and clean up imports * Add custom exceptions for CMAB client errors and enhance error handling in fetch methods * Update fetch_decision method to set default timeout value to 10 seconds * replace constant endpoint with formatted string in fetch_decision method * chore: trigger CI * refactor: streamline fetch_decision method and enhance test cases for improved clarity and functionality 
--- optimizely/cmab/cmab_client.py | 193 +++++++++++++++++++++++++++ optimizely/exceptions.py | 18 +++ optimizely/helpers/enums.py | 2 + tests/test_cmab_client.py | 235 +++++++++++++++++++++++++++++++++ 4 files changed, 448 insertions(+) create mode 100644 optimizely/cmab/cmab_client.py create mode 100644 tests/test_cmab_client.py diff --git a/optimizely/cmab/cmab_client.py b/optimizely/cmab/cmab_client.py new file mode 100644 index 00000000..dfcffa78 --- /dev/null +++ b/optimizely/cmab/cmab_client.py @@ -0,0 +1,193 @@ +# Copyright 2025 Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import time +import requests +import math +from typing import Dict, Any, Optional +from optimizely import logger as _logging +from optimizely.helpers.enums import Errors +from optimizely.exceptions import CmabFetchError, CmabInvalidResponseError + +# Default constants for CMAB requests +DEFAULT_MAX_RETRIES = 3 +DEFAULT_INITIAL_BACKOFF = 0.1 # in seconds (100 ms) +DEFAULT_MAX_BACKOFF = 10 # in seconds +DEFAULT_BACKOFF_MULTIPLIER = 2.0 +MAX_WAIT_TIME = 10.0 + + +class CmabRetryConfig: + """Configuration for retrying CMAB requests. + + Contains parameters for maximum retries, backoff intervals, and multipliers. 
+ """ + def __init__( + self, + max_retries: int = DEFAULT_MAX_RETRIES, + initial_backoff: float = DEFAULT_INITIAL_BACKOFF, + max_backoff: float = DEFAULT_MAX_BACKOFF, + backoff_multiplier: float = DEFAULT_BACKOFF_MULTIPLIER, + ): + self.max_retries = max_retries + self.initial_backoff = initial_backoff + self.max_backoff = max_backoff + self.backoff_multiplier = backoff_multiplier + + +class DefaultCmabClient: + """Client for interacting with the CMAB service. + + Provides methods to fetch decisions with optional retry logic. + """ + def __init__(self, http_client: Optional[requests.Session] = None, + retry_config: Optional[CmabRetryConfig] = None, + logger: Optional[_logging.Logger] = None): + """Initialize the CMAB client. + + Args: + http_client (Optional[requests.Session]): HTTP client for making requests. + retry_config (Optional[CmabRetryConfig]): Configuration for retry logic. + logger (Optional[_logging.Logger]): Logger for logging messages. + """ + self.http_client = http_client or requests.Session() + self.retry_config = retry_config + self.logger = _logging.adapt_logger(logger or _logging.NoOpLogger()) + + def fetch_decision( + self, + rule_id: str, + user_id: str, + attributes: Dict[str, Any], + cmab_uuid: str, + timeout: float = MAX_WAIT_TIME + ) -> str: + """Fetch a decision from the CMAB prediction service. + + Args: + rule_id (str): The rule ID for the experiment. + user_id (str): The user ID for the request. + attributes (Dict[str, Any]): User attributes for the request. + cmab_uuid (str): Unique identifier for the CMAB request. + timeout (float): Maximum wait time for request to respond in seconds. Defaults to 10 seconds. + + Returns: + str: The variation ID. 
+ """ + url = f"https://prediction.cmab.optimizely.com/predict/{rule_id}" + cmab_attributes = [ + {"id": key, "value": value, "type": "custom_attribute"} + for key, value in attributes.items() + ] + + request_body = { + "instances": [{ + "visitorId": user_id, + "experimentId": rule_id, + "attributes": cmab_attributes, + "cmabUUID": cmab_uuid, + }] + } + if self.retry_config: + variation_id = self._do_fetch_with_retry(url, request_body, self.retry_config, timeout) + else: + variation_id = self._do_fetch(url, request_body, timeout) + return variation_id + + def _do_fetch(self, url: str, request_body: Dict[str, Any], timeout: float) -> str: + """Perform a single fetch request to the CMAB prediction service. + + Args: + url (str): The endpoint URL. + request_body (Dict[str, Any]): The request payload. + timeout (float): Maximum wait time for request to respond in seconds. + Returns: + str: The variation ID + """ + headers = {'Content-Type': 'application/json'} + try: + response = self.http_client.post(url, data=json.dumps(request_body), headers=headers, timeout=timeout) + except requests.exceptions.RequestException as e: + error_message = Errors.CMAB_FETCH_FAILED.format(str(e)) + self.logger.error(error_message) + raise CmabFetchError(error_message) + + if not 200 <= response.status_code < 300: + error_message = Errors.CMAB_FETCH_FAILED.format(str(response.status_code)) + self.logger.error(error_message) + raise CmabFetchError(error_message) + + try: + body = response.json() + except json.JSONDecodeError: + error_message = Errors.INVALID_CMAB_FETCH_RESPONSE + self.logger.error(error_message) + raise CmabInvalidResponseError(error_message) + + if not self.validate_response(body): + error_message = Errors.INVALID_CMAB_FETCH_RESPONSE + self.logger.error(error_message) + raise CmabInvalidResponseError(error_message) + + return str(body['predictions'][0]['variation_id']) + + def validate_response(self, body: Dict[str, Any]) -> bool: + """Validate the response structure from 
the CMAB service.
+
+        Args:
+            body (Dict[str, Any]): The response body to validate.
+
+        Returns:
+            bool: True if the response is valid, False otherwise.
+        """
+        return (
+            isinstance(body, dict) and
+            'predictions' in body and
+            isinstance(body['predictions'], list) and
+            len(body['predictions']) > 0 and
+            isinstance(body['predictions'][0], dict) and
+            "variation_id" in body["predictions"][0]
+        )
+
+    def _do_fetch_with_retry(
+        self,
+        url: str,
+        request_body: Dict[str, Any],
+        retry_config: CmabRetryConfig,
+        timeout: float
+    ) -> str:
+        """Perform a fetch request with retry logic.
+
+        Args:
+            url (str): The endpoint URL.
+            request_body (Dict[str, Any]): The request payload.
+            retry_config (CmabRetryConfig): Configuration for retry logic.
+            timeout (float): Maximum wait time for request to respond in seconds.
+        Returns:
+            str: The variation ID
+        """
+        backoff = retry_config.initial_backoff
+        for attempt in range(retry_config.max_retries + 1):
+            try:
+                variation_id = self._do_fetch(url, request_body, timeout)
+                return variation_id
+            except Exception:
+                if attempt < retry_config.max_retries:
+                    self.logger.info(f"Retrying CMAB request (attempt: {attempt + 1}) after {backoff} seconds...")
+                    time.sleep(backoff)
+                    backoff = min(backoff * math.pow(retry_config.backoff_multiplier, attempt + 1),
+                                  retry_config.max_backoff)
+
+        error_message = Errors.CMAB_FETCH_FAILED.format('Exhausted all retries for CMAB request.')
+        self.logger.error(error_message)
+        raise CmabFetchError(error_message)
diff --git a/optimizely/exceptions.py b/optimizely/exceptions.py
index e7644064..b17b1397 100644
--- a/optimizely/exceptions.py
+++ b/optimizely/exceptions.py
@@ -82,3 +82,21 @@ class OdpInvalidData(Exception):
     """ Raised when passing invalid ODP data.
    
"""
     pass
+
+
+class CmabError(Exception):
+    """Base exception for CMAB client errors."""
+
+    pass
+
+
+class CmabFetchError(CmabError):
+    """Exception raised when CMAB fetch fails."""
+
+    pass
+
+
+class CmabInvalidResponseError(CmabError):
+    """Exception raised when CMAB response is invalid."""
+
+    pass
diff --git a/optimizely/helpers/enums.py b/optimizely/helpers/enums.py
index fe90946e..2d6febab 100644
--- a/optimizely/helpers/enums.py
+++ b/optimizely/helpers/enums.py
@@ -127,6 +127,8 @@ class Errors:
     ODP_INVALID_DATA: Final = 'ODP data is not valid.'
     ODP_INVALID_ACTION: Final = 'ODP action is not valid (cannot be empty).'
     MISSING_SDK_KEY: Final = 'SDK key not provided/cannot be found in the datafile.'
+    CMAB_FETCH_FAILED: Final = 'CMAB decision fetch failed with status: {}'
+    INVALID_CMAB_FETCH_RESPONSE: Final = 'Invalid CMAB fetch response'


 class ForcedDecisionLogs:
diff --git a/tests/test_cmab_client.py b/tests/test_cmab_client.py
new file mode 100644
index 00000000..0e15b3f4
--- /dev/null
+++ b/tests/test_cmab_client.py
@@ -0,0 +1,235 @@
+import unittest
+import json
+from unittest.mock import MagicMock, patch, call
+from optimizely.cmab.cmab_client import DefaultCmabClient, CmabRetryConfig
+from requests.exceptions import RequestException
+from optimizely.helpers.enums import Errors
+from optimizely.exceptions import CmabFetchError, CmabInvalidResponseError
+
+
+class TestDefaultCmabClient(unittest.TestCase):
+    def setUp(self):
+        self.mock_http_client = MagicMock()
+        self.mock_logger = MagicMock()
+        self.retry_config = CmabRetryConfig(max_retries=3, initial_backoff=0.01, max_backoff=1, backoff_multiplier=2)
+        self.client = DefaultCmabClient(
+            http_client=self.mock_http_client,
+            logger=self.mock_logger,
+            retry_config=None
+        )
+        self.rule_id = 'test_rule'
+        self.user_id = 'user123'
+        self.attributes = {'attr1': 'value1', 'attr2': 'value2'}
+        self.cmab_uuid = 'uuid-1234'
+        self.expected_url = f"https://prediction.cmab.optimizely.com/predict/{self.rule_id}"
+        
self.expected_body = { + "instances": [{ + "visitorId": self.user_id, + "experimentId": self.rule_id, + "attributes": [ + {"id": "attr1", "value": "value1", "type": "custom_attribute"}, + {"id": "attr2", "value": "value2", "type": "custom_attribute"} + ], + "cmabUUID": self.cmab_uuid, + }] + } + self.expected_headers = {'Content-Type': 'application/json'} + + def test_fetch_decision_returns_success_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + 'predictions': [{'variation_id': 'abc123'}] + } + self.mock_http_client.post.return_value = mock_response + result = self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + self.assertEqual(result, 'abc123') + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + + def test_fetch_decision_returns_http_exception_no_retry(self): + self.mock_http_client.post.side_effect = RequestException('Connection error') + + with self.assertRaises(CmabFetchError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.mock_http_client.post.assert_called_once() + self.mock_logger.error.assert_called_with(Errors.CMAB_FETCH_FAILED.format('Connection error')) + self.assertIn('Connection error', str(context.exception)) + + def test_fetch_decision_returns_non_2xx_status_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 500 + self.mock_http_client.post.return_value = mock_response + + with self.assertRaises(CmabFetchError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + 
self.mock_logger.error.assert_called_with(Errors.CMAB_FETCH_FAILED.format(str(mock_response.status_code))) + self.assertIn(str(mock_response.status_code), str(context.exception)) + + def test_fetch_decision_returns_invalid_json_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.side_effect = json.JSONDecodeError("Expecting value", "", 0) + self.mock_http_client.post.return_value = mock_response + + with self.assertRaises(CmabInvalidResponseError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + self.mock_logger.error.assert_called_with(Errors.INVALID_CMAB_FETCH_RESPONSE) + self.assertIn(Errors.INVALID_CMAB_FETCH_RESPONSE, str(context.exception)) + + def test_fetch_decision_returns_invalid_response_structure_no_retry(self): + mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = {'no_predictions': []} + self.mock_http_client.post.return_value = mock_response + + with self.assertRaises(CmabInvalidResponseError) as context: + self.client.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + self.mock_logger.error.assert_called_with(Errors.INVALID_CMAB_FETCH_RESPONSE) + self.assertIn(Errors.INVALID_CMAB_FETCH_RESPONSE, str(context.exception)) + + @patch('time.sleep', return_value=None) + def test_fetch_decision_returns_success_with_retry_on_first_try(self, mock_sleep): + # Create client with retry + client_with_retry = DefaultCmabClient( + http_client=self.mock_http_client, + logger=self.mock_logger, + retry_config=self.retry_config + ) + + # Mock successful response + 
mock_response = MagicMock() + mock_response.status_code = 200 + mock_response.json.return_value = { + 'predictions': [{'variation_id': 'abc123'}] + } + self.mock_http_client.post.return_value = mock_response + + result = client_with_retry.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + # Verify result and request parameters + self.assertEqual(result, 'abc123') + self.mock_http_client.post.assert_called_once_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + self.assertEqual(self.mock_http_client.post.call_count, 1) + mock_sleep.assert_not_called() + + @patch('time.sleep', return_value=None) + def test_fetch_decision_returns_success_with_retry_on_third_try(self, mock_sleep): + client_with_retry = DefaultCmabClient( + http_client=self.mock_http_client, + logger=self.mock_logger, + retry_config=self.retry_config + ) + + # Create failure and success responses + failure_response = MagicMock() + failure_response.status_code = 500 + + success_response = MagicMock() + success_response.status_code = 200 + success_response.json.return_value = { + 'predictions': [{'variation_id': 'xyz456'}] + } + + # First two calls fail, third succeeds + self.mock_http_client.post.side_effect = [ + failure_response, + failure_response, + success_response + ] + + result = client_with_retry.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + self.assertEqual(result, 'xyz456') + self.assertEqual(self.mock_http_client.post.call_count, 3) + + # Verify all HTTP calls used correct parameters + self.mock_http_client.post.assert_called_with( + self.expected_url, + data=json.dumps(self.expected_body), + headers=self.expected_headers, + timeout=10.0 + ) + + # Verify retry logging + self.mock_logger.info.assert_has_calls([ + call("Retrying CMAB request (attempt: 1) after 0.01 seconds..."), + call("Retrying CMAB request (attempt: 2) after 0.02 seconds...") + ]) + + # Verify 
sleep was called with correct backoff times + mock_sleep.assert_has_calls([ + call(0.01), + call(0.02) + ]) + + @patch('time.sleep', return_value=None) + def test_fetch_decision_exhausts_all_retry_attempts(self, mock_sleep): + client_with_retry = DefaultCmabClient( + http_client=self.mock_http_client, + logger=self.mock_logger, + retry_config=self.retry_config + ) + + # Create failure response + failure_response = MagicMock() + failure_response.status_code = 500 + + # All attempts fail + self.mock_http_client.post.return_value = failure_response + + with self.assertRaises(CmabFetchError): + client_with_retry.fetch_decision(self.rule_id, self.user_id, self.attributes, self.cmab_uuid) + + # Verify all attempts were made (1 initial + 3 retries) + self.assertEqual(self.mock_http_client.post.call_count, 4) + + # Verify retry logging + self.mock_logger.info.assert_has_calls([ + call("Retrying CMAB request (attempt: 1) after 0.01 seconds..."), + call("Retrying CMAB request (attempt: 2) after 0.02 seconds..."), + call("Retrying CMAB request (attempt: 3) after 0.08 seconds...") + ]) + + # Verify sleep was called for each retry + mock_sleep.assert_has_calls([ + call(0.01), + call(0.02), + call(0.08) + ]) + + # Verify final error + self.mock_logger.error.assert_called_with( + Errors.CMAB_FETCH_FAILED.format('Exhausted all retries for CMAB request.') + ) From 82ec019c10898bc497d5668ba1b0ad4eb05dd768 Mon Sep 17 00:00:00 2001 From: Farhan Anjum Date: Tue, 27 May 2025 22:03:49 +0600 Subject: [PATCH 68/68] [FSSDK-11166] update: implement CMAB service (#455) * update: Implement DefaultCmabService * update: Add tests for DefaultCmabService * update: Fix formatting in DefaultCmabService and test cases * update: Fix key mapping in ProjectConfig to use 'id' instead of empty string * update: Refactor cache decision logic and enhance test cases for DefaultCmabService * update: Refactor attribute handling in get_decision and add test for CMAB attribute filtering --- 
optimizely/cmab/cmab_service.py | 106 ++++++++++ .../decision/optimizely_decide_option.py | 3 + optimizely/project_config.py | 3 + tests/test_cmab_client.py | 12 ++ tests/test_cmab_service.py | 187 ++++++++++++++++++ 5 files changed, 311 insertions(+) create mode 100644 optimizely/cmab/cmab_service.py create mode 100644 tests/test_cmab_service.py diff --git a/optimizely/cmab/cmab_service.py b/optimizely/cmab/cmab_service.py new file mode 100644 index 00000000..418280b8 --- /dev/null +++ b/optimizely/cmab/cmab_service.py @@ -0,0 +1,106 @@ +# Copyright 2025 Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import uuid +import json +import hashlib + +from typing import Optional, List, TypedDict +from optimizely.cmab.cmab_client import DefaultCmabClient +from optimizely.odp.lru_cache import LRUCache +from optimizely.optimizely_user_context import OptimizelyUserContext, UserAttributes +from optimizely.project_config import ProjectConfig +from optimizely.decision.optimizely_decide_option import OptimizelyDecideOption +from optimizely import logger as _logging + + +class CmabDecision(TypedDict): + variation_id: str + cmab_uuid: str + + +class CmabCacheValue(TypedDict): + attributes_hash: str + variation_id: str + cmab_uuid: str + + +class DefaultCmabService: + def __init__(self, cmab_cache: LRUCache[str, CmabCacheValue], + cmab_client: DefaultCmabClient, logger: Optional[_logging.Logger] = None): + self.cmab_cache = cmab_cache + self.cmab_client = cmab_client + self.logger = logger + + def get_decision(self, project_config: ProjectConfig, user_context: OptimizelyUserContext, + rule_id: str, options: List[str]) -> CmabDecision: + + filtered_attributes = self._filter_attributes(project_config, user_context, rule_id) + + if OptimizelyDecideOption.IGNORE_CMAB_CACHE in options: + return self._fetch_decision(rule_id, user_context.user_id, filtered_attributes) + + if OptimizelyDecideOption.RESET_CMAB_CACHE in options: + self.cmab_cache.reset() + + cache_key = self._get_cache_key(user_context.user_id, rule_id) + + if OptimizelyDecideOption.INVALIDATE_USER_CMAB_CACHE in options: + self.cmab_cache.remove(cache_key) + + cached_value = self.cmab_cache.lookup(cache_key) + + attributes_hash = self._hash_attributes(filtered_attributes) + + if cached_value: + if cached_value['attributes_hash'] == attributes_hash: + return CmabDecision(variation_id=cached_value['variation_id'], cmab_uuid=cached_value['cmab_uuid']) + else: + self.cmab_cache.remove(cache_key) + + cmab_decision = self._fetch_decision(rule_id, user_context.user_id, filtered_attributes) + self.cmab_cache.save(cache_key, { + 
'attributes_hash': attributes_hash, + 'variation_id': cmab_decision['variation_id'], + 'cmab_uuid': cmab_decision['cmab_uuid'], + }) + return cmab_decision + + def _fetch_decision(self, rule_id: str, user_id: str, attributes: UserAttributes) -> CmabDecision: + cmab_uuid = str(uuid.uuid4()) + variation_id = self.cmab_client.fetch_decision(rule_id, user_id, attributes, cmab_uuid) + cmab_decision = CmabDecision(variation_id=variation_id, cmab_uuid=cmab_uuid) + return cmab_decision + + def _filter_attributes(self, project_config: ProjectConfig, + user_context: OptimizelyUserContext, rule_id: str) -> UserAttributes: + user_attributes = user_context.get_user_attributes() + filtered_user_attributes = UserAttributes({}) + + experiment = project_config.experiment_id_map.get(rule_id) + if not experiment or not experiment.cmab: + return filtered_user_attributes + + cmab_attribute_ids = experiment.cmab['attributeIds'] + for attribute_id in cmab_attribute_ids: + attribute = project_config.attribute_id_map.get(attribute_id) + if attribute and attribute.key in user_attributes: + filtered_user_attributes[attribute.key] = user_attributes[attribute.key] + + return filtered_user_attributes + + def _get_cache_key(self, user_id: str, rule_id: str) -> str: + return f"{len(user_id)}-{user_id}-{rule_id}" + + def _hash_attributes(self, attributes: UserAttributes) -> str: + sorted_attrs = json.dumps(attributes, sort_keys=True) + return hashlib.md5(sorted_attrs.encode()).hexdigest() diff --git a/optimizely/decision/optimizely_decide_option.py b/optimizely/decision/optimizely_decide_option.py index 8b091d96..8cffcfec 100644 --- a/optimizely/decision/optimizely_decide_option.py +++ b/optimizely/decision/optimizely_decide_option.py @@ -25,3 +25,6 @@ class OptimizelyDecideOption: IGNORE_USER_PROFILE_SERVICE: Final = 'IGNORE_USER_PROFILE_SERVICE' INCLUDE_REASONS: Final = 'INCLUDE_REASONS' EXCLUDE_VARIABLES: Final = 'EXCLUDE_VARIABLES' + IGNORE_CMAB_CACHE: Final = "IGNORE_CMAB_CACHE" + 
RESET_CMAB_CACHE: Final = "RESET_CMAB_CACHE" + INVALIDATE_USER_CMAB_CACHE: Final = "INVALIDATE_USER_CMAB_CACHE" diff --git a/optimizely/project_config.py b/optimizely/project_config.py index f2b1467b..f774ff8a 100644 --- a/optimizely/project_config.py +++ b/optimizely/project_config.py @@ -97,6 +97,9 @@ def __init__(self, datafile: str | bytes, logger: Logger, error_handler: Any): self.attribute_id_to_key_map: dict[str, str] = {} for attribute in self.attributes: self.attribute_id_to_key_map[attribute['id']] = attribute['key'] + self.attribute_id_map: dict[str, entities.Attribute] = self._generate_key_map( + self.attributes, 'id', entities.Attribute + ) self.audience_id_map: dict[str, entities.Audience] = self._generate_key_map( self.audiences, 'id', entities.Audience ) diff --git a/tests/test_cmab_client.py b/tests/test_cmab_client.py index 0e15b3f4..3aac5fd9 100644 --- a/tests/test_cmab_client.py +++ b/tests/test_cmab_client.py @@ -1,3 +1,15 @@ +# Copyright 2025, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import unittest import json from unittest.mock import MagicMock, patch, call diff --git a/tests/test_cmab_service.py b/tests/test_cmab_service.py new file mode 100644 index 00000000..0b3c593a --- /dev/null +++ b/tests/test_cmab_service.py @@ -0,0 +1,187 @@ +# Copyright 2025, Optimizely +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +from unittest.mock import MagicMock +from optimizely.cmab.cmab_service import DefaultCmabService +from optimizely.optimizely_user_context import OptimizelyUserContext +from optimizely.decision.optimizely_decide_option import OptimizelyDecideOption +from optimizely.odp.lru_cache import LRUCache +from optimizely.cmab.cmab_client import DefaultCmabClient +from optimizely.project_config import ProjectConfig +from optimizely.entities import Attribute + + +class TestDefaultCmabService(unittest.TestCase): + def setUp(self): + self.mock_cmab_cache = MagicMock(spec=LRUCache) + self.mock_cmab_client = MagicMock(spec=DefaultCmabClient) + self.mock_logger = MagicMock() + + self.cmab_service = DefaultCmabService( + cmab_cache=self.mock_cmab_cache, + cmab_client=self.mock_cmab_client, + logger=self.mock_logger + ) + + self.mock_project_config = MagicMock(spec=ProjectConfig) + self.mock_user_context = MagicMock(spec=OptimizelyUserContext) + self.mock_user_context.user_id = 'user123' + self.mock_user_context.get_user_attributes.return_value = {'age': 25, 'location': 'USA'} + + # Setup mock experiment and attribute mapping + self.mock_project_config.experiment_id_map = { + 'exp1': MagicMock(cmab={'attributeIds': ['66', '77']}) + } + attr1 = Attribute(id="66", key="age") + attr2 = Attribute(id="77", key="location") + self.mock_project_config.attribute_id_map = { + "66": attr1, + "77": attr2 + } + + def test_returns_decision_from_cache_when_valid(self): + expected_key = self.cmab_service._get_cache_key("user123", "exp1") + expected_attributes = {"age": 25, 
"location": "USA"} + expected_hash = self.cmab_service._hash_attributes(expected_attributes) + + self.mock_cmab_cache.lookup.return_value = { + "attributes_hash": expected_hash, + "variation_id": "varA", + "cmab_uuid": "uuid-123" + } + + decision = self.cmab_service.get_decision( + self.mock_project_config, self.mock_user_context, "exp1", [] + ) + + self.mock_cmab_cache.lookup.assert_called_once_with(expected_key) + self.assertEqual(decision["variation_id"], "varA") + self.assertEqual(decision["cmab_uuid"], "uuid-123") + + def test_ignores_cache_when_option_given(self): + self.mock_cmab_client.fetch_decision.return_value = "varB" + expected_attributes = {"age": 25, "location": "USA"} + + decision = self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.IGNORE_CMAB_CACHE] + ) + + self.assertEqual(decision["variation_id"], "varB") + self.assertIn('cmab_uuid', decision) + self.mock_cmab_client.fetch_decision.assert_called_once_with( + "exp1", + self.mock_user_context.user_id, + expected_attributes, + decision["cmab_uuid"] + ) + + def test_invalidates_user_cache_when_option_given(self): + self.mock_cmab_client.fetch_decision.return_value = "varC" + self.mock_cmab_cache.lookup.return_value = None + self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.INVALIDATE_USER_CMAB_CACHE] + ) + + key = self.cmab_service._get_cache_key("user123", "exp1") + self.mock_cmab_cache.remove.assert_called_with(key) + self.mock_cmab_cache.remove.assert_called_once() + + def test_resets_cache_when_option_given(self): + self.mock_cmab_client.fetch_decision.return_value = "varD" + + decision = self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.RESET_CMAB_CACHE] + ) + + self.mock_cmab_cache.reset.assert_called_once() + self.assertEqual(decision["variation_id"], "varD") + 
self.assertIn('cmab_uuid', decision) + + def test_new_decision_when_hash_changes(self): + self.mock_cmab_cache.lookup.return_value = { + "attributes_hash": "old_hash", + "variation_id": "varA", + "cmab_uuid": "uuid-123" + } + self.mock_cmab_client.fetch_decision.return_value = "varE" + + expected_attribute = {"age": 25, "location": "USA"} + expected_hash = self.cmab_service._hash_attributes(expected_attribute) + expected_key = self.cmab_service._get_cache_key("user123", "exp1") + + decision = self.cmab_service.get_decision(self.mock_project_config, self.mock_user_context, "exp1", []) + self.mock_cmab_cache.remove.assert_called_once_with(expected_key) + self.mock_cmab_cache.save.assert_called_once_with( + expected_key, + { + "cmab_uuid": decision["cmab_uuid"], + "variation_id": decision["variation_id"], + "attributes_hash": expected_hash + } + ) + self.assertEqual(decision["variation_id"], "varE") + self.mock_cmab_client.fetch_decision.assert_called_once_with( + "exp1", + self.mock_user_context.user_id, + expected_attribute, + decision["cmab_uuid"] + ) + + def test_filter_attributes_returns_correct_subset(self): + filtered = self.cmab_service._filter_attributes(self.mock_project_config, self.mock_user_context, "exp1") + self.assertEqual(filtered["age"], 25) + self.assertEqual(filtered["location"], "USA") + + def test_filter_attributes_empty_when_no_cmab(self): + self.mock_project_config.experiment_id_map["exp1"].cmab = None + filtered = self.cmab_service._filter_attributes(self.mock_project_config, self.mock_user_context, "exp1") + self.assertEqual(filtered, {}) + + def test_hash_attributes_produces_stable_output(self): + attrs = {"b": 2, "a": 1} + hash1 = self.cmab_service._hash_attributes(attrs) + hash2 = self.cmab_service._hash_attributes({"a": 1, "b": 2}) + self.assertEqual(hash1, hash2) + + def test_only_cmab_attributes_passed_to_client(self): + self.mock_user_context.get_user_attributes.return_value = { + 'age': 25, + 'location': 'USA', + 'extra_attr': 
'value', # This shouldn't be passed to CMAB + 'another_extra': 123 # This shouldn't be passed to CMAB + } + self.mock_cmab_client.fetch_decision.return_value = "varF" + + decision = self.cmab_service.get_decision( + self.mock_project_config, + self.mock_user_context, + "exp1", + [OptimizelyDecideOption.IGNORE_CMAB_CACHE] + ) + + # Verify only age and location are passed (attributes configured in setUp) + self.mock_cmab_client.fetch_decision.assert_called_once_with( + "exp1", + self.mock_user_context.user_id, + {"age": 25, "location": "USA"}, + decision["cmab_uuid"] + )