diff --git a/.gitignore b/.gitignore index e1a5c95daabdf..63495030e1034 100644 --- a/.gitignore +++ b/.gitignore @@ -2,28 +2,36 @@ *.pyo *.class *.log + /infra/ +localstack/infra/ + /node_modules/ package-lock.json /nosetests.xml + /.venv* +/.coverage .settings/ .project .classpath -/.coverage -node_modules/ -localstack/infra/ .DS_Store -/build/ -/dist/ *.egg-info/ .eggs/ -/target/ *.sw* ~* *~ + +node_modules/ +/build/ +/dist/ +/target/ + .idea + **/obj/** **/bin/** + !bin/docker-entrypoint.sh +requirements.copy.txt diff --git a/.travis.yml b/.travis.yml index b28df43e5da86..e84a1380ff72e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,7 +31,8 @@ env: install: - set -e - - make reinstall-p3 + - # install under python 3 + - PIP_CMD=pip3 VENV_OPTS="-p '`which python3`'" make install-basic - make init - make prepare-java-tests-if-changed - nohup docker pull lambci/lambda:nodejs8.10 > /dev/null & @@ -42,7 +43,6 @@ install: script: - set -e # fail fast - - (while [ 1 ]; do top -o %MEM -n1 | grep 'KiB Mem'; sleep 30; done) & # run tests using Python 3 - DEBUG=1 LAMBDA_EXECUTOR=docker TEST_ERROR_INJECTION=1 make test # run tests using Python 2 diff --git a/Dockerfile b/Dockerfile index d2bc18de753c7..530d61437f868 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,7 +13,7 @@ RUN mkdir -p localstack/utils/kinesis/ && mkdir -p localstack/services/ && \ touch localstack/__init__.py localstack/utils/__init__.py localstack/services/__init__.py localstack/utils/kinesis/__init__.py ADD localstack/constants.py localstack/config.py localstack/ ADD localstack/services/install.py localstack/services/ -ADD localstack/utils/common.py localstack/utils/ +ADD localstack/utils/common.py localstack/utils/bootstrap.py localstack/utils/ ADD localstack/utils/kinesis/ localstack/utils/kinesis/ ADD localstack/ext/ localstack/ext/ diff --git a/Makefile b/Makefile index 2dc6f317afd66..4ea92517b1a80 100644 --- a/Makefile +++ b/Makefile @@ -10,12 +10,20 @@ TEST_PATH ?= . usage: ## Show this help @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's/\\$$//' | sed -e 's/##//' -install: ## Install dependencies in virtualenv +setup-venv: (test `which virtualenv` || $(PIP_CMD) install --user virtualenv) && \ - (test -e $(VENV_DIR) || virtualenv $(VENV_OPTS) $(VENV_DIR)) && \ + (test -e $(VENV_DIR) || virtualenv $(VENV_OPTS) $(VENV_DIR)) + +install: ## Install full dependencies in virtualenv + make setup-venv && \ (test ! -e requirements.txt || ($(VENV_RUN); $(PIP_CMD) -q install -r requirements.txt && \ PYTHONPATH=. exec python localstack/services/install.py testlibs)) || exit 1 +install-basic: ## Install basic dependencies for CLI usage in virtualenv + make setup-venv && \ + ($(VENV_RUN); cat requirements.txt | grep -ve '^#' | grep '#basic' | sed 's/ #.*//' \ + | xargs $(PIP_CMD) install) + install-web: ## Install npm dependencies for dashboard Web UI (cd localstack/dashboard/web && (test ! -e package.json || npm install --silent > /dev/null)) @@ -36,7 +44,7 @@ init: ## Initialize the infrastructure, make sure all libs are down $(VENV_RUN); PYTHONPATH=. exec python localstack/services/install.py libs infra: ## Manually start the local infrastructure for testing - ($(VENV_RUN); exec bin/localstack start) + ($(VENV_RUN); exec bin/localstack start --host) docker-build: ## Build Docker image docker build -t $(IMAGE_NAME) . diff --git a/README.md b/README.md index 07c47e429443e..ce69ac81d6864 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ any longer. 
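The `install-basic` Make target above filters `requirements.txt` by the `#basic` / `#extended-lib` markers that this change introduces (see the `requirements.txt` and `setup.py` hunks further down). A rough Python equivalent of that filtering, purely illustrative and not part of the change:

```
# Illustrative sketch only: split requirements.txt into the minimal "basic"
# set (CLI usage) and the remaining "full" set, based on the line markers.
def split_requirements(path='requirements.txt'):
    basic, full = [], []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith('#') or '#extended-lib' in line:
                continue  # skip comments and libs excluded from the pip package
            requirement = line.split(' #')[0].strip()
            (basic if '#basic-lib' in line else full).append(requirement)
    return basic, full

if __name__ == '__main__':
    basic, full = split_requirements()
    print('basic:', basic)
    print('full extras:', full)
```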
# Overview -*LocalStack* spins up the following core Cloud APIs on your local machine: +LocalStack spins up the following core Cloud APIs on your local machine: * **API Gateway** at http://localhost:4567 * **Kinesis** at http://localhost:4568 @@ -53,71 +53,64 @@ any longer. * **IAM** at http://localhost:4593 * **EC2** at http://localhost:4597 -Additionally, *LocalStack* provides a powerful set of tools to interact with the cloud services, including +Additionally, LocalStack provides a powerful set of tools to interact with the cloud services, including a fully featured KCL Kinesis client with Python binding, simple setup/teardown integration for nosetests, as well as an Environment abstraction that allows to easily switch between local and remote Cloud execution. -## Why *LocalStack*? +## Why LocalStack? -*LocalStack* builds on existing best-of-breed mocking/testing tools, most notably +LocalStack builds on existing best-of-breed mocking/testing tools, most notably [kinesalite](https://github.com/mhart/kinesalite)/[dynalite](https://github.com/mhart/dynalite) and [moto](https://github.com/spulec/moto). While these tools are *awesome* (!), they lack functionality -for certain use cases. *LocalStack* combines the tools, makes them interoperable, and adds important +for certain use cases. LocalStack combines the tools, makes them interoperable, and adds important missing functionality on top of them: -* **Error injection:** *LocalStack* allows to inject errors frequently occurring in real Cloud environments, +* **Error injection:** LocalStack allows to inject errors frequently occurring in real Cloud environments, for instance `ProvisionedThroughputExceededException` which is thrown by Kinesis or DynamoDB if the amount of read/write throughput is exceeded. -* **Isolated processes**: All services in *LocalStack* run in separate processes. The overhead of additional +* **Isolated processes**: All services in LocalStack run in separate processes. The overhead of additional processes is negligible, and the entire stack can easily be executed on any developer machine and CI server. In moto, components are often hard-wired in RAM (e.g., when forwarding a message on an SNS topic to an SQS queue, - the queue endpoint is looked up in a local hash map). In contrast, *LocalStack* services live in isolation + the queue endpoint is looked up in a local hash map). In contrast, LocalStack services live in isolation (separate processes available via HTTP), which fosters true decoupling and more closely resembles the real cloud environment. -* **Pluggable services**: All services in *LocalStack* are easily pluggable (and replaceable), due to the fact that +* **Pluggable services**: All services in LocalStack are easily pluggable (and replaceable), due to the fact that we are using isolated processes for each service. This allows us to keep the framework up-to-date and select best-of-breed mocks for each individual service. 
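To make the endpoint list above concrete, here is a minimal boto3 snippet (illustrative, not part of this change) that talks to the local Kinesis endpoint; region and credentials are dummy values, which LocalStack does not validate:

```
import boto3

# Point a regular boto3 client at the local Kinesis endpoint (port 4568 above).
kinesis = boto3.client(
    'kinesis',
    endpoint_url='http://localhost:4568',
    region_name='us-east-1',
    aws_access_key_id='test',
    aws_secret_access_key='test',
)
print(kinesis.list_streams())
```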
## Requirements -* `make` * `python` (both Python 2.x and 3.x supported) * `pip` (python package manager) -* `npm` (node.js package manager) -* `java`/`javac` (Java 8 runtime environment and compiler) -* `mvn` (Maven, the build system for Java) +* `Docker` ## Installing -The easiest way to install *LocalStack* is via `pip`: +The easiest way to install LocalStack is via `pip`: ``` pip install localstack ``` -Once installed, run the infrastructure using the following command: -``` -localstack start -``` - -**Note**: Please do **not** use `sudo` or the `root` user - *LocalStack* -should be installed and started entirely under a local non-root user. -If you have problems with permissions in MacOS X Sierra, -install with `pip install --user localstack` +**Note**: Please do **not** use `sudo` or the `root` user - LocalStack +should be installed and started entirely under a local non-root user. If you have problems +with permissions in MacOS X Sierra, install with `pip install --user localstack` ## Running in Docker -You can also spin up *LocalStack* in Docker: +By default, LocalStack gets started inside a Docker container using this command: ``` -localstack start --docker +localstack start ``` (Note that on MacOS you may have to run `TMPDIR=/private$TMPDIR localstack start --docker` if `$TMPDIR` contains a symbolic link that cannot be mounted by Docker.) -Or using docker-compose (you need to clone the repository first, currently requires docker-compose version 2.1+): +### Using `docker-compose` + +You can also use the `docker-compose.yml` file from the repository and use this command (currently requires `docker-compose` version 2.1+): ``` docker-compose up @@ -151,6 +144,22 @@ services: To facilitate interoperability, configuration variables can be prefixed with `LOCALSTACK_` in docker. For instance, setting `LOCALSTACK_SERVICES=s3` is equivalent to `SERVICES=s3`. +## Starting locally (non-Docker mode) + +Alternatively, the infrastructure can be spun up on the local host machine (without using Docker) using the following command: + +``` +localstack start --host +``` + +(Note that this will require [additional dependencies](#Developing), and currently is not supported on some operating systems, including Windows.) + +LocalStack will attempt to automatically fetch the missing dependencies when you first start it up in "host" mode; alternatively, you can use the `full` profile to install all dependencies at `pip` installation time: + +``` +pip install localstack[full] +``` + ## Configurations You can pass the following environment variables to LocalStack: @@ -181,7 +190,7 @@ You can pass the following environment variables to LocalStack: - `docker`: run each function invocation in a separate Docker container - `docker-reuse`: create one Docker container per function and reuse it across invocations - For `docker` and `docker-reuse`, if *LocalStack* itself is started inside Docker, then + For `docker` and `docker-reuse`, if LocalStack itself is started inside Docker, then the `docker` command needs to be available inside the container (usually requires to run the container in privileged mode). Default is `docker`, fallback to `local` if Docker is not available. * `LAMBDA_REMOTE_DOCKER` determines whether Lambda code is copied or mounted into containers. 
@@ -278,8 +287,9 @@ aws --endpoint-url=http://localhost:4568 kinesis list-streams } ``` -**NEW**: Check out [awslocal](https://github.com/localstack/awscli-local), a thin CLI wrapper that runs commands directly against *LocalStack* (no need to -specify `--endpoint-url` anymore). Install it via `pip install awscli-local`, and then use it as follows: +**NEW**: Check out [awslocal](https://github.com/localstack/awscli-local), a thin CLI wrapper +that runs commands directly against LocalStack (no need to specify `--endpoint-url` anymore). +Install it via `pip install awscli-local`, and then use it as follows: ``` awslocal kinesis list-streams @@ -299,7 +309,7 @@ inside your Lambda function. See [Configurations](#Configurations) section for m ## Integration with nosetests -If you want to use *LocalStack* in your integration tests (e.g., nosetests), simply fire up the +If you want to use LocalStack in your integration tests (e.g., nosetests), simply fire up the infrastructure in your test setup method and then clean up everything in your teardown method: ``` @@ -337,7 +347,7 @@ In order to mount a local folder, ensure that `LAMBDA_REMOTE_DOCKER` is set to ` ## Integration with Java/JUnit -In order to use *LocalStack* with Java, the project ships with a simple JUnit runner and a JUnit 5 extension. Take a look +In order to use LocalStack with Java, the project ships with a simple JUnit runner and a JUnit 5 extension. Take a look at the example JUnit test in `ext/java`. When you run the test, all dependencies are automatically downloaded and installed to a temporary directory in your system. @@ -368,8 +378,8 @@ public class MyCloudAppTest { } ``` -Additionally, there is a version of the *LocalStack* Test Runner which runs in a docker container -instead of installing *LocalStack* on the current machine. The only dependency is to have docker +Additionally, there is a version of the LocalStack Test Runner which runs in a docker container +instead of installing LocalStack on the current machine. The only dependency is to have docker installed locally. The test runner will automatically pull the image and start the container for the duration of the test. The container can be configured by using the @LocalstackDockerProperties annotation. @@ -396,7 +406,7 @@ public class MyDockerCloudAppTest { } ``` -The *LocalStack* JUnit test runner is published as an artifact in Maven Central. +The LocalStack JUnit test runner is published as an artifact in Maven Central. Simply add the following dependency to your `pom.xml` file: ``` @@ -443,7 +453,7 @@ with the `--user` flag: `pip install --user localstack` * If you are deploying within OpenShift, please be aware: the pod must run as `root`, and the user must have capabilities added to the running pod, in order to allow Elasticsearch to be run as the non-root `localstack` user. -* The environment variable `no_proxy` is rewritten by *LocalStack*. +* The environment variable `no_proxy` is rewritten by LocalStack. (Internal requests will go straight via localhost, bypassing any proxy configuration). 
* For troubleshooting LocalStack start issues, you can check debug logs by running `DEBUG=1 localstack start` @@ -454,6 +464,17 @@ with the `--user` flag: `pip install --user localstack` ## Developing +### Requirements for developing or starting locally + +To develop new features, or to start the stack locally (outside of Docker), the following additional tools are required: + +* `make` +* `npm` (node.js package manager) +* `java`/`javac` (Java 8 runtime environment and compiler) +* `mvn` (Maven, the build system for Java) + +### Development Environment + If you pull the repo in order to extend/modify LocalStack, run this command to install all the dependencies: @@ -502,6 +523,7 @@ localstack web ## Change Log +* v0.10.0: Lazy loading of libraries; fix handling of regions; add API multiserver; improve CPU profiling; fix ES xpack installation; add basic EventBridge support; refactor Lambda API and executor; add MessageAttributes on SNS payloads; tagging for SNS; ability to customize docker command * v0.9.6: Add API Gateway SQS proxy; fix command to push Docker image; fix Docker bridge IP configuration; fix SSL issue in dashboard infra; updates to README * v0.9.5: Reduce Docker image size by squashing; fix response body for presigned URL S3 PUT requests; fix CreateDate returned by IAM; fix account IDs for CF and SNS; fix topic checks for SMS using SNS; improve documentation around `@LocalstackDockerProperties`; add basic EC2 support; upgrade to ElasticSearch 6.7; set Last-Modified header in S3; preserve logic with uppercase event keys in Java; add support for nodejs 10.x Lambdas * v0.9.4: Fix ARNs in CloudFormation deployments; write stderr to file in supervisord; fix Lambda invocation times; fix canonicalization of service names when running in Docker; add support for `@Nested` in Junit5; add support for batch/transaction in DynamoDB; fix output buffering for subprocesses; assign unique ports under docker-reuse; check if topic ARN exists before publish @@ -618,11 +640,11 @@ Support this project by becoming a sponsor. Your logo will show up here with a l ## License -Copyright (c) 2017-2019 *LocalStack* maintainers and contributors. +Copyright (c) 2017-2019 LocalStack maintainers and contributors. Copyright (c) 2016 Atlassian and others. -This version of *LocalStack* is released under the Apache License, Version 2.0 (see LICENSE.txt). +This version of LocalStack is released under the Apache License, Version 2.0 (see LICENSE.txt). By downloading and using this software you agree to the [End-User License Agreement (EULA)](doc/end_user_license_agreement). 
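The "lazy loading of libraries" item in the v0.10.0 change log above corresponds to the new `localstack/utils/bootstrap.py` module introduced further down in this diff. Condensed to its core idea (a sketch that shells out to pip for simplicity; the actual code calls pip's Python API instead):

```
import subprocess
import sys

def ensure_full_install(requirements):
    # Try the heavyweight import first; only if it fails, install the
    # remaining (non-basic) requirements on the fly.
    try:
        from localstack.services import infra  # noqa: F401
    except Exception:
        subprocess.check_call([sys.executable, '-m', 'pip', 'install'] + list(requirements))
```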
diff --git a/bin/localstack b/bin/localstack index 6a3947da7f146..bfee2c1c6045e 100755 --- a/bin/localstack +++ b/bin/localstack @@ -19,17 +19,20 @@ Options: import os import sys +import glob import json import traceback -PARENT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(__file__), '..')) -if os.path.isdir(os.path.join(PARENT_FOLDER, '.venv')): +PARENT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) +venv_dir = os.path.join(PARENT_FOLDER, '.venv') +if os.path.isdir(venv_dir): + for path in glob.glob(os.path.join(venv_dir, 'lib/python*/site-packages')): + sys.path.insert(0, path) sys.path.insert(0, PARENT_FOLDER) from docopt import docopt from localstack import config, constants -from localstack.utils import cli -from localstack.services import infra +from localstack.utils import cli, bootstrap if __name__ == '__main__': @@ -53,7 +56,7 @@ if __name__ == '__main__': } # load CLI plugins - infra.load_plugins(scope=infra.PLUGIN_SCOPE_COMMANDS) + bootstrap.load_plugins(scope=bootstrap.PLUGIN_SCOPE_COMMANDS) # create final usage string additional_params = [] diff --git a/localstack/constants.py b/localstack/constants.py index b8c88d057b748..0a770f7abbf3b 100644 --- a/localstack/constants.py +++ b/localstack/constants.py @@ -2,7 +2,7 @@ import localstack_client.config # LocalStack version -VERSION = '0.9.6' +VERSION = '0.10.0' # default AWS region if 'DEFAULT_REGION' not in os.environ: diff --git a/localstack/services/awslambda/lambda_api.py b/localstack/services/awslambda/lambda_api.py index 36f0265f2abbc..76f6842ab5efa 100644 --- a/localstack/services/awslambda/lambda_api.py +++ b/localstack/services/awslambda/lambda_api.py @@ -465,8 +465,6 @@ def execute(event, context): def set_archive_code(code, lambda_name, zip_file_content=None): - # get file content - zip_file_content = zip_file_content or get_zip_bytes(code) # get metadata lambda_arn = func_arn(lambda_name) @@ -483,6 +481,9 @@ def set_archive_code(code, lambda_name, zip_file_content=None): # this folder to TMP_FILES or similar). 
return code['S3Key'] + # get file content + zip_file_content = zip_file_content or get_zip_bytes(code) + # Save the zip file to a temporary file that the lambda executors can reference code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest()) lambda_details.get_version('$LATEST')['CodeSize'] = len(zip_file_content) @@ -514,8 +515,9 @@ def generic_handler(event, context): if code_passed: lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name) - # Save the zip file to a temporary file that the lambda executors can reference - zip_file_content = get_zip_bytes(code_passed) + if not is_local_mount: + # Save the zip file to a temporary file that the lambda executors can reference + zip_file_content = get_zip_bytes(code_passed) else: lambda_cwd = lambda_cwd or lambda_details.cwd diff --git a/localstack/services/cloudformation/cloudformation_starter.py b/localstack/services/cloudformation/cloudformation_starter.py index 9af264e095ff4..b0e1c7db3b940 100644 --- a/localstack/services/cloudformation/cloudformation_starter.py +++ b/localstack/services/cloudformation/cloudformation_starter.py @@ -17,7 +17,8 @@ from localstack.utils.common import short_uid from localstack.stepfunctions import models as sfn_models from localstack.services.infra import ( - get_service_protocol, start_proxy_for_service, do_run, setup_logging, canonicalize_api_names) + get_service_protocol, start_proxy_for_service, do_run, canonicalize_api_names) +from localstack.utils.bootstrap import setup_logging from localstack.utils.cloudformation import template_deployer from localstack.services.awslambda.lambda_api import BUCKET_MARKER_LOCAL diff --git a/localstack/services/infra.py b/localstack/services/infra.py index ba90cdc48588c..ad6f45b692336 100644 --- a/localstack/services/infra.py +++ b/localstack/services/infra.py @@ -8,8 +8,6 @@ import logging import boto3 import subprocess -import six -import pkgutil from requests.models import Response from localstack import constants, config from localstack.config import USE_SSL @@ -17,11 +15,12 @@ ENV_DEV, DEFAULT_REGION, LOCALSTACK_VENV_FOLDER, ENV_INTERNAL_TEST_RUN, DEFAULT_PORT_APIGATEWAY_BACKEND, DEFAULT_PORT_SNS_BACKEND, DEFAULT_PORT_IAM_BACKEND) from localstack.utils import common, persistence -from localstack.utils.common import (run, TMP_THREADS, in_ci, run_cmd_safe, get_free_tcp_port, - FuncThread, ShellCommandThread, mkdir, get_service_protocol, docker_container_running, - in_docker, is_debug, setup_logging) +from localstack.utils.common import (TMP_THREADS, run, get_free_tcp_port, + FuncThread, ShellCommandThread, get_service_protocol, in_docker) from localstack.utils.server import multiserver +from localstack.utils.bootstrap import setup_logging, is_debug, canonicalize_api_names from localstack.utils.analytics import event_publisher +from localstack.utils.bootstrap import load_plugins from localstack.services import generic_proxy, install from localstack.services.es import es_api from localstack.services.firehose import firehose_api @@ -32,20 +31,6 @@ # flag to indicate whether signal handlers have been set up already SIGNAL_HANDLERS_SETUP = False -# maps plugin scope ("services", "commands") to flags which indicate whether plugins have been loaded -PLUGINS_LOADED = {} - -# maps from API names to list of other API names that they depend on -API_DEPENDENCIES = { - 'dynamodbstreams': ['kinesis'], - 'lambda': ['logs'], - 'es': ['elasticsearch'] -} -# composites define an abstract name like "serverless" that maps to a set of services 
-API_COMPOSITES = { - 'serverless': ['iam', 'lambda', 'dynamodb', 'apigateway', 's3', 'sns'] -} - # default backend host address DEFAULT_BACKEND_HOST = '127.0.0.1' @@ -55,13 +40,6 @@ # map of service plugins, mapping from service name to plugin details SERVICE_PLUGINS = {} -# whether or not to manually fix permissions on /var/run/docker.sock (currently disabled) -DO_CHMOD_DOCKER_SOCK = False - -# plugin scopes -PLUGIN_SCOPE_SERVICES = 'services' -PLUGIN_SCOPE_COMMANDS = 'commands' - # ----------------- # PLUGIN UTILITIES @@ -97,53 +75,6 @@ def register_plugin(plugin): SERVICE_PLUGINS[plugin.name()] = plugin -def load_plugin_from_path(file_path, scope=None): - if os.path.exists(file_path): - module = re.sub(r'(^|.+/)([^/]+)/plugins.py', r'\2', file_path) - method_name = 'register_localstack_plugins' - scope = scope or PLUGIN_SCOPE_SERVICES - if scope == PLUGIN_SCOPE_COMMANDS: - method_name = 'register_localstack_commands' - try: - namespace = {} - exec('from %s.plugins import %s' % (module, method_name), namespace) - method_to_execute = namespace[method_name] - except Exception as e: - if not re.match(r'.*cannot import name .*%s.*' % method_name, str(e)): - LOG.debug('Unable to load plugins from module %s: %s' % (module, e)) - return - try: - return method_to_execute() - except Exception as e: - LOG.warning('Unable to load plugins from file %s: %s' % (file_path, e)) - - -def load_plugins(scope=None): - scope = scope or PLUGIN_SCOPE_SERVICES - if PLUGINS_LOADED.get(scope): - return PLUGINS_LOADED[scope] - - setup_logging() - - loaded_files = [] - result = [] - for module in pkgutil.iter_modules(): - file_path = None - if six.PY3 and not isinstance(module, tuple): - file_path = '%s/%s/plugins.py' % (module.module_finder.path, module.name) - elif six.PY3 or isinstance(module[0], pkgutil.ImpImporter): - if hasattr(module[0], 'path'): - file_path = '%s/%s/plugins.py' % (module[0].path, module[1]) - if file_path and file_path not in loaded_files: - plugin_config = load_plugin_from_path(file_path, scope=scope) - if plugin_config: - result.append(plugin_config) - loaded_files.append(file_path) - # set global flag - PLUGINS_LOADED[scope] = result - return result - - # ----------------------- # CONFIG UPDATE BACKDOOR # ----------------------- @@ -413,157 +344,11 @@ def check_infra(retries=10, expect_shutdown=False, apis=None, additional_checks= check_infra(retries - 1, expect_shutdown=expect_shutdown, apis=apis, additional_checks=additional_checks) -# ------------- -# DOCKER STARTUP -# ------------- - - -def start_infra_in_docker(): - - container_name = 'localstack_main' - - if docker_container_running(container_name): - raise Exception('LocalStack container named "%s" is already running' % container_name) - - # load plugins before starting the docker container - plugin_configs = load_plugins() - plugin_run_params = ' '.join([ - entry.get('docker', {}).get('run_flags', '') for entry in plugin_configs]) - - # prepare APIs - canonicalize_api_names() - - services = os.environ.get('SERVICES', '') - entrypoint = os.environ.get('ENTRYPOINT', '') - cmd = os.environ.get('CMD', '') - user_flags = config.DOCKER_FLAGS - image_name = os.environ.get('IMAGE_NAME', constants.DOCKER_IMAGE_NAME) - service_ports = config.SERVICE_PORTS - force_noninteractive = os.environ.get('FORCE_NONINTERACTIVE', '') - - # construct port mappings - ports_list = sorted(service_ports.values()) - start_port = 0 - last_port = 0 - port_ranges = [] - for i in range(0, len(ports_list)): - if not start_port: - start_port = ports_list[i] - if 
not last_port: - last_port = ports_list[i] - if ports_list[i] > last_port + 1: - port_ranges.append([start_port, last_port]) - start_port = ports_list[i] - elif i >= len(ports_list) - 1: - port_ranges.append([start_port, ports_list[i]]) - last_port = ports_list[i] - port_mappings = ' '.join( - '-p {start}-{end}:{start}-{end}'.format(start=entry[0], end=entry[1]) - if entry[0] < entry[1] else '-p {port}:{port}'.format(port=entry[0]) - for entry in port_ranges) - - if services: - port_mappings = '' - for service, port in service_ports.items(): - port_mappings += ' -p {port}:{port}'.format(port=port) - - env_str = '' - for env_var in config.CONFIG_ENV_VARS: - value = os.environ.get(env_var, None) - if value is not None: - env_str += '-e %s="%s" ' % (env_var, value) - - data_dir_mount = '' - data_dir = os.environ.get('DATA_DIR', None) - if data_dir is not None: - container_data_dir = '/tmp/localstack_data' - data_dir_mount = '-v "%s:%s" ' % (data_dir, container_data_dir) - env_str += '-e DATA_DIR="%s" ' % container_data_dir - - interactive = '' if force_noninteractive or in_ci() else '-it ' - - # append space if parameter is set - user_flags = '%s ' % user_flags if user_flags else user_flags - entrypoint = '%s ' % entrypoint if entrypoint else entrypoint - plugin_run_params = '%s ' % plugin_run_params if plugin_run_params else plugin_run_params - - container_name = 'localstack_main' - - docker_cmd = ('%s run %s%s%s%s%s' + - '--rm --privileged ' + - '--name %s ' + - '-p 8080:8080 %s %s' + - '-v "%s:/tmp/localstack" -v "%s:%s" ' + - '-e DOCKER_HOST="unix://%s" ' + - '-e HOST_TMP_FOLDER="%s" "%s" %s') % ( - config.DOCKER_CMD, interactive, entrypoint, env_str, user_flags, plugin_run_params, - container_name, port_mappings, data_dir_mount, config.TMP_FOLDER, config.DOCKER_SOCK, - config.DOCKER_SOCK, config.DOCKER_SOCK, config.HOST_TMP_FOLDER, image_name, cmd - ) - - mkdir(config.TMP_FOLDER) - run_cmd_safe(cmd='chmod -R 777 "%s"' % config.TMP_FOLDER) - - print(docker_cmd) - t = ShellCommandThread(docker_cmd, outfile=subprocess.PIPE) - t.start() - time.sleep(2) - - if DO_CHMOD_DOCKER_SOCK: - # fix permissions on /var/run/docker.sock - for i in range(0, 100): - if docker_container_running(container_name): - break - time.sleep(2) - run('%s exec -u root "%s" chmod 777 /var/run/docker.sock' % (config.DOCKER_CMD, container_name)) - - t.process.wait() - sys.exit(t.process.returncode) - - # ------------- # MAIN STARTUP # ------------- -def canonicalize_api_names(apis=None): - """ Finalize the list of API names by - (1) resolving and adding dependencies (e.g., "dynamodbstreams" requires "kinesis"), - (2) resolving and adding composites (e.g., "serverless" describes an ensemble - including "iam", "lambda", "dynamodb", "apigateway", "s3", "sns", and "logs"), and - (3) removing duplicates from the list. 
""" - - apis = apis or list(config.SERVICE_PORTS.keys()) - - def contains(apis, api): - for a in apis: - if a == api: - return True - - # resolve composites - for comp, deps in API_COMPOSITES.items(): - if contains(apis, comp): - apis.extend(deps) - config.SERVICE_PORTS.pop(comp) - - # resolve dependencies - for i, api in enumerate(apis): - for dep in API_DEPENDENCIES.get(api, []): - if not contains(apis, dep): - apis.append(dep) - - # remove duplicates and composite names - apis = list(set([a for a in apis if a not in API_COMPOSITES.keys()])) - - # make sure we have port mappings for each API - for api in apis: - if api not in config.SERVICE_PORTS: - config.SERVICE_PORTS[api] = config.DEFAULT_SERVICE_PORTS.get(api) - config.populate_configs(config.SERVICE_PORTS) - - return apis - - def start_infra(asynchronous=False, apis=None): try: # load plugins diff --git a/localstack/services/install.py b/localstack/services/install.py index 6f45e62052ad4..91d10ede15555 100755 --- a/localstack/services/install.py +++ b/localstack/services/install.py @@ -7,9 +7,13 @@ import shutil import logging import tempfile +from localstack.utils import bootstrap from localstack.constants import (DEFAULT_SERVICE_PORTS, ELASTICMQ_JAR_URL, STS_JAR_URL, ELASTICSEARCH_JAR_URL, ELASTICSEARCH_PLUGIN_LIST, DYNAMODB_JAR_URL, LOCALSTACK_MAVEN_VERSION, STEPFUNCTIONS_ZIP_URL) +if __name__ == '__main__': + bootstrap.bootstrap_installation() +# flake8: noqa: E402 from localstack.utils.common import ( download, parallelize, run, mkdir, load_file, save_file, unzip, rm_rf, chmod_r) @@ -27,6 +31,9 @@ URL_LOCALSTACK_FAT_JAR = ('https://repo1.maven.org/maven2/' + 'cloud/localstack/localstack-utils/{v}/localstack-utils-{v}-fat.jar').format(v=LOCALSTACK_MAVEN_VERSION) +# Target version for javac, to ensure compatibility with earlier JREs +JAVAC_TARGET_VERSION = '1.8' + # set up logger LOGGER = logging.getLogger(__name__) @@ -147,7 +154,7 @@ def install_amazon_kinesis_client_libs(): java_files = '%s/utils/kinesis/java/cloud/localstack/*.java' % ROOT_PATH class_files = '%s/utils/kinesis/java/cloud/localstack/*.class' % ROOT_PATH if not glob.glob(class_files): - run('javac -cp "%s" %s' % (classpath, java_files)) + run('javac -target %s -cp "%s" %s' % (JAVAC_TARGET_VERSION, classpath, java_files)) def install_lambda_java_libs(): @@ -221,7 +228,7 @@ def download_and_extract(): logging.basicConfig(level=logging.INFO) logging.getLogger('requests').setLevel(logging.WARNING) install_all_components() - print('Done.') - elif sys.argv[1] == 'testlibs': + if sys.argv[1] in ('libs', 'testlibs'): # Install additional libraries for testing install_amazon_kinesis_client_libs() + print('Done.') diff --git a/localstack/services/s3/s3_starter.py b/localstack/services/s3/s3_starter.py index e9a2d261cc621..6453930bf9ea0 100644 --- a/localstack/services/s3/s3_starter.py +++ b/localstack/services/s3/s3_starter.py @@ -8,7 +8,8 @@ from localstack.utils.aws import aws_stack from localstack.utils.common import wait_for_port_open from localstack.services.infra import ( - get_service_protocol, start_proxy_for_service, do_run, setup_logging) + get_service_protocol, start_proxy_for_service, do_run) +from localstack.utils.bootstrap import setup_logging LOGGER = logging.getLogger(__name__) diff --git a/localstack/utils/bootstrap.py b/localstack/utils/bootstrap.py new file mode 100644 index 0000000000000..1ad13a1036628 --- /dev/null +++ b/localstack/utils/bootstrap.py @@ -0,0 +1,362 @@ +import os +import re +import sys +import pip as pip_mod +import time +import 
pkgutil +import logging +import warnings +import threading +try: + import subprocess32 as subprocess +except Exception: + import subprocess +import six +from localstack import constants, config + +# set up logger +LOG = logging.getLogger(os.path.basename(__file__)) + +# maps plugin scope ("services", "commands") to flags which indicate whether plugins have been loaded +PLUGINS_LOADED = {} + +# marker for extended/ignored libs in requirements.txt +IGNORED_LIB_MARKER = '#extended-lib' +BASIC_LIB_MARKER = '#basic-lib' + +# whether or not to manually fix permissions on /var/run/docker.sock (currently disabled) +DO_CHMOD_DOCKER_SOCK = False + +# log format strings +LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s: %(message)s' +LOG_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S' + +# plugin scopes +PLUGIN_SCOPE_SERVICES = 'services' +PLUGIN_SCOPE_COMMANDS = 'commands' + +# maps from API names to list of other API names that they depend on +API_DEPENDENCIES = { + 'dynamodbstreams': ['kinesis'], + 'lambda': ['logs'], + 'es': ['elasticsearch'] +} +# composites define an abstract name like "serverless" that maps to a set of services +API_COMPOSITES = { + 'serverless': ['iam', 'lambda', 'dynamodb', 'apigateway', 's3', 'sns'] +} + + +def bootstrap_installation(): + try: + from localstack.services import infra + assert infra + except Exception: + install_dependencies() + + +def install_dependencies(): + # determine requirements + with open('requirements.txt') as f: + requirements = f.read() + install_requires = [] + for line in re.split('\n', requirements): + if line and line[0] != '#': + if BASIC_LIB_MARKER not in line and IGNORED_LIB_MARKER not in line: + line = line.split(' #')[0].strip() + install_requires.append(line) + LOG.info('Lazily installing missing pip dependencies, this could take a while: %s' % + ', '.join(install_requires)) + args = ['install'] + install_requires + if hasattr(pip_mod, 'main'): + pip_mod.main(args) + else: + import pip._internal + pip._internal.main(args) + + +def load_plugin_from_path(file_path, scope=None): + if os.path.exists(file_path): + module = re.sub(r'(^|.+/)([^/]+)/plugins.py', r'\2', file_path) + method_name = 'register_localstack_plugins' + scope = scope or PLUGIN_SCOPE_SERVICES + if scope == PLUGIN_SCOPE_COMMANDS: + method_name = 'register_localstack_commands' + try: + namespace = {} + exec('from %s.plugins import %s' % (module, method_name), namespace) + method_to_execute = namespace[method_name] + except Exception as e: + if not re.match(r'.*cannot import name .*%s.*' % method_name, str(e)): + LOG.debug('Unable to load plugins from module %s: %s' % (module, e)) + return + try: + return method_to_execute() + except Exception as e: + LOG.warning('Unable to load plugins from file %s: %s' % (file_path, e)) + + +def load_plugins(scope=None): + scope = scope or PLUGIN_SCOPE_SERVICES + if PLUGINS_LOADED.get(scope): + return PLUGINS_LOADED[scope] + + setup_logging() + + loaded_files = [] + result = [] + for module in pkgutil.iter_modules(): + file_path = None + if six.PY3 and not isinstance(module, tuple): + file_path = '%s/%s/plugins.py' % (module.module_finder.path, module.name) + elif six.PY3 or isinstance(module[0], pkgutil.ImpImporter): + if hasattr(module[0], 'path'): + file_path = '%s/%s/plugins.py' % (module[0].path, module[1]) + if file_path and file_path not in loaded_files: + plugin_config = load_plugin_from_path(file_path, scope=scope) + if plugin_config: + result.append(plugin_config) + loaded_files.append(file_path) + # set global flag + PLUGINS_LOADED[scope] = 
result + return result + + +def docker_container_running(container_name): + output = to_str(run("docker ps --format '{{.Names}}'")) + container_names = re.split(r'\s+', output.replace('\n', ' ')) + return container_name in container_names + + +def setup_logging(): + # determine and set log level + log_level = logging.DEBUG if is_debug() else logging.INFO + logging.basicConfig(level=log_level, format=LOG_FORMAT, datefmt=LOG_DATE_FORMAT) + + # set up werkzeug logger + + class WerkzeugLogFilter(logging.Filter): + def filter(self, record): + return record.name != 'werkzeug' + + root_handlers = logging.getLogger().handlers + if len(root_handlers) > 0: + root_handlers[0].addFilter(WerkzeugLogFilter()) + if is_debug(): + format = '%(asctime)s:API: %(message)s' + handler = logging.StreamHandler() + handler.setLevel(logging.INFO) + handler.setFormatter(logging.Formatter(format)) + logging.getLogger('werkzeug').addHandler(handler) + + # disable some logs and warnings + warnings.filterwarnings('ignore') + logging.captureWarnings(True) + logging.getLogger('boto3').setLevel(logging.INFO) + logging.getLogger('s3transfer').setLevel(logging.INFO) + logging.getLogger('docker').setLevel(logging.WARNING) + logging.getLogger('urllib3').setLevel(logging.WARNING) + logging.getLogger('requests').setLevel(logging.WARNING) + logging.getLogger('botocore').setLevel(logging.ERROR) + logging.getLogger('elasticsearch').setLevel(logging.ERROR) + + +# -------------- +# INFRA STARTUP +# -------------- + + +def canonicalize_api_names(apis=None): + """ Finalize the list of API names by + (1) resolving and adding dependencies (e.g., "dynamodbstreams" requires "kinesis"), + (2) resolving and adding composites (e.g., "serverless" describes an ensemble + including "iam", "lambda", "dynamodb", "apigateway", "s3", "sns", and "logs"), and + (3) removing duplicates from the list. 
""" + + apis = apis or list(config.SERVICE_PORTS.keys()) + + def contains(apis, api): + for a in apis: + if a == api: + return True + + # resolve composites + for comp, deps in API_COMPOSITES.items(): + if contains(apis, comp): + apis.extend(deps) + config.SERVICE_PORTS.pop(comp) + + # resolve dependencies + for i, api in enumerate(apis): + for dep in API_DEPENDENCIES.get(api, []): + if not contains(apis, dep): + apis.append(dep) + + # remove duplicates and composite names + apis = list(set([a for a in apis if a not in API_COMPOSITES.keys()])) + + # make sure we have port mappings for each API + for api in apis: + if api not in config.SERVICE_PORTS: + config.SERVICE_PORTS[api] = config.DEFAULT_SERVICE_PORTS.get(api) + config.populate_configs(config.SERVICE_PORTS) + + return apis + + +def start_infra_locally(): + bootstrap_installation() + from localstack.services import infra + return infra.start_infra() + + +def start_infra_in_docker(): + + container_name = 'localstack_main' + + if docker_container_running(container_name): + raise Exception('LocalStack container named "%s" is already running' % container_name) + + # load plugins before starting the docker container + plugin_configs = load_plugins() + plugin_run_params = ' '.join([ + entry.get('docker', {}).get('run_flags', '') for entry in plugin_configs]) + + # prepare APIs + canonicalize_api_names() + + services = os.environ.get('SERVICES', '') + entrypoint = os.environ.get('ENTRYPOINT', '') + cmd = os.environ.get('CMD', '') + user_flags = config.DOCKER_FLAGS + image_name = os.environ.get('IMAGE_NAME', constants.DOCKER_IMAGE_NAME) + service_ports = config.SERVICE_PORTS + force_noninteractive = os.environ.get('FORCE_NONINTERACTIVE', '') + + # construct port mappings + ports_list = sorted(service_ports.values()) + start_port = 0 + last_port = 0 + port_ranges = [] + for i in range(0, len(ports_list)): + if not start_port: + start_port = ports_list[i] + if not last_port: + last_port = ports_list[i] + if ports_list[i] > last_port + 1: + port_ranges.append([start_port, last_port]) + start_port = ports_list[i] + elif i >= len(ports_list) - 1: + port_ranges.append([start_port, ports_list[i]]) + last_port = ports_list[i] + port_mappings = ' '.join( + '-p {start}-{end}:{start}-{end}'.format(start=entry[0], end=entry[1]) + if entry[0] < entry[1] else '-p {port}:{port}'.format(port=entry[0]) + for entry in port_ranges) + + if services: + port_mappings = '' + for service, port in service_ports.items(): + port_mappings += ' -p {port}:{port}'.format(port=port) + + env_str = '' + for env_var in config.CONFIG_ENV_VARS: + value = os.environ.get(env_var, None) + if value is not None: + env_str += '-e %s="%s" ' % (env_var, value) + + data_dir_mount = '' + data_dir = os.environ.get('DATA_DIR', None) + if data_dir is not None: + container_data_dir = '/tmp/localstack_data' + data_dir_mount = '-v "%s:%s" ' % (data_dir, container_data_dir) + env_str += '-e DATA_DIR="%s" ' % container_data_dir + + interactive = '' if force_noninteractive or in_ci() else '-it ' + + # append space if parameter is set + user_flags = '%s ' % user_flags if user_flags else user_flags + entrypoint = '%s ' % entrypoint if entrypoint else entrypoint + plugin_run_params = '%s ' % plugin_run_params if plugin_run_params else plugin_run_params + + container_name = 'localstack_main' + + docker_cmd = ('%s run %s%s%s%s%s' + + '--rm --privileged ' + + '--name %s ' + + '-p 8080:8080 %s %s' + + '-v "%s:/tmp/localstack" -v "%s:%s" ' + + '-e DOCKER_HOST="unix://%s" ' + + '-e HOST_TMP_FOLDER="%s" 
"%s" %s') % ( + config.DOCKER_CMD, interactive, entrypoint, env_str, user_flags, plugin_run_params, + container_name, port_mappings, data_dir_mount, config.TMP_FOLDER, config.DOCKER_SOCK, + config.DOCKER_SOCK, config.DOCKER_SOCK, config.HOST_TMP_FOLDER, image_name, cmd + ) + + mkdir(config.TMP_FOLDER) + try: + run('chmod -R 777 "%s"' % config.TMP_FOLDER) + except Exception: + pass + + class ShellRunnerThread(threading.Thread): + def __init__(self, cmd): + threading.Thread.__init__(self) + self.daemon = True + self.cmd = cmd + + def run(self): + self.process = run(self.cmd, asynchronous=True) + + print(docker_cmd) + t = ShellRunnerThread(docker_cmd) + t.start() + time.sleep(2) + + if DO_CHMOD_DOCKER_SOCK: + # fix permissions on /var/run/docker.sock + for i in range(0, 100): + if docker_container_running(container_name): + break + time.sleep(2) + run('%s exec -u root "%s" chmod 777 /var/run/docker.sock' % (config.DOCKER_CMD, container_name)) + + t.process.wait() + sys.exit(t.process.returncode) + + +# --------------- +# UTIL FUNCTIONS +# --------------- + + +def to_str(obj, errors='strict'): + return obj.decode('utf-8', errors) if isinstance(obj, six.binary_type) else obj + + +def in_ci(): + """ Whether or not we are running in a CI environment """ + for key in ('CI', 'TRAVIS'): + if os.environ.get(key, '') not in [False, '', '0', 'false']: + return True + return False + + +def run(cmd, asynchronous=False): + if asynchronous: + return subprocess.Popen(cmd, shell=True) + return subprocess.check_output(cmd, shell=True) + + +def mkdir(folder): + if not os.path.exists(folder): + try: + os.makedirs(folder) + except OSError as err: + # Ignore rare 'File exists' race conditions. + if err.errno != 17: + raise + + +def is_debug(): + return os.environ.get('DEBUG', '').strip() not in ['', '0', 'false'] diff --git a/localstack/utils/cli.py b/localstack/utils/cli.py index 10255af6ffdd4..118ff1994eadc 100644 --- a/localstack/utils/cli.py +++ b/localstack/utils/cli.py @@ -1,8 +1,9 @@ import re from docopt import docopt from localstack import config -from localstack.services import infra -from localstack.utils.common import run +from localstack.utils import bootstrap + +# Note: make sure we don't have imports at the root level here def cmd_infra(argv, args): @@ -11,10 +12,11 @@ def cmd_infra(argv, args): localstack infra [options] Commands: - infra start Start the local infrastructure + infra start Start the local infrastructure Options: - --docker Run the infrastructure in a Docker container + --docker Run the infrastructure in a Docker container (default) + --host Run the infrastructure on the local host """ if argv[0] == 'start': argv = ['infra', 'start'] + argv[1:] @@ -22,11 +24,14 @@ def cmd_infra(argv, args): args[''] = ['start'] + args[''] args.update(docopt(cmd_infra.__doc__.strip(), argv=argv)) if args[''] == 'start': + if args['--docker'] and args['--host']: + raise Exception('Please specify either --docker or --host') print('Starting local dev environment. 
CTRL-C to quit.') - if args['--docker']: - infra.start_infra_in_docker() + in_docker = args['--docker'] or not args['--host'] + if in_docker: + bootstrap.start_infra_in_docker() else: - infra.start_infra() + bootstrap.start_infra_locally() def cmd_web(argv, args): @@ -60,6 +65,9 @@ def cmd_ssh(argv, args): Options: """ + bootstrap.bootstrap_installation() + from localstack.utils.common import run + args.update(docopt(cmd_ssh.__doc__.strip(), argv=argv)) lines = run('docker ps').split('\n')[1:] lines = [l for l in lines if 'localstack' in l] diff --git a/localstack/utils/common.py b/localstack/utils/common.py index 58e95c7b3b526..21e9f7ffd4af2 100644 --- a/localstack/utils/common.py +++ b/localstack/utils/common.py @@ -16,7 +16,6 @@ import zipfile import binascii import tempfile -import warnings import threading import traceback import subprocess @@ -56,9 +55,6 @@ # set up logger LOG = logging.getLogger(__name__) -# log format strings -LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s: %(message)s' -LOG_DATE_FORMAT = TIMESTAMP_FORMAT # flag to indicate whether we've received and processed the stop signal INFRA_STOPPED = False @@ -333,14 +329,6 @@ def md5(string): return m.hexdigest() -def in_ci(): - """ Whether or not we are running in a CI environment """ - for key in ('CI', 'TRAVIS'): - if os.environ.get(key, '') not in [False, '', '0', 'false']: - return True - return False - - def in_docker(): return config.in_docker() @@ -353,43 +341,6 @@ def has_docker(): return False -def setup_logging(): - # determine and set log level - log_level = logging.DEBUG if is_debug() else logging.INFO - logging.basicConfig(level=log_level, format=LOG_FORMAT, datefmt=LOG_DATE_FORMAT) - - # set up werkzeug logger - - class WerkzeugLogFilter(logging.Filter): - def filter(self, record): - return record.name != 'werkzeug' - - root_handlers = logging.getLogger().handlers - if len(root_handlers) > 0: - root_handlers[0].addFilter(WerkzeugLogFilter()) - if is_debug(): - format = '%(asctime)s:API: %(message)s' - handler = logging.StreamHandler() - handler.setLevel(logging.INFO) - handler.setFormatter(logging.Formatter(format)) - logging.getLogger('werkzeug').addHandler(handler) - - # disable some logs and warnings - warnings.filterwarnings('ignore') - logging.captureWarnings(True) - logging.getLogger('boto3').setLevel(logging.INFO) - logging.getLogger('s3transfer').setLevel(logging.INFO) - logging.getLogger('docker').setLevel(logging.WARNING) - logging.getLogger('urllib3').setLevel(logging.WARNING) - logging.getLogger('requests').setLevel(logging.WARNING) - logging.getLogger('botocore').setLevel(logging.ERROR) - logging.getLogger('elasticsearch').setLevel(logging.ERROR) - - -def is_debug(): - return os.environ.get('DEBUG', '').strip() not in ['', '0', 'false'] - - def is_port_open(port_or_url, http_path=None, expect_success=True): port = port_or_url host = 'localhost' @@ -669,11 +620,6 @@ def load_file(file_path, default=None, mode=None): return result -def docker_container_running(container_name): - container_names = re.split(r'\s+', run("docker ps --format '{{.Names}}'").replace('\n', ' ')) - return container_name in container_names - - def to_str(obj, encoding=DEFAULT_ENCODING, errors='strict'): """ If ``obj`` is an instance of ``binary_type``, return ``obj.decode(encoding, errors)``, otherwise return ``obj`` """ diff --git a/localstack/utils/server/multiserver.py b/localstack/utils/server/multiserver.py index 75c3c77dab761..82ddfcfe9aa43 100644 --- a/localstack/utils/server/multiserver.py +++ 
b/localstack/utils/server/multiserver.py @@ -7,8 +7,8 @@ from moto.server import main as moto_main from localstack import constants from localstack.utils.common import ( - FuncThread, ShellCommandThread, TMP_THREADS, to_str, wait_for_port_open, - json_safe, setup_logging) + FuncThread, ShellCommandThread, TMP_THREADS, to_str, wait_for_port_open, json_safe) +from localstack.utils.bootstrap import setup_logging from localstack.services.generic_proxy import ProxyListener, GenericProxy LOG = logging.getLogger('localstack.multiserver') diff --git a/requirements.txt b/requirements.txt index 9a2358cbe62a8..84b940835d51e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,16 +1,22 @@ -# Lines annotated with "#extended-lib" are excluded +# (1) Lines annotated with "#extended-lib" are excluded # from the dependencies when we build the pip package +# (2) Lines annotated with "#basic-lib" are included in the basic +# version of the pip package (i.e., for "pip install localstack"). + +# (3) The remaining dependencies (those without markers) are only included in +# the "full" version of the pip package (i.e., "pip install localstack[full]"). + airspeed==0.5.10 # Use this version until this bug is fixed: https://github.com/awslabs/amazon-kinesis-client-python/issues/99 git+https://github.com/whummer/amazon-kinesis-client-python@maven-retries#egg=amazon_kclpy # amazon-kclpy==1.5.1 awscli>=1.14.18 boto>=2.49.0 -boto3>=1.9.71 +boto3>=1.9.71 #basic-lib botocore>=1.12.13 coverage>=4.0.3 -docopt>=0.6.2 +docopt>=0.6.2 #basic-lib elasticsearch>=6.0.0,<7.0.0 flake8>=3.6.0 flake8-quotes>=0.11.0 @@ -20,7 +26,7 @@ flask_swagger==0.2.12 forbiddenfruit==0.1.3 jsonpath-rw==1.4.0 localstack-ext>=0.8.6 -localstack-client==0.9 +localstack-client==0.9 #basic-lib moto-ext>=1.3.7.1 nose>=1.3.7 nose-timer>=0.7.5 @@ -31,7 +37,7 @@ python-coveralls>=2.9.1 pyyaml>=3.13,<=5.1 requests>=2.20.0 requests-aws4auth==0.9 -six>=1.12.0 -subprocess32-ext==3.2.8.2 +six>=1.12.0 #basic-lib +subprocess32-ext==3.2.8.2 #basic-lib xmltodict>=0.11.0 # yappi==1.0 diff --git a/setup.py b/setup.py index b56f4cd2d40dc..412572c2f8c10 100755 --- a/setup.py +++ b/setup.py @@ -1,15 +1,15 @@ #!/usr/bin/env python -from __future__ import print_function - import re from setuptools import find_packages, setup -# marker for extended/ignored libs in requirements.txt +# marker for extended/ignored and basic libs in requirements.txt IGNORED_LIB_MARKER = '#extended-lib' +BASIC_LIB_MARKER = '#basic-lib' # parameter variables install_requires = [] +extra_requires = [] dependency_links = [] package_data = {} @@ -27,22 +27,32 @@ if line and line[0] == '#' and '#egg=' in line: line = re.search(r'#\s*(.*)', line).group(1) if line and line[0] != '#': - if '://' not in line and IGNORED_LIB_MARKER not in line: - install_requires.append(line) + # include only basic requirements here + if IGNORED_LIB_MARKER not in line: + line = line.split(' #')[0].strip() + if BASIC_LIB_MARKER in line: + install_requires.append(line) + else: + extra_requires.append(line) + +# copy requirements file, to make it available inside the package at runtime +with open('localstack/requirements.copy.txt', 'w') as f: + f.write(requirements) package_data = { '': ['Makefile', '*.md'], 'localstack': [ 'package.json', + 'requirements*.txt', 'dashboard/web/*.*', 'dashboard/web/css/*', 'dashboard/web/img/*', 'dashboard/web/js/*', 'dashboard/web/views/*', 'ext/java/*.*', - 'ext/java/src/main/java/com/atlassian/localstack/*.*', - 'utils/kinesis/java/com/atlassian/*.*' + 
'ext/java/src/main/java/cloud/localstack/*.*', + 'utils/kinesis/java/cloud/localstack/*.*' ]} @@ -52,13 +62,16 @@ name='localstack', version=version, description='An easy-to-use test/mocking framework for developing Cloud applications', - author='Waldemar Hummer (Atlassian)', + author='Waldemar Hummer', author_email='waldemar.hummer@gmail.com', url='https://github.com/localstack/localstack', scripts=['bin/localstack'], packages=find_packages(exclude=('tests', 'tests.*')), package_data=package_data, install_requires=install_requires, + extras_require={ + 'full': extra_requires + }, dependency_links=dependency_links, test_suite='tests', license='Apache License 2.0',
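Finally, `setup.py` now writes a copy of `requirements.txt` into the package (`localstack/requirements.copy.txt`) and ships it via `package_data`, so the dependency list is available at runtime. A hypothetical helper (names assumed, not part of the change) for locating that copy from installed code:

```
import os
import localstack

def packaged_requirements_path():
    # The copy written by setup.py lives next to the package's __init__.py.
    return os.path.join(os.path.dirname(localstack.__file__), 'requirements.copy.txt')

if __name__ == '__main__':
    print(packaged_requirements_path())
```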