From 12c7ba342d3cbc62945c264a96dac79a3d0797ba Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 29 Dec 2015 13:35:12 -0500 Subject: [PATCH 001/193] Raise on failure to pull Docker image --- atomicapp/nulecule/container.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 77b24e94..cb50c80f 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -12,6 +12,7 @@ class DockerHandler(object): + """Interface to interact with Docker.""" def __init__(self, dryrun=False, docker_cli='/usr/bin/docker'): @@ -48,10 +49,14 @@ def pull(self, image, update=False): logger.info('Pulling Docker image: %s' % image) pull_cmd = [self.docker_cli, 'pull', image] logger.debug(' '.join(pull_cmd)) - if not self.dryrun: - subprocess.call(pull_cmd) else: logger.info('Skipping pulling Docker image: %s' % image) + return + + if self.dryrun: + logger.info("DRY-RUN: %s", pull_cmd) + elif subprocess.call(pull_cmd) != 0: + raise Exception("Could not pull Docker image %s" % image) def extract(self, image, source, dest, update=False): """ From b64068d468cf5b7141c2fb856868a39bc70b51af Mon Sep 17 00:00:00 2001 From: "Brian (bex) Exelbierd" Date: Sun, 10 Jan 2016 20:57:53 +0100 Subject: [PATCH 002/193] Fixing the build_run.sh Dockerfile path This allows you to build both .git and .pkgs containers --- build_run.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/build_run.sh b/build_run.sh index 382d0525..e13c29c4 100755 --- a/build_run.sh +++ b/build_run.sh @@ -1,11 +1,15 @@ #!/bin/bash WHAT=$1 +DISTRO=$2 # TODO sanity check that we got docker >= 1.6 -[ -z "${WHAT}" ] && echo "Need to provide a distro you want to build for (fedora|centos|rhel7|debian)" && exit -IMAGE_NAME=atomicapp-${WHAT} +[ -z "${WHAT}" ] && echo "Need to provide a source location to build from (git|pkgs)" && echo "usage: build_run.sh SRC_LOCATION DISTRO" && exit + +[ -z 
"${DISTRO}" ] && echo "Need to provide a distro you want to build for (fedora|centos|rhel7|debian)" && echo "usage: build_run.sh SRC_LOCATION DISTRO" && exit + +IMAGE_NAME=atomicapp-${WHAT}-${DISTRO} if [ -z "$USERNAME" ]; then echo "setting USERNAME to " `whoami` @@ -13,7 +17,7 @@ if [ -z "$USERNAME" ]; then fi echo docker build $USERNAME/$IMAGE_NAME -docker build --rm --tag $USERNAME/$IMAGE_NAME --file Dockerfile.${WHAT} . +docker build --rm --tag $USERNAME/$IMAGE_NAME --file Dockerfiles.${WHAT}/Dockerfile.${DISTRO} . #doesn't really make sense to run it #test From a26d7219a019ba137c91910433c941bc10da9448 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 14 Jan 2016 10:45:51 -0500 Subject: [PATCH 003/193] cli: allow overriding cmdline from env vars This is useful when testing or overriding values inside of the native openshift containers started by `oc new-app`. With these overrides we can add extra arguments to the command line or completely set the cmdline. Ex: oc new-app myapp --grant-install-rights \ -e ATOMICAPP_APPEND_ARGS='--providertlsverify=False' or oc new-app projectatomica/atomicapp --grant-install-rights \ -e ATOMICAPP_ARGS='run -v --dest=none projectatomic/etherpad-centos7-atomicapp' --- atomicapp/cli/main.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 94119efb..187dbe7f 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -328,6 +328,20 @@ def run(self): if Utils.running_on_openshift(): cmdline = 'run -v --dest=none /{}'.format(APP_ENT_PATH).split() + # If the user has elected to provide all arguments via the + # ATOMICAPP_ARGS environment variable then set it now + argstr = os.environ.get('ATOMICAPP_ARGS') + if argstr: + logger.debug("Setting cmdline args to: {}".format(argstr)) + cmdline = argstr.split() + + # If the user has elected to provide some arguments via the + # ATOMICAPP_APPEND_ARGS environment variable then add those now + argstr = 
os.environ.get('ATOMICAPP_APPEND_ARGS') + if argstr: + logger.debug("Appending args to cmdline: {}".format(argstr)) + cmdline.extend(argstr.split()) + # We want to be able to place options anywhere on the command # line. We have added all global options to each subparser, # but subparsers require all options to be after the 'action' @@ -353,6 +367,9 @@ def run(self): else: set_logging(level=logging.INFO) + # Now that we have set the logging level let's print out the cmdline + logger.debug("Final parsed cmdline: {}".format(' '.join(cmdline))) + lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE)) try: lock.acquire(timeout=-1) From 438ad78ec3af83c012d395ece1e9f9c31e1ff1ac Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 14 Jan 2016 11:02:07 -0500 Subject: [PATCH 004/193] Add support for embedding answers file in application. This has been a use case that has been requested to allow for someone who is doing deployment of an atomicapp to actually embed his answers file within the provided atomicapp (via FROM:) in order to create a new container that has exactly the config he/she desires. An example would be that I have decided to package up mariadb with an answers file that contains all the questions answered already. I create an answers file with the data in it then create a new container with a Dockerfile like so: FROM: projectatomic/mariadb-centos7-atomicapp ADD /answers.conf /application-entity/ Then the resulting atomicapp can be deployed without anyone having to create an answers file or answer questions. 
--- atomicapp/nulecule/main.py | 51 +++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 01f17b39..56707140 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -88,16 +88,13 @@ def __init__(self, app_spec, destination=None, answers_file=None): # Set where the main nulecule file should be self.main_file = os.path.join(self.app_path, MAIN_FILE) - # If user provided a path to answers then make sure it exists. If they - # didn't provide one then use the one in the app dir if it exists. + # If user provided a path to answers, then load them if answers_file: - self.answers_file = answers_file - if not os.path.isfile(self.answers_file): + if not os.path.isfile(answers_file): raise NuleculeException( - "Path for answers doesn't exist: %s" % self.answers_file) - else: - if os.path.isfile(os.path.join(self.app_path, ANSWERS_FILE)): - self.answers_file = os.path.join(self.app_path, ANSWERS_FILE) + "Answers file doesn't exist: {}".format(answers_file)) + self.answers_file = answers_file + self._process_answers() # TODO: put this in a better place in the future. # If we are running inside of an openshift pod then override @@ -188,8 +185,6 @@ def install(self, nodeps=False, update=False, dryrun=False, Returns: None """ - if self.answers_file: - self.answers = Utils.loadAnswers(self.answers_file) self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT # Call unpack. If the app doesn't exist it will be pulled. 
If @@ -225,8 +220,6 @@ def run(self, cli_provider, answers_output, ask, Returns: None """ - if self.answers_file: - self.answers = Utils.loadAnswers(self.answers_file) self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT dryrun = kwargs.get('dryrun') or False @@ -234,6 +227,11 @@ def run(self, cli_provider, answers_output, ask, # it does exist it will be just be loaded and returned self.nulecule = self.unpack(dryrun=dryrun, config=self.answers) + # If we didn't find an answers file before then call _process_answers + # again just in case the app developer embedded an answers file + if not self.answers_file: + self._process_answers() + # Unless otherwise specified with CLI arguments we will # default to the first provider available providers = Utils.getSupportedProviders(self.app_path) @@ -261,8 +259,8 @@ def stop(self, cli_provider, **kwargs): kwargs (dict): Extra keyword arguments """ # For stop we use the generated answer file from the run - self.answers = Utils.loadAnswers( - os.path.join(self.app_path, ANSWERS_RUNTIME_FILE)) + self.answers_file = os.path.join(self.app_path, ANSWERS_RUNTIME_FILE) + self._process_answers() dryrun = kwargs.get('dryrun') or False self.nulecule = Nulecule.load_from_path( @@ -282,6 +280,31 @@ def clean(self, force=False): distutils.dir_util.remove_tree(self.unpack_path) self.initialize() + def _process_answers(self): + """ + Processes answer files to load data from them. + + NOTE: This function should be called once on startup and then + once more after the application has been extracted, but only + if answers file wasn't found on the first invocation. The idea + is to allow for people to embed an answers file in the application + if they want, which won't be available until after extraction. + + Returns: + None + """ + + # If the user didn't provide an answers file then check the app + # dir to see if one exists. 
+ if not self.answers_file: + f = os.path.join(self.app_path, ANSWERS_FILE) + if os.path.isfile(f): + self.answers_file = f + + # At this point if we have an answers file, load it + if self.answers_file: + self.answers = Utils.loadAnswers(self.answers_file) + def _write_answers(self, path, answers, answers_format): """ Write answers data to file. From b9c72124d04e83710caec3ecb10c349b349bbc6e Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 14 Jan 2016 11:20:40 -0500 Subject: [PATCH 005/193] Add in cli options for some provider* answers. Probably the most useful of these is --providertlsverify since it will enable people to easily disable tls verification if necessary. --- atomicapp/cli/main.py | 42 ++++++++++++++++++++++++++++++++++++++ atomicapp/nulecule/main.py | 15 +++++++++++--- 2 files changed, 54 insertions(+), 3 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 187dbe7f..8b4faff5 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -71,6 +71,7 @@ def cli_install(args): destination = argdict['destination'] nm = NuleculeManager(app_spec=argdict['app_spec'], destination=destination, + cli_answers=argdict['cli_answers'], answers_file=argdict['answers']) nm.install(**argdict) # Clean up the files if the user asked us to. Otherwise @@ -94,6 +95,7 @@ def cli_run(args): destination = argdict['destination'] nm = NuleculeManager(app_spec=argdict['app_spec'], destination=destination, + cli_answers=argdict['cli_answers'], answers_file=argdict['answers']) nm.run(**argdict) # Clean up the files if the user asked us to. Otherwise @@ -125,6 +127,18 @@ def cli_stop(args): sys.exit(1) +# Create a custom action parser. Need this because for some args we don't +# want to store a value if the user didn't provide one. "store_true" does +# not allow this; it will always create an attribute and store a value. 
+class TrueOrFalseAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): + if values.lower() == 'true': + booleanvalue = True + else: + booleanvalue = False + setattr(namespace, self.dest, booleanvalue) + + class CLI(): def __init__(self): @@ -210,6 +224,26 @@ def create_parser(self): default=ANSWERS_FILE_SAMPLE_FORMAT, choices=['ini', 'json', 'xml', 'yaml'], help="The format for the answers.conf.sample file. Default: %s" % ANSWERS_FILE_SAMPLE_FORMAT) + globals_parser.add_argument( + "--providertlsverify", + dest="providertlsverify", + action=TrueOrFalseAction, + choices=['True', 'False'], + help=(''' + Value for providertlsverify answers option. + --providertlsverify=False to disable tls verification''')) + globals_parser.add_argument( + "--providerconfig", + dest="providerconfig", + help='Value for providerconfig answers option.') + globals_parser.add_argument( + "--providercafile", + dest="providercafile", + help='Value for providercafile answers option.') + globals_parser.add_argument( + "--providerapi", + dest="providerapi", + help='Value for providerapi answers option.') # === "run" SUBPARSER === run_subparser = toplevel_subparsers.add_parser( @@ -359,6 +393,14 @@ def run(self): # Finally, parse args and give error if necessary args = self.parser.parse_args(cmdline) + # Take the arguments that correspond to "answers" config file data + # and make a dictionary of it to pass along in args. 
+ setattr(args, 'cli_answers', {}) + for item in ['providerapi', 'providercafile', + 'providerconfig', 'providertlsverify']: + if hasattr(args, item) and getattr(args, item) is not None: + args.cli_answers[item] = getattr(args, item) + # Set logging level if args.verbose: set_logging(level=logging.DEBUG) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 56707140..1dd35542 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -29,7 +29,8 @@ class NuleculeManager(object): Interface to install, run, stop a Nulecule application. """ - def __init__(self, app_spec, destination=None, answers_file=None): + def __init__(self, app_spec, destination=None, + cli_answers=None, answers_file=None): """ init function for NuleculeManager. Sets a few instance variables. @@ -37,9 +38,11 @@ def __init__(self, app_spec, destination=None, answers_file=None): app_spec: either a path to an unpacked nulecule app or a container image name where a nulecule can be found destination: where to unpack a nulecule to if it isn't local + cli_answers: some answer file values provided from cli args + answers_file: the location of the answers file """ - # Let's pass in a default format for our answers self.answers = copy.deepcopy(DEFAULT_ANSWERS) + self.cli_answers = cli_answers self.answers_format = None self.answers_file = None # The path to an answer file self.app_path = None # The path where the app resides or will reside @@ -282,7 +285,8 @@ def clean(self, force=False): def _process_answers(self): """ - Processes answer files to load data from them. + Processes answer files to load data from them and then merges + any cli provided answers into the config. 
NOTE: This function should be called once on startup and then once more after the application has been extracted, but only @@ -305,6 +309,11 @@ def _process_answers(self): if self.answers_file: self.answers = Utils.loadAnswers(self.answers_file) + # If there is answers data from the cli then merge it in now + if self.cli_answers: + for k, v in self.cli_answers.iteritems(): + self.answers[GLOBAL_CONF][k] = v + def _write_answers(self, path, answers, answers_format): """ Write answers data to file. From 89667d918f64001434968b28163761bb29f37c08 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 13 Jan 2016 13:01:07 -0500 Subject: [PATCH 006/193] native openshift: move detection of provider information to provider. --- atomicapp/cli/main.py | 4 ++-- atomicapp/nulecule/main.py | 10 ---------- atomicapp/providers/openshift.py | 10 ++++++++++ 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 8b4faff5..341d54ff 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -358,9 +358,9 @@ def run(self): # If we are running in an openshift pod (via `oc new-app`) then # there is no cmdline but we want to default to "atomicapp run". - # In this case copy files to cwd and use the working directory. if Utils.running_on_openshift(): - cmdline = 'run -v --dest=none /{}'.format(APP_ENT_PATH).split() + cmdline = 'run -v --dest=none --provider=openshift /{}' + cmdline = cmdline.format(APP_ENT_PATH).split() # now a list # If the user has elected to provide all arguments via the # ATOMICAPP_ARGS environment variable then set it now diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 1dd35542..0aa52f8b 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -99,16 +99,6 @@ def __init__(self, app_spec, destination=None, self.answers_file = answers_file self._process_answers() - # TODO: put this in a better place in the future. 
- # If we are running inside of an openshift pod then override - # some of the config by detecting some values from the environment - if Utils.running_on_openshift(): - self.answers[GLOBAL_CONF]['provider'] = 'openshift' - self.answers[GLOBAL_CONF]['accesstoken'] = os.environ['TOKEN_ENV_VAR'] - self.answers[GLOBAL_CONF]['namespace'] = os.environ['POD_NAMESPACE'] - self.answers[GLOBAL_CONF]['providerapi'] = \ - Utils.get_openshift_api_endpoint_from_env() - def unpack(self, update=False, dryrun=False, nodeps=False, config=None): """ diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 2a35b152..c0123261 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -698,6 +698,16 @@ def _set_config_values(self): """ + # First things first, if we are running inside of an openshift pod via + # `oc new-app` then get the config from the environment (files/env vars) + if Utils.running_on_openshift(): + self.providerapi = Utils.get_openshift_api_endpoint_from_env() + self.namespace = os.environ['POD_NAMESPACE'] + self.access_token = os.environ['TOKEN_ENV_VAR'] + self.provider_tls_verify = False + self.provider_ca = None + return # No need to process other information + # initialize result to default values result = {PROVIDER_API_KEY: self.providerapi, ACCESS_TOKEN_KEY: self.access_token, From 4f6c44e17d954ebdeb49034144bb5a6f97a5a401 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 13 Jan 2016 13:02:22 -0500 Subject: [PATCH 007/193] native openshift: Add in ssl verification. 
--- atomicapp/providers/openshift.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index c0123261..eb14e6c5 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -40,6 +40,9 @@ import logging logger = logging.getLogger(__name__) +# If running in an openshift POD via `oc new-app`, the ca file is here +OPENSHIFT_POD_CA_FILE = "/run/secrets/kubernetes.io/serviceaccount/ca.crt" + class OpenshiftClient(object): @@ -704,8 +707,8 @@ def _set_config_values(self): self.providerapi = Utils.get_openshift_api_endpoint_from_env() self.namespace = os.environ['POD_NAMESPACE'] self.access_token = os.environ['TOKEN_ENV_VAR'] - self.provider_tls_verify = False - self.provider_ca = None + self.provider_tls_verify = True + self.provider_ca = OPENSHIFT_POD_CA_FILE return # No need to process other information # initialize result to default values From 4b07ba01a9348bb66a7b6296d1956619c3af20a5 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 14 Jan 2016 12:54:07 -0500 Subject: [PATCH 008/193] native openshift: respect it if user set tls_verify to False. We default to true so if it is false then the user set it somewhere so let's respect that and not verify. --- atomicapp/providers/openshift.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index eb14e6c5..8b987152 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -188,7 +188,7 @@ def _requests_tls_verify(self): in format that is used by requests library. 
see: http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification """ - if self.provider_ca: + if self.provider_ca and self.provider_tls_verify: return self.provider_ca else: return self.provider_tls_verify @@ -703,12 +703,14 @@ def _set_config_values(self): # First things first, if we are running inside of an openshift pod via # `oc new-app` then get the config from the environment (files/env vars) + # NOTE: pick up provider_tls_verify from answers if exists if Utils.running_on_openshift(): self.providerapi = Utils.get_openshift_api_endpoint_from_env() self.namespace = os.environ['POD_NAMESPACE'] self.access_token = os.environ['TOKEN_ENV_VAR'] - self.provider_tls_verify = True self.provider_ca = OPENSHIFT_POD_CA_FILE + self.provider_tls_verify = \ + self.config.get(PROVIDER_TLS_VERIFY_KEY, True) return # No need to process other information # initialize result to default values From be23b4badbecea5291d2559e4f0994c9deefb87d Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 14 Jan 2016 16:44:03 -0500 Subject: [PATCH 009/193] 0.3.1 Release --- CHANGELOG.md | 120 ++++++++++++++++++++++++++++++ Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 7 files changed, 126 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d0e6760c..577c44c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,123 @@ +## Atomic App 0.3.1 (01-14-2016) + +This release introduces some significant features to Atomic App as well as our first release since 0.3.0. + +The outmost features include: + - Persistent storage + - HTTPS (TLS) verification and support for OpenShift + - OpenShift stop support + - Nested Nulecule application support for OpenShift. + +For an extended list of changes, please see the git shortlog below. 
+ +``` +Charlie Drage (9): + Merge pull request #457 from rtnpro/remove-docker-containers-on-stop + Merge pull request #392 from kadel/marathon-provider + 0.3.0 Release + Add persistent storage core + Add Kubernetes persistent storage functionality + Test requirements.py persistent storage + Warn if no persistent volumes exist to claim + Merge pull request #485 from kadel/issue484 + Stop Docker containers more gracefully + +Dharmit Shah (10): + Common place for list of Providers + PEP8 + Adds Marathon provider data for `helloapache` example + Nulecule for `helloapache` app now contains information about marathon artifacts + CLI tests for marathon provider using `helloapache` atomic app + Information about where to specify `providerapi` for Marathon provider + Changes suggested in PR review + Added try..except block for request + Catch `AnyMarkupError` instead of `Exception` for invalid artifacts + Use `ProviderFailedException` instead of `sys.exit` + +Dusty Mabe (40): + Merge pull request #463 from kadel/make_rest_request + Revert "Remove container on stopping on Docker provider. Fixes #389" + Merge pull request #464 from projectatomic/revert-457-remove-docker-containers-on-stop + Allow user to specify both source and destination as directories. + Merge pull request #466 from dustymabe/dusty-src-dest + cli: import argparse rather than specific items + cli: Restructure argument parsers. + cli: Add global options help text to toplevel parser. + cli: Add in a --mode cli switch to select action. + Merge pull request #468 from dustymabe/dusty-add-mode + Fix yaml choice for --answers-format. + utils: add rm_dir() function. + Add --destination=none. Files don't persist after run. + Update native openshift code to use dest=none. + Add 'genanswers' action to generate answers.conf in cwd. + Merge pull request #469 from dustymabe/dusty-add-genanswers-new + cli: Fix the name of the genanswers subparser. + cli: Clarify some of the app_spec help texts. 
+ Merge pull request #465 from projectatomic/openshift-unittests + Merge pull request #473 from kadel/openshift-AttributeError + Merge pull request #472 from dustymabe/dusty-update-stop-app-spec-help + Merge pull request #474 from kadel/openshift-stop + Merge pull request #460 from cdrage/persistent-storage + Merge pull request #488 from cdrage/stop-more-gracefully + cli: Add genanswers as a choice for --mode. + Include port information in detected openshift api endpoint. + Merge pull request #490 from dustymabe/allow-genanswers-for-mode + Merge pull request #491 from dustymabe/dusty-add-port-to-providerapi + Merge pull request #480 from kadel/openshift-ssl + Merge pull request #489 from projectatomic/oc-new-app-with-nested-nulecules + cli: allow overriding cmdline from env vars + Merge pull request #504 from dustymabe/dusty-cli-overrides + Add support for embedding answers file in application. + Merge pull request #505 from dustymabe/dusty-allow-embedded-answers-file + Add in cli options for some provider* answers. + Merge pull request #506 from dustymabe/dusty-add-cli-overrides + native openshift: move detection of provider information to provider. + native openshift: Add in ssl verification. + native openshift: respect it if user set tls_verify to False. + Merge pull request #503 from dustymabe/dusty-ssl-in-native-openshift + +Ratnadeep Debnath (13): + Remove container on stopping on Docker provider. Fixes #389 + Refactored openshift provider for testing. #459 + Refactor openshift provider: Move interaction with remote API from OpenShiftProvider + Added tests for OpenshiftProvider.deploy. + Refactor openshift _process_artifacts + Added tests for openshift _process_artifact_data. + Added tests for openshift to parse kube config + Added docs for openshift provider unittests. + Unpack image using Openshift API on Openshift provider. + Fixed unittests for Nulecule and NuleculeComponent + Fix using ssl connection options in websocket connection to Openshift. 
+ Wait for Openshift pod to run, before extracting content. + Delete openshift pod irrespective of successful or failed extraction. + +Tomas Kral (24): + move openshift._make_request() to Utils.make_rest_request() + first draft of marathon provider + change providerurl to providerapi + fix dry-run for marathon + empty marathon_artifacts array in init() + marathon fixes + add Marathon to list of supported providers + raise exeption on AnyMarkupError in Marathon provider + mention Mesos with Marathon in docs + use Utils.make_rest_request in Marathon provider + add more docs to functions in Marathon provider + fix AttributeError OpenshiftClient.ssl_verify + Implement stop for OpenShift provider. + openshift provider: fix typos, add comments + openshift provider: when deleting use selector from RC to get PODs + openshift provider: update comments + openshift provider: add option for skiping tls verification + fix typos and flake8 errors + openshift provider: doc of providertlsverify and providercafile + openshift provider: break ssl_verify to provider_ca and provider_tls_verify + openshift provider: use _requests_tls_verify() in undeploy + openshift provider: check that required options are !None + openshift provider: test connection to OpenShift print nicer error message when invalid ttl/ssl certificate + openshift provider: translate CA path to host path and check if exists +``` + ## Atomic App 0.3.0 (12-16-2015) This release introduces a new provider (Mesos) as well as a major refactor of the OpenShift provider. diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 55ef5c6f..4af28781 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.3.0" +ENV ATOMICAPPVERSION="0.3.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index db9ef706..6150a04c 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.3.0" +ENV ATOMICAPPVERSION="0.3.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 7ed3d788..1ef9dd80 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.3.0" +ENV ATOMICAPPVERSION="0.3.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/README.md b/README.md index 4c1109dc..65205878 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Atomic App is a reference implementation of the [Nulecule Specification](http:// Atomic App is packaged as a container. End-users typically do not install the software from source. Instead use the atomicapp container as the `FROM` line in a Dockerfile and package your application on top. 
For example: ``` -FROM projectatomic/atomicapp:0.3.0 +FROM projectatomic/atomicapp:0.3.1 MAINTAINER Your Name diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 2827d8d2..087269fe 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.3.0' +__ATOMICAPPVERSION__ = '0.3.1' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index d0014388..b81692e5 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.3.0', + version='0.3.1', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From 7a70361a73e900f17a88149ee7efa8a8e7371ee9 Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Fri, 15 Jan 2016 18:59:08 +0100 Subject: [PATCH 010/193] openshift provider: safer stop Delete DCs first than instead of finding and deleting related pods for RC, scale RC down to 0 replicas. 
--- atomicapp/providers/openshift.py | 66 ++++++++++++++++++-------------- atomicapp/utils.py | 3 ++ 2 files changed, 41 insertions(+), 28 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 8b987152..9ed7f220 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -167,6 +167,30 @@ def delete(self, url): logger.error(msg) raise ProviderFailedException(msg) + def scale(self, url, replicas): + """ + Scale ReplicationControllers or DeploymentConfig + + Args: + url (str): full url for artifact + replicas (int): number of replicas scale to + """ + patch = [{"op": "replace", + "path": "/spec/replicas", + "value": replicas}] + + (status_code, return_data) = \ + Utils.make_rest_request("patch", + url, + data=patch, + verify=self._requests_tls_verify()) + if status_code == 200: + logger.info("Sucessfully scaled %s to %s replicas", url, replicas) + else: + msg = "%s %s" % (status_code, return_data) + logger.error(msg) + raise ProviderFailedException(msg) + def process_template(self, url, template): (status_code, return_data) = \ Utils.make_rest_request("post", @@ -371,13 +395,17 @@ def undeploy(self): logger.debug("Starting undeploy") delete_artifacts = [] for kind, objects in self.openshift_artifacts.iteritems(): - delete_artifacts.extend(objects) + # Add DCs to beggining of list so they are deleted first. + if kind == "deploymentconfig": + delete_artifacts = objects + delete_artifacts + else: + delete_artifacts = delete_artifacts + objects for artifact in delete_artifacts: kind = artifact["kind"].lower() namespace = self._get_namespace(artifact) - # get name from metadata so we know which object to delete + # Get name from metadata so we know which object to delete. 
if "metadata" in artifact and \ "name" in artifact["metadata"]: name = artifact["metadata"]["name"] @@ -415,34 +443,16 @@ def undeploy(self): # add items to list of artifact to be deleted delete_artifacts.extend(items) - # If this is a ReplicationController we need to delete all - # Pods that were created by this RC. Find the pods that - # belong to this RC by querying for all pods and filtering - # based on the selector used in the RC. - if kind.lower() == "replicationcontroller": - selector = ",".join(["%s=%s" % (k, v) for k, v in artifact["spec"]["selector"].iteritems()]) - logger.debug("Using labelSelector: %s" % selector) - params = {"labelSelector": selector} - url = self._get_url(namespace, "pod", params=params) - (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self.oc._requests_tls_verify()) - if status_code != 200: - raise ProviderFailedException("Cannot get Pods for " - "ReplicationController %s" - " (status code %s)" % - (name, status_code)) - # kind of returned data is ReplicationControllerList - # https://docs.openshift.com/enterprise/3.1/rest_api/kubernetes_v1.html#v1-podlist - # we need to modify items to get valid Pod - items = return_data["items"] - for item in items: - item["kind"] = "Pod" - item["apiVersion"] = return_data["apiVersion"] - # add items to list of artifact to be deleted - delete_artifacts.extend(items) - url = self._get_url(namespace, kind, name) + # Scale down RC to 0 replicas before deleting. + # This should take care of all asocciated pods. 
+ if kind.lower() == "replicationcontroller": + if self.dryrun: + logger.info("DRY-RUN: SCALE %s down to 0", url) + else: + self.oc.scale(url, 0) + if self.dryrun: logger.info("DRY-RUN: DELETE %s", url) else: diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 7fc50866..14418a78 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -427,6 +427,9 @@ def make_rest_request(method, url, verify=True, data=None): res = requests.put(url, json=data, verify=verify) elif method.lower() == "delete": res = requests.delete(url, json=data, verify=verify) + elif method.lower() == "patch": + headers = {"Content-Type": "application/json-patch+json"} + res = requests.patch(url, json=data, verify=verify, headers=headers) status_code = res.status_code return_data = res.json() From 585fe90bf01067b2fabfba6ed086f73b3a8bd617 Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Mon, 18 Jan 2016 15:20:34 +0100 Subject: [PATCH 011/193] openshift provider: fix typos, add more explanation --- atomicapp/providers/openshift.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 9ed7f220..fcf64184 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -395,7 +395,9 @@ def undeploy(self): logger.debug("Starting undeploy") delete_artifacts = [] for kind, objects in self.openshift_artifacts.iteritems(): - # Add DCs to beggining of list so they are deleted first. + # Add DCs to beginning of list so they are deleted first. + # Do DC first because if you do RC before DC then the DC + # will re-spawn the RC before the DC is deleted. 
if kind == "deploymentconfig": delete_artifacts = objects + delete_artifacts else: From 0d93b17f10446ccdb5e34fb2de75a0aa153bbb9c Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Mon, 18 Jan 2016 16:23:59 +0100 Subject: [PATCH 012/193] openshift provider: remove acronyms from comments --- atomicapp/providers/openshift.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index fcf64184..780c8cef 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -395,9 +395,10 @@ def undeploy(self): logger.debug("Starting undeploy") delete_artifacts = [] for kind, objects in self.openshift_artifacts.iteritems(): - # Add DCs to beginning of list so they are deleted first. - # Do DC first because if you do RC before DC then the DC - # will re-spawn the RC before the DC is deleted. + # Add deployment configs to beginning of the list so they are deleted first. + # Do deployment config first because if you do replication controller + # before deployment config then the deployment config will re-spawn + # the replication controller before the deployment config is deleted. if kind == "deploymentconfig": delete_artifacts = objects + delete_artifacts else: @@ -418,9 +419,10 @@ def undeploy(self): logger.info("Undeploying artifact name=%s kind=%s" % (name, kind)) - # If this is a DeploymentConfig we need to delete all - # ReplicationControllers that were created by this DC. Find the RC - # that belong to this DC by querying for all RC and filtering based + # If this is a deployment config we need to delete all + # replication controllers that were created by this. 
+ # Find the replication controller that was created by this deployment + # config by querying for all replication controllers and filtering based # on automatically created label openshift.io/deployment-config.name if kind.lower() == "deploymentconfig": params = {"labelSelector": @@ -447,8 +449,9 @@ def undeploy(self): url = self._get_url(namespace, kind, name) - # Scale down RC to 0 replicas before deleting. - # This should take care of all asocciated pods. + # Scale down replication controller to 0 replicas before deleting. + # This should take care of all pods created by this replication + # controller and we can safely delete it. if kind.lower() == "replicationcontroller": if self.dryrun: logger.info("DRY-RUN: SCALE %s down to 0", url) From 6652ed59801f400aa8e2ad4c4daaaa43b3bb421b Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 14 Jan 2016 23:53:24 -0500 Subject: [PATCH 013/193] docker: fix stopping for artifacts with '--name=' Fixes #509 --- atomicapp/providers/docker.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index b7d7c484..c65a7355 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -113,6 +113,12 @@ def undeploy(self): label_run = None with open(artifact_path, "r") as fp: label_run = fp.read().strip() + + # If user specified a name of the container via --name=NAME then + # then remove the equals sign since it breaks our later processing + label_run = label_run.replace('--name=', '--name ') + + # Convert to list for processing run_args = label_run.split() # If any artifacts are labelled by name, add it to a container dict list From e0f297071804995ba30398b748c3c976c81c4b93 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 19 Jan 2016 14:43:25 +0530 Subject: [PATCH 014/193] Support specifying default provider in Nulecule spec file. 
Fixes #378 --- atomicapp/constants.py | 1 - atomicapp/nulecule/base.py | 5 ++- tests/units/nulecule/test_nulecule.py | 59 +++++++++++++++++++++++++-- 3 files changed, 60 insertions(+), 5 deletions(-) diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 087269fe..1321b10c 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -61,7 +61,6 @@ DEFAULT_NAMESPACE = "default" DEFAULT_ANSWERS = { "general": { - "provider": DEFAULT_PROVIDER, "namespace": DEFAULT_NAMESPACE } } diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 59fcdfb1..503c3ed1 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -16,7 +16,8 @@ NAME_KEY, INHERIT_KEY, ARTIFACTS_KEY, - REQUIREMENTS_KEY) + REQUIREMENTS_KEY, + DEFAULT_PROVIDER) from atomicapp.utils import Utils from atomicapp.requirements import Requirements from atomicapp.nulecule.lib import NuleculeBase @@ -197,6 +198,8 @@ def load_config(self, config=None, ask=False, skip_asking=False): """ super(Nulecule, self).load_config( config=config, ask=ask, skip_asking=skip_asking) + if self.namespace == GLOBAL_CONF and self.config[GLOBAL_CONF].get('provider') is None: + self.config[GLOBAL_CONF]['provider'] = DEFAULT_PROVIDER for component in self.components: # FIXME: Find a better way to expose config data to components. # A component should not get access to all the variables, diff --git a/tests/units/nulecule/test_nulecule.py b/tests/units/nulecule/test_nulecule.py index 106333de..f35119e3 100644 --- a/tests/units/nulecule/test_nulecule.py +++ b/tests/units/nulecule/test_nulecule.py @@ -43,19 +43,72 @@ class TestNuleculeLoadConfig(unittest.TestCase): """Test Nulecule load_config""" - def test_load_config(self): - config = {'group1': {'a': 'b'}} + def test_load_config_without_specified_provider(self): + """ + Test Nulecule load_config without specifying a provider. 
+ """ + config = {'general': {}, 'group1': {'a': 'b'}} mock_component_1 = mock.Mock() mock_component_1.config = { 'group1': {'a': 'c', 'k': 'v'}, 'group2': {'1': '2'} } - n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, graph=[], basepath='some/path') + n.components = [mock_component_1] + n.load_config(config) + + self.assertEqual(n.config, { + 'general': {'provider': 'kubernetes'}, + 'group1': {'a': 'b', 'k': 'v'}, + 'group2': {'1': '2'} + }) + + def test_load_config_with_defaultprovider(self): + """ + Test Nulecule load_config with default provider specified + in global params in Nulecule spec. + """ + config = {'general': {}, 'group1': {'a': 'b'}} + mock_component_1 = mock.Mock() + mock_component_1.config = { + 'group1': {'a': 'c', 'k': 'v'}, + 'group2': {'1': '2'} + } + + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, graph=[], + basepath='some/path', + params=[{'name': 'provider', 'default': 'some-provider'}]) + n.components = [mock_component_1] + n.load_config(config) + + self.assertEqual(n.config, { + 'general': {'provider': 'some-provider'}, + 'group1': {'a': 'b', 'k': 'v'}, + 'group2': {'1': '2'} + }) + + def test_load_config_with_defaultprovider_overridden_by_provider_in_answers(self): + """ + Test Nulecule load_config with default provider specified + in global params in Nulecule spec, but overridden in answers config. 
+ """ + config = {'general': {'provider': 'new-provider'}, + 'group1': {'a': 'b'}} + mock_component_1 = mock.Mock() + mock_component_1.config = { + 'group1': {'a': 'c', 'k': 'v'}, + 'group2': {'1': '2'} + } + + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, graph=[], + basepath='some/path', + params=[{'name': 'provider', 'default': 'some-provider'}]) n.components = [mock_component_1] n.load_config(config) self.assertEqual(n.config, { + 'general': {'provider': 'new-provider'}, 'group1': {'a': 'b', 'k': 'v'}, 'group2': {'1': '2'} }) From 93eb31637b047726bf3a96840f62541f0c954c1d Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Mon, 18 Jan 2016 18:22:02 -0500 Subject: [PATCH 015/193] cli: allow specifying target dir during atomic run This commit has one simple goal which is to allow `atomic run` to be executed against an installed directory just a little easier. Before this commit we need: atomic install myapp --destination=/path/to/local/dir atomic run myapp --destination=/path/to/local/dir After this commit: atomic install myapp --destination=/path/to/local/dir atomic run myapp /path/to/local/dir --- Dockerfiles.git/Dockerfile.centos | 4 ++-- Dockerfiles.git/Dockerfile.debian | 4 ++-- Dockerfiles.git/Dockerfile.fedora | 4 ++-- Dockerfiles.pkgs/Dockerfile.centos | 4 ++-- Dockerfiles.pkgs/Dockerfile.fedora | 4 ++-- atomicapp/cli/main.py | 12 ++++++++++++ 6 files changed, 22 insertions(+), 10 deletions(-) diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 4af28781..3661d0dc 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -7,9 +7,9 @@ ENV ATOMICAPPVERSION="0.3.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} 
-v \${OPT2} run \${OPT3} \${IMAGE}" \ + RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 6150a04c..d149cac5 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -5,9 +5,9 @@ MAINTAINER Red Hat, Inc. 
ENV ATOMICAPPVERSION="0.3.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ + RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 1ef9dd80..a9418e83 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -7,9 +7,9 @@ ENV ATOMICAPPVERSION="0.3.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ + RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host 
--name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 857ae1a4..02c8633f 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -10,9 +10,9 @@ ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ + RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" WORKDIR /atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora 
index c798c25f..cdb8a013 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -10,9 +10,9 @@ ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3} \${IMAGE}" \ + RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3} \${IMAGE}" + INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" WORKDIR /atomicapp diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 341d54ff..34c4dee9 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -269,6 +269,8 @@ def create_parser(self): help="Ask for params even if the defaul value is provided") run_subparser.add_argument( "app_spec", + nargs='?', + default=None, help=( "Application to run. This is a container image or a path " "that contains the metadata describing the whole application.")) @@ -313,6 +315,8 @@ def create_parser(self): files and have them cleaned up when finished.''' % CACHE_DIR)) install_subparser.add_argument( "app_spec", + nargs='?', + default=None, help=( "Application to run. 
This is a container image or a path " "that contains the metadata describing the whole application.")) @@ -393,6 +397,14 @@ def run(self): # Finally, parse args and give error if necessary args = self.parser.parse_args(cmdline) + # In the case of Atomic CLI we want to allow the user to specify + # a directory if they want to for "run". For that reason we won't + # default the RUN label for Atomic App to provide an app_spec argument. + # In this case pick up app_spec from $IMAGE env var (set by RUN label). + if args.app_spec is None and os.environ.get('IMAGE') is not None: + logger.debug("Setting app_spec based on $IMAGE env var") + args.app_spec = os.environ['IMAGE'] + # Take the arguments that correspond to "answers" config file data # and make a dictionary of it to pass along in args. setattr(args, 'cli_answers', {}) From e6125b2b377ff4062c5f511a8883dc86cda347e6 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Mon, 18 Jan 2016 18:35:33 -0500 Subject: [PATCH 016/193] cli: add --namespace option to cli --- atomicapp/cli/main.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 34c4dee9..255c16ee 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -224,6 +224,10 @@ def create_parser(self): default=ANSWERS_FILE_SAMPLE_FORMAT, choices=['ini', 'json', 'xml', 'yaml'], help="The format for the answers.conf.sample file. Default: %s" % ANSWERS_FILE_SAMPLE_FORMAT) + globals_parser.add_argument( + "--namespace", + dest="namespace", + help=('The namespace to use in the target provider')) globals_parser.add_argument( "--providertlsverify", dest="providertlsverify", @@ -409,7 +413,7 @@ def run(self): # and make a dictionary of it to pass along in args. 
setattr(args, 'cli_answers', {}) for item in ['providerapi', 'providercafile', - 'providerconfig', 'providertlsverify']: + 'providerconfig', 'providertlsverify', 'namespace']: if hasattr(args, item) and getattr(args, item) is not None: args.cli_answers[item] = getattr(args, item) From 1689a9720f6cac976d5b0db3818692d82e65476f Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Mon, 18 Jan 2016 23:53:14 -0500 Subject: [PATCH 017/193] Allow users to provide answers file as url. This will allow one to specify a location of an answers file that can be retrieved by Atomic App during deployment. A common case for this would be if someone deploying the application wants to deploy many times with a known good configuration where this configuration is stored in source control or some other location accessed via a url. This also may be one avenue for providing answers to applications started via `oc new-app` in openshift. --- atomicapp/nulecule/main.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 0aa52f8b..10417b0b 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -5,6 +5,8 @@ import logging import os import tempfile +import urlparse +import urllib from atomicapp.constants import (GLOBAL_CONF, ANSWERS_FILE_SAMPLE_FORMAT, @@ -91,12 +93,8 @@ def __init__(self, app_spec, destination=None, # Set where the main nulecule file should be self.main_file = os.path.join(self.app_path, MAIN_FILE) - # If user provided a path to answers, then load them - if answers_file: - if not os.path.isfile(answers_file): - raise NuleculeException( - "Answers file doesn't exist: {}".format(answers_file)) - self.answers_file = answers_file + # Process answers. 
+ self.answers_file = answers_file self._process_answers() def unpack(self, update=False, @@ -287,16 +285,32 @@ def _process_answers(self): Returns: None """ + app_path_answers = os.path.join(self.app_path, ANSWERS_FILE) # If the user didn't provide an answers file then check the app # dir to see if one exists. if not self.answers_file: - f = os.path.join(self.app_path, ANSWERS_FILE) - if os.path.isfile(f): - self.answers_file = f + if os.path.isfile(app_path_answers): + self.answers_file = app_path_answers # At this point if we have an answers file, load it if self.answers_file: + + # If this is a url then download answers file to app directory + if urlparse.urlparse(self.answers_file).scheme != "": + logger.debug("Retrieving answers file from: {}" + .format(self.answers_file)) + with open(app_path_answers, 'w+') as f: + stream = urllib.urlopen(self.answers_file) + f.write(stream.read()) + self.answers_file = app_path_answers + + # Check to make sure the file exists + if not os.path.isfile(self.answers_file): + raise NuleculeException( + "Provided answers file doesn't exist: {}".format(self.answers_file)) + + # Load answers self.answers = Utils.loadAnswers(self.answers_file) # If there is answers data from the cli then merge it in now From 9bc9fae2a317b27b016e9deb9e55f362de02b8b5 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Tue, 19 Jan 2016 13:13:33 -0500 Subject: [PATCH 018/193] Create destination app_path dir if it doesn't exist yet. This facilitates e21f719 where someone specifies a url but the application hasn't yet been extracted to the target dir. 
--- atomicapp/nulecule/main.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 10417b0b..07904146 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -90,6 +90,10 @@ def __init__(self, app_spec, destination=None, logger.debug("NuleculeManager init app_path: %s", self.app_path) logger.debug("NuleculeManager init image: %s", self.image) + # Create the app_path if it doesn't exist yet + if not os.path.isdir(self.app_path): + os.makedirs(self.app_path) + # Set where the main nulecule file should be self.main_file = os.path.join(self.app_path, MAIN_FILE) From 42a56330412fc3dcd3f176830033b2d2acf6e553 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 11 Jan 2016 16:55:55 -0500 Subject: [PATCH 019/193] Change undeploy/deploy functions to run/stop --- atomicapp/nulecule/base.py | 4 ++-- atomicapp/providers/docker.py | 4 ++-- atomicapp/providers/kubernetes.py | 4 ++-- atomicapp/providers/marathon.py | 4 ++-- atomicapp/providers/openshift.py | 4 ++-- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 503c3ed1..787acb2e 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -293,7 +293,7 @@ def run(self, provider_key, dryrun=False): provider_key, provider = self.get_provider(provider_key, dryrun) provider.artifacts = self.rendered_artifacts.get(provider_key, []) provider.init() - provider.deploy() + provider.run() def stop(self, provider_key=None, dryrun=False): """ @@ -305,7 +305,7 @@ def stop(self, provider_key=None, dryrun=False): provider_key, provider = self.get_provider(provider_key, dryrun) provider.artifacts = self.rendered_artifacts.get(provider_key, []) provider.init() - provider.undeploy() + provider.stop() def load_config(self, config=None, ask=False, skip_asking=False): """ diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index c65a7355..6b116c8a 
100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -77,7 +77,7 @@ def _get_containers(self): else: return dict((line, 1) for line in subprocess.check_output(docker_cmd, shell=True).splitlines()) - def deploy(self): + def run(self): logger.info("Deploying to provider: Docker") for container in self._get_containers(): if re.match("%s_+%s+_+[a-zA-Z0-9]{12}" % (self.namespace, self.image), container): @@ -103,7 +103,7 @@ def deploy(self): else: subprocess.check_call(cmd) - def undeploy(self): + def stop(self): logger.info("Undeploying to provider: Docker") artifact_names = list() diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index 41757249..600e28fa 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -167,7 +167,7 @@ def _scale_replicas(self, path, replicas=0): self._call(cmd) - def deploy(self): + def run(self): """Deploys the app by given resource manifests. """ logger.info("Deploying to Kubernetes") @@ -184,7 +184,7 @@ def deploy(self): cmd.append("--kubeconfig=%s" % self.config_file) self._call(cmd) - def undeploy(self): + def stop(self): """Undeploys the app by given resource manifests. Undeploy operation first scale down the replicas to 0 and then deletes the resource from cluster. diff --git a/atomicapp/providers/marathon.py b/atomicapp/providers/marathon.py index 6a978ecf..466a4bd3 100644 --- a/atomicapp/providers/marathon.py +++ b/atomicapp/providers/marathon.py @@ -49,7 +49,7 @@ def init(self): logger.debug("marathon_api = %s", self.marathon_api) self._process_artifacts() - def deploy(self): + def run(self): """ Deploys the app by given resource manifests. """ for artifact in self.marathon_artifacts: @@ -72,7 +72,7 @@ def deploy(self): logger.error(msg) raise ProviderFailedException(msg) - def undeploy(self): + def stop(self): """ Undeploys the app by given resource manifests. Undeploy operation deletes Marathon apps from cluster. 
""" diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 780c8cef..0d4cc8c0 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -368,7 +368,7 @@ def _get_namespace(self, artifact): return artifact["metadata"]["namespace"] return self.namespace - def deploy(self): + def run(self): logger.debug("Deploying to OpenShift") # TODO: remove running components if one component fails issue:#428 for kind, objects in self.openshift_artifacts.iteritems(): @@ -381,7 +381,7 @@ def deploy(self): continue self.oc.deploy(url, artifact) - def undeploy(self): + def stop(self): """ Undeploy application. From 31d0ae638f4d084a4b942f50655af01fe1bed681 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 18 Jan 2016 14:13:41 -0500 Subject: [PATCH 020/193] Rename install to fetch --- atomicapp/cli/main.py | 27 ++++++++++++++------------- atomicapp/nulecule/lib.py | 2 +- atomicapp/nulecule/main.py | 6 ++---- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 255c16ee..514572b8 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -65,7 +65,7 @@ def cli_genanswers(args): sys.exit(1) -def cli_install(args): +def cli_fetch(args): try: argdict = args.__dict__ destination = argdict['destination'] @@ -73,7 +73,7 @@ def cli_install(args): destination=destination, cli_answers=argdict['cli_answers'], answers_file=argdict['answers']) - nm.install(**argdict) + nm.fetch(**argdict) # Clean up the files if the user asked us to. Otherwise # notify the user where they can manage the application if destination and destination.lower() == 'none': @@ -131,6 +131,7 @@ def cli_stop(args): # want to store a value if the user didn't provide one. "store_true" does # not allow this; it will always create an attribute and store a value. 
class TrueOrFalseAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string=None): if values.lower() == 'true': booleanvalue = True @@ -202,7 +203,7 @@ def create_parser(self): "--mode", dest="mode", default=None, - choices=['install', 'run', 'stop', 'genanswers'], + choices=['fetch', 'run', 'stop', 'genanswers'], help=(''' The mode Atomic App is run in. This option has the effect of switching the 'verb' that was passed by the @@ -288,43 +289,43 @@ def create_parser(self): files and have them cleaned up when finished.''' % CACHE_DIR)) run_subparser.set_defaults(func=cli_run) - # === "install" SUBPARSER === - install_subparser = toplevel_subparsers.add_parser( - "install", parents=[globals_parser]) - install_subparser.add_argument( + # === "fetch" SUBPARSER === + fetch_subparser = toplevel_subparsers.add_parser( + "fetch", parents=[globals_parser]) + fetch_subparser.add_argument( "-a", "--answers", dest="answers", help="Path to %s" % ANSWERS_FILE) - install_subparser.add_argument( + fetch_subparser.add_argument( "--no-deps", dest="nodeps", default=False, action="store_true", help="Skip pulling dependencies of the app") - install_subparser.add_argument( + fetch_subparser.add_argument( "-u", "--update", dest="update", default=False, action="store_true", help="Re-pull images and overwrite existing files") - install_subparser.add_argument( + fetch_subparser.add_argument( "--destination", dest="destination", default=None, help=(''' - Destination directory for install. This defaults to a + Destination directory for fetch. This defaults to a directory under %s. Specify 'none' to not persist files and have them cleaned up when finished.''' % CACHE_DIR)) - install_subparser.add_argument( + fetch_subparser.add_argument( "app_spec", nargs='?', default=None, help=( "Application to run. 
This is a container image or a path " "that contains the metadata describing the whole application.")) - install_subparser.set_defaults(func=cli_install) + fetch_subparser.set_defaults(func=cli_fetch) # === "stop" SUBPARSER === stop_subparser = toplevel_subparsers.add_parser( diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index 141a12d2..4dd41f39 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -104,7 +104,7 @@ def run(self, provider_key=None, dry=False): def stop(self, provider): raise NotImplementedError - def install(self): + def fetch(self): raise NotImplementedError def uninstall(self): diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 07904146..32be5d1e 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -161,12 +161,11 @@ def genanswers(self, dryrun=False, answers_format=None, **kwargs): self.nulecule.config, None) self._write_answers(answers_file, answers, answers_format) - def install(self, nodeps=False, update=False, dryrun=False, - answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): + def fetch(self, nodeps=False, update=False, dryrun=False, + answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): """ Installs (unpacks) a Nulecule application from a Nulecule image to a target path. 
- Args: answers (dict or str): Answers data or local path to answers file nodeps (bool): Install the nulecule application without installing @@ -176,7 +175,6 @@ def install(self, nodeps=False, update=False, dryrun=False, dryrun (bool): Do not make any change to the host system if True answers_format (str): File format for writing sample answers file kwargs (dict): Extra keyword arguments - Returns: None """ From 43317553cd564e3d42ec6bac5dbe3aba3258d346 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 18 Jan 2016 14:38:19 -0500 Subject: [PATCH 021/193] Remove mention of uninstall function --- atomicapp/nulecule/base.py | 5 ----- atomicapp/nulecule/main.py | 5 ----- atomicapp/requirements.py | 3 --- 3 files changed, 13 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 787acb2e..3c0ace08 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -177,11 +177,6 @@ def stop(self, provider_key=None, dryrun=False): for component in self.components: component.stop(provider_key, dryrun) - # TODO: NOT YET IMPLEMENTED - def uninstall(self): - for component in self.components: - component.uninstall() - def load_config(self, config=None, ask=False, skip_asking=False): """ Load config data for the entire Nulecule application, by traversing diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 32be5d1e..a644ebc2 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -262,11 +262,6 @@ def stop(self, cli_provider, **kwargs): self.nulecule.render(cli_provider, dryrun=dryrun) self.nulecule.stop(cli_provider, dryrun) - def uninstall(self): - # For future use - self.stop() - self.nulecule.uninstall() - def clean(self, force=False): # For future use self.uninstall() diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index 48db99cb..899b26d2 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -44,9 +44,6 @@ def run(self): def stop(self): 
self._exec("stop") - def uninstall(self): - self._exec("uninstall") - # Find if the requirement does not exist within REQUIREMENT_FUNCTIONS def _find_requirement_function_name(self, key): logging.debug("Checking if %s matches any of %s" % From 814c9eef83c35d36faed7b07974f14364a676daa Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 18 Jan 2016 16:30:07 -0500 Subject: [PATCH 022/193] Fix test names --- tests/units/cli/test_cli.py | 8 ++--- tests/units/cli/test_cli_gitlab_example.py | 4 +-- .../units/nulecule/test_nulecule_component.py | 4 +-- tests/units/persistent_storage/test_ps.py | 3 -- tests/units/providers/test_docker_provider.py | 4 +-- .../providers/test_openshift_provider.py | 32 +++++++++---------- 6 files changed, 26 insertions(+), 29 deletions(-) diff --git a/tests/units/cli/test_cli.py b/tests/units/cli/test_cli.py index 2459b9ca..02dbbe8c 100644 --- a/tests/units/cli/test_cli.py +++ b/tests/units/cli/test_cli.py @@ -71,13 +71,13 @@ def test_run_helloapache_app(self): assert exec_info.value.code == 0 - def test_install_helloapache_app(self): + def test_fetch_helloapache_app(self): command = [ "main.py", "--verbose", "--answers-format=json", "--dry-run", - "install", + "fetch", self.examples_dir + 'helloapache/' ] @@ -119,13 +119,13 @@ def test_run_helloapache_app_marathon(self): assert exec_info.value.code == 0 - def test_install_helloapache_app_docker(self): + def test_fetch_helloapache_app_docker(self): command = [ "main.py", "--verbose", "--answers-format=json", "--dry-run", - "install", + "fetch", self.examples_dir + 'helloapache/' ] diff --git a/tests/units/cli/test_cli_gitlab_example.py b/tests/units/cli/test_cli_gitlab_example.py index e5369335..20686e45 100644 --- a/tests/units/cli/test_cli_gitlab_example.py +++ b/tests/units/cli/test_cli_gitlab_example.py @@ -76,12 +76,12 @@ def tearDownClass(cls): os.remove(os.path.join(root, f)) # Installs the gitlab example similarly to `test_cli.py` examples - def test_install_gitlab_app(self): + def 
test_fetch_gitlab_app(self): command = [ "main.py", "--verbose", "--dry-run", - "install", + "fetch", self.examples_dir + 'gitlab/' ] diff --git a/tests/units/nulecule/test_nulecule_component.py b/tests/units/nulecule/test_nulecule_component.py index a61583a0..3dbc62dc 100644 --- a/tests/units/nulecule/test_nulecule_component.py +++ b/tests/units/nulecule/test_nulecule_component.py @@ -88,7 +88,7 @@ def test_run_local_artifacts(self, mock_get_provider): mock_get_provider.assert_called_once_with(provider_key, dryrun) self.assertEqual(mock_provider.artifacts, ['a', 'b', 'c']) mock_provider.init.assert_called_once_with() - mock_provider.deploy.assert_called_once_with() + mock_provider.run.assert_called_once_with() class TestNuleculeComponentStop(unittest.TestCase): @@ -120,7 +120,7 @@ def test_stop_local_app(self, mock_get_provider): mock_get_provider.assert_called_once_with(provider_key, dryrun) self.assertEqual(mock_provider.artifacts, ['a', 'b', 'c']) mock_provider.init.assert_called_once_with() - mock_provider.undeploy.assert_called_once_with() + mock_provider.stop.assert_called_once_with() class TestNuleculeComponentLoadConfig(unittest.TestCase): diff --git a/tests/units/persistent_storage/test_ps.py b/tests/units/persistent_storage/test_ps.py index 5afb0217..69d0b0b1 100644 --- a/tests/units/persistent_storage/test_ps.py +++ b/tests/units/persistent_storage/test_ps.py @@ -31,6 +31,3 @@ def test_run(self): def test_stop(self): self.test.stop() - - def test_uninstall(self): - self.test.uninstall() diff --git a/tests/units/providers/test_docker_provider.py b/tests/units/providers/test_docker_provider.py index 20c9d661..613153a4 100644 --- a/tests/units/providers/test_docker_provider.py +++ b/tests/units/providers/test_docker_provider.py @@ -66,7 +66,7 @@ def test_multiple_artifact_load(self): ["test_fedora-httpd_e9b9a7bfe8f9", "test_centos-httpd_e9b9a7bfe8f9", "test_centos-httpd_e9b9a7bfe8f9"] ]) with 
mock.patch("atomicapp.providers.docker.DockerProvider._get_containers", mock_container_list): - provider.deploy() + provider.run() # Patch in a general container list and make sure it fails if there is already a container with the same name @@ -77,4 +77,4 @@ def test_namespace_name_check(self): provider.init() provider.artifacts = [self.artifact_dir + 'hello-world-one'] with pytest.raises(ProviderFailedException): - provider.deploy() + provider.run() diff --git a/tests/units/providers/test_openshift_provider.py b/tests/units/providers/test_openshift_provider.py index 0d092475..b24b6ccc 100644 --- a/tests/units/providers/test_openshift_provider.py +++ b/tests/units/providers/test_openshift_provider.py @@ -37,12 +37,12 @@ def tearDown(self): class TestOpenshiftProviderDeploy(OpenshiftProviderTestMixin, unittest.TestCase): """ - Test OpenShiftProvider.deploy + Test OpenShiftProvider.run """ - def test_deploy(self): + def test_run(self): """ - Test calling OpenshiftClient.deploy from OpenShiftProvider.deploy + Test calling OpenshiftClient.run from OpenShiftProvider.run """ op = self.get_oc_provider() op.oapi_resources = ['foo'] @@ -56,15 +56,15 @@ def test_deploy(self): ] } - op.deploy() + op.run() self.mock_oc.deploy.assert_called_once_with( 'namespaces/foo/pods/?access_token=test', op.openshift_artifacts['pods'][0]) - def test_deploy_dryrun(self): + def test_run_dryrun(self): """ - Test running OpenShiftProvider.deploy as dryrun + Test running OpenShiftProvider.run as dryrun """ op = self.get_oc_provider(dryrun=True) op.oapi_resources = ['foo'] @@ -78,18 +78,18 @@ def test_deploy_dryrun(self): ] } - op.deploy() + op.run() - self.assertFalse(self.mock_oc.deploy.call_count) + self.assertFalse(self.mock_oc.run.call_count) -class TestOpenshiftProviderUndeploy(OpenshiftProviderTestMixin, unittest.TestCase): +class TestOpenshiftProviderUnrun(OpenshiftProviderTestMixin, unittest.TestCase): """ - Test OpenShiftProvider.undeploy + Test OpenShiftProvider.stop """ - def 
test_undeploy(self): + def test_stop(self): """ - Test calling OpenshiftClient.delete from OpenShiftProvider.undeploy + Test calling OpenshiftClient.delete from OpenShiftProvider.stop """ op = self.get_oc_provider() op.oapi_resources = ['foo'] @@ -105,15 +105,15 @@ def test_undeploy(self): ] } - op.undeploy() + op.stop() self.mock_oc.delete.assert_called_once_with( 'namespaces/foo/pods/%s?access_token=test' % op.openshift_artifacts['pods'][0]['metadata']['name']) - def test_undeploy_dryrun(self): + def test_stop_dryrun(self): """ - Test running OpenShiftProvider.undeploy as dryrun + Test running OpenShiftProvider.stop as dryrun """ op = self.get_oc_provider(dryrun=True) op.oapi_resources = ['foo'] @@ -129,7 +129,7 @@ def test_undeploy_dryrun(self): ] } - op.deploy() + op.stop() self.assertFalse(self.mock_oc.delete.call_count) From 471c14e59fdf6c475372f191272f7e57a5844ac4 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 20 Jan 2016 10:06:27 -0500 Subject: [PATCH 023/193] Remove install label from Dockerfiles --- Dockerfiles.git/Dockerfile.centos | 3 +-- Dockerfiles.git/Dockerfile.debian | 3 +-- Dockerfiles.git/Dockerfile.fedora | 3 +-- Dockerfiles.pkgs/Dockerfile.centos | 3 +-- Dockerfiles.pkgs/Dockerfile.fedora | 3 +-- 5 files changed, 5 insertions(+), 10 deletions(-) diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 3661d0dc..d6a9730e 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -8,8 +8,7 @@ LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e 
IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" + STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index d149cac5..c3c7735e 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -6,8 +6,7 @@ ENV ATOMICAPPVERSION="0.3.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" + STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index a9418e83..c23750c6 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -8,8 +8,7 @@ LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp 
-v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" + STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 02c8633f..a792050c 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -11,8 +11,7 @@ LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" + STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index cdb8a013..bfd913fd 
100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -11,8 +11,7 @@ LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" \ - INSTALL="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} install \${OPT3}" + STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /atomicapp From 7673e0920cdd1cd1cfdafe6bfeea1168d916e13c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 20 Jan 2016 12:34:01 -0500 Subject: [PATCH 024/193] 0.4.0 Release --- CHANGELOG.md | 34 +++++++++++++++++++++++++++++++ Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 7 files changed, 40 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 577c44c5..250d3fa0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,37 @@ +## Atomic App 0.4.0 (01-20-2016) + +With this release we bump our version to 0.4.0 to coincide with our BETA-4 release as well as the change to our "install" verb. + +The most significant new features are: + - Renaming install to fetch + - Allowing users to pass an answers file as a URL + +For an extended list of changes, please see the git shortlog below. 
+ +``` +Charlie Drage : + Change undeploy/deploy functions to run/stop + Rename install to fetch + Remove mention of uninstall function + Fix test names + Remove install label from Dockerfiles + +Dusty Mabe : + docker: fix stopping for artifacts with '--name=' + cli: allow specifying target dir during atomic run + cli: add --namespace option to cli + Allow users to provide answers file as url. + Create destination app_path dir if it doesn't exist yet. + +Ratnadeep Debnath : + Support specifying default provider in Nulecule spec file. Fixes #378 + +Tomas Kral : + openshift provider: safer stop + openshift provider: fix typos, add more explanation + openshift provider: remove acronyms from comments +``` + ## Atomic App 0.3.1 (01-14-2016) This release introduces some significant features to Atomic App as well as our first release since 0.3.0. diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index d6a9730e..7a98fe74 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.3.1" +ENV ATOMICAPPVERSION="0.4.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index c3c7735e..879188b0 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.3.1" +ENV ATOMICAPPVERSION="0.4.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index c23750c6..5e6c336c 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.3.1" +ENV ATOMICAPPVERSION="0.4.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/README.md b/README.md index 65205878..b97f6bf7 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Atomic App is a reference implementation of the [Nulecule Specification](http:// Atomic App is packaged as a container. End-users typically do not install the software from source. Instead use the atomicapp container as the `FROM` line in a Dockerfile and package your application on top. 
For example: ``` -FROM projectatomic/atomicapp:0.3.1 +FROM projectatomic/atomicapp:0.4.0 MAINTAINER Your Name diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 1321b10c..cd26212f 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.3.1' +__ATOMICAPPVERSION__ = '0.4.0' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index b81692e5..24a420bc 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.3.1', + version='0.4.0', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From c9676e3eea6ba15ccbdec764121dbafa50e2810f Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 21 Jan 2016 10:13:47 -0500 Subject: [PATCH 025/193] Remove roadmap in favour of wiki --- ROADMAP.md | 28 ---------------------------- 1 file changed, 28 deletions(-) delete mode 100644 ROADMAP.md diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 33396192..00000000 --- a/ROADMAP.md +++ /dev/null @@ -1,28 +0,0 @@ -# Atomic App Roadmap - -This document provides a roadmap for current Atomic App development. The dates and features listed below are not considered final but rather an indication of what the core contributors are working on and the direction of Atomic App. - -Atomic App is the implementation of the [Nulecule spec](https://github.com/projectatomic/nulecule). We follow the spec closely, the current spec version as well as Atomic App version can be found via `atomicapp --version`. - -__Unless otherwise announced, the Atomic App CLI as well as Nulecule spec are subject to change. Backwards compatibility is a priority for version 1.0.0__ - -We rank all ROADMAP objectives by order of priority. These are subject to frequent change. 
- -#### High priority - - __Persistent storage__ - - Implement stop for OpenShift provider - - Support running Kubernetes from an Openshift template - -#### Medium priority - - Refactor logging - - AWS provider support - - Docker compose provider - -#### Low priority - - Nulecule index / library - - Keep versioning info in one location - - Ansible provider - - Nspawn provider - - Add a `USER` to Atomic App image - - https/ssh/sftp support for artifacts - - Use API instead of direct command-line for Docker && Kubernetes orchestration From b7c73ed57eec15df454f59c2899e18185c1fbacf Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 21 Jan 2016 11:53:37 -0500 Subject: [PATCH 026/193] Remove symbolic link from Dockerfile --- Dockerfile | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) mode change 120000 => 100644 Dockerfile diff --git a/Dockerfile b/Dockerfile deleted file mode 120000 index 9b8457c0..00000000 --- a/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -Dockerfiles.git/Dockerfile.centos \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..7a98fe74 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,43 @@ +FROM centos:7 + +MAINTAINER Red Hat, Inc. 
+ +ENV ATOMICAPPVERSION="0.4.0" + +LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ + io.openshift.generate.job=true \ + io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ + RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + +WORKDIR /opt/atomicapp + +# Add the requirements file into the container +ADD requirements.txt ./ + +# Install needed requirements +RUN yum install -y epel-release && \ + yum install -y --setopt=tsflags=nodocs docker && \ + yum install -y --setopt=tsflags=nodocs $(sed s/^/python-/ requirements.txt) && \ + yum clean all + +WORKDIR /atomicapp + +# If a volume doesn't get mounted over /atomicapp (like when running in +# an openshift pod) then open up permissions so files can be copied into +# the directory by non-root. +RUN chmod 777 /atomicapp + +# If a volume doesn't get mounted over /run (like when running in an +# openshift pod) then open up permissions so the lock file can be +# created by non-root. 
+RUN chmod 777 /run/lock + +ENV PYTHONPATH /opt/atomicapp/ + +# the entrypoint +ENTRYPOINT ["/usr/bin/python", "/opt/atomicapp/atomicapp/cli/main.py"] + +# Add all of Atomic App's files to the container image +# NOTE: Do this last so rebuilding after development is fast +ADD atomicapp/ /opt/atomicapp/atomicapp/ From 18993a5da7ba7fbd891fb2f847a1cbe197aaa001 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 27 Jan 2016 19:50:57 -0500 Subject: [PATCH 027/193] cli: Fix bug with atomic cli + genanswers Fixes #536 --- atomicapp/cli/main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 514572b8..75e1985b 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -347,6 +347,8 @@ def create_parser(self): "genanswers", parents=[globals_parser]) gena_subparser.add_argument( "app_spec", + nargs='?', + default=None, help='The name of a container image containing an Atomic App.') gena_subparser.set_defaults(func=cli_genanswers) From 3a1a1bec126fca9396f35aa6444a6a1fbf8fecab Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 27 Jan 2016 23:47:20 -0500 Subject: [PATCH 028/193] openshift: Fix a few spelling mistakes. 
--- atomicapp/providers/openshift.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 0d4cc8c0..0fa2120b 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -138,7 +138,7 @@ def deploy(self, url, artifact): verify=self._requests_tls_verify(), data=artifact) if status_code == 201: - logger.info("Object %s sucessfully deployed.", + logger.info("Object %s successfully deployed.", artifact['metadata']['name']) else: msg = "%s %s" % (status_code, return_data) @@ -161,7 +161,7 @@ def delete(self, url): url, verify=self._requests_tls_verify()) if status_code == 200: - logger.info("Sucessfully deleted.") + logger.info("Successfully deleted.") else: msg = "%s %s" % (status_code, return_data) logger.error(msg) @@ -185,7 +185,7 @@ def scale(self, url, replicas): data=patch, verify=self._requests_tls_verify()) if status_code == 200: - logger.info("Sucessfully scaled %s to %s replicas", url, replicas) + logger.info("Successfully scaled %s to %s replicas", url, replicas) else: msg = "%s %s" % (status_code, return_data) logger.error(msg) @@ -198,7 +198,7 @@ def process_template(self, url, template): verify=self._requests_tls_verify(), data=template) if status_code == 201: - logger.info("template proccessed %s", template['metadata']['name']) + logger.info("template processed %s", template['metadata']['name']) logger.debug("processed template %s", return_data) return return_data['objects'] else: From b6bd363dfc811c01b3f5fda55818c63714f899c8 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 28 Jan 2016 00:41:37 -0500 Subject: [PATCH 029/193] openshift: clean up scale function log message. An INFO level message shouldn't spit out a long url. 
--- atomicapp/providers/openshift.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 0fa2120b..3695d12f 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -185,7 +185,7 @@ def scale(self, url, replicas): data=patch, verify=self._requests_tls_verify()) if status_code == 200: - logger.info("Successfully scaled %s to %s replicas", url, replicas) + logger.info("Successfully scaled to %s replicas", replicas) else: msg = "%s %s" % (status_code, return_data) logger.error(msg) From f5ef1abf37e12467221dc9f1eb95d7419a9c5175 Mon Sep 17 00:00:00 2001 From: Tomas Kral Date: Mon, 1 Feb 2016 16:33:33 +0100 Subject: [PATCH 030/193] marathon: do not convert types when parsing json artifact --- atomicapp/providers/marathon.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/atomicapp/providers/marathon.py b/atomicapp/providers/marathon.py index 466a4bd3..40d2d7f7 100644 --- a/atomicapp/providers/marathon.py +++ b/atomicapp/providers/marathon.py @@ -108,7 +108,9 @@ def _process_artifacts(self): data = None with open(os.path.join(self.path, artifact), "r") as fp: try: - data = anymarkup.parse(fp) + # env variables in marathon artifacts have to be string:string + # force_types=None respects types from json file + data = anymarkup.parse(fp, force_types=None) logger.debug("Parsed artifact %s", data) # every marathon app has to have id. 'id' key is also used for showing messages if "id" not in data.keys(): From 3dbe998fe44ce74a0ac95a780c64370938e912fb Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 27 Jan 2016 22:44:44 -0500 Subject: [PATCH 031/193] If not given, don't populate namespace in answers.conf.gen. If the namespace is not provided in the answers.conf, but it is detected in the providerconfig, then don't put namespace into the answers.conf.gen file because that will make stopping the application not work. 
A better solution to this would be to populate the right value for namespace (the detected value) in the answers.conf.gen, but the way our config data is handled, passing data back from the provider code would be really messy. This is a workaround for #486. It is not a fix. --- atomicapp/nulecule/main.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index a644ebc2..b3c0586d 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -14,9 +14,7 @@ ANSWERS_FILE_SAMPLE, ANSWERS_RUNTIME_FILE, DEFAULT_ANSWERS, - DEFAULT_NAMESPACE, MAIN_FILE, - NAMESPACE_KEY, PROVIDER_KEY) from atomicapp.nulecule.base import Nulecule from atomicapp.nulecule.exceptions import NuleculeException @@ -332,6 +330,10 @@ def _write_answers(self, path, answers, answers_format): logger.debug("ANSWERS: %s", answers) anymarkup.serialize_file(answers, path, format=answers_format) + # TODO - once we rework config data we shouldn't need this + # function anymore, we should be able to take the data + # straight from the config object since the defaults and args + # provided from the cli would have already been merged. 
def _get_runtime_answers(self, config, cli_provider): """ Get runtime answers data from config (Nulecule config) by adding @@ -346,8 +348,7 @@ def _get_runtime_answers(self, config, cli_provider): """ _config = copy.deepcopy(config) _config[GLOBAL_CONF] = config.get(GLOBAL_CONF) or {} - _config[GLOBAL_CONF][NAMESPACE_KEY] = _config[GLOBAL_CONF].get( - NAMESPACE_KEY) or DEFAULT_NAMESPACE + # If a provider is provided via CLI, override the config parameter if cli_provider: _config[GLOBAL_CONF][PROVIDER_KEY] = cli_provider From 264d47f02eacb7f31bcf3acdc78cb1c366c6028c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 2 Feb 2016 12:16:16 -0500 Subject: [PATCH 032/193] 0.4.1 Release --- CHANGELOG.md | 19 +++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 8 files changed, 26 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 250d3fa0..b778d1a3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +## Atomic App 0.4.1 (02-02-2016) + +0.4.1 is a minor bug fix release. + +``` +Charlie Drage : + Remove roadmap in favour of wiki + Remove symbolic link from Dockerfile + +Dusty Mabe : + cli: Fix bug with atomic cli + genanswers + openshift: Fix a few spelling mistakes. + openshift: clean up scale function log message. + If not given, don't populate namespace in answers.conf.gen. + +Tomas Kral : + marathon: do not convert types when parsing json artifact +``` + ## Atomic App 0.4.0 (01-20-2016) With this release we bump our version to 0.4.0 to coincide with our BETA-4 release as well as the change to our "install" verb. diff --git a/Dockerfile b/Dockerfile index 7a98fe74..6279c2f1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.4.0" +ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 7a98fe74..6279c2f1 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.0" +ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 879188b0..f4d7bcd4 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.0" +ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 5e6c336c..4f390056 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.0" +ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/README.md b/README.md index b97f6bf7..befa4bdb 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Atomic App is a reference implementation of the [Nulecule Specification](http:// Atomic App is packaged as a container. End-users typically do not install the software from source. Instead use the atomicapp container as the `FROM` line in a Dockerfile and package your application on top. 
For example: ``` -FROM projectatomic/atomicapp:0.4.0 +FROM projectatomic/atomicapp:0.4.1 MAINTAINER Your Name diff --git a/atomicapp/constants.py b/atomicapp/constants.py index cd26212f..e4f4af71 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.4.0' +__ATOMICAPPVERSION__ = '0.4.1' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index 24a420bc..24f6fc9d 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.4.0', + version='0.4.1', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From d6400280fc64ba9cb2c4d46016b6583eecbe516f Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Wed, 10 Feb 2016 10:10:41 +0530 Subject: [PATCH 033/193] This makes `metadata` an optional argument Atomicapp would fail if `metadata` was not specified in Nulecule file. While according to Nulecule specification `metadata` is an optional argument. This would fail because while creating `Nulecule` object `metadata` was positional argument. So fixed this by making it a keyword argument. Also tests have been modified accordingly. This closes issue #502 --- atomicapp/nulecule/base.py | 6 +++--- tests/units/nulecule/test_nulecule.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 3c0ace08..c6bbba1c 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -39,7 +39,7 @@ class Nulecule(NuleculeBase): componenents, but does not have access to its parent's scope. 
""" - def __init__(self, id, specversion, metadata, graph, basepath, + def __init__(self, id, specversion, graph, basepath, metadata=None, requirements=None, params=None, config=None, namespace=GLOBAL_CONF): """ @@ -48,9 +48,9 @@ def __init__(self, id, specversion, metadata, graph, basepath, Args: id (str): Nulecule application ID specversion (str): Nulecule spec version - metadata (dict): Nulecule metadata graph (list): Nulecule graph of components basepath (str): Basepath for Nulecule application + metadata (dict): Nulecule metadata requirements (dict): Requirements for the Nulecule application params (list): List of params for the Nulecule application config (dict): Config data for the Nulecule application @@ -62,7 +62,7 @@ def __init__(self, id, specversion, metadata, graph, basepath, super(Nulecule, self).__init__(basepath, params, namespace) self.id = id self.specversion = specversion - self.metadata = metadata + self.metadata = metadata or {} self.graph = graph self.requirements = requirements self.config = config or {} diff --git a/tests/units/nulecule/test_nulecule.py b/tests/units/nulecule/test_nulecule.py index f35119e3..4287de83 100644 --- a/tests/units/nulecule/test_nulecule.py +++ b/tests/units/nulecule/test_nulecule.py @@ -13,7 +13,7 @@ def test_run(self): mock_component_1 = mock.Mock() mock_component_2 = mock.Mock() - n = Nulecule('some-id', '0.0.2', {}, [{}], 'some/path') + n = Nulecule('some-id', '0.0.2', [{}], 'some/path', {}) n.components = [mock_component_1, mock_component_2] n.run(provider) @@ -134,7 +134,7 @@ def test_load_components(self, MockNuleculeComponent): } ] - n = Nulecule('some-id', '0.0.2', {}, graph, 'some/path') + n = Nulecule('some-id', '0.0.2', graph, 'some/path', {}) n.load_components() MockNuleculeComponent.assert_any_call( From 9577cec23d7fbfc6280351b19b6423efab1ea370 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 11 Feb 2016 08:03:57 -0500 Subject: [PATCH 034/193] providerconfig: support someone specifying a relative 
path This will allow someone to support a relative path for the providerconfig. The providerconfig must be under the users current working directory and can't be outside of it. --- atomicapp/plugin.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index 0a245df9..85f52e2e 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -72,8 +72,9 @@ def getConfigFile(self): """ if PROVIDER_CONFIG_KEY in self.config: self.config_file = self.config[PROVIDER_CONFIG_KEY] - if self.container: - self.config_file = os.path.join(Utils.getRoot(), self.config_file.lstrip("/")) + if os.path.isabs(self.config_file): + self.config_file = os.path.join(Utils.getRoot(), + self.config_file.lstrip('/')) else: logger.warning("Configuration option '%s' not found" % PROVIDER_CONFIG_KEY) From 66de083b08bbbd165db620ee04dda754d061a8a3 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 10 Feb 2016 18:11:24 -0500 Subject: [PATCH 035/193] cli: Print helpful error if no app_spec provided. If a user doesn't provide an app_spec and it can't be derived from the $IMAGE environment variable, then print an error and exit. --- atomicapp/cli/main.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 75e1985b..f4af6f7d 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -408,9 +408,14 @@ def run(self): # a directory if they want to for "run". For that reason we won't # default the RUN label for Atomic App to provide an app_spec argument. # In this case pick up app_spec from $IMAGE env var (set by RUN label). 
- if args.app_spec is None and os.environ.get('IMAGE') is not None: - logger.debug("Setting app_spec based on $IMAGE env var") - args.app_spec = os.environ['IMAGE'] + if args.app_spec is None: + if os.environ.get('IMAGE') is not None: + logger.debug("Setting app_spec based on $IMAGE env var") + args.app_spec = os.environ['IMAGE'] + else: + print("Error. Too few arguments. Must provide app_spec.") + print("Run with '--help' for more info") + sys.exit(1) # Take the arguments that correspond to "answers" config file data # and make a dictionary of it to pass along in args. From 0a51b34849b7789c3dbe9831e7a206a01ecf8514 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 27 Jan 2016 23:06:17 -0500 Subject: [PATCH 036/193] Do not use artifacts dir to select provider. In e0f2970 the user can now specify a default provider for their application if they want it to be something other than the default we choose. This allows us to get rid of the somewhat crude heuristic we were using in b76acee to default to a provider if we detected only one provider existed. There are problems with the way we did it though (see description in #515). 
Fixes #515 --- atomicapp/nulecule/main.py | 6 ------ atomicapp/utils.py | 8 +------- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index b3c0586d..f8416707 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -223,12 +223,6 @@ def run(self, cli_provider, answers_output, ask, if not self.answers_file: self._process_answers() - # Unless otherwise specified with CLI arguments we will - # default to the first provider available - providers = Utils.getSupportedProviders(self.app_path) - if cli_provider is None and len(providers) == 1: - self.answers[GLOBAL_CONF][PROVIDER_KEY] = providers[0] - self.nulecule.load_config(config=self.nulecule.config, ask=ask) self.nulecule.render(cli_provider, dryrun) self.nulecule.run(cli_provider, dryrun) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 14418a78..d25bd6d8 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -36,8 +36,7 @@ CACHE_DIR, EXTERNAL_APP_DIR, HOST_DIR, - WORKDIR, - ARTIFACTS_FOLDER) + WORKDIR) __all__ = ('Utils') @@ -387,11 +386,6 @@ def rm_dir(directory): logger.debug('Recursively removing directory: %s' % directory) distutils.dir_util.remove_tree(directory) - @staticmethod - def getSupportedProviders(path): - providers = os.listdir(path + '/' + ARTIFACTS_FOLDER) - return providers - @staticmethod def make_rest_request(method, url, verify=True, data=None): """ From 3608796340934a3344ecfe490a32edfbb0a5f884 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 11 Feb 2016 15:13:40 -0500 Subject: [PATCH 037/193] tests: update cli test The TestCli.test_run_helloapache_app test previously tested some work done by cdrage in b76acee within #440. In 893c3c5 that functionality was removed in favor of the work done in e0f2970. This commit updates the test to test e0f2970 at the cli level. 
--- tests/units/cli/test_default_provider.py | 2 +- .../units/cli/test_examples/oneprovider-helloapache/Nulecule | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/units/cli/test_default_provider.py b/tests/units/cli/test_default_provider.py index 7a9aeea3..73437966 100644 --- a/tests/units/cli/test_default_provider.py +++ b/tests/units/cli/test_default_provider.py @@ -70,7 +70,7 @@ def test_run_helloapache_app(self, capsys): print out # Since this a Docker-only provider test, docker *should* be in it, NOT Kubernetes - assert "'provider': 'docker'" in out + assert "u'provider': u'docker'" in out assert "Deploying to Kubernetes" not in out assert exec_info.value.code == 0 diff --git a/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule b/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule index 75ae781c..98552ce1 100644 --- a/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule +++ b/tests/units/cli/test_examples/oneprovider-helloapache/Nulecule @@ -6,6 +6,10 @@ metadata: name: Hello Apache App appversion: 0.0.1 description: Atomic app for deploying a really basic Apache HTTP server +params: + - name: provider + description: The provider that is used to deploy the application + default: docker graph: - name: helloapache-app params: From 50aaaf1aa3ec503ed8a1ada2d7c666dd3f51755c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 11 Feb 2016 13:50:03 -0500 Subject: [PATCH 038/193] Fail on missing artifacts within Nulecule file --- atomicapp/nulecule/base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index c6bbba1c..e9b53015 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -372,6 +372,9 @@ def render(self, provider_key=None, dryrun=False): if self._app: self._app.render(provider_key=provider_key, dryrun=dryrun) return + if self.artifacts is None: + raise NuleculeException( + "No artifacts specified in the Nulecule 
file") context = self.get_context() if provider_key and provider_key not in self.artifacts: raise NuleculeException( From 96045ab9807f3d28ff07127f24317b8172ea6f89 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 11 Feb 2016 13:50:18 -0500 Subject: [PATCH 039/193] Add tests for failure of finding Nulecule artifacts --- tests/units/nulecule/test_nulecule_component.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/tests/units/nulecule/test_nulecule_component.py b/tests/units/nulecule/test_nulecule_component.py index 3dbc62dc..d8b6b272 100644 --- a/tests/units/nulecule/test_nulecule_component.py +++ b/tests/units/nulecule/test_nulecule_component.py @@ -238,7 +238,7 @@ def test_render_for_external_app(self): provider_key = 'some-provider' dryrun = False - nc = NuleculeComponent(name='some-app', basepath='some/path') + nc = NuleculeComponent(name='some-app', basepath='some/path', artifacts="/foo/bar") nc._app = mock_nulecule nc.render(provider_key, dryrun) @@ -259,6 +259,17 @@ def test_render_for_local_app_with_missing_artifacts_for_provider(self): self.assertRaises(NuleculeException, nc.render, provider_key, dryrun) + def test_render_for_local_app_with_missing_artifacts_from_nulecule(self): + """ + Test rendering a Nulecule component with no artifacts provided in the + Nulecule file. + """ + nc = NuleculeComponent(name='some-app', basepath='some/path') + nc.config = {} + + with self.assertRaises(NuleculeException): + nc.render() + @mock.patch('atomicapp.nulecule.base.NuleculeComponent.get_context') @mock.patch('atomicapp.nulecule.base.NuleculeComponent.' 
'get_artifact_paths_for_provider') From c47cb70c39197cbdb03d19b2b341977b518b192c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 12 Feb 2016 13:30:54 -0500 Subject: [PATCH 040/193] Remove instances of install verb to fetch --- atomicapp/cli/main.py | 6 +++--- atomicapp/nulecule/base.py | 6 +++--- atomicapp/nulecule/main.py | 2 +- docs/file_handling.md | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 514572b8..6434a19b 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -159,7 +159,7 @@ def create_parser(self): formatter_class=argparse.RawDescriptionHelpFormatter, add_help=False, description=( - "This will install and run an Atomic App, " + "This will fetch and run an Atomic App, " "a containerized application conforming to the Nulecule Specification")) # Add a help function to the toplevel parser but don't output # help information for it. We need this because of the way we @@ -284,7 +284,7 @@ def create_parser(self): dest="destination", default=None, help=(''' - Destination directory for install. This defaults to a + Destination directory for fetching. This defaults to a directory under %s. Specify 'none' to not persist files and have them cleaned up when finished.''' % CACHE_DIR)) run_subparser.set_defaults(func=cli_run) @@ -338,7 +338,7 @@ def create_parser(self): stop_subparser.add_argument( "app_spec", help=(''' - Path to the directory where the Atomic App is installed + Path to the directory where the Atomic App is fetched that is to be stopped.''')) stop_subparser.set_defaults(func=cli_stop) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 3c0ace08..04334fc5 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -120,12 +120,12 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, update (bool): Update existing application if True, else reuse it. 
Returns: - A Nulecule instance or None in case of some dry run (installing - from image). + A Nulecule instance or None in case of some dry run (fetching + an image). """ nulecule_path = os.path.join(src, MAIN_FILE) if dryrun and not os.path.exists(nulecule_path): - raise NuleculeException("Installed Nulecule components are required to initiate dry-run. " + raise NuleculeException("Fetched Nulecule components are required to initiate dry-run. " "Please specify your app via atomicapp --dry-run /path/to/your-app") nulecule_data = anymarkup.parse_file(nulecule_path) nulecule = Nulecule(config=config, basepath=src, diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index a644ebc2..e6dbb60d 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -28,7 +28,7 @@ class NuleculeManager(object): """ - Interface to install, run, stop a Nulecule application. + Interface to fetch, run, stop a Nulecule application. """ def __init__(self, app_spec, destination=None, diff --git a/docs/file_handling.md b/docs/file_handling.md index 3e4f885f..9ca38cb5 100644 --- a/docs/file_handling.md +++ b/docs/file_handling.md @@ -1,6 +1,6 @@ -## Install +## Fetch -Installing an Atomic App means to download the artifacts and sample answerfile. +Fetching an Atomic App means to download the artifacts and sample answerfile. ## Developing and Debugging From f7790995264c760f044818a5f13cb37b40cbd63e Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Fri, 12 Feb 2016 13:12:11 -0500 Subject: [PATCH 041/193] labels: update run labels to no longer use backticks In d81e7e6 [1] the atomic CLI stopped passing the command through a shell and thus the backticks no longer work. Updating here to use $PWD instead. 
[1] https://github.com/projectatomic/atomic/commit/d81e7e651e8eaa55fcbc6d9ddb4b1d7ba9de5ff1 --- Dockerfile | 4 ++-- Dockerfiles.git/Dockerfile.centos | 4 ++-- Dockerfiles.git/Dockerfile.debian | 4 ++-- Dockerfiles.git/Dockerfile.fedora | 4 ++-- Dockerfiles.pkgs/Dockerfile.centos | 4 ++-- Dockerfiles.pkgs/Dockerfile.fedora | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6279c2f1..fc505158 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 6279c2f1..fc505158 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name 
\${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index f4d7bcd4..3ac7edc6 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -5,8 +5,8 @@ MAINTAINER Red Hat, Inc. ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 4f390056..6f0c03b9 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ 
b/Dockerfiles.git/Dockerfile.fedora @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index a792050c..edfcb74d 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -10,8 +10,8 @@ ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} 
\${IMAGE} -v \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index bfd913fd..a5aeca1a 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -10,8 +10,8 @@ ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v `pwd`:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" WORKDIR /atomicapp From a88908acfad112e2c7f054384165965319b0e809 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Fri, 12 Feb 2016 13:48:34 -0500 Subject: [PATCH 042/193] labels: no longer default to verbose output --- Dockerfile | 4 ++-- Dockerfiles.git/Dockerfile.centos | 4 ++-- Dockerfiles.git/Dockerfile.debian | 4 ++-- Dockerfiles.git/Dockerfile.fedora | 4 ++-- Dockerfiles.pkgs/Dockerfile.centos | 4 ++-- Dockerfiles.pkgs/Dockerfile.fedora | 4 ++-- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index 
fc505158..195c4d1c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index fc505158..195c4d1c 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e 
IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 3ac7edc6..b1f527e5 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -5,8 +5,8 @@ MAINTAINER Red Hat, Inc. ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 6f0c03b9..250307de 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - 
STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index edfcb74d..ea4a0d0c 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -10,8 +10,8 @@ ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index a5aeca1a..5eed0d62 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ 
b/Dockerfiles.pkgs/Dockerfile.fedora @@ -10,8 +10,8 @@ ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} -v \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /atomicapp From 4b609887d822fa7b18f0cd04ff7b6cb176c17669 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Mon, 15 Feb 2016 10:58:29 -0500 Subject: [PATCH 043/193] Add Tomas to MAINTAINERS --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 53abf08d..ab7fab43 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2,4 +2,5 @@ Charlie Drage (@cdrage) Christoph Goern (@goern) Dusty Mabe (@dustymabe) Ratnadeep Debnath (@rtnpro) +Tomas Kral (@kadel, @tkral) Vaclav Pavlin (@vpavlin) From 3565a48490f4bcb85d72d54deeffa641ba4cfcf7 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 26 Jan 2016 10:57:56 -0500 Subject: [PATCH 044/193] Remove unneeded files in test examples dirs --- .../units/cli/test_examples/gitlab/Dockerfile | 11 - .../cli/test_examples/guestbook-go/Dockerfile | 9 - .../cli/test_examples/guestbook-go/README.md | 62 -- .../cli/test_examples/helloapache/Dockerfile | 9 - 
.../cli/test_examples/helloapache/README.md | 73 -- .../kubernetes-atomicapp/Dockerfile | 9 - .../oneprovider-helloapache/Dockerfile | 9 - .../wordpress-centos7-atomicapp/Dockerfile | 11 - .../README.asciidoc | 12 - .../wordpress-centos7-atomicapp/gpl-3.0.txt | 674 ------------------ 10 files changed, 879 deletions(-) delete mode 100644 tests/units/cli/test_examples/gitlab/Dockerfile delete mode 100644 tests/units/cli/test_examples/guestbook-go/Dockerfile delete mode 100644 tests/units/cli/test_examples/guestbook-go/README.md delete mode 100644 tests/units/cli/test_examples/helloapache/Dockerfile delete mode 100644 tests/units/cli/test_examples/helloapache/README.md delete mode 100644 tests/units/cli/test_examples/kubernetes-atomicapp/Dockerfile delete mode 100644 tests/units/cli/test_examples/oneprovider-helloapache/Dockerfile delete mode 100644 tests/units/cli/test_examples/wordpress-centos7-atomicapp/Dockerfile delete mode 100644 tests/units/cli/test_examples/wordpress-centos7-atomicapp/README.asciidoc delete mode 100644 tests/units/cli/test_examples/wordpress-centos7-atomicapp/gpl-3.0.txt diff --git a/tests/units/cli/test_examples/gitlab/Dockerfile b/tests/units/cli/test_examples/gitlab/Dockerfile deleted file mode 100644 index 28ef1ded..00000000 --- a/tests/units/cli/test_examples/gitlab/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Navid Shaikh - -LABEL io.projectatomic.nulecule.specversion 0.0.2 -LABEL io.projectatomic.nulecule.providers "kubernetes" - -LABEL Build docker build --rm --tag swordphilic/gitlab-centos7-atomicapp . 
- -ADD /Nulecule /Dockerfile /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/guestbook-go/Dockerfile b/tests/units/cli/test_examples/guestbook-go/Dockerfile deleted file mode 100644 index 70e9f565..00000000 --- a/tests/units/cli/test_examples/guestbook-go/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Jason Brooks - -LABEL io.projectatomic.nulecule.providers="kubernetes" \ - io.projectatomic.nulecule.specversion="0.0.2" - -ADD /Nulecule /Dockerfile README.md /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/guestbook-go/README.md b/tests/units/cli/test_examples/guestbook-go/README.md deleted file mode 100644 index e23813ce..00000000 --- a/tests/units/cli/test_examples/guestbook-go/README.md +++ /dev/null @@ -1,62 +0,0 @@ -This is the [guestbook-go](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/examples/guestbook-go) sample application from the kubernetes project, packaged as an atomic application based on the nulecule specification. - -Kubernetes is currently the only supported provider. You'll need to run this from a workstation that has the atomic CLI and kubectl client that can connect to a kubernetes master. This example depends on kube-dns being configured on your kubernetes cluster. - -### Step 1 - -Build: - -``` -# docker build -t $USER/guestbookgo-atomicapp . -``` - -### Step 2 - -Install and Run: - - -``` -# atomic install $USER/guestbookgo-atomicapp -# atomic run $USER/guestbookgo-atomicapp -``` - -### Step 3 - -Access the guestbook through a random NodePort on your cluster. 
Find the port by running: - -``` -$ kubectl describe service guestbook | grep NodePort - -NodePort: 31288/TCP -``` - -To find the ip address on your node, run: - -``` -$ kubectl get nodes -NAME LABELS STATUS -kube-node-1 kubernetes.io/hostname=kube-node-1 Ready -``` - -And using the node name from above, run: - -``` -$ kubectl describe nodes kube-node-1 | grep Addresses -Addresses: 192.168.121.174 -``` - -Once the app's container images are pulled and pods are running, you'll be able to reach the guestbook: - -``` -curl 192.168.121.174:31288 - - - - - - - - Guestbook - -... -``` diff --git a/tests/units/cli/test_examples/helloapache/Dockerfile b/tests/units/cli/test_examples/helloapache/Dockerfile deleted file mode 100644 index 6c6ce3d6..00000000 --- a/tests/units/cli/test_examples/helloapache/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Aaron Weitekamp - -LABEL io.projectatomic.nulecule.providers="kubernetes,docker" \ - io.projectatomic.nulecule.specversion="0.0.2" - -ADD /Nulecule /Dockerfile README.md /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/helloapache/README.md b/tests/units/cli/test_examples/helloapache/README.md deleted file mode 100644 index abac3e68..00000000 --- a/tests/units/cli/test_examples/helloapache/README.md +++ /dev/null @@ -1,73 +0,0 @@ -# helloapache - -This is an atomic application based on the nulecule specification. Kubernetes and native docker are currently the only supported providers. You'll need to run this from a workstation that has the atomic command. If you wish to use the kubernetes provider, you will also need a kubectl client that can connect to a kubernetes master. - -It's a single container application based on the centos/httpd image, but you can use your own. - -## Option 1: Non-interactive defaults - -Run the image. It will automatically use kubernetes as the orchestration provider. 
-``` -[sudo] atomic run projectatomic/helloapache -``` - -Note: This option is not interactive because all params in the Nulecule file have default values. - -## Option 2: Unattended - -1. Create the file `answers.conf` with these contents: - - This sets up the values for the two configurable parameters (image and hostport) and indicates that kubernetes should be the orchestration provider. - - [general] - provider = kubernetes - - [helloapache-app] - image = centos/httpd # optional: choose a different image - hostport = 80 # optional: choose a different port to expose -1. Run the application from the current working directory - - $ [sudo] atomic run projectatomic/helloapache - ... - helloapache - - -1. As an additional experiment, remove the kubernetes pod and change the provider to 'docker' and re-run the application to see it get deployed on base docker. - -## Option 3: Install and Run - -You may want to download the application, review the configuraton and parameters as specified in the Nulecule file, and edit the answerfile before running the application. - -1. Download the application files using `atomic install` - - [sudo] atomic install projectatomic/helloapache - -1. Rename `answers.conf.sample` - - mv answers.conf.sample answers.conf - -1. Edit `answers.conf`, review files if desired and then run - - $ [sudo] atomic run projectatomic/helloapache - ... - helloapache - -## Test -Any of these approaches should create a kubernetes pod or a running docker container. - -With a kubernetes pod, once its state is "Running" curl the minion it's running on. - -``` -$ kubectl get pod helloapache -POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS -helloapache 172.17.0.8 helloapache centos/httpd 10.3.9.216/ name=helloapache Running -$ curl 10.3.9.216 - -``` - -If you test the docker provider, once the container is running, curl the port on your localhost. 
- -``` -$ curl localhost - -``` diff --git a/tests/units/cli/test_examples/kubernetes-atomicapp/Dockerfile b/tests/units/cli/test_examples/kubernetes-atomicapp/Dockerfile deleted file mode 100644 index 4ed549c3..00000000 --- a/tests/units/cli/test_examples/kubernetes-atomicapp/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM projectatomic/atomicapp:0.1.3 - -MAINTAINER Jason Brooks - -LABEL io.projectatomic.nulecule.specversion 0.0.2 -LABEL io.projectatomic.nulecule.providers = "docker" - -ADD /Nulecule /Dockerfile /answers.conf /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/oneprovider-helloapache/Dockerfile b/tests/units/cli/test_examples/oneprovider-helloapache/Dockerfile deleted file mode 100644 index 6c6ce3d6..00000000 --- a/tests/units/cli/test_examples/oneprovider-helloapache/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Aaron Weitekamp - -LABEL io.projectatomic.nulecule.providers="kubernetes,docker" \ - io.projectatomic.nulecule.specversion="0.0.2" - -ADD /Nulecule /Dockerfile README.md /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/Dockerfile b/tests/units/cli/test_examples/wordpress-centos7-atomicapp/Dockerfile deleted file mode 100644 index 11c7dda8..00000000 --- a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM projectatomic/atomicapp:0.1.10 - -MAINTAINER Christoph Görn - -LABEL io.projectatomic.nulecule.specversion 0.0.1-alpha - -LABEL Build docker build --rm --tag goern/wordpress-centos7-atomicapp . 
- - -ADD /Nulecule /Dockerfile README.asciidoc gpl-3.0.txt /application-entity/ -ADD /artifacts /application-entity/artifacts diff --git a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/README.asciidoc b/tests/units/cli/test_examples/wordpress-centos7-atomicapp/README.asciidoc deleted file mode 100644 index 0a912f72..00000000 --- a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/README.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -= Wordpress Atomic App - -This is a Wordpress Atomic App, it will use reuse MySQL and SkyDNS to -provide a Kubernetes based Wordpress to you. - -== Usage - -`atomic run goern/wordpress-centos7-atomicapp` - -= Version - -This is version 1.1.0 of Wordpress Atomic App. diff --git a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/gpl-3.0.txt b/tests/units/cli/test_examples/wordpress-centos7-atomicapp/gpl-3.0.txt deleted file mode 100644 index 94a9ed02..00000000 --- a/tests/units/cli/test_examples/wordpress-centos7-atomicapp/gpl-3.0.txt +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. 
Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. 
If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. 
Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
From 870c678d909c90516636205cf96277f7970d2ca7 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 16 Feb 2016 10:06:48 -0500 Subject: [PATCH 045/193] Fix xpathing tests on missing files --- .../artifact_xpath_test/artifacts/docker/hello-apache-pod_run | 0 .../artifacts/kubernetes/hello-apache-pod.json | 0 tests/units/nulecule/test_xpathing.py | 4 ++-- 3 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 tests/units/nulecule/artifact_xpath_test/artifacts/docker/hello-apache-pod_run create mode 100644 tests/units/nulecule/artifact_xpath_test/artifacts/kubernetes/hello-apache-pod.json diff --git a/tests/units/nulecule/artifact_xpath_test/artifacts/docker/hello-apache-pod_run b/tests/units/nulecule/artifact_xpath_test/artifacts/docker/hello-apache-pod_run new file mode 100644 index 00000000..e69de29b diff --git a/tests/units/nulecule/artifact_xpath_test/artifacts/kubernetes/hello-apache-pod.json b/tests/units/nulecule/artifact_xpath_test/artifacts/kubernetes/hello-apache-pod.json new file mode 100644 index 00000000..e69de29b diff --git a/tests/units/nulecule/test_xpathing.py b/tests/units/nulecule/test_xpathing.py index 5a4cbba3..e8ff5cee 100644 --- a/tests/units/nulecule/test_xpathing.py +++ b/tests/units/nulecule/test_xpathing.py @@ -35,10 +35,10 @@ class TestNuleculeXpathing(unittest.TestCase): # Create a temporary directory for our setup as well as load the required NuleculeComponent def setUp(self): - self.tmpdir = tempfile.mkdtemp(prefix = "atomicapp-test", dir = "/tmp") + self.example_dir = os.path.dirname(__file__) + '/artifact_xpath_test/' self.artifact_path = os.path.dirname(__file__) + '/artifact_xpath_test/xpath.json' self.artifact_content = open(self.artifact_path, 'r').read(); - self.test = NuleculeComponent(name = None, basepath = self.tmpdir, params = None) + self.test = NuleculeComponent(name = None, basepath = self.example_dir, params = None) def tearDown(self): pass From fa249d51bcd3164bed2df820fc77c2032a62e0a3 Mon Sep 17 00:00:00 
2001 From: Dusty Mabe Date: Fri, 9 Oct 2015 15:58:19 -0400 Subject: [PATCH 046/193] Adds Atomicapp lifecycle definition. Closes #290 --- docs/atomicapp_lifecycle.md | 41 +++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 docs/atomicapp_lifecycle.md diff --git a/docs/atomicapp_lifecycle.md b/docs/atomicapp_lifecycle.md new file mode 100644 index 00000000..3fef59b4 --- /dev/null +++ b/docs/atomicapp_lifecycle.md @@ -0,0 +1,41 @@ +Atomicapp Lifecycle Definition +============================== + +The Atomic App software allows for several actions to be applied to +specified applications. The four actions that exist today are briefly +described below. + +`genanswers` +------------ +Will download and combine artifacts from the target application in a +temporary directory and then take the generated sample answers.conf +file and populate it in the users working directory. The temporary +directory is then cleaned up. + +`fetch` +------- +Will download and combine artifacts from the target application and any +dependent applications including sample answers.conf file into a local +directory for inspection and/or modification. Same for all providers. + +`run` +----- +Run an application. + +| Provider | Implementation | +| ------------- | -------------- | +| Docker | Run application containers on local machine. | +| Kubernetes | Run requested application in kubernetes target environment. | +| Openshift | Run requested application in OpenShift target environment. | +| Marathon | Run requested application in Marathon target environment. | + +`stop` +------ +Stop an application. + +| Provider | Implementation | +| ------------- | -------------- | +| Docker | Stop application containers on local machine. | +| Kubernetes | Stop requested application in kubernetes target environment. | +| Openshift | Stop requested application in OpenShift target environment. | +| Marathon | Stop requested application in Marathon target environment. 
| From 439599a4da92ab56db426a2d16fa75de0de109de Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 16 Feb 2016 18:41:06 +0530 Subject: [PATCH 047/193] Update file handling doc. Fixes #285 --- docs/file_handling.md | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/docs/file_handling.md b/docs/file_handling.md index 9ca38cb5..ec0e15e5 100644 --- a/docs/file_handling.md +++ b/docs/file_handling.md @@ -1,6 +1,11 @@ ## Fetch -Fetching an Atomic App means to download the artifacts and sample answerfile. +Fetching an Atomic App means to download the metadata files: artifacts and +sample answerfile for an Atomic App. By default, it downloads the metadata +files for the atomicapp to a directory of the form +``/var/lib/atomicapp/-``. If needed, +you can also specify a target directory to download the metadata for the +Atomic App using the ``--destination`` option. ## Developing and Debugging @@ -8,11 +13,31 @@ Image developers may run the root container and point to a Nulecule directory on ## Directories -* `/tmp/`: Host directory for temporary Nulecule files. May be overridden. -* `/tmp//.workdir`: Host directory for artifact template files with variable substitution. +* `/var/lib/atomicapp/-`: This is where an Atomic App + and it's dependencies are fetched when fetching or running the Atomic App, + unless, a specific destination is specified. +* `/var/lib/atomicapp/-/external`: + External Atomic Apps, if any, for the given Atomic App are + fetched into ``external`` directory inside the directory of + the Atomic App, during, fetching the Atomic App with + dependencies or running the Atomic App. ## Artifact path Local path to an artifact file or a directory containing artifact files as its immediate children. +## Runtime answers file + +When running an Atomic App, it asks the users for missing values for +parameters defined in the Atomic App and it's child Atomic Apps. 
This +aggregated answers data is used to run the Atomic App, and is dumped +to a file: ``answers.conf.gen`` in the Atomic App's directory, to be +used later when stopping the Atomic App. + +## Rendered artifact files + +Artifact files are rendered with runtime answers data along side the original +artifact files, but with the filenames prefixed with a `.` (dot), to make +them hidden. + From fa6cdde460010553046bf8533e88173792c69083 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 17 Feb 2016 15:28:57 -0500 Subject: [PATCH 048/193] nulecule: error if no artifacts in spec for inherited provider If the Nulecule file for an app specifies a provider to inherit from but the specfile doesn't have any entry for that provider, then error. Fixes #435 --- atomicapp/nulecule/base.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 3ec58194..f7efca7b 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -400,6 +400,13 @@ def get_artifact_paths_for_provider(self, provider_key): """ artifact_paths = [] artifacts = self.artifacts.get(provider_key) + + # If there are no artifacts for the requested provider then error + # This can happen for incorrectly named inherited provider (#435) + if artifacts is None: + raise NuleculeException( + "No artifacts for provider {}".format(provider_key)) + for artifact in artifacts: # Convert dict if the Nulecule file references "resource" if isinstance(artifact, dict) and artifact.get(RESOURCE_KEY): From 8e977a5a50c7fb7beafbe15739ebd8064d9356ce Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 18 Feb 2016 16:56:50 -0500 Subject: [PATCH 049/193] 0.4.2 Release --- CHANGELOG.md | 45 +++++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 8 files changed, 52 insertions(+), 7 
deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b778d1a3..d6a6ec83 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,48 @@ +## Atomic App 0.4.2 (02-18-2016) + +As we start to get closer to a 1.0.0 release, we continue to focus on tests and user interaction. This weeks release focus on both as well as a minor feature. + +The main features of this release are: + - Meta data is now an optional requirement when building + +UI: + - Relative path support for provider ocnfig data + - Raise on missing artifact or docker image failure + +Bug fixes: + - Label fixes against `atomic` cli master branch. We now pass our current working directory as a variable + +Misc: + - Remove uneeded test suite files (Dockerfiles, licenses, etc.) + - All references to `install` have now been removed within the code in favour of `fetch` + + +``` +Charlie Drage : + Fail on missing artifacts within Nulecule file + Add tests for failure of finding Nulecule artifacts + Remove instances of install verb to fetch + Remove unneeded files in test examples dirs + Fix xpathing tests on missing files + +Dusty Mabe : + providerconfig: support someone specifying a relative path + cli: Print helpful error if no app_spec provided. + Do not use artifacts dir to select provider. + tests: update cli test + labels: update run labels to no longer use backticks + labels: no longer default to verbose output + Add Tomas to MAINTAINERS + Adds Atomicapp lifecycle definition. Closes #290 + nulecule: error if no artifacts in spec for inherited provider + +Ratnadeep Debnath : + Update file handling doc. Fixes #285 + +Suraj Deshmukh : + This makes `metadata` an optional argument +``` + ## Atomic App 0.4.1 (02-02-2016) 0.4.1 is a minor bug fix release. diff --git a/Dockerfile b/Dockerfile index 195c4d1c..43714e23 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.4.1" +ENV ATOMICAPPVERSION="0.4.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 195c4d1c..43714e23 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.1" +ENV ATOMICAPPVERSION="0.4.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index b1f527e5..2d159f1a 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.1" +ENV ATOMICAPPVERSION="0.4.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 250307de..cd49acc0 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.1" +ENV ATOMICAPPVERSION="0.4.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/README.md b/README.md index befa4bdb..a9ccc516 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ Atomic App is a reference implementation of the [Nulecule Specification](http:// Atomic App is packaged as a container. End-users typically do not install the software from source. Instead use the atomicapp container as the `FROM` line in a Dockerfile and package your application on top. 
For example: ``` -FROM projectatomic/atomicapp:0.4.1 +FROM projectatomic/atomicapp:0.4.2 MAINTAINER Your Name diff --git a/atomicapp/constants.py b/atomicapp/constants.py index e4f4af71..04c61806 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.4.1' +__ATOMICAPPVERSION__ = '0.4.2' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index 24f6fc9d..09ea0bb1 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.4.1', + version='0.4.2', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From 4ccb1d5fed42b3edf7eaa7af6be3ee572c2bd374 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 18 Feb 2016 22:57:13 -0500 Subject: [PATCH 050/193] logging: Add in Atomic App Logging class Adds colorized output as well as a new command line option for choosing between different logging choices. Also adds support for a 'cockpit' logging mode, which just prints out select logging messages for the cockpit UI. 
Fixes #134 --- atomicapp/__init__.py | 41 -------- atomicapp/applogging.py | 152 ++++++++++++++++++++++++++++++ atomicapp/cli/main.py | 39 +++++--- atomicapp/constants.py | 4 + atomicapp/nulecule/base.py | 3 +- atomicapp/nulecule/container.py | 3 +- atomicapp/nulecule/main.py | 3 +- atomicapp/plugin.py | 6 +- atomicapp/providers/docker.py | 6 +- atomicapp/providers/kubernetes.py | 3 +- atomicapp/providers/marathon.py | 3 +- atomicapp/providers/openshift.py | 3 +- atomicapp/requirements.py | 5 +- atomicapp/utils.py | 3 +- 14 files changed, 205 insertions(+), 69 deletions(-) create mode 100644 atomicapp/applogging.py diff --git a/atomicapp/__init__.py b/atomicapp/__init__.py index 24dd79f8..e69de29b 100644 --- a/atomicapp/__init__.py +++ b/atomicapp/__init__.py @@ -1,41 +0,0 @@ -""" - Copyright 2015 Red Hat, Inc. - - This file is part of Atomic App. - - Atomic App is free software: you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - Atomic App is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with Atomic App. If not, see . 
-""" - -import logging - - -def set_logging(name="atomicapp", level=logging.DEBUG): - # create logger - logger = logging.getLogger() - logger.handlers = [] - logger.setLevel(level) - - # create console handler - ch = logging.StreamHandler() - - # create formatter - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - - # add formatter to ch - ch.setFormatter(formatter) - - # add ch to logger - logger.addHandler(ch) - -set_logging(level=logging.DEBUG) # override this however you want diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py new file mode 100644 index 00000000..ad5030a6 --- /dev/null +++ b/atomicapp/applogging.py @@ -0,0 +1,152 @@ +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +import sys +import logging + +from atomicapp.constants import (LOGGER_COCKPIT, + LOGGER_DEFAULT) + + +class colorizeOutputFormatter(logging.Formatter): + """ + A class to colorize the log msgs based on log level + """ + + def format(self, record): + # Call the parent class to do formatting. 
+ msg = super(colorizeOutputFormatter, self).format(record) + + # Now post process and colorize if needed + if record.levelno == logging.DEBUG: + msg = self._colorize(msg, 'cyan') + elif record.levelno == logging.WARNING: + msg = self._colorize(msg, 'yellow') + elif record.levelno == logging.INFO: + msg = self._colorize(msg, 'white') + elif record.levelno == logging.ERROR: + msg = self._colorize(msg, 'red') + else: + raise Exception("Invalid logging level {}".format(record.levelno)) + return self._make_unicode(msg) + + def _colorize(self, text, color): + """ + Colorize based upon the color codes indicated. + """ + # Console color codes + colorCodes = { + 'white': '0', 'bright white': '1;37', + 'blue': '0;34', 'bright blue': '1;34', + 'green': '0;32', 'bright green': '1;32', + 'cyan': '0;36', 'bright cyan': '1;36', + 'red': '0;31', 'bright red': '1;31', + 'purple': '0;35', 'bright purple': '1;35', + 'yellow': '0;33', 'bright yellow': '1;33', + } + return "\033[" + colorCodes[color] + "m" + text + "\033[0m" + + def _make_unicode(self, input): + """ + Convert all input to utf-8 for multi language support + """ + if type(input) != unicode: + input = input.decode('utf-8') + return input + + +class Logging: + + @staticmethod + def setup_logging(verbose=None, quiet=None, logtype=None): + """ + This function sets up logging based on the logtype requested. 
+ The 'none' level outputs no logs at all + The 'cockpit' level outputs just logs for the cockpit logger + The 'nocolor' level prints out normal log msgs (no cockpit) without color + The 'color' level prints out normal log msgs (no cockpit) with color + """ + + # If no logtype was set then let's have a sane default + # If connected to a tty, then default to color, else, no color + if not logtype: + if sys.stdout.isatty(): + logtype = 'color' + else: + logtype = 'nocolor' + + # Determine what logging level we should use + if verbose: + logging_level = logging.DEBUG + elif quiet: + logging_level = logging.WARNING + else: + logging_level = logging.INFO + + # Get the loggers and clear out the handlers (allows this function + # to be ran more than once) + logger = logging.getLogger(LOGGER_DEFAULT) + logger.handlers = [] + cockpit_logger = logging.getLogger(LOGGER_COCKPIT) + cockpit_logger.handlers = [] + + if logtype == 'none': + # blank out both loggers + logger.addHandler(logging.NullHandler()) + cockpit_logger.addHandler(logging.NullHandler()) + return + + if logtype == 'cockpit': + # blank out normal log messages + logger.addHandler(logging.NullHandler()) + + # configure cockpit logger + handler = logging.StreamHandler(stream=sys.stdout) + formatter = logging.Formatter('atomicapp.status.%(levelname)s.message=%(message)s') + handler.setFormatter(formatter) + cockpit_logger.addHandler(handler) + cockpit_logger.setLevel(logging_level) + return + + if logtype == 'nocolor': + # blank out cockpit log messages + cockpit_logger.addHandler(logging.NullHandler()) + + # configure logger for basic no color printing to stdout + handler = logging.StreamHandler(stream=sys.stdout) + formatter = logging.Formatter('%(asctime)s - [%(levelname)s] - %(filename)s - %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging_level) + return + + if logtype == 'color': + # blank out cockpit log messages + 
cockpit_logger.addHandler(logging.NullHandler()) + + # configure logger for color printing to stdout + handler = logging.StreamHandler(stream=sys.stdout) + formatter = colorizeOutputFormatter('%(asctime)s - [%(levelname)s] - %(filename)s - %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + logger.setLevel(logging_level) + return + + # If we made it here then there is an error + raise Exception("Invalid logging output type: {}".format(logtype)) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index d1fc4d35..2cf50ed6 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -25,7 +25,7 @@ from lockfile import LockFile from lockfile import AlreadyLocked -from atomicapp import set_logging +from atomicapp.applogging import Logging from atomicapp.constants import (__ATOMICAPPVERSION__, __NULECULESPECVERSION__, ANSWERS_FILE, @@ -34,12 +34,13 @@ CACHE_DIR, HOST_DIR, LOCK_FILE, + LOGGER_DEFAULT, PROVIDERS) from atomicapp.nulecule import NuleculeManager from atomicapp.nulecule.exceptions import NuleculeException from atomicapp.utils import Utils -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) def print_app_location(app_path): @@ -218,7 +219,7 @@ def create_parser(self): action="store_true", help=( "Don't actually call provider. The commands that should be " - "run will be sent to stdout but not run.")) + "run will be logged but not run.")) globals_parser.add_argument( "--answers-format", dest="answers_format", @@ -249,6 +250,18 @@ def create_parser(self): "--providerapi", dest="providerapi", help='Value for providerapi answers option.') + globals_parser.add_argument( + "--logtype", + dest="logtype", + choices=['cockpit', 'color', 'nocolor', 'none'], + help=""" + Override the default logging output. The options are: + nocolor: we will only log to stdout; + color: log to stdout with color; + cockpit: used with cockpit integration; + none: atomicapp will disable any logging. 
+ If nothing is set and logging to file then 'nocolor' by default. + If nothing is set and logging to tty then 'color' by default.""") # === "run" SUBPARSER === run_subparser = toplevel_subparsers.add_parser( @@ -367,6 +380,9 @@ def create_parser(self): def run(self): cmdline = sys.argv[1:] # Grab args from cmdline + # Initial setup of logging (to allow for a few early debug statements) + Logging.setup_logging(verbose=True, quiet=False) + # If we are running in an openshift pod (via `oc new-app`) then # there is no cmdline but we want to default to "atomicapp run". if Utils.running_on_openshift(): @@ -399,11 +415,15 @@ def run(self): if args.mode: args.action = args.mode # Allow mode to override 'action' cmdline.insert(0, args.action) # Place 'action' at front - logger.info("Action/Mode Selected is: %s" % args.action) # Finally, parse args and give error if necessary args = self.parser.parse_args(cmdline) + # Setup logging (now with arguments from cmdline) and log a few msgs + Logging.setup_logging(args.verbose, args.quiet, args.logtype) + logger.info("Action/Mode Selected is: %s" % args.action) + logger.debug("Final parsed cmdline: {}".format(' '.join(cmdline))) + # In the case of Atomic CLI we want to allow the user to specify # a directory if they want to for "run". For that reason we won't # default the RUN label for Atomic App to provide an app_spec argument. 
@@ -425,17 +445,6 @@ def run(self): if hasattr(args, item) and getattr(args, item) is not None: args.cli_answers[item] = getattr(args, item) - # Set logging level - if args.verbose: - set_logging(level=logging.DEBUG) - elif args.quiet: - set_logging(level=logging.WARNING) - else: - set_logging(level=logging.INFO) - - # Now that we have set the logging level let's print out the cmdline - logger.debug("Final parsed cmdline: {}".format(' '.join(cmdline))) - lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE)) try: lock.acquire(timeout=-1) diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 04c61806..ab2a89cd 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -54,6 +54,10 @@ ANSWERS_FILE_SAMPLE_FORMAT = 'ini' WORKDIR = ".workdir" LOCK_FILE = "/run/lock/atomicapp.lock" + +LOGGER_DEFAULT = "atomicapp" +LOGGER_COCKPIT = "cockpit" + HOST_DIR = "/host" DEFAULT_PROVIDER = "kubernetes" diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index f7efca7b..09016c79 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -10,6 +10,7 @@ from atomicapp.constants import (APP_ENT_PATH, EXTERNAL_APP_DIR, GLOBAL_CONF, + LOGGER_DEFAULT, MAIN_FILE, RESOURCE_KEY, PARAMS_KEY, @@ -27,7 +28,7 @@ from jsonpointer import resolve_pointer, set_pointer, JsonPointerException -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class Nulecule(NuleculeBase): diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index cb50c80f..29126c10 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -4,11 +4,12 @@ import logging from atomicapp.constants import (APP_ENT_PATH, + LOGGER_DEFAULT, MAIN_FILE) from atomicapp.utils import Utils from atomicapp.nulecule.exceptions import NuleculeException -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class DockerHandler(object): diff --git a/atomicapp/nulecule/main.py 
b/atomicapp/nulecule/main.py index 6c38ace5..88cd8a62 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -14,13 +14,14 @@ ANSWERS_FILE_SAMPLE, ANSWERS_RUNTIME_FILE, DEFAULT_ANSWERS, + LOGGER_DEFAULT, MAIN_FILE, PROVIDER_KEY) from atomicapp.nulecule.base import Nulecule from atomicapp.nulecule.exceptions import NuleculeException from atomicapp.utils import Utils -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class NuleculeManager(object): diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index 85f52e2e..96a43f11 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -26,9 +26,11 @@ import logging from utils import Utils -from constants import HOST_DIR, PROVIDER_CONFIG_KEY +from constants import (HOST_DIR, + LOGGER_DEFAULT, + PROVIDER_CONFIG_KEY) -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class Provider(object): diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index 6b116c8a..a157ccc7 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -21,11 +21,13 @@ import subprocess import re import logging -from atomicapp.constants import DEFAULT_CONTAINER_NAME, DEFAULT_NAMESPACE +from atomicapp.constants import (DEFAULT_CONTAINER_NAME, + DEFAULT_NAMESPACE, + LOGGER_DEFAULT) from atomicapp.plugin import Provider, ProviderFailedException from atomicapp.utils import Utils -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class DockerProvider(Provider): diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index 600e28fa..9d6545b7 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -22,10 +22,11 @@ import os from string import Template +from atomicapp.constants import LOGGER_DEFAULT from atomicapp.plugin import Provider, ProviderFailedException from atomicapp.utils import printErrorStatus, Utils -logger = 
logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class KubernetesProvider(Provider): diff --git a/atomicapp/providers/marathon.py b/atomicapp/providers/marathon.py index 40d2d7f7..e98bc3ae 100644 --- a/atomicapp/providers/marathon.py +++ b/atomicapp/providers/marathon.py @@ -21,12 +21,13 @@ import urlparse import logging import os +from atomicapp.constants import LOGGER_DEFAULT from atomicapp.plugin import Provider, ProviderFailedException from atomicapp.utils import printErrorStatus from atomicapp.utils import Utils from atomicapp.constants import PROVIDER_API_KEY -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class Marathon(Provider): diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 3695d12f..0a73b811 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -32,13 +32,14 @@ from atomicapp.constants import (ACCESS_TOKEN_KEY, ANSWERS_FILE, DEFAULT_NAMESPACE, + LOGGER_DEFAULT, NAMESPACE_KEY, PROVIDER_API_KEY, PROVIDER_TLS_VERIFY_KEY, PROVIDER_CA_KEY) from requests.exceptions import SSLError import logging -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) # If running in an openshift POD via `oc new-app`, the ca file is here OPENSHIFT_POD_CA_FILE = "/run/secrets/kubernetes.io/serviceaccount/ca.crt" diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index 899b26d2..f618ffbc 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -1,9 +1,10 @@ import logging -from atomicapp.constants import REQUIREMENT_FUNCTIONS +from atomicapp.constants import (LOGGER_DEFAULT, + REQUIREMENT_FUNCTIONS) from atomicapp.plugin import Plugin -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class Requirements: diff --git a/atomicapp/utils.py b/atomicapp/utils.py index d25bd6d8..7808aedb 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -36,11 +36,12 @@ 
CACHE_DIR, EXTERNAL_APP_DIR, HOST_DIR, + LOGGER_DEFAULT, WORKDIR) __all__ = ('Utils') -logger = logging.getLogger(__name__) +logger = logging.getLogger(LOGGER_DEFAULT) class AtomicAppUtilsException(Exception): From 2b8264c36f32ee97bc50766e5cd8e3ad5e860d62 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Thu, 18 Feb 2016 22:59:15 -0500 Subject: [PATCH 051/193] logging: add cockpit logging output Add in messages that the cockpit UI is concerned with. Also remove old print* functions from utils as they are no longer needed. Fixes #432 --- atomicapp/nulecule/base.py | 7 +++++++ atomicapp/nulecule/container.py | 6 ++++++ atomicapp/nulecule/lib.py | 6 ++++++ atomicapp/nulecule/main.py | 4 ++++ atomicapp/providers/kubernetes.py | 8 +++++--- atomicapp/providers/marathon.py | 9 +++++---- atomicapp/utils.py | 16 +++------------- 7 files changed, 36 insertions(+), 20 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 09016c79..fbac1ec2 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -10,6 +10,7 @@ from atomicapp.constants import (APP_ENT_PATH, EXTERNAL_APP_DIR, GLOBAL_CONF, + LOGGER_COCKPIT, LOGGER_DEFAULT, MAIN_FILE, RESOURCE_KEY, @@ -28,6 +29,7 @@ from jsonpointer import resolve_pointer, set_pointer, JsonPointerException +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -101,6 +103,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, docker_handler = DockerHandler(dryrun=dryrun) docker_handler.pull(image) docker_handler.extract(image, APP_ENT_PATH, dest, update) + cockpit_logger.info("All dependencies installed successfully.") return cls.load_from_path( dest, config=config, namespace=namespace, nodeps=nodeps, dryrun=dryrun, update=update) @@ -160,6 +163,7 @@ def run(self, provider_key=None, dryrun=False): # Process components for component in self.components: component.run(provider_key, dryrun) + cockpit_logger.info("Component %s installed 
successfully" % provider_key) def stop(self, provider_key=None, dryrun=False): """ @@ -272,6 +276,7 @@ def load(self, nodeps=False, dryrun=False): """ Load external application of the Nulecule component. """ + cockpit_logger.info("Loading app %s ." % self.name) if self.source: if nodeps: logger.info( @@ -283,6 +288,7 @@ def run(self, provider_key, dryrun=False): """ Run the Nulecule component with the specified provider, """ + cockpit_logger.info("Deploying component %s ..." % self.name) if self._app: self._app.run(provider_key, dryrun) return @@ -347,6 +353,7 @@ def load_external_application(self, dryrun=False, update=False): update=update ) self._app = nulecule + cockpit_logger.info("Copied app successfully.") @property def components(self): diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 29126c10..cc40b426 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -4,11 +4,13 @@ import logging from atomicapp.constants import (APP_ENT_PATH, + LOGGER_COCKPIT, LOGGER_DEFAULT, MAIN_FILE) from atomicapp.utils import Utils from atomicapp.nulecule.exceptions import NuleculeException +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -48,6 +50,7 @@ def pull(self, image, update=False): """ if not self.is_image_present(image) or update: logger.info('Pulling Docker image: %s' % image) + cockpit_logger.info('Pulling Docker image: %s' % image) pull_cmd = [self.docker_cli, 'pull', image] logger.debug(' '.join(pull_cmd)) else: @@ -59,6 +62,8 @@ def pull(self, image, update=False): elif subprocess.call(pull_cmd) != 0: raise Exception("Could not pull Docker image %s" % image) + cockpit_logger.info('Skipping pulling Docker image: %s' % image) + def extract(self, image, source, dest, update=False): """ Extracts content from a directory in a Docker image to specified @@ -110,6 +115,7 @@ def extract(self, image, source, dest, update=False): if os.path.exists(mainfile): 
existing_id = Utils.getAppId(mainfile) new_id = Utils.getAppId(tmpmainfile) + cockpit_logger.info("Loading app_id %s ." % new_id) if existing_id != new_id: raise NuleculeException( "Existing app (%s) and requested app (%s) differ" % diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index 4dd41f39..c1e052da 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -1,11 +1,16 @@ # -*- coding: utf-8 -*- +import logging + from atomicapp.constants import (GLOBAL_CONF, + LOGGER_COCKPIT, NAME_KEY, DEFAULTNAME_KEY, PROVIDER_KEY) from atomicapp.utils import Utils from atomicapp.plugin import Plugin +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) + class NuleculeBase(object): @@ -44,6 +49,7 @@ def load_config(self, config, ask=False, skip_asking=False): config.get(GLOBAL_CONF, {}).get(param[NAME_KEY]) if value is None and (ask or ( not skip_asking and param.get(DEFAULTNAME_KEY) is None)): + cockpit_logger.info("%s is missing in answers.conf." % param[NAME_KEY]) value = Utils.askFor(param[NAME_KEY], param) elif value is None: value = param.get(DEFAULTNAME_KEY) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 88cd8a62..79a5a723 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -14,6 +14,7 @@ ANSWERS_FILE_SAMPLE, ANSWERS_RUNTIME_FILE, DEFAULT_ANSWERS, + LOGGER_COCKPIT, LOGGER_DEFAULT, MAIN_FILE, PROVIDER_KEY) @@ -21,6 +22,7 @@ from atomicapp.nulecule.exceptions import NuleculeException from atomicapp.utils import Utils +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -192,6 +194,8 @@ def fetch(self, nodeps=False, update=False, dryrun=False, os.path.join(self.app_path, ANSWERS_FILE_SAMPLE), runtime_answers, answers_format) + cockpit_logger.info("Install Successful.") + def run(self, cli_provider, answers_output, ask, answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): """ diff --git a/atomicapp/providers/kubernetes.py 
b/atomicapp/providers/kubernetes.py index 9d6545b7..ad6c8be8 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -22,10 +22,12 @@ import os from string import Template -from atomicapp.constants import LOGGER_DEFAULT +from atomicapp.constants import (LOGGER_COCKPIT, + LOGGER_DEFAULT) from atomicapp.plugin import Provider, ProviderFailedException -from atomicapp.utils import printErrorStatus, Utils +from atomicapp.utils import Utils +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -124,7 +126,7 @@ def process_k8s_artifacts(self): except Exception: msg = "Error processing %s artifcats, Error:" % os.path.join( self.path, artifact) - printErrorStatus(msg) + cockpit_logger.error(msg) raise if "kind" in data: self.k8s_manifests.append((data["kind"].lower(), artifact)) diff --git a/atomicapp/providers/marathon.py b/atomicapp/providers/marathon.py index e98bc3ae..c22165d8 100644 --- a/atomicapp/providers/marathon.py +++ b/atomicapp/providers/marathon.py @@ -21,12 +21,13 @@ import urlparse import logging import os -from atomicapp.constants import LOGGER_DEFAULT +from atomicapp.constants import (LOGGER_COCKPIT, + LOGGER_DEFAULT) from atomicapp.plugin import Provider, ProviderFailedException -from atomicapp.utils import printErrorStatus from atomicapp.utils import Utils from atomicapp.constants import PROVIDER_API_KEY +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -116,10 +117,10 @@ def _process_artifacts(self): # every marathon app has to have id. 'id' key is also used for showing messages if "id" not in data.keys(): msg = "Error processing %s artifact. 
There is no id" % artifact - printErrorStatus(msg) + cockpit_logger.error(msg) raise ProviderFailedException(msg) except anymarkup.AnyMarkupError, e: msg = "Error processing artifact - %s" % e - printErrorStatus(msg) + cockpit_logger.error(msg) raise ProviderFailedException(msg) self.marathon_artifacts.append(data) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 7808aedb..bb7a77c3 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -36,29 +36,19 @@ CACHE_DIR, EXTERNAL_APP_DIR, HOST_DIR, + LOGGER_COCKPIT, LOGGER_DEFAULT, WORKDIR) __all__ = ('Utils') +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) class AtomicAppUtilsException(Exception): pass -# Following Methods(printStatus, printErrorStatus) -# are required for Cockpit or thirdparty management tool integration -# DONOT change the atomicapp.status.* prefix in the logger method. - - -def printStatus(message): - logger.info("atomicapp.status.info.message=" + str(message)) - - -def printErrorStatus(message): - logger.info("atomicapp.status.error.message=" + str(message)) - def find_binary(executable, path=None): """Tries to find 'executable' in the directories listed in 'path'. @@ -271,7 +261,7 @@ def run_cmd(cmd, checkexitcode=True, stdin=None): # we were asked not to. 
if checkexitcode: if ec != 0: - printErrorStatus("cmd failed: %s" % str(cmd)) # For cockpit + cockpit_logger.error("cmd failed: %s" % str(cmd)) raise AtomicAppUtilsException( "cmd: %s failed: \n%s" % (str(cmd), stderr)) From 86e87be534bfe41ab71fc03b9c89afd6b8327802 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Fri, 19 Feb 2016 11:22:11 -0500 Subject: [PATCH 052/193] tests: fix test to look for output in stdout vs stderr --- tests/units/cli/test_default_provider.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/units/cli/test_default_provider.py b/tests/units/cli/test_default_provider.py index 73437966..cb16a9b2 100644 --- a/tests/units/cli/test_default_provider.py +++ b/tests/units/cli/test_default_provider.py @@ -61,16 +61,16 @@ def test_run_helloapache_app(self, capsys): # Run the dry-run command with pytest.raises(SystemExit) as exec_info: self.exec_cli(command) - nil, out = capsys.readouterr() + stdout, stderr = capsys.readouterr() # Tear down and remove all those useless generated files self.tear_down() # Print out what we've captured just in case the test fails - print out + print stdout # Since this a Docker-only provider test, docker *should* be in it, NOT Kubernetes - assert "u'provider': u'docker'" in out - assert "Deploying to Kubernetes" not in out + assert "u'provider': u'docker'" in stdout + assert "Deploying to Kubernetes" not in stdout assert exec_info.value.code == 0 From 6181b3ac481465247ef0544e34e35a36c9e81347 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Mon, 22 Feb 2016 18:16:38 +0530 Subject: [PATCH 053/193] Remove extra whitespaces from logging output. 
--- atomicapp/nulecule/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 79a5a723..1ce88b8d 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -89,7 +89,7 @@ def __init__(self, app_spec, destination=None, self.app_path = Utils.getNewAppCacheDir(self.image) logger.debug("NuleculeManager init app_path: %s", self.app_path) - logger.debug("NuleculeManager init image: %s", self.image) + logger.debug("NuleculeManager init image: %s", self.image) # Create the app_path if it doesn't exist yet if not os.path.isdir(self.app_path): From 0b9b5807a4ab61bafae4a5b837be0e2cbce868c0 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 18 Feb 2016 11:19:55 -0500 Subject: [PATCH 054/193] Fail if unable to find artifact --- atomicapp/nulecule/base.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index fbac1ec2..7184e3db 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -548,6 +548,8 @@ def _get_artifact_paths_for_path(self, path): immediate children, i.e., we do not deal with nested artifact directories at this moment. + If a file or directory is not found, raise an exception. 
+ Args: path (str): Local path @@ -558,9 +560,14 @@ def _get_artifact_paths_for_path(self, path): if os.path.isfile(path): artifact_paths.append(path) elif os.path.isdir(path): + if os.listdir(path) == []: + raise NuleculeException("Artifact directory %s is empty" % path) for dir_child in os.listdir(path): dir_child_path = os.path.join(path, dir_child) if dir_child.startswith('.') or os.path.isdir(dir_child_path): continue artifact_paths.append(dir_child_path) + else: + raise NuleculeException("Unable to find artifact %s" % path) + return artifact_paths From b3060ad1e62eadd905f83be0b33c22a45c365110 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 10 Feb 2016 09:14:29 -0500 Subject: [PATCH 055/193] Change order of getting context --- atomicapp/nulecule/base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index fbac1ec2..469bf7e1 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -380,14 +380,15 @@ def render(self, provider_key=None, dryrun=False): if self._app: self._app.render(provider_key=provider_key, dryrun=dryrun) return + if self.artifacts is None: raise NuleculeException( "No artifacts specified in the Nulecule file") - context = self.get_context() if provider_key and provider_key not in self.artifacts: raise NuleculeException( "Data for provider \"%s\" are not part of this app" % provider_key) + context = self.get_context() for provider in self.artifacts: if provider_key and provider != provider_key: continue From 71d8cb7b3d355e08e95a2639708d5a8316d5a897 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 23 Feb 2016 11:14:14 -0500 Subject: [PATCH 056/193] Update readme --- README.md | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index a9ccc516..79585f2e 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,15 @@ # Atomic App -Atomic App is a reference implementation 
of the [Nulecule Specification](http://www.projectatomic.io/docs/nulecule/). It can be used to bootstrap container applications and to install and run them. Atomic App is designed to be run in a container context. Examples using this tool may be found in the [Nulecule examples directory](https://github.com/projectatomic/nulecule/tree/master/examples). +Atomic App is a reference implementation of the [Nulecule specification](https://github.com/projectatomic/nulecule). It can be used to bootstrap packaged container environments and then run them. Atomic App is designed to be ran within a container. + +Examples of this tool may be found within the [Nulecule library repo](https://github.com/projectatomic/nulecule/tree/master/examples). ## Getting Started -Atomic App is packaged as a container. End-users typically do not install the software from source. Instead use the atomicapp container as the `FROM` line in a Dockerfile and package your application on top. For example: +Atomic App itself is packaged as a container. End-users typically do not install the software from source. Instead use the `atomicapp` container as the `FROM` line in a Dockerfile and package your application on top. For example: ``` -FROM projectatomic/atomicapp:0.4.2 +FROM projectatomic/atomicapp MAINTAINER Your Name @@ -15,17 +17,17 @@ ADD /Nulecule /Dockerfile README.md /application-entity/ ADD /artifacts /application-entity/artifacts ``` -For more information see the [Atomic App getting started guide](http://www.projectatomic.io/docs/atomicapp/). +For more information see the [Nulecule getting started guide](https://github.com/projectatomic/nulecule/blob/master/docs/getting-started.md). ## Developers First of all, clone the github repository: `git clone https://github.com/projectatomic/atomicapp`. -### Install this project +### Installing Atomic App locally Simply run ``` -pip install . 
+make install ``` If you want to do some changes to the code, I suggest to do: @@ -36,36 +38,35 @@ export PYTHONPATH=`pwd`:$PYTHONPATH alias atomicapp="python `pwd`/atomicapp/cli/main.py" ``` -### Build +### Building for containerized execution ``` docker build -t [TAG] . ``` -Just a call to Docker to package up the application and tag the resulting image. +Use 'docker build' to package up the application and tag the resulting image. -### Install and Run +### Fetch and run ``` -atomicapp [--dry-run] [-a answers.conf] install|run [--recursive] [--update] [--destination DST_PATH] APP|PATH +atomicapp [--dry-run] [-v] [-a answers.conf] fetch|run|stop|genanswers [--provider docker] [--destination DST_PATH] APP|PATH ``` -Pulls the application and it's dependencies. If the last argument is +Pulls the application and its dependencies. If the last argument is existing path, it looks for `Nulecule` file there instead of pulling anything. -* `--recursive yes|no` Pull whole dependency tree -* `--update` Overwrite any existing files +* `--provider docker` Use the Docker provider within the Atomic App * `--destination DST_PATH` Unpack the application into given directory instead of current directory -* `APP` Name of the image containing the application (f.e. `vpavlin/wp-app`) -* `PATH` Path to a directory with installed (i.e. result of `atomicapp install ...`) app +* `APP` Name of the image containing the application (ex. `projectatomic/apache-centos7-atomicapp`) +* `PATH` Path to a directory with installed (ex. result of `atomicapp fetch...`) app -Action `run` performs `install` prior its own tasks are executed if `APP` is given. When `run` is selected, providers' code is invoked and containers are deployed. +Action `run` performs `fetch` prior to its own tasks if an `APP` is provided. Otherwise, it will use its respective `PATH`. When `run` is selected, providers' code is invoked and containers are deployed. ## Providers -Providers represent various deployment targets. 
They can be added by placing a file called `provider_name.py` in `providers/`. This file needs to implement the interface explained in (providers/README.md). For a detailed description of all providers available see the [Provider description](docs/providers.md). +Providers represent various deployment targets. They can be added by placing the artifact within the respective in `provider/` folder. For example, placing `deploy_pod.yml` within `providers/kubernetes/`. For a detailed description of all providers available see [docs/providers.md](docs/providers.md). ## Dependencies -Please see [REQUIREMENTS](https://github.com/projectatomic/atomicapp/blob/master/docs/requirements.md) for current Atomic App dependencies. +See [REQUIREMENTS](https://github.com/projectatomic/atomicapp/blob/master/docs/requirements.md) for current Atomic App dependencies. ##Communication channels @@ -82,7 +83,7 @@ Please see [REQUIREMENTS](https://github.com/projectatomic/atomicapp/blob/master # Copyright -Copyright (C) 2015 Red Hat Inc. +Copyright (C) 2016 Red Hat Inc. This program is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by From e2c57cc8f81507a45f1a129c239238f687e0aaa2 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Mon, 29 Feb 2016 17:38:55 -0500 Subject: [PATCH 057/193] logging: add more context to filename in verbose mode Previously where the filename would have been 'main.py' in the log messages, it would now be 'cli/main.py' for verbose mode. 
--- atomicapp/applogging.py | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py index ad5030a6..5d63ae2c 100644 --- a/atomicapp/applogging.py +++ b/atomicapp/applogging.py @@ -24,7 +24,24 @@ LOGGER_DEFAULT) -class colorizeOutputFormatter(logging.Formatter): +class customOutputFormatter(logging.Formatter): + """ + A class that adds 'longerfilename' support to the logging formatter + This 'longerfilename' will be filename + parent dir. + """ + + def format(self, record): + + # Add the 'longerfilename' field to the record dict. This is + # then used by the Formatter in the logging library when + # formatting the message string. + record.longerfilename = '/'.join(record.pathname.split('/')[-2:]) + + # Call the parent class to do formatting. + return super(customOutputFormatter, self).format(record) + + +class colorizeOutputFormatter(customOutputFormatter): """ A class to colorize the log msgs based on log level """ @@ -99,6 +116,13 @@ def setup_logging(verbose=None, quiet=None, logtype=None): else: logging_level = logging.INFO + # Set the format string to use based on the logging level. + # For debug we include more of the filename than for !debug. 
+ if logging_level == logging.DEBUG: + formatstr = '%(asctime)s - [%(levelname)s] - %(longerfilename)s - %(message)s' + else: + formatstr = '%(asctime)s - [%(levelname)s] - %(filename)s - %(message)s' + # Get the loggers and clear out the handlers (allows this function # to be ran more than once) logger = logging.getLogger(LOGGER_DEFAULT) @@ -130,7 +154,7 @@ def setup_logging(verbose=None, quiet=None, logtype=None): # configure logger for basic no color printing to stdout handler = logging.StreamHandler(stream=sys.stdout) - formatter = logging.Formatter('%(asctime)s - [%(levelname)s] - %(filename)s - %(message)s') + formatter = customOutputFormatter(formatstr) handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging_level) @@ -142,7 +166,7 @@ def setup_logging(verbose=None, quiet=None, logtype=None): # configure logger for color printing to stdout handler = logging.StreamHandler(stream=sys.stdout) - formatter = colorizeOutputFormatter('%(asctime)s - [%(levelname)s] - %(filename)s - %(message)s') + formatter = colorizeOutputFormatter(formatstr) handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging_level) From 5b946258bbf4f251d200a203eacb767cb621fd58 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 1 Mar 2016 11:11:10 -0500 Subject: [PATCH 058/193] 0.4.3 Release --- CHANGELOG.md | 47 +++++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 7 files changed, 53 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d6a6ec83..e62064ad 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,50 @@ +## Atomic App 0.4.3 (03-01-2016) + +You'll now see pretty colors with logging / output! + +With this release, we've refactored our logging formatter making it easier to decipher between information, debug, warning and errors. 
+ +You are now able to specify what logging format you'd like to output via the command line: + +``` + --logtype {cockpit,color,nocolor,none} + Override the default logging output. The options are: + nocolor: we will only log to stdout; color: log to + stdout with color; cockpit: used with cockpit + integration; none: atomicapp will disable any logging. + If nothing is set and logging to file then 'nocolor' + by default. If nothing is set and logging to tty then + 'color' by default. +``` + +The main features are: + + - A new logging mechanism that outputs color-coordinated logging messages + - Added CLI commands for color, nocolor, cockpit and 'none' output + +UI: + + - Failure on finding no artifacts + +Other: + + - Readme updates / typo fixes + +``` +Charlie Drage (3): + Fail if unable to find artifact + Change order of getting context + Update readme + +Dusty Mabe (4): + logging: Add in Atomic App Logging class + logging: add cockpit logging output + tests: fix test to look for output in stdout vs stderr + +Shubham Minglani (1): + Remove extra whitespaces from logging output. +``` + ## Atomic App 0.4.2 (02-18-2016) As we start to get closer to a 1.0.0 release, we continue to focus on tests and user interaction. This weeks release focus on both as well as a minor feature. diff --git a/Dockerfile b/Dockerfile index 43714e23..896e5e2e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.2" +ENV ATOMICAPPVERSION="0.4.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 43714e23..896e5e2e 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.4.2" +ENV ATOMICAPPVERSION="0.4.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 2d159f1a..719bd5f0 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.2" +ENV ATOMICAPPVERSION="0.4.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index cd49acc0..8fc2e6a1 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.2" +ENV ATOMICAPPVERSION="0.4.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/atomicapp/constants.py b/atomicapp/constants.py index ab2a89cd..2c2a6d38 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.4.2' +__ATOMICAPPVERSION__ = '0.4.3' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index 09ea0bb1..b4a99405 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.4.2', + version='0.4.3', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From bed3215563a16907c9415a212b2cfe00555ed260 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Mon, 22 Feb 2016 15:12:41 +0530 Subject: [PATCH 
059/193] Handle docker pull exception, improve #441, fix #568 --- atomicapp/cli/main.py | 5 ++++- atomicapp/nulecule/container.py | 4 ++-- atomicapp/nulecule/exceptions.py | 4 ++++ 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 2cf50ed6..6de39ca7 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -37,7 +37,7 @@ LOGGER_DEFAULT, PROVIDERS) from atomicapp.nulecule import NuleculeManager -from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.exceptions import NuleculeException, DockerException from atomicapp.utils import Utils logger = logging.getLogger(LOGGER_DEFAULT) @@ -106,6 +106,9 @@ def cli_run(args): else: print_app_location(nm.app_path) sys.exit(0) + except DockerException as e: + logger.error(e) + sys.exit(1) except NuleculeException as e: logger.error(e) sys.exit(1) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index cc40b426..bdb30d43 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -8,7 +8,7 @@ LOGGER_DEFAULT, MAIN_FILE) from atomicapp.utils import Utils -from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.exceptions import NuleculeException, DockerException cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -60,7 +60,7 @@ def pull(self, image, update=False): if self.dryrun: logger.info("DRY-RUN: %s", pull_cmd) elif subprocess.call(pull_cmd) != 0: - raise Exception("Could not pull Docker image %s" % image) + raise DockerException("Could not pull Docker image %s" % image) cockpit_logger.info('Skipping pulling Docker image: %s' % image) diff --git a/atomicapp/nulecule/exceptions.py b/atomicapp/nulecule/exceptions.py index ca2dd159..39a7b926 100644 --- a/atomicapp/nulecule/exceptions.py +++ b/atomicapp/nulecule/exceptions.py @@ -1,5 +1,9 @@ # -*- coding: utf-8 -*- +class DockerException(Exception): + 
pass + + class NuleculeException(Exception): pass From 0a76aa293c2cbcf145c0fce3492f7679df02cb87 Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Thu, 25 Feb 2016 00:07:21 -0500 Subject: [PATCH 060/193] Install requirements before make test In a clean environment where nothing is installed and if I run `make test` then the required modules should be installed first. So before `make test` added pip install from requirements.txt and test-requirements.txt. Fixes issue #574 --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 7f42b19e..792651fa 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,8 @@ install: python setup.py install test: + pip install -qr requirements.txt + pip install -qr test-requirements.txt python -m pytest -vv image: From 063a59b08ed9e8b313bdb0c6f0cee878f8a1704c Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Wed, 17 Feb 2016 04:52:56 -0500 Subject: [PATCH 061/193] Wrong provider name in answers.conf, exits AtomicApp with readable error When provider name in answers.conf is wrong then AtomicApp fails with stack-trace and no error output which is user understandable. This happened because there was no check for the condition if given provider_key does not match with any of the providers that are supported for now. Fixes Issue #562. 
--- atomicapp/nulecule/lib.py | 9 ++++++- tests/units/nulecule/test_lib.py | 40 ++++++++++++++++++++++++++++++++ tests/units/test_plugin.py | 27 +++++++++++++++++++++ 3 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 tests/units/nulecule/test_lib.py create mode 100644 tests/units/test_plugin.py diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index c1e052da..b38e47e4 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -5,9 +5,11 @@ LOGGER_COCKPIT, NAME_KEY, DEFAULTNAME_KEY, - PROVIDER_KEY) + PROVIDER_KEY, + PROVIDERS) from atomicapp.utils import Utils from atomicapp.plugin import Plugin +from atomicapp.nulecule.exceptions import NuleculeException cockpit_logger = logging.getLogger(LOGGER_COCKPIT) @@ -101,6 +103,11 @@ def get_provider(self, provider_key=None, dry=False): if provider_key is None: provider_key = self.config.get(GLOBAL_CONF)[PROVIDER_KEY] provider_class = self.plugin.getProvider(provider_key) + if provider_class is None: + raise NuleculeException("Invalid Provider - '{}', provided in " + "answers.conf (choose from {})" + .format(provider_key, ', ' + .join(PROVIDERS))) return provider_key, provider_class( self.get_context(), self.basepath, dry) diff --git a/tests/units/nulecule/test_lib.py b/tests/units/nulecule/test_lib.py new file mode 100644 index 00000000..742738d8 --- /dev/null +++ b/tests/units/nulecule/test_lib.py @@ -0,0 +1,40 @@ +import mock +import unittest + +from atomicapp.nulecule.lib import NuleculeBase +from atomicapp.nulecule.exceptions import NuleculeException + + +class TestNuleculeBaseGetProvider(unittest.TestCase): + """ Test NuleculeBase get_provider""" + def test_get_provider_success(self): + """ + Test if get_provider method when passed a particular valid key returns + the corresponding class. 
+ """ + nb = NuleculeBase(params = [], basepath = '', namespace = '') + provider_key = u'openshift' + # method `get_provider` will read from this config, we give it here + # since we have neither provided it before nor it is auto-generated + nb.config = {u'general': {u'provider': provider_key}} + + return_provider = mock.Mock() + # mocking return value of method plugin.getProvider,because it returns + # provider class and that class gets called with values + nb.plugin.getProvider = mock.Mock(return_value=return_provider) + ret_provider_key, ret_provider = nb.get_provider() + self.assertEqual(provider_key, ret_provider_key) + return_provider.assert_called_with({u'provider': provider_key}, + '', False) + + def test_get_provider_failure(self): + """ + Test if get_provider method when passed an invalid key raises an + exception. + """ + nb = NuleculeBase(params = [], basepath = '', namespace = '') + # purposefully give the wrong provider key + provider_key = u'mesos' + nb.config = {u'general': {u'provider': provider_key}} + with self.assertRaises(NuleculeException): + nb.get_provider() diff --git a/tests/units/test_plugin.py b/tests/units/test_plugin.py new file mode 100644 index 00000000..9a2699c9 --- /dev/null +++ b/tests/units/test_plugin.py @@ -0,0 +1,27 @@ +import mock +import unittest + +from atomicapp.plugin import Plugin + +class TestPluginGetProvider(unittest.TestCase): + + """Test Plugin getProvider""" + def test_getProvider(self): + """ + Test if getProvider is returning appropriate classes to the + corresponding keys. + """ + p = Plugin() + + docker_mock = mock.Mock() + kubernetes_mock = mock.Mock() + # keep some mock objects in place of the actual corresponding + # classes, getProvider reads from `plugins` dict. 
+ p.plugins = { + 'docker': docker_mock, + 'kubernetes': kubernetes_mock, + } + self.assertEqual(p.getProvider('docker'), docker_mock) + self.assertEqual(p.getProvider('kubernetes'), kubernetes_mock) + # if non-existent key provided + self.assertEqual(p.getProvider('some_random'), None) From fd6cbe34e6e250ea550f0611496bd739f2ff68e5 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 22 Feb 2016 12:00:28 -0500 Subject: [PATCH 062/193] Doc for current functions implemented by spec --- docs/spec_coverage.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 docs/spec_coverage.md diff --git a/docs/spec_coverage.md b/docs/spec_coverage.md new file mode 100644 index 00000000..f043368c --- /dev/null +++ b/docs/spec_coverage.md @@ -0,0 +1,10 @@ +# Implementation + +This is a table of a list of current functions which are implemented by Atomic App against the Nulecule (spec)[https://github.com/projectatomic/nulcule]. + +| Status | Description | Version implemented in | Notes | +|--------|-------------|------------------------|-------| +| Completed | Persistent storage | 0.3.1 | Current functionality is only for the Kubernetes provider | +| Incomplete | ssh/sftp/ftp support | - | - | +| Completed | Docker image support and extraction | 0.1.1 | - | +| Completed | XPathing | 0.2.1 | Support for JSON pointers for artifacts | From 7ae96e87a01292fbf91383f0ed1e4e3992b2f479 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 2 Mar 2016 09:23:00 -0500 Subject: [PATCH 063/193] Fix minor markdown error in spec_coverage --- docs/spec_coverage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/spec_coverage.md b/docs/spec_coverage.md index f043368c..afe4ac2a 100644 --- a/docs/spec_coverage.md +++ b/docs/spec_coverage.md @@ -1,6 +1,6 @@ # Implementation -This is a table of a list of current functions which are implemented by Atomic App against the Nulecule (spec)[https://github.com/projectatomic/nulcule]. 
+This is a table of a list of current functions which are implemented by Atomic App against the Nulecule [spec](https://github.com/projectatomic/nulecule). | Status | Description | Version implemented in | Notes | |--------|-------------|------------------------|-------| From a4815ec11d8f3b981bd999b54d69d3273154d146 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 4 Mar 2016 10:17:02 -0500 Subject: [PATCH 064/193] Major update to README and documentation --- CONTRIBUTING.md | 50 +++++++- README.md | 113 +++++++++-------- docs/images/logo.png | Bin 0 -> 77293 bytes docs/nulecule.md | 240 ++++++++++++++++++++++++++++++++++++ docs/quick_start.md | 153 +++++++++++++++++++++++ docs/start_guide.md | 288 +++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 790 insertions(+), 54 deletions(-) create mode 100644 docs/images/logo.png create mode 100644 docs/nulecule.md create mode 100644 docs/quick_start.md create mode 100644 docs/start_guide.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b53c5a99..a7a09e53 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,6 +7,48 @@ which is hosted in the [Project Atomic Organization](https://github.com/projecta These are just guidelines, not rules, use your best judgment and feel free to propose changes to this document in a pull request. +## Initial dev environment + +First of all, clone the github repository: `git clone https://github.com/projectatomic/atomicapp`.. + +### Installing Atomic App locally +Simply run + +``` +make install +``` + +If you want to do some changes to the code, I suggest to do: + +``` +cd atomicapp +export PYTHONPATH=`pwd`:$PYTHONPATH +alias atomicapp="python `pwd`/atomicapp/cli/main.py" +``` + +### Building for containerized execution +``` +docker build -t [TAG] . +``` + +Use 'docker build' to package up the application and tag the resulting image. 
+ +### Fetch and run +``` +atomicapp [--dry-run] [-v] [-a answers.conf] fetch|run|stop|genanswers [--provider docker] [--destination DST_PATH] APP|PATH +``` + +Pulls the application and its dependencies. If the last argument is +existing path, it looks for `Nulecule` file there instead of pulling anything. + +* `--provider docker` Use the Docker provider within the Atomic App +* `--destination DST_PATH` Unpack the application into given directory instead of current directory +* `APP` Name of the image containing the application (ex. `projectatomic/apache-centos7-atomicapp`) +* `PATH` Path to a directory with installed (ex. result of `atomicapp fetch...`) app + +Action `run` performs `fetch` prior to its own tasks if an `APP` is provided. Otherwise, it will use its respective `PATH`. When `run` is selected, providers' code is invoked and containers are deployed. + + ## Submitting Issues * You can create an issue [here](https://github.com/projectatomic/atomicapp/issues/new), include as many details as possible with your report. @@ -33,13 +75,11 @@ Before you submit your pull request consider the following guidelines: * Include documentation that either describe a change to a behavior of atomicapp or the changed capability to an end user of atomicapp. * Commit your changes using **a descriptive commit message**. If you are fixing an issue please include something like 'this closes issue #xyz'. -* Additionally think about implementing a git hook, as flake8 is part of the [travis-ci tests](https://travis-ci.org/projectatomic/atomicapp) it will help you pass the CI tests. +* Make sure your tests pass! As we use [travis-ci](https://travis-ci.org/projectatomic/atomicapp) with __flake8__ it's recommended to run both commands before submitting a PR. 
```shell - $ cat .git/hooks/pre-push - #!/bin/bash - - flake8 -v atomicapp + make syntax-check + make test ``` * Push your branch to GitHub: diff --git a/README.md b/README.md index 79585f2e..d70ce9e6 100644 --- a/README.md +++ b/README.md @@ -1,87 +1,102 @@ # Atomic App -Atomic App is a reference implementation of the [Nulecule specification](https://github.com/projectatomic/nulecule). It can be used to bootstrap packaged container environments and then run them. Atomic App is designed to be ran within a container. +![](docs/images/logo.png "Project Atomic") -Examples of this tool may be found within the [Nulecule library repo](https://github.com/projectatomic/nulecule/tree/master/examples). +Atomic App is a reference implementation of the [Nulecule](https://github.com/projectatomic/nulecule) specification. Packaged Atomic App containers are "Nuleculized" and each component of the package is a "Nulecule". -## Getting Started +Atomic App is used to bootstrap packaged container environments and run them on multiple container orchestrators. It is designed from the ground-up to be portable and provider pluggable. -Atomic App itself is packaged as a container. End-users typically do not install the software from source. Instead use the `atomicapp` container as the `FROM` line in a Dockerfile and package your application on top. For example: + - __A "packaged installer" for all container-based environments and applications.__ Replace all those bash and Ansible scripts with one container-based deployment tool. -``` -FROM projectatomic/atomicapp - -MAINTAINER Your Name - -ADD /Nulecule /Dockerfile README.md /application-entity/ -ADD /artifacts /application-entity/artifacts -``` + - __Target multiple providers:__ Specify the provider you want the Atomic App to run on. It supports Kubernetes, OpenShift, Mesos+Marathon and Docker. -For more information see the [Nulecule getting started guide](https://github.com/projectatomic/nulecule/blob/master/docs/getting-started.md). 
+ - __Inherit already packaged containers:__ Create composite applications by referencing other Nulecule-compliant applications. For example, plugging in an alternative well-orchestrated database in another referenced container image. -## Developers + - __Fetch and run entire environments:__ Use `atomicapp fetch` and `atomicapp run` to run pre-packaged Nuleculized containers. -First of all, clone the github repository: `git clone https://github.com/projectatomic/atomicapp`. +## Installing Atomic App +From Linux: +```sh +git clone https://github.com/projectatomic/atomicapp && cd atomicapp +make install +``` -### Installing Atomic App locally -Simply run +_or_ -``` +Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): +```sh +export RELEASE=0.4.2 +wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz +tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE.tar.gz make install ``` -If you want to do some changes to the code, I suggest to do: +## Documentation -``` -cd atomicapp -export PYTHONPATH=`pwd`:$PYTHONPATH -alias atomicapp="python `pwd`/atomicapp/cli/main.py" -``` +This README contains some high level overview information on Atomic App. The detailed documentation for Atomic App resides in the [docs](docs) directory. The index provided conveniently links to each section below: -### Building for containerized execution -``` -docker build -t [TAG] . -``` +1. [Quick start](docs/quick_start.md) +2. [Getting started](docs/start_guide.md) +3. [Providers](docs/providers.md) + 1. [Docker](docs/providers/docker/overview.md) + 2. [Kubernetes](docs/providers/kubernetes/overview.md) + 3. [OpenShift](docs/providers/openshift/overview.md) +4. [CLI](docs/cli.md) +5. [Nulecule file](docs/nulecule.md) +6. [Atomic App lifecycle](docs/atomicapp_lifecycle.md) +7. [File handling](docs/file_handling.md) +8. 
[Specification coverage](docs/spec_coverage.md) +9. [Contributing](CONTRIBUTING.md) +10. [Dependencies](docs/requirements.md) -Use 'docker build' to package up the application and tag the resulting image. -### Fetch and run -``` -atomicapp [--dry-run] [-v] [-a answers.conf] fetch|run|stop|genanswers [--provider docker] [--destination DST_PATH] APP|PATH -``` +## Getting started -Pulls the application and its dependencies. If the last argument is -existing path, it looks for `Nulecule` file there instead of pulling anything. +Atomic App can be used either natively on your OS __or__ ran via the [atomic](https://github.com/projectatomic/atomic) command on [Fedora or CentOS Atomic hosts](https://www.projectatomic.io/download/). -* `--provider docker` Use the Docker provider within the Atomic App -* `--destination DST_PATH` Unpack the application into given directory instead of current directory -* `APP` Name of the image containing the application (ex. `projectatomic/apache-centos7-atomicapp`) -* `PATH` Path to a directory with installed (ex. result of `atomicapp fetch...`) app +__Detailed instructions on [getting started](docs/start_guide.md) are available.__ Alternatively, use the [quick start guide](docs/quick_start.md) to get a Nuleculized application running immediately. -Action `run` performs `fetch` prior to its own tasks if an `APP` is provided. Otherwise, it will use its respective `PATH`. When `run` is selected, providers' code is invoked and containers are deployed. +An extended guide on the `Nulecule` file format is [also available](docs/nulecule.md). -## Providers +## Real-world examples +Atomic App can be used to launch a cluster of containers (application servers, databases, etc.). -Providers represent various deployment targets. They can be added by placing the artifact within the respective in `provider/` folder. For example, placing `deploy_pod.yml` within `providers/kubernetes/`. 
For a detailed description of all providers available see [docs/providers.md](docs/providers.md). +For a list of already packaged examples, visit the [nulecule-library](https://github.com/projectatomic/nulecule-library) repo. -## Dependencies +## Providers -See [REQUIREMENTS](https://github.com/projectatomic/atomicapp/blob/master/docs/requirements.md) for current Atomic App dependencies. +We currently support: -##Communication channels + - Docker + - Kubernetes + - OpenShift 3 + - Marathon (Mesos) -* IRC: #nulecule (On Freenode) -* Mailing List: [container-tools@redhat.com](https://www.redhat.com/mailman/listinfo/container-tools) +Providers represent various deployment targets. They can be added by placing the artifact within the respective in `artifacts/` folder. For example, placing `deploy_pod.yml` within `artifacts/kubernetes/`. -# The Badges +For a detailed description of all providers available see [docs/providers.md](docs/providers.md). +## Contributing to Atomic App [![Code Health](https://landscape.io/github/projectatomic/atomicapp/master/landscape.svg?style=flat)](https://landscape.io/github/projectatomic/atomicapp/master) [![Build Status](https://travis-ci.org/projectatomic/atomicapp.svg?branch=master)](https://travis-ci.org/projectatomic/atomicapp) [![Coverage Status](https://coveralls.io/repos/projectatomic/atomicapp/badge.svg?branch=master&service=github)](https://coveralls.io/github/projectatomic/atomicapp?branch=master) [![Issue Stats](http://issuestats.com/github/projectatomic/atomicapp/badge/pr)](http://issuestats.com/github/projectatomic/atomicapp) [![Issue Stats](http://issuestats.com/github/projectatomic/atomicapp/badge/issue)](http://issuestats.com/github/projectatomic/atomicapp) -# Copyright +First of all, awesome! We have [a development guide to help you get started!](CONTRIBUTING.md) + +If you have any issues or get stuck, feel free to open a GitHub issue or reach us at our communication channels (see below). 
+ +## Dependencies + +See [REQUIREMENTS.md](docs/requirements.md) for a list of current Atomic App dependencies. + +## Communication channels + +* IRC: __#nulecule__ on irc.freenode.net +* Mailing List: [container-tools@redhat.com](https://www.redhat.com/mailman/listinfo/container-tools) + +## Copyright Copyright (C) 2016 Red Hat Inc. diff --git a/docs/images/logo.png b/docs/images/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..a28fc9c0f2468652308373e2f75d81e0f1f8cc5e GIT binary patch literal 77293 zcmcG$Wmr}F7B`9_B@&8&v?w91bYr5lf^KKiua$dmlvhVy-#Ie~e#^_4&2zOT6pk*U`|>@FZS6mq$ZGuSG+M-*K}P zZiD~B)RlVq9PJYIzaLfUVelOs>sP9FXlPhmsDG|#V6(@;H?iy`WL{uRUnRRuLEWf~ zbrlWmE}F#iCvTlc*C(9ZZyQ|HZg&|L#3q;vnxusKKEC^t=t)fNXG!8~tVUP%I~BST z&npWwb~}aSghhmFKh0vi{hHRBiit!2lm5!BUZIr{x{8tYiwo~SRvf~+@_!D`T^AZR zg}Dm2>f1Nnms}E%$@wa*B))`Cv1#F7{_LIN`0xMs2W%YgkNB>T+&!^lbe{DUajry#E0*dk^k?tZ=p^7J=mOR`*#23 zubdpL$LW1Db92vBRPKnl?v-X-6tI!tGRnqH&$onyM@GskDELUG{uvpGo~&}?vYvii zcJF^LqZ!$%o2OE~+L@(M7HTn8+%rBtp4JXOXgN{-$os?t!nv9R#vt>nrkTyK}P!T!x^V6*C#R#X#CR>#wc|A>}q&;_(Q8n zVN86=zuzC}Ee~c<6c_Vt{(KZI4S$ns(#LSHF*Y=_u;(f=GBR=t4Hxx*v1#ehs^8oR z68{nqz~NF><#uShKK$qt9!1l1ozM2(UggX|RZ`Li5x0X}y=MH?)m3(v!YoAzY&PBb|c={md?(_0FLUr z92_AyL<|~3GD$KFL}r7Tv`kE&o0^(-JC|l=o**Zywzsx^WM&4!TPT(wG}P0B2N=ro z?4L5mroFNi^f*gtZhBgWr(<$5AwwaZ(|Jp$-;{?W;3NFOc5nHOQnrS~ix(J6i|A(N z=KA%Y@SqMj-44v-;^ML^JO-`8y)U)L?w)Xb)Tf>U2cv@l~Lt!`mqp*@x_#H26nk^S;(`mfa_?S@zkXG zJ`h~j(9pzOv|qwtUrg1&S)MrP(OiU_5m z3_MFq%f&X1nv|3jUm;{Ak(=8oqFOsBD9HVEqXe$IYT_I!OusipVsE8yDd{|tjU-?| zkdl(p+Q}&j@o#bc^W++uo3RK82)=yz5?dKz6mr-Zs@AD69Yip|I_DmTW85@y@>my+!1x8N%{uWTE)zw-q$eEewEvgmX+v6CTE0%*RY@tSoYo5{ z4EVzbM<4AV@%rVSWncgIK|GJrAB%9xR?g9)6mh*B z*5a#`lb)MP=;d{ZPq3?JWz_`@$HvLY2unN}q@gvrMC{^XD63XtTHn%N>+9y#ngx*r zr0glEGucd4bBj6R3!~R5x06iVD`Vpa3}K(kN7Y0%O8t>FmkE{Q<7T$z?j9axX1&in zcl#Ci1q6P|XZ&qxnTcI!JDyJt;cyE)VOdyQY*Oeti-exl*U=>>s|)Xsk8OV~r{=;{ 
zc|sswwi)`}!)R$Z+Le`+{$IY7r`T9pn44qc;nDN+f8X1)N02M1EAFn7M7>&SM66MEU`jS{CKxL!_9&aM58Pa!#74g#5pP~kQ__AB-^ z?Ck7&%RSFr5~HGCYHCgxWFcOLPzzt4?ZR&=VM1hP&evR?b3{i+$Csazk&(G?)nUC= zRxV+}#m1f;aKU}~8>>S6#@%=EX+&>4HZ){_moF_V(;O@xpO|o&{miIRXna#Z zKtRa=)91a*?&9{?)J3Gfzdy_5=3H~g?!_sxM4jZ;tv};sb{uYE)Aot81?UIRiP{G4 zLPgCc-D~Z_`Ehcv5pam{Nn=f#I9>KzM8pdVi;WdzeyF^pl%%z-{rpj$o+JwuA0J=& z@o^E^E##68Z>)*2@m(%1A`cG{85xm>`C8LL$nm!GSdSs8HH!CbG z+$JVwBJje+z1}J0Scz&gSFA}1si=aWtH7H9Py%mF`uXz)dOk8sR58M(s^;tCz@4{< zDd>P+rhTy!wTGO#%x~(0V?I;+9-URIvQ#D-x-%TV_5=|MXoM_+?0)!E^2HNfUC4a1 z(2s|10(_)mJw1D?6Q{r3p$~?5Jw}5YSi3?# zvmQ25niy7B9jndvX_7Pi3OeT5`j|dvH1r+4#vyH%zQB%-`T2Qk&NVt79-i724csSB zp4=c8_NV^#O?knYEGFiajKoaC_8eMxWcX_Z&HIe`fFENcJ34a1U`{RDm-q3}i{Do- zHZPlXC@(MP)N8sS9z;HnmYI?9c>tjt79MUdu{K_0Mhdh2C$w#ukza3;zl(afrgO)R zRl0C>bad3NJXfo7-4A7Xg55)FV37JY!Q`o#p5C*=%?TU(N>`i+jd2Ye>>IBqhXw~j zV`Ik{uzcVydMY!G<*8*g!4y$eRyG)}gr{bkUm)PJL+4;*iYQWJqy71Fcw|JgXfD;x zskvlKZmwdezaP5Bz$P;Af0a11u_5E^T)N{q*Ax^V8raa&^Q~gDJmBVi@j@mPUKq(Y zfH`MxtcitBIgJ{6@SbHx{BPbbUCXYpwzsdGwcw5IFrTbcb)mU=^Bs>C+?~Q^-MrTd z3eRL5f_l4@vXqE!k5j#5Y(^AQLF>78?HV&uT}|z8R~O@ct=v=*c@SoN?50sm`xJGA z9vS!-x&WRDiF*b9DmmU-Hx6epOvS7mYD$-&NKxJDqR zF)*j3pqb^aOc4|FSNNNRq9QRv*oytuNbge@7lE-7tL6n}s)CqR^3*OzgdSP*(Z!e* zj?h}2wQ};n=*+^#iyW=0c|KWe+J+8NG{jrx4?aFPFn&Ztr~(?F+S+p7e;K?UuBKR| z){&`N@G!Q6j*+p1spd6dTxh86_ z@QCP^ot)}fXmb5SiP=?!Dy-Z*yz)v)2Ym$oS!^g4VOmG&T~NSv<;oR)LBZbs{uF&3 zOJn2Q>(dGcG5TslL5=WBUfsdTH6!>A~2vYLC;<=;+Tq#?V7!;)R?zEyqh` zRcq9;7NLtbLpK3dQpBVUtr4bvX=x&7C) zn9TuA?z?oyQhb*Vx5mImzLWir*ttfc>%1l3e>B<;6ib<<)3e??i=jzOjO`hge&JHK zLwHYwEq?X4>7>V{NZBu7LgVgN9zJ$=7nYEaK+yxv=vNXFKecN-<9TIMBvW;F=39n` zhqvklWROslpFe*#<|c=Z*9aW$m9+HRliqlX_2InuUg1-7z_W1v9>|cMpPzFKv#Mj; z?2Z;YumIOEE+p4=ZJu55xIq5RQZLa8v#__dsas(l6b_(x-evsd`gH+MRv&oc#hS^x z3cwex)ed32eED*AZmy-Rtsai3hK97O>j^!9vOO(%&?g`vuVIiF`qNcubpbmg6LIB^ z<+D<{l^kaQ6%1?J>2ZrS!mlPxN&!BzQysS5(@s<6_k-Vxx$aMu6_9M z!I4N=lL|TTa8#eji}Q z@JVmQ#>BWv%vF{!bpHPJOJ`%WsCDn`%>80N$1Bfz<`2tg!r+b34QQOWW<%LUCu8{r 
z0g9YCEFvV$Av86!8B@+Kouj!B?Vg~)kLKg0Z%nQl8W}Z1PhQ9*eQGbz($X?KI*K~k zyN+A}V>9)B5koLgGPSDIToS{=>iguxEC$q;g9~dtg?B`3WpazbcliS_y1MtEOe?pWC<4iOzQ7YnlaBt=87~y! zke-%?V%WEC-TDqcK2~gz38Ng6CQ&P_f}WJu#^*w>(i3INukl zF+U9S^>y-8(6$Z^(x6HG8ZZ@D`$(uMsi{c?;>|**5R#6a9Y0>yNHJivySuhEf+d>Q zf*e`7i_cNHbrqgpdkjxNdZH(Ja?h(bZ*F?a6P?ZY(Quj#vXZc=JvT97S{uq?1XaUk zuIUj7IcT7OxU6QRI|H$hkd&M)8q%6uh|=;Eo^Yb}T#iOgIcf4Wc)rK0CZIa0s~Ks5ybAgYMqKNFaP``HPwSMKfG@K7sMC(8t#4HOz?6zk$L=}nQ+I`lZ4 z@KehYM-0fndGkIwIa&T$h1;R!RdjSRkT}rJPY;*s3B;bK_c{D}@y67Y8A=*>Sy)6w zBXH~E*#Hjg>({U9bj0ukQVQJ4Qj&s40f0!q?gPg9WEF)^oi{KJxK1H!SL`ZZUtdSD z=FZMwJPO|F+1aPi$wy!SyOcj0INmUf$vd&OkTXJadArSo=?=*=MXQ@^R6ocCH=v`{%uV4Pq+Yp9b0O5^` z)bjK5N9Tk2wb?(5U2KQ2sulX9&LI4H@nFgpq}S!CW3z*((`brBnDb(^k@LZbk!pb< zp3{UwRGc;NG|TlnTZUq2fKO>?P}Avk#@qP(&e>UAr5r683k!?b{GZSKb{dG}R1AJv zDk><{|5ijiU}xV9-+-S4k_tMk-6Wj`%$@r}o@e}kO!T0PQN8Y%0&u0a$Nm6u&gFoz zHa#zIq)aStp7{tTqCx)R;=-aSS~E}gg`Xdd;8>35J_`&h=YA>r4x~kutmLW`b)e8t6T72kIonVx5231qDFyz(g$WjF{=-%Td11!L z%ZJL(p&B{p(rBR4vT9cYR1D=cc=YIzbPP8R07)pRu`DLgdEbF>7SBtPLip6)fJHf* zj+XZQ)|U0z`FTVQoI>^e&mvI)0k{JL1Ed}O4^dj_cn;8(rJKKXO0cA8HkJ+#GL#-uEjz+Ykgn zZf@>e=WSh($W$sE*#J&$WFM9-B`B270mw)iFwHW}4*2q=(=85dvwV$x(hVsBP1m#d zCN8`=(DkiePr2{np{R7)>FKE*v`-x?D+2GTDv`r+I|9_5UX+oOGg@ekuzmQEaA>@2 zfL+Ijj4gY@@W?aLG8N!K+t zcmh49MeQ)Pp)u^jXbX#rGatZGN$+ENvN?5m;Us!_X4e!#(?#Vp{sfM9M9erex80TW z#2t`ee+-z8SulGp#XCW-v8m_wvP4-6`ljD-Rj_HFQ!q=J#pu{HLZMGs@)}`bVWAo) zG@`L^;@h`TV*CQusW}KNQ5Q;2{SoZ<=x$rl(3~_hzXRb9`f3VAqeB`TMnLguL5jjK z3aQ<V*kvIc-Acd zjn-+hJ%&K+%(|kE_CkLK=9f*nA)ZGfPWD40!>BPtV3>dr(iSnG+~}AHZ2N zP`uw~;kFkjjnk+i!_>ROkc%YX1sO#NwDOY^*VVN(m^aBke_o-Wpa6t9!BDXD@%HW8 zTRksn-e_q}rmb&QAPwjLek0XvhO7RucPbbg6-8>d)LE568wmfkfc-KdNdO9kgY<0e z=-32@!Q?}hd7S>0Zx8}5_lVSk z*uM4jq|gX_Y1E}gX z?d;skwwmIK-gM{(O+{7pJ%+Wtb!~|{AALa$XyHG8c&9aqzhx$t?d-gEbUHV^?BA&S zGe7AAppaCGFC`@eT9aC+>*tNl$o`!KRzg#gIQTZ5?ChoHa0TlDitxkj2fhXB3E(Od%j%;L z-6v13P-Wb)qN*sVqUA_RP9_ucboaB;g0}^SKOGD-l>D6^7Jf*yH;QW!g4Hk~fi6kHB?>i}hfBtIsHg(> 
zJ-hDW=9WBQ8iC{n1S;de)4@Ol4o+!#IkQLTOMHsXlC>eGCn;fJM5y{HgXd8G`?^n$ z|Chd^(QHdgr6-ke_danYZh(0L;^IKH(%ZM)P~1&}B+XoFR#d!QptUQj2hk|1Og6BG9j47^cS4`|V!wN-P%!oYZBGuwcVkqugD;cpX*g_V^S zm=|Cg)uePeIyo&Jj9I0FS}T)V?s4h}z6OU{vN<3zfG;Z5?gAJX7^HC}Wo6Nz0`9q1 z?YMw6!~6)mCGTnd$AJY#oA2W$Lg%Ej=LVbP?77E zRUBqx{ia2|Ptz555=7jqogaZ9f6W0I3>xLZM0pDiisX*BU$K7i0s|&E3XE216{l%+PXXa~59OvsV- z-Me=i?8?1y9enAN2Kh^UM<&n4!u9gQEWO<$8WW!AOml7%wj`xJKWA za>20RaDMi{clkv0*gQ|ZUR_&TE+8XS!IXJ-cL#fQvZ{X9YN9MzifSly_7*7mfofR{ z_eJ7MOZnaD(V%0}S|*^C79FdDcmd7|igf_s!o~CkZ!_bKwXN+R7*b%g0@4E%Up{N` z`}c3%&K~1~{KA3pAfQ2@uhVK*{Kbwr7=Sj19T=PxLL0hWC?t>0E<hTKCvLzSm*9(uiGfn`%-NYgJ3HIBXf!~S8!#YMnSg*j0X_i#;3CA(*KgmR z=0>S_p z$OLUXW`^_reV_jRx2aykBj)Dj-KIt0C7+$00iCa_uSbU~fKDq|MSy70*X>J_u{asI z3eNOHw}bTqA43JE-GD@foJV-CzYTqFI-5;*VbHm`SzN7D$5IOzV2!g$sRvOo0*xhM@~|KNo+5u89PrO&&I_3(j+GF2d3j+H zP)mbrBNU#ehHW!<9S5&YtXBKDlbwZap+7^Re{c{&Bu8^=ISNjOl!Hr&9wgqw!;{Z_ zeK`$c9IVw<-$;N_!%?+$m6rDBomioSy2A+N%kI=*Gka+{YR~zbz}S|>bi5V+u)}?M zYF~$T1m_NlF&GjTj$FK0n3%U9SzvF^4a47Q>*O}V=6UC}Zi?Tcby^DzL>I^+)y}c8*pI>3pMJ4MW9AM!bu(kPSI->mA{)46~~Pno=*}*J?5_y zB?X+c#ac1vs1pek8Ai|uS`)e{;Qx@%IBFiVQfD*bCjY!aR-c#I?l@$|8+&|Mfpl~h zlH};7UlhMhNa&rD!% zM<{`d8XL}fzV*ygE!UM)gk93`s4lbyaLys*a)W|55MC@F$h4`cY4|Z94QLc|b0K1j zAOPw2|9T_or4LH44YFNTl4v=1tFNBU0cul@g9G%Oh)qf_*K@qzT_?e4{$GL(U)NmR zpm{VaJQYAPdA6(Ve0-RrVK0QT=1-}ef3#5!!IUB=Cx3Y7hUABrEjf2lPdg8-l&5|> zl1+Uyd}Mg|A>>#fSY+RERok1R`smMZCR@bczpp@mnb_p4SX26@a-#hDo#v?$j)mF$C<(zC=+t|GO`;~TYbszx7LY`3< zHNt``S{j6yTcCKu7@e0SN@BsPKb-11(muNyA!#shYjFg$r46S^6Q}4GxFn(Al}uf1 zE5+u^Q1EW;Ahl-}NVR7>YA!cEBxbEntE};Tf!|ncKH30${f?mUNz_f5qjY~eOWzz9m6Y5 zmyQ3TgeZ!c?C)=A3ZlsP^{dpd`Bj#Z9Wc`bryJcgo<}p4bNINlmeQ9;r&AJAQs_Ub zvxEP9xC+6pv3RGI%Uc49p4WrK_4cD|j|lr1h`TNqHqmx{*E!9n0t*KG;DdU+a84Hy$_>cw!_ zhtt*v)vf3m7}6#dl$0yG4n{eIsLv9JuUR2fTT2va3e_ozi0YwNj7f8_vg$)3j5Les zSO9#k#F9ic1qFqQdT|st$zaNe<9DR=o}brOV%6@9?1&xn7>Y)g9=!I z{=6DvK9;A~{7P2Vl1t>?y$?OfFH2&_O|;KlBg3m~R_c(dPqvIwbBHWy(J*mNnkQ@S!xs%YSC^_fn 
z*Vs8z;!p^<=;}1Ji0ky`<|IP}XI$rvD$jFQaOwO50xEvX{edBP1d8t{w}+#x?JU6E z=Ela<4o;U(HzwKK!wm~&u8XR^C@A~vbbV>Z@UQEK^`tD&s5Q&xf`yJi05Rc5n8d*i z0X63u28QOqBj+IT`(Rt^KnBU;lb`7Vx@>|+P8)|vF81x%~90IO;#z{#@qn1WeJV+8@ z$>>MO>te_D12$!kLR>zE$WO^n7y|oV<}I29+zQY%l0(~bAoz`%8o5*y`vSb?)109a zuMZ=`55da@7M>JP=5%YM+%mG1&>k(}JGrIA|gaU!^GZf=+aWVQ_6d&-MGeG?B2Cxl`v$2Cs& zHUiYLlv-hOp`s6#!H5xauxvgB?%AIZ+_BcQh=n4Dd=KN7prD1z3*@CrrL#>1Sr9-j z2M1JmG%O(@82Imf@j#ue>AKR=Qa}lU^y>Bw4m`2E7N&i1sOV6c-BN;6S+pnzSN8(x zTb?L+qwJ*Y)X+2%5xhk~HCA!*j{Q3wLELJ4Dy)WYnJFmW${JfxTIbq`XZ6k+&wcP2 zB{QOM(&*nTHFL*t@)tuIfhQG3s^xs8U?fx8%z~F1!bh>O>gnTwMbxJde)t9+2E;Y& zdKQ3PqD&cxLVQC=`?D{Hb&;^-ybQd9!a3b5_N6QKRBHdR^pYSdgmmA~n#;~-Mt)Du z08x*V2YrgQpV!02IrSe@c#g}s*LX^fTt>1LsUb%UjA{-bh1l0uAT#p!U40J1vjGU} z`fzc)h}eZcxbe!!$=(}kaG;jIfOQV>Wt3xyP|CW;#T~vVcRz+47d#<-BO_2z$2T9Dgn(E8gV}Jl;dA^gZ(Q1JIXi0i z@6m^yv&DIOIC!|pr_p|&-pt&jQmd}cruL}XcXF4I!0Dho5(6XN zsm`FawN(Y)GaBSgA*nJAgcu+KN=R{bh87wODmJ8Ve+{fUo+=s`8it04_ZyykrG4ff z99+$Qi4F!5nXog8Eak;in+>sp@AWw_(7YfJ^pDh3jIfsHkh%iB2fPE2uTT+Th@(NG zT4S#?v14E$0utgxq@+%hGe3O^HDBr}B;Tqn_6)hbxU?Iao;W+U%vy!8Q5gEiO;$1{d6bRKBjR5YR#4hOG={#RjS}HOR~sa6~>} zlfEd?$Ek4KyvI-H+uq(cRW+4aYHheU9!FV@g~h^VHha3a5aM;rwVjt9EBeyccc-!U zc__g;W3kbV(}PWe+S>P|H+0;5KW(qB<;<l2{8zWyz%M~5 zCjRVq&`L0!>gwvQVPbMZ>Kw??-}ZJoz+ym=@w1Zi@_Lwa+d4ayOuZnF+HK5@68!+r zKY3Coz<19Se&|<5MxD+>9V4UkYkhD8hSp0V)y(w7%xrozOPvq2kIWNvh;QzKA)vz< z4L|*}tW1@uM1_@9TFZeJOtd`+7{gN`q^1rAQzw?2){;}~YzrL(i9b*wp!?ykPs!QY zan*QjC~hVQfAsgKK3i&V8|SsX4H}Sp^-+Q2&>SSpF0Y_Nl%AGdG$Fj+FC5Kz>nm+5 z;VW&gW8x68lRKyK_=Oa;2iSP`@9$oN(il#7M1}92!&l{cxHx%=Tsq3TVM!)R2^NFE z2;?1&;$TbQyy)xee@#r314Rx}b)=d@BkvPO%$xK%<4YF% zK-`D)eQ#gi8^i#F5R!U~!-ukvR1nLyw|}0cq-SL02l8=mzglw71DtYh@*u_T4Ec=k zAN{&{-Q(k#+%k}J1Mg5xO*RAp{KVNXPKJmfOk!keaEKu+W5WAj{KtiS(WVvH0^p1? z2n$n!ZMYJ~&>*uJMj-Z7Te&NRDXeE`t!Hf?UWRC|NQv_nEFX8=wC;$mEZgM>6j=dqu*gfyEuB=0~mF;F%lv<1r^VY?HR4`?s(nXO|HC`X1wB0}RU0#&n zZu+eXx^+7(Pd}+vtwycRY#|suJGyQkT zNHg@w@wqS)gY8NyDyr4#O4~rJkny(gV0CM2@3k#y;ZhOb

$n^^hDdkV@nebe>V+U+!Boes}tF#4JHiLvh}O3;dj{)UqKt9M^Q4qPBJygr`RLkPz=)dXGh&yP7jLJnXNPqx|U>$SU&rQhcDcVzd}x zwPo@7WL?>Hxg_emR+#m~((LtE%qs-XOeEM`(5w0R`5$;eIFVmSNPVF4dxs&)&V@ZM zgftg~Rwl|LP&{gVozq96m-4q-Qpyp zzYsHUHiz^ws`DcKYGXK@q9jyowe%Cqv*l-DvaqNVDm%V7#$!hI$d(T1sM2m4rRekL zuYM_dS6CrM6Fe>iptz7;4{3brY8!Bza!)F(#wJ46EbgR?j;g^NfzThA8JZ8VUcfK_ z7{RJu9B5T@UMz)K3oLwiSY@;7Pz$mEUtn)X!~LI1sir(V*tpNa(qd6`aRcu>dfMp@1>v%= z5LM=OVn2Jjuh-WEHz)t&*WA}huHthuGd7dRybw~aRx8hav%wto=BX)Hgv{}lx)sJ< z2F@>r$={A~Nxv4Fk#ZSznL2q~xwyEoK9pF6b%37hIdNFIk@<+m1bAb_o`Gv)sQ#mv* zLnvu^c!(o{@Qz(vAi%eaSqm7j^0Ll7)NwNfiBYbfLM7HTXSc-ZIL;sHxG_5va`}7W znECf_Yyw=p2DbWTVv4p;4r>osSy#6_HROtF&^%^OY^Tr(P&IBz!58f9?Y-MMCfJ~5 zX7)2OGLqdTyGZm-WJBP;&q-R_efvc$NL)m8bq~xYFzE5!3Ax zb9>z;!Lq^2g8aUt=LJZ`{8!qJgcl_>+l@w2DtWH-V_}XxEX*-KHYct)AAcl$6x(rj zo_L8i4cyqMd%8|K){~Dz^iY_Wnb_)Lj*T-U5%o7qL1LGm!Dh}pauvw7%!a}dL-A_# ztrD-wMz2Y?L{OYI6CTCGPC@1IQVrUJH6*KA7719R6&0s1mWuN7>R{CpGN99-Q2-_% zs*uPlqJQ}CYi;dgvRjh>RuU>SVHTFbEhso@y55=%!HG}5em#Kgq%`RQ6;^jhkqDnG zC);9R)3UcAwLCwAV)rdNOh(1{PRXR;hlDC!YX=8|%j$D*w0aRvRtFmsShe2o(H}6e zr~UZx^=mgFY$RbEV^9v)Bzm5ohQ)+_Xz71ib8+t7*OCcx85*4H^|6W&XqsOmq9Mc1 z^X<*Iv&D>bm9kli{}*;W??a-~xU*1s(zJ#KoLXun@>X93>Y)9BMa8;jpCE#=y?cn_ z*D^sy{||mGxIbe8tQa2g{NlWITkbmmx0(+*e8H6L=m>g`j@2r`^bZ0s-vHbPA!{`n zG<9`|sty{QHVE?bTeq>5e2s`m)lVu`VFlG5zJ7rce=wf*Xbxa`iTbnX!!6Z%I8Vm_d{qB7@il$6A)suLI!U0Z(>~c=e>B39lExf zi9istGNN?f}Wyg@B-66FmQe6{0k^du>F*~N}~iDT96ThJZ#M> zPC58(q7jmfXBd|l)Ae#_Uu@)@~BMGNpwTHTFmE;_>Gc48S5ujzF95bSo}C$JkE z&a0KHXMXYV<%2d67!YW4O(CdZ%TIMD5$7v1UI5pliaJXPkqK*%zDru}??D@UbU|8^ zJ39Fj9cz0fv}1i~Z34W1gNPVV`eAmRq$FN<{W?*RYN(p~|V#~=pOsjZoMi0rp2 z!YbE2KLRS<<00*WcjqVg9DT$6{?%GmJZv0%QB%U_Z~Y=K=L;Igc}$Xas-NU6Rb8I> zHGA!|q#6o}umu6zKwF=Tm45rS|HV*jVw97_sb`hJH5}(q?@(J1?Qs2UIf5b{Y-s|8h%& z`Ng)!iM+b9l>7QhS{E>E@YzD$4c(_b+zz$4y*F->Hx1GhYwZg1^~Dn(7UU%ltN8znn^!VQAV*s*2;`tgIYc@c71QP((wb%fh{If{M3eyUvvmSE# zF|5JC79^?k__Zxm01*ZnZ-<-M#h!xe%R{<*Nm!Qx+tejNlrJC@n87l`DznLv-a#B*X2eOJ%)ZQPORMOgG-sQomz)zn(u{&92446Xb6v%y)4QksT 
zn8O?J;i1Y@WcpU@AqQsPdIEXBr!dZ74Xm{gjC z_&)Y@_v1$n+f&NZK6x-MRTt=|hB%2!Wwj?)||9BA_d56nBK{vM?ZKznXUgA14j9?uJJr z&L>tAuxJe28?}E6p)P>1y$hXeuxAXm{r2`Y+1UAqo`VJOutCXxLBw~+ zW8qWgTJEIDXOwOfk7YYgKu~uMY8N)$-b9m-k)hJ4>=jdyvgvxAaRTl~Gk%hUnL}&G zU0xUU$swYWA=Dz-#<8$M+X46_^W=PUswNSn>sc?ubtwoq-#(o5qA9sS^9dA1(BhpQ zpk2%*UfTD#OhR-MXRb*>O=&O<^Jp@<;3Bc=6hGv*G=9{?+Wcl-)mP*d_#Si zu^;pj8$7AaHq#p#I6VA~SXOg(P=3+lG?aa+ru615VcxwD*u5#&unEhmMECWT@8iub2OG^eK_gj+gq#X@*-dE(x$)M&ykWEx-ooAnbpOrmk07umxY}O% zVSRadYPGa8T+086A^o9i`yI!$4SnmKX@%t12`(qd_RFzkBjF~mntfiraeIaI__o#G z%$!_YBPVSf?FmApu>H+D1j$1KuwYPWUfAN+3Qx>hB-sQq5D9ef69El7Dar}L@s=|J z{Li+CM=gAmxq30W;RtK)twxC+i|u=v53YmnuHkjQZ=|X87otn3Jrzg+_xAR_R*V4u zQp@YyA-n284H(5qY{h#>e{li%cSKHZ#4TC#gZ$?QL&kq9#R)kH1pDWJ@#y*a5M^!g zAjYVRkFDL@nBr8y^k>FV4WnLSqPRIvIpQe7E}4??_ETh4b5fL+z3Rb1td$KN7m?q@ zpzg@3BI5gH$;;@=?t05J?`ggyb$8yz5zf=Of_{tW$w#<(OX%%XVMj1L+y5X zx@`C##O5vtkrPO%SG42e;fWw(ml+e?|G@jwXmyv47mR={k+S}gjT@luN=I{II5|00 zA1_k;SZSVu#gmN8Ono?oH6}GET@(vTK|^CbP8oJhm6Rfd-@%H}@TO)3zw`9niz^op z!6O5Whl>I2Y-=t=aF2Y2k@HyFV?ohQWY^k|-I@$vSVCS% z1+vsHK~Pp%%L@;ZSBk)87D}cH zT{SvXQ-lJqbwM$Z=tPXN>wD8B(V$tw=SZ~23k?atr{m)%8HEwBh6P}hGY(E}POa&x8(6DWxU*2~WJ>OF>>dk_23DmhEt7K9Vgj>G@QYxwq zPB}$IxcWR5)*T-y?4B9qSQ?3x(7vHv0CHkbpQ`O1qmW-+OA8e);rY4C!v4^3;X8D6 zIfZJrx$Y;M)pPjSupJ>o!MBrMu~by9rTPNH!}ZXxoj&=Mw50ApYsCB#Y2m4}U;!Mx zBmbL6r&zg$`ve-WiQ)UvuMJBYAm3NCqjr!EHCHFD!m9H0#>T{^2R|h~YApfw2{oIP zFhGsBTa<I1(8wuo3Y%lnTf;dwfPcNzUi4-*t}QzvWKQ-(mJgNrt7aZ-wT z#%VDQ^xlR?^+s`kY)_Cf*wLcEx(642&Dp_YoB6-^3s0Okf`;TODy{6T1}(-POTV=b zaL`Li8XLD<ElQ-kgb3e)FbowlM&;Ii#_AGcmZw*!)d; zX;RXk)ChHK+EKmfAMZ6dS7D#x7B(KPnX-;4_nofa2S-6enq6NOwBrVBINT3ZF&Xxj zUKuj1@mbJ4;tlpEsxn!;+@}Si^kk`=KxFG~o2dHg+2tA5<@LvlKkJ3}YJFg(V+%qO zs4vgXGt%SMMBuAmzgt-@U8DV|oxfVBm&mKCu4oL7)?cfbn9NqPhf;fLKmKCu>bM%O zbiC|PcKQl*5R^PnSt%Sv1AAXgs80~elziau349xr))dLA`Uik58hAU?%`{$gTwJIR zN=eLEC|L9x&^M&g^Ag8&a4!4vxfM%MXAW!$(-8Yz#lKmAnq-P=jGX zSX^&^s_SX-28d!;WcH^^tj1(lzrKY{W7xKks2TPgw?kIBZy(#9G&G=fb0sledR(HT 
zwO7r!uJfuZyuR*UZB7=#;e5+mV5q}^p;K}>@o(JPBBZj(c-{G7D0CCiFDz;dg;zXz zb9+(Ci%MW*=o=4?2-x~xsvPL(hP?w9JGr^ z2CKIx&ko1NUHdxV!+pLrN{KFh{MeQJoB8rI$Ls1=Z_*2h(xB&>Ey-%ZY8UFSBfP4l zZEPMD{U6HSJD%(QeII^nAY^5=C`v;K7 z+1aCP?&H*~?<=KUJa=lML(<2;Vzw9i@{{OO`wHQmzOG7}=R z5h`~!kZ2DKyKWVA9X}jG!%U;u>oNa@F=WAh_T%#n4to0D?}G)-?H{hiHUS*1 zWMxskdesNa(_aH$9z@>Ula`QxrHe*+zfF%g2k=nDLYSGDud(4#ufJX<<*`If%IUG$owGh8Ql)Rbb=vIOyEbXTx(OmilDIF1f8IgO zBD}Tg(58X0;cxF!vg_U*`UZWFb8l4f?Aq7hi<0k>8hrN4IZu_-X%z<6@p-A~>Ry5z z;7Rn;kPyeJiyLryI$_o_JzK+{^M1-bWeF-qtQzQiN}I?h{!=?gTvTt?hI6~sM=C0mlNl_QnbasTGFzBHVq zU;GAI{=4%d_mtvea8yDDn%69N6RffB^LHN5Z!%c=XnbS5W?!78{>}ObuSV0=OM^q= z(VwE;g+4dBz&Q*rg<*pTMIQI;mj?7C52+?;7{&FRreh2f!_-nTbj+>j( zoI6___G(%v1=#Mfb_w4lDY560ktb|4XuUW85$>r5GC{af5lUOMCP2`x8ykKLy0~%8 z%RZcQ+fR0#`gMLV7OMg2t;g9X(W2ER5eZQK4%URkw2q~BP9(P^($LThpWK{z7TEqe+bec)OSWaF!tba1GU-N|FCsMG;6EaN6iB< ze0yty%0uUsFNmAR-2qk!K}~e@Cf`b%pE4B4J0N_54aP(0E~p7RvMnXURx;?L<5p@*z{CVOl`zBuc7Z5^z8MMZVLjQ zB@$Q~Pw(ve^>akqwiiMKvU*--;&19j3Y} z)B4JRMOxbGBHKjO9xCl74#PGy-6C7-?}74kLr4(xEv&))tdIV^-g;)_^xz@G;mmYl z8M!EAlaIkl(Qla2Rb>K?CU@;`XM4I41t=`h;Q4eb_XC|c040E(PpLtPsc>}DXhYUB z-Svg($>NQ`{MTA>`nLAI#x8W@!};yD?~@V?SL%dWBxfGj(pY7j9j+h#ZI*KVb3@c> z?%nMkZj1Xll@sp$Ow{DJD4q}*&g{N^=*R~dLDfc`Id6Th&erRSSWXrrE76c@C zYy8yMa15BdLfu>kpeZ>dwElz#gn{^p3n|UFx53v5tttbUJfH_0fjfrh8bN+pobCfY zQ1g&DI1u1IKpQ?k(HR1fb533WT+#rFM0;$k)5xfP`eg_XtL){=j8K%{glQds715ge znQ`ckA^t1FHSCNxjwsLBSsPAHE`XJihjx0dpndQ0df)+v+s6k0Uo&b4Jv@eX4sT2q zt~m&01c=37q1b-2vrhQYyVz%;_I|J4o4%0=^3R7C5Y{g(`e1;x39A(M9%zMIVMH*xBdkzq^?$1<7OQ@te~V zIX%xJA}*oL`Xrh_-_cHGU|@hX+YVSXsLFh38T;e6^Y|mY+vj0nHQ?OBxF$X+i6*1n zFChWFKY9?wfj|RG34*s< z*1jygh`ECu?HOnli{kSR*EFtOdmMcjLbB-}fdS2lV)BtojIDg}kb9yS_Pr5!gv z&6R4g_@a&EpS6vb%0li!B>n7KvnDEILEry`dn_rZl5A2>mDTF>UB zcZbdjRRJ(PubL+hS;K;iplfB7godmzCNa(PyEL%Pq{(@BB$nCs2fM9wXP^I8U}W zQKZI89uvJgf4Il>4|65N6uSk(+4oD;vHi!#agzabi081N2Kn7&ZF@N&?mQ$%*?1&i zuattlJ@ZfQCb5-I>P!#+YG~axEZszYfcribxD0h#m1uCDh40V4zE&Hln;APj$+kTf 
z7SYQ6^|7Ws?@*CE`$KJug5anxM-~^Cv#-7szd?xu?e*{5Ng89fII>f8i|dT@KSn=8 zi2`VPY2y9nei+gsb67VwYv#DD&AY9f zt8Y7tE@iCAcz;!xW!cS~jHzC-@5ld&Z4|9UE`+tzw^g@U_k)o5D=hU>TxRT#B~toQ zPo2~#x_ z$IRTX*4~d__lC%Ueiuu}@Ab0sGPY;oHs8xVr_P;Q&60075P;C9akAeRSs;W+5b_Z& zlh6YXZvID}=5)@)H5PaHS!h8BdR$*$z?U21<`W%tAOBla4NZ6l z?rD#@)0|o~vwSOL!GE$g-+#By<0lsB7AUR4I%^AWl5==0Kei1|`*Ne~xX4SN&*vnS z>>d)>|C!++i9}TF8A3NkRJbpNUi|$uY76<+i05x>O+^c4(ogC+O7Xn(ux_btlzg-> zogPD@c;0xGS<}vR<=HPoc9(HMKpKz!!w=2vUg^y@-txr z&ljf|aoyrUnZyPa{m6Ob$mo5iKq7gz zO^NJ`uCcij$%afm^jdZ9N`Qn@|3nTE?|`VCE~x9=Ot%({l9OBM^?;Df;ln-)(yLHB zT^(LU1~1f~FiKN!Po);lgi8MmC1HB+>KK z@F4Zc_;~Zr?VoGKj*H7Rsp!l~n}oitS8$V)vd^;Tn}fSs-tZ?EkEZo+_V$IVzY~e; z_mB<}yo+k}{LCdfS`o|6-;Ar9<|n&%gMD`M+pBxt-e-N?M6Mny+=$!|t)?I>nBX|X zO51!-H|x6k?sV~8=^r%4e~)_jd`Nhrn^mxS<(7U-ZbE{nn)h5J>h7}?e>n_5s~G>8 z<@@XzBj1An`HgF<`!#i?x{dX2r0Ugml-0;=6w0mamNeMce4?2~X%|U2YZ?7VplMT? zoyncK_s}XJTt*37JWj9d@;-y+1a-;3ZAQrL^`03L9CDM<=nMDlW3RI&@@&Y(#IUP0 z9M-yT$Kbb65OgYMS|jJk9m%g934v(|{+>@XthrQJ>kh{4e3fY)()Kxh+(ffG@JHKM zRi>>GhWozQ(pXQ*b=oIsWCeK*CC>}bm$&lfHrtOU_{)x|{+`IP;ym1XRJ?3nknIG! 
z`D8)G>$Ay4dkSt>zyEXK)b2+GEs3>%cJHzOA#qz(g;cqu#lCCDU4PXS#{CtyZVB&V zk}vJEGRV^6_aQmK2ZCm=c&0pOqT^4Oz;)}Hii-Kc=5qey?&G51YA*J!ZDi(00{6Up z`3jPM$OKW@u z2-4KqQi5HT??MVU5&O()yfN^yx7g^1lR^1clY{)dB~|vPJwayx+tX}@MbcS)3ZSU3EYp$xUQ}N4N9yr%J=U1&|mfGuI zwvwA(z3Gv&ve}$H%TV_gTIQIl9qOtxKNhXDKOaA{>z8)-O}XabV|>n3C*OUDi>*{u z&J=uHAg5nepZsUb2^kIEp|OuA8?K*sy^XoP)joT8#vr9FuN?zr8PS^jjq zpo%enl->zLP4idHM|bUgDUx%G3S4yIieJMmn@{?`;ceExY|L}ZHjGi5&$z{PlG?*} zOJLl<0p@JZNoh$>zh!^F##-tA^aRoOwrWQ46MP-C<1d7Vgu3hw20e1xld#v(pOwtw zy17M0>^#SVk(k`9YMHb|5u4p~(-+nw#SGRTGQ=fnb#vFbiL&35zM=hh#(7DxtGQpk zu*>zaKvfMh=L6B0F^k+c121kALi6t9cP6pfVaaH6#}3<$3cdeHj&)b^=l*tTAJDoB z)RXOXcv(y4-uUjnSV09BJMV}9vJ`tA`UMt3XkSTO&9gD~6)b^K+jw_D2=d_XJd4Elz8s?dV6yH-0g zTOR)={3r8tHcsOu;GJI{Z`^J+EQw9hxa6 zi2uv;qYz9%GiIYr7=3helPuJBr+qK!iP11rlkCj$Z`boWKUQzsD8m?Sd34QsCCX`b zH%cD=FTA9%+s3xFtS_!nlY$w|*zNxwmEA6AZsGXeor34<`a|!kGg(D68AZDyqAevz z3hz|21Wqn8Ea%4?XikhUZ`QIirOGXA0Nx{3Yuf)y^ax9T5m zmNMu$hA-B|G(tgcLyvzxK>`I7shuK;uh`TE70RM#~ZA! zPP})(Cd+@vs{K#?{EVL1(tMLL{70VFhzLm&+_aH6GOPR5=O(FLb`*VIXYu(txj9$7 zw|Kg7>)Ntc)kYv9rfL- z_^X>Ib*f4wg9eP6%c)6SHk&hwC8*>w2S3|GQS&^M^|2d-KqQEwiRg}N$kCgtv_XD;J6XtQ5do2 z9j0{)CKmxYIgVi~YwO0G$ zn4`kKeZoT4lb7H&W^eNxL z7=U~e1rE;|_+O%hspm0P+ccS)M{_x&kL%I#jmEPG=MtKiB9-uF5<6j{%B>WBxZmqP zl`E0rE4C=`Xe2Thbl=NLpSdqOlw@>m;Ks0>DCzIQ!fd=Q?>qqJdf_E*JMq1DBT~9of@Vr_ngHIyzw7Q@FMqUyiRp}*n(+0$KKND}WoTJ9 zjlxyKYNwZ$zCP5ZaWiKW(;@06_agIG@G9?n+h|CbmV|W&=%rRSH8q7AcOZp8iaz)d zjsv*vI*rTf>QKJogTV9QTlC4j_@yT(eW<~&r(b=akW|)LLtUhYDgfL-yj2wN<+rnb zq0xxOg+feYXhRL$JOLq;_mvn3ps;^jYG5^446GFG-f@xLcfH7fBoAjh6hsjH-*lJU zg8s?A(g!ts!$bm-(|P0wU!PN>bwXl7npQH+Zs|Sz{He5Syf^-Bjs0XqV%pO<@Zm~0 znJN4$c#2m7p3K^~x~-i%chSJg=Hu~_+OJi8?dl*_N6Le{X9$H`I4A z+hXlu!q=m+<+A6MEvL6?JTItC{RHiVo?+)U&*s~M)hEnAR6V20 z#L2}KDsKK7zq`JMC~a(PZI*^QEkBFj_3K{w`|NqdC42krT(#x~=R7(DD zkX{q4ZEd;44u<0IXKPz~32Ku+YzvL{CtmBg82J{l(@$4pEG&l zp=qrTnCSeFl7g#Fo9(o`^*%>n^%(=r-et39PjDlo(Q0DWcRdZa^3CM52SoT%%E<&i zFB;(VAZ8%+xUcX62($`~KTHMx>XpfYKoG_pCJ2Y2dYKKvTFs$3T8(L|clkX4QV@B+ 
zg{#ek^v$qZ!e-NXs%W7NM$qULKo|lua>AN0clIj}{PU+&cG9b>LrhAIcHXI&#P2B;luwcO?3iH%t?)VlB)hFcaH2u*c!Lg;@0 zRd4AlHUZO%Q1Kw$6!fO!hyy_xG$Xepd^QPziAS;;|!C{;puK2_*wwyJTO z_MV!|9aAD?DN$Y&ON%jqS2ONz7YSqb`=a}rsLP#;nQev zV838cHLE1y`@r*Q@&2G7IJ>Zi-N`SHf637V3L8+Y@LIPo9GSe5H3iE{Sgx|baO8$nA+PBXtM zUFTKD1`i{pD2Jt7Q_30`AJaH!ZQ)u)TlC4>n*#ov6SSVbPo6jtmY08;HMNf^!g3C; z6d2{VV`hwRtftInlQsf95^uCRs?qU3zu8(VPS9aFZ>Fevi(a-dG+q*A-U99YVV~nWl z2>nf4DTNq`{`iZ1*)l&Lp>w-&_YX6ONVl@@{FIfG+bqR(*QT}58M|gZV^OYUBQu~? z-;Fu3`AI^;e*}}hh{L2l2B~e7Gzclbb}+uWJ1D%mx+XC=#rf}m@!?}6Y5o&Hwhzrl zz*xma|37|@CN;NQLNCT?j2+9a>a((H)pbnA2$80g9FGlmi=sao(u=*DVOY>QX>^OH zGai=)&)sD563ER>?Zci|zsnl#3cbi3LduQlFC!YOX!11u@>Vw^b3q@>62HJZK$Wub z4?c6*n|5${x8++%is&5akO5FmHg3kc1rTji6;N4I69(J}hf#kjn=*8;l?p!e9a%U@PrzRB&-!4nUGZbKG_wO}~=;2`A9F`qp{pbkXtqQd8c&n$5BYcFjf^?Hzx1MY_&@+3D4D6V1`WrJ*pKAW$&Y{gyldgA=?W5X~lq zdJNM=Jw_sw;nFL)GM-^V;~vHfTMbF99hj(6$a8b`B0@_%?=BzZAEl z{2=EoI^69tRdlBFVt_EdV7izeH;bHW-A)Nu@LI{5_L80MCtrNlwVSAZy6dg&xJ7^|1r?fhO zk*K}phsRQlK4CgZsBf;OdBO)Z#B&K2H;4A`?@OwCj`kRQdw7SYdWwq`U429>x;~>$ z)ETO*sF;pN&j9TK*ezCl875b zyX-@GwU(Y9^xFY7g}W^ag9yn--6+7cl-yrbA@&fCS+$T^&ABUOxvl)=o7pc8qfkY~ z%$%H)gbO5LWZNM8rtN0N48rZ^OT19nPL$sff!dT;Gm8G#eOsGPgn@?u3SVlQ@uYu? 
ztFb#g$Nf@50(2q0G!D%2Q6LC{vL@P!F$yS{7LxSikB{R4$)8NSQg9#;9tHBgLm!lz zJrXgBD8%A630BS$Qylx$)wS))|IW=BLB!8D_w4y|-Kq>Myvuree420Z-7mtz##W1g zbHTxjb$$i(xFNg=6WD1b2du$xbolY1HwHKXc&p%S@_?R^l;efx3_5!qNj;_!r0IPj zAJ3v60riX0c6Sf6Qy~3Md`yl$4CFogYN}X_Nk$#jECwf`aq_;VFr{JBxRoffLfg>z z=G)qmltenZq-va{HF)*~E+;rNbW|wf<;x*PCVijR)fD_!)800x7uWWCaR8=3I}f+G+OEEJTeJteSw978pj-ZhIC$FCchTHa6oyQL)lrPFQc zlW23BQAC>6W<621|D7Iw_ftoQ5~cObZHTIo{``vOoSmCR3R_{Au48rYMe}X@FAEKK zaPO>@Js+H1r?~5$gK+t&BAW)KU}9G??p!u=IZaetDRV@CZcg2Ny;6?^zfno=$C* zP;@8MQ;ccU`|EgNE8jT*ZvV%Ok4QIT)82Z52aM>2`99qH(mqJ1>7^F$`@{++n)db% zV(1w<(OTPg@t=thlWw!fX+mm?ECm|pf5Oa&OZ9K0MtAHW#;$R5aL|x@-H1Rev$iHL zxtY;Ecr&4O?6JT9!NZ3s{T2FGtsBFS5Wi0@Mk5HHl+kw3}w*jTTb@p2r!XfpA za&_t>nHAXR@IKqbyosEP)L)S>t zp+}El7`yg5QG2h}_vY}9-NQFvBx@P;bVk-O`;V8p`av1~z|`X9Cy*cnJ&*9}wy3_D zL68kb|IAa;-kg7>7%Jt+2VTNcDl#kyzE7k@fCnIyO2mh4jwV!x;Jxh8)IyBL73I)) zZr-#Bvyk3l_>*UN_?|tq6bM@ohl+AA68t%Mw7%0;!eJdM;h3UhEo%plwNMyNOs`8! zeYEV6KFV`6oUT+!L;H%Nt~oCfu`ausS}b#iQPN=viGo^V5UY)$lcU)qIvu^Nz2351 z`XB1SbjxnbG`CI%0AC+04{pdA%Wp;A@q#HtU?o2feEzphqXV31Rann#DM6O&;QV9{ z0|VJ9*x8|C+DQs|`SMw0WDjSem7{?G8w_Y{$xFace-Rwaz{FGme=k*aW_W1hg@SJ< z6sH0AM#7t63bhaDg@##LV9V3&5%$qjEkbTYW>7zAhY%@h)k}hVi_d3DzM}cdNHhw@ zSHyLx1xi2~ISuN)e0y+C%pb2cSGT3fFv~^f?z`}?fFQi0veNp?NTZsDnt@{*dO3sB zQRUDyK`$a_ph7>X;J94*_qi`D{34%CKRFI2B zHoJm?ibU@YP?Fsvd*JO2F-<*j(`bT%RCqJjZQNI^B4sDt1CNQpmmpnIYtp#EE4jvaAh0utgsHUcF&%MqScG5f=v4KM~m76pA zkelK1IZab+Z&qcJ>+NliLF#%tIy(EI-y5A974@vngO`NZXdpkR7SmNz8`v|fjbKRQ zSzcDA!_JK-vZMhkMlh3*37{=10Msv~pzkgyC&hQ#%TgjFc1l(J=fFV(czi{_K8%ioG2s|GI7S7s*lH{mKX zeo?VMky-zluoV(Bzx^V70Uy6#20NB`>E3iEaB_uD>q}qEqo{ZYG1@<(lg`iyN9(^c zGy8>wf4ozehUF5*v#_4Ac|puf7-`Rlx7AWthd|*7f>3rgi|N;tJuEB=*4F7ndoACu z|KUTd3DPAxk*UHJJ`mh%6M}ip_e2Dg_w|khan1Yt8~+$l6w`C+3LX8F_Vy_;$Z#hq z(qeV$3l`S3Vs|OLALpE0%?JzEv}jcyzuyl*qIw_uN>krGRJ|uPI_lFKa47F@+8ioy zixHgu9UD%|w~%cF2QxG6mErqW`YSo=ksro7%A)jKS(u{p#0iXakIyI`MjgMO2_zPP zl#c+6B*-)F02knAu0w~sk+Z6)6SkvJZ(W$?c`%|yW~Iu&9Lx+3Mwh>WU4Gol9-#W5 
zfy1l+EYv^;D=Xi>XH(v_d|BnGvc0-`h^zj@c26?QXn0&{I4a^Xcf6S{TPqny$HUe8 zn?S$AKj@X?@b!YlLrrIm^OU3Lp0-#K0~3XFGoQqR$QQ_ z{tm@5qMV7M^>KcFOB!-HTxL9Nk5~lm7j>4m#X^oHJBK7jAB2Rn*ZNIP(kNZJZeSzy zL?J4=@W|#&oU+z@;URntSgk;hB;u9%SfdV8f%(Qq27fy1oV!ncUQ|~{yxc}Z1J)`G zS_sc*C|AHe6y;#UMcTD57a!WFZB5Y~J9h6^UtcMl>%x;eanp)wYD_WwdoQKx1yvk6 zFkQN`VSV28{b4E~QSzz;ziJ@XJ36R{v)~5H2-Il9)6f5E!CwY~gKNHBEZwDu^N&Z` zpTn)0)z;0;cg4v-WgpqS7}Lya9}1CW#(8RBcSSC&Dul zfnOnpU@T^8CC@3C=5)3fd&nSjn{XgY1D^V&(DKv$QKzc1!PL>`0>sLA>a-M9nA69sE$dc%~| z;ih_NE}4@d@T=~UlBA@ipqdPe%n7&&T zK$nDLteDGu_#TpSxbS08LlEAGBX75Kc)RToJ=319gT)-L$5I`U(itPo?Q)%~?VdgU zHsR=KLDLc5BT;Z3&ouAcMmu`Cr>%|UbV^7L>-H9f?A2N7Mi>hnOD5n{zIhUSFZ1O| zUF?V~GE-zaE|&9$nr>&AtaX4n45h#^7T=76S?!)=o9oOJZ=2mScd1i!$=M;R(*~pu z?uU_zZ#MDs^Mgm-fDuAqJ*9V!BVs;nD3-z3IDTCinu_Au`Bpj0xQ2yW`Arw^W^wQG z)60|HIOxTOgM(i%GO<+v*=+OZ^*o*`p8-poH%`B{5x)75zZDF!5%a}Z&y#fEkc8*MNm9_P@ z<0*Mn41w)+-n)Tuuqb?OLQOK-wGK$YOThliKr&Jl3V(W5G1!72?cEAB1z8#<*m*st z08V>a$5j&X7S5f&%!TaRdvo5t?M-?nC=OOFzkZ#rV|R11^k$JTfBaS;Sh|y;$iE&` zY^n0wTVvm(Gs+WtaSnxw-WupsORJ!ygZK1?B@$@6+*K#hh97bxufdfBPsw#@FobvZ0r3f_$1u#G7FmR{i+=LS zu=LJ%$KK-Z`Itx?qE;mbpY9PfJd>zgv3u99MNpa}$e@)!fwWQDlFSM~%-ZswemK$A zIm3!iKE4TKN+~ZWDmp{#p{w0|qv5`?_ZH^KICUZ*3jNKZ*~7!b z)1K}ov32Vb>qTF^$@5Yn9c$gmO|W(sWb~jvr;b#-TX3u3pJq)qc^Y->nfCM$*gbQ9CDVr20&X9E69;N5BOstqBo(4 zL;xI47vV1+dTmBHo2rB?()&XJhR9=r*W055V!me5_`kkv5(Hts6i`srv)ty5tM zP2`c{pVGvZyWqW`Lpu+MF(SZ>Hd!nN*7YJ$*@y2aCdZ=S1g+@L&EpU^#ZLmOQc+X0J00=dXXNnL3yL33 z5<`rSC*Ko&}bKwXFkwZ_qbE;};sAC#OzMCJFkO)IX zF@M*sz%nhK{JP2989fa0f*0p65Zf`?sblEM5c-F2Fa-pK&3D>~jUPr5dG88A@8xY# z#t_buPPfBulyK%*+SzD*U=qe42Hpg>=s;5cfVQUNoJn1^6hy2)5soS1(-=O zFf)VSZth6hgBh}zr#7|9fOD{3{75iN`)g13+<<oVRJ5Yht&9-Q_fw+7Dd zxM{JU)P?v9jGTJcSdhvMk^q+ASz+4$evYFMTlIIiyMeMhtN zdveusc4eOxh8)zuz9{^}czrB7RA>JFMx0RgH5Ms|PzcMa9R5O>0IV(liffK`Y`Q)> z14FlfYpr@btVCYhnyV<)v-1Vt^llfl+W}czqdj+FC45ovsy{U363>iZ{Z=%xPT}H zXdP2t6#-|2eVMttFk0T+s#A;Fu~W)5h6jc;uzzYk_-glw+b>*6O4D<5F>(2ikXUL| zKQI&Ch5ihhXNChKsuOo}?+CCF!{>qb4`5~-a-O_nmKf>^v}(L74^!so2@WqEQXEhM 
z@$X{zJDoapY99)Qu%aVo?nXv2$4|;#iTf$7BkeA!d8wF6U%!VsPWby5urXjw>2KR% zm@a_lxPv8XN2oDWN*5E;59%RvQXmlsTY48(hn@fYfan)J(fg((^Q~ZU* zaQ7wjH{IvNpgcCl5NtMo!hS>hMjzkNqlVr2_BcX`Ye)_&CNd-61Ev4CuB8|sMi;VN zyztY7AMHFF!#Xb%+8WgqC+B8opAqzc#!FvvKH;((w_%-NVO+Q{5F36784Lkz6O(3P zwV!oWPg}bNf=god9g;!MZ~>e<3bEA0=4spp_9;|RR!S6Gw>B1gc!&uO2mQmWt&bk@ zIzTWs?&EcZi~`peJED6ALti2HBqkcdzWk&7TJQY49ZIVjyyZ|#O@u*|&3A(8bx@ax zbUsQAF=y#QMqy5lANGNh0enr-B_L2>=>On6HPdDkn2w9os3s8U-0Aa-$FLrvpLbzq z7GJwJD^z;0?h1d!hnXEAm~8$6ZY{uJj~(-ata1m81rI4Ao#zxH|HmvkaO>7BRQ3|_ z&!0U5t>Wp9rmbIyZu!WY*scHh!0wY47~*~pwJ@5R&I%3RfS3u^nmmizW9hq7;5dwy;eU;JS2t8LmHed@He zhT0ZZwyn$z!YmKYotO8y_j!83yOuXd;NZFGxx4Idya#@UyI^7(_pqBxA_Im8Zce}Y zt+4gOl|yX1c75-qhXs?s$&*Mhq~QA*h)wV!`)r}h0xcTMx$bM?c6N3KhKBb+JZ>yu z*mC;ECZ^37>9?Gwzu;Lj__e4GD#X+K2i`6k+~4Pu?|rNU|7T0o7FdJ^J$qJG2wy@4 z;1{&i0DXVqV|9WGkxxjiTZk!)^3Fu%=F-q1jg zuH)70JJ<47gw0y~wg{-218gF?P?$ZUdws1JO~`N=4|WU(RFiqNkMjN&s|l{)3+Y%b z@3cHAqxsS@GQ0rs7)K?O1H`xr&+E{IN<+c<`T2xyJ(N7)F-4`MEGFiBbdZvgsxjBp z)-Jd(0g2d)FrN?6_2BeIy?=jhQj}wMb;%a0Lba7b9`TT`%kNA*l$Gm+jNXkD!nX!f z3bfvd$lQQcy=iy;i8!SQE$$Z0D(@*1W8)}vA3%(MD(r8zJGs4?!S*Rno4D7?1eUrWdGIQ-}qjY>#CO4WwA}NgY>?YFwn2Uv_edI z+5~6ZT`!J4efI1=47B?(-|`WlBI5Bv3W%Thg;0L@@L^(5E8AEfHfP`O-%nIy6v#+Q zZzLZh4gr7JA{#{yFAoLuagdLJ)(5IgfF~M*FrTKUdI(>H(||rRv$6>1RXc3bi;)=J z;D>dLtdiE3h4=o|V^we)3W|u>&KKfK488N_a5vD?|Kz$;hmX}Toek=?)6gN}5v!`G zcmnhndN~2r)+*eW8d_S&0d_!Z-#0L@MOj%H6UZ(`O7BHx_YilIl$;FDfgqt@d{R<- zBgF0TRpcQxmB5OgzIgEfXx4zG*wGRJ9!O>Xi>nrj8qlxGq|uDQSdrl?a6x`g;#|FR%hU7cbcW>O!0i zCFnY12K{yvVi>=^9dtN08Fxvo>^qNPwH$gBx>TR(sQTWOT{_Z{^-R>HiG ziHd=PgANE7UY6<-m8@}kc6Jr!pINQ1Iuoun^ZVkd!lm7I!OF<5`Q!WdlqW5^N&tVr z1(G~U#YZVZJPISK*f7HLfozn{Y9egdAJh-khBbaPR@BM0BEvHSlv|1g2lMSaBuv|6 zgqHOYm=Xl_82Q?j5ruHT)@ZAcPB}P;0P&;Wy_*`7A~2eS8YN@dL}yOvpiLv>o#@?^ zqDsRC{ru(2Y0Lt^i$;U$!P4BY5z{k$Zy>w{Z+7}HaKT2@4=uu&w{J;!=2cyJw${BRWDk3C zkYVJyEb!qgkv?z7s7VfB0cqLU(_=Tab95_FhsBW=qVe3y3w#p zzS?=Hy=`T2v820uFR*pu;)$Oa{K|~p-u?RF@*h8#kd)C=Zh>3*BOvr>EN($6($Jt1 
zF6oTike%cSI!?6R&vO`-2$HGBVb46BlyH*!((J))2GI%#p`}$-ab3%$=+2(S{%WS< zro8`t3)QXR>_O?c%P-2MP7;h7BFdP_Sxb^$WlOoG)S0SkN+M5_cL`cPM{^=5?Sk(T7$ z$TN@nn_Rkh@eJ07(f-z!ma31&HC9MUZDjg?|E5B5@hU8gK%Jn{B$f$&0OaV+7@`Qz zfP3}BToMwyF%FqT!aG0#qkBCJXEb<$AY@@HE6B=jvQ2q%HO+wR7%wk&58)UO7T-aqfX1re2|rOBID9wrk4!M@4CZmZEO(F zpCA?qO{pKeu8G@6NH{V)`EAgKxr(ypsnnLIZW~lvHlbAdBIVg@C5q53fRR=b9@s_? zqg`Q5fwQj~5*k99pBot#7REC!H|iaM%CQn8m;LmV^w>r~*MoQ3hRT1Ebdr42Wp?3# z{15U-ZPzrlqw*InP(6^{e~{~-8tnyz3!APUj2eG(M(z|m^GT*ND)NZgH9pKyDu+Y% z9f3u!#r13X$CO4ZoN+gcz)c7eF7`LE3lYU-G2AR7yawoFBYf9&3LLg0<-)>OKx`wG zbam~)YqMDiQKB_AHT6N6NJz0sKM(oj!@caCo**JUlpHo+;y^=b$QM&7_xd^c4mrbp1;tbgMl%%jPPz>f2w6t0-`^zl<$LqB6m-phOOM~mFylCK+-pnv_yS|H+HGbNSnwpw$iigW1 zOpu9>&B>^GAfoa1r)cAV!(cW8oF~0=OGG1=PbZhuTeMXO%tXA`ho|j=K zXg`ubxkogmFm~!48KK19Cyri(yjTY#BH>$Ea!N|w)!?s2 zxDJecqyW6>xNzdM=Qb80?93}v8x;9Q61Qhq!D&S z%sVP5-y+Oj$EheFBy=Cs9pEIsGv_+Epv1%C=H@`=c8!re*OyyL}}Y@MG)HG?*TN#&RcM~vqq zRCyKRQ-DsacgtY=J%3u$(!zoh+wj4ssHpv1HVfnJ=Dvk?aX3$~pQyKZeBwf&Tahhs42nLnk*|iueyls=Cn(5r9d&v$kZqv&Ab-im| z)h=jT@VhGu0itYHG_V*zO~BVQ@2GlVJ`u=Rf^>*Y+Eo-&he;%$*dK!3Cw>-%F%-dH7bDBp-^bqIcoTo_ z!2N&#rDPcfM=Prmq%2Hz5l{{nx-Kzd^ApJ)HCU-X#5_wH#zDY6FvWO=A6 zPVb|CYd_XvYN{WKv;nM?=QTIFvdy&##PW!kS`Qxi)vVjhNP?<-hOkS^J35wQ*aMDm z9y&yU7wrTa?a($;^gAT)&igz&!1(7Gx;zhON`;&x^@;-NUM2_Si`gsGuXm3`GajwY zyTTg8wn2$y`$1qQSCs2#jzrp^OMhn#>ruZBFn%IU@m3EWY!}1W^-n945*V3IhNK?6 z3c-vCoat9{t$;GJ-NU$OaIH3%toWsBrs!P%+dsF2)AMqhJ2nE+nO?joG#8fu^zj|7z!4&-(Cq86vpK+#R#2%`2`HBQB_L{zzYYt+n0zyKd7+s z)+13rfxumc7W)heexYin$7hJyE|}=;xorox0Wp{)8^7*m5^?k~Fq9sn2Z19UOjhFB z;WZkeBZGsRs5o(K;U`MU%VTs-4u8ei&vLBbuezBS;=Tuig-Wo`2CE*w*M^Ob7g8vZ z5!)BnKAK<^5?9MEwF`Z=i{C8W&L}I>vGVIxpl~4)N{9g%8XFtI?n!L!M-STwlkzY< zMJF`B8h2HNUwVF(@ed2q0&j2cfBC^Q5&DyPbu3vDnWRq%lW1tRlT`Rv7)v5|!ps%E zXO+l+nX2l~A7DhdsVQRJu>;0>^Fk|=7ywI5+eZCF*mHK}+fx&+AzOZow`X8w(7|M9 z?|EFzQV9$N6fUhH6r6kM;1>utc0;Zu~|2B2EFf^gE4_8T;w})ECZ2< zb7B5jJ2&IwG^1)-7|R(To)ai2PQ}_x$IZ?&-h++ND#-i$plN|R zqXhS3J&75Ap#JVmL_&FrGpMAehXe5j>n%z)N^g1p{cp24<|yu+Qkl$pxvd{)Fp+1r 
zVW8%5B{lKECWBkIzT{HditgdIR+B43We$SsLyU<)Ov3~$zs?-Xnmo7Gv#4ijP@N)y z%DmJ5>;s)n82-zM2vZJ9dd;1FE79rs`Pi@r?-%|T(jcoG|9_B%V>^hfu8}^9tydhA zcnCoKVPqr+sGmCh4rurKAg>TQpXI*3WRhD@P>!dC)ELJwF%y%FWIOcjUTEmfpP$^c z2mj(}8({2vrXU>JiSsgqPq*xqpwZWHkFgfsa|al?rRyi0sn5)>9j9dzyiV6;N_wfvT2I)izZJifhh@$`XsI|Mq%>Q}MgY;<7@5~8#mQR?3M$W~>1>EcMs0L%niFjZhNVbqqyt&*F ziMu3TM>96vt&b5^Y*Ie-2VFih<#^8(2&o9^k*JVbujR16_e zS$#bnP&ULLuap#vckNonGnMaB&lfmMATVghwU~^~@w@R2rh}w@yD|`~l}k)4?^|xo zuLLtMtV6GwK?Ji+B~^?EF60)|#tm^yh}nIK5b8Tl02Z zi#+w!_3;<3Pu0tN%>?ZhdjoIFNbu~IyX)>-P;aq`q__5b^Gk8X55q4ag8hAiJj%?g z28258c6;bdKJRMHbNZtk@KtY$F7>SL%;JoZnB}*QGtiGc+K%Jp3RNdO z-+ftkFKG0DWs15MTZU$E*3?ySXkWeh7CGt&PMqMe3`Rhk3771DQ%Fci$f3$AkpAA# z;Cb&J87k!0!ncr+dTA*h4Gj-Z5{^<~*Jzm#FloHF)N_-C`Vj4#q?e?Zg+aZ|GGe_V zmQ_AFALe-vSgyMm2)Ioy1iLG#N(kw#%~3r&xnz3%vUQxZ+vYW);aHzv(T0@A^~*-6 z%a1x_k{Yks?beP9Cg24jp7E9np~kc8*GehW6+$j>4P zTj^lq&$f1U6*!KF3211bf#|o7lXDY>0DGU|yqA%2eE!d$o%i@MNT7dXYvWmXRW46e zSG2XUVFDqsr26l>{{C^Kt*z}xC<#FMakPV|QKO+A#^;GbJ4V6345z)>aCP@rDinQy zI&ZE6>21g|)1=uY?9*ieh)jA%!eyC76ni#PS+|?1Fh&N`SIEL@HEv((!(;(*){-ZO zkMfa^^>`2Dr1Vt%a{jq^d~#35n%T+KpwgTZfLlCU&#;y?fH^2f0rlb)$~qr|Fv%+$rJSz4c3&tCe57 z=$`f6G_ObGW)<$sXT01u^K-M6pV()Y2=54roTy8kEhof_56%_NIKHIxkXoU>{DCR; zj96Q)$=>DdlfKSZSWA-3&7An&l>1Cd@1@u$G(`7c&-QZPOxCLzgRF7>+O3vyirPC) zvg^uyP8Ueh&)b%)71n#}>v7ky-!zdY6v;frdMaG#*zV;Ik0`Zucs#@p7-iLOoK{ra z;bCITC84;5zj&+YUK^G&n*9$#`3H~9p7l~P7|Y*ldaJB_&g9lB@y-b6T$Knc;FJac z_ZJeY_6J`;(Mc`oJi7(blDI7PZ3fPu4H6s!sQ3i*7coQTTj0H*AO;|UFStGLZRkWP zQg?oNe#i*9oM~&yUZjMLcv%l4*OrDO319{6Iaz&u4&>%QJ1BfB@1xK`j!$HtK!xCB z4=^FgGbM!&9gxZ!m+I>4y}{a%T{s9(jqpGK9|d62R`ARTun`=4l|H%`oQm1_HCS&b z`N)uw502_PMT zFk)6`6c|&lPzJp|X&C^fF$S**J&yhM?OO?^s^wVfucD-|F$c`F3n(EPPqGNAxXZz$ z^shu(LZr-awg$35cJGuY5>6h!^kH_V|KS1@q#QxfXXW7VNbTrWfRcCa9AlSp_d_v% z=Iq&W49-P9yl+1eotd2~$Xsb)O%BHvCbYxvG+YbiI4E6}ZNH&D=Ja}&hP8h6v95QH zW}bh%@|(W;+>D0r-^&U=P}94PFku1rS}j^GcDO$ccjBv^Gx6B}>oz6)D|@?4wkYl0 zE_dS_hl&z3t~{>d9Tn~@zS4ic!|BI~m0LSw0|Hd!bQdUM( 
zWF;iCp|TPs4P}PxWD_DgBRi>tN}}xDGO|ZxWMq`eOi0L<`Ml2W?|J^mbNrwCxR2u= zedD^$^E2M>*Lu@bm+!vKA?qcMZCK@7=dmm5c;LH7zZyO@e#p;5m-ddlWa;;cM$bvs z_KprE^!#lIYa&jIZV!IHO};~i*l>nLoXv$`9BVA4Gr2R&`Z5j|B8FwE48@djbDTuO zr%x&<41S6}OtGPSf`tjd2DQUIwG@zdvEgSDpsm8JZwee&sHlF?=AgJlZBJa)SjLTd zhbAW{N32P$@lw=~44gi9E+`|zs(1tmjnR^z>^~)VZl++m9W^G?fdVB#wkPUrJK%jO zgJ=fej}+Tp=<;K@V{_v7Z&T1A1haPa>a*|elV1wR@j2Ah9mbvDa8ZSD1N~!Lww_e6 z)7$)f;}0=)rXLq-zJ4ukW_cfGy`=pnvVK_N+u6;7EQ=T_9H79XfED_1aRh^IgHV1Y z+w|l%G&Z7HVh5rhj?NESWn)0{9LL@Bm16n*!9AIn7#owEot^O~0}B&97VplcT*3f{ z0esYja-YBT#InIL1?1yHU4uOFZh6f%(mPvHJOnrFY$=L#ax3 zQhsNfn*ukE)9Ic7L)pRFss|*$sj`x!q9P8b!^0;1UE6&rRE|a%W)gEismTG5H%3M~RGcjHN^}{=Iv1i4#Gy{*XM&02JvC3JB-)fwp8HRgwccDW? z;xYgMSQ*uGmIr$TaSUj&onRGcmz0*)j9Wf)`SOF%rW|^2?{&2Ys#?@+;aYMeJ7?HsI1tCILNpkc~PRWh$5Dy&u~bhhtwTW8#QmBbgVPX#YhQuOKy7xo zzNq#xJ$&)-eYxuNsFvHSG z!zsa1$-ZmlEWNYqo$!gn%}WrDzE%r2*)4ICL$Ijmwpnx4VZJ+>AFnjv+nfGOyfJ-3 z%_|~xgl6(_2a17+a&hxLS4awf-acvTNi)Cjrdx#jZR;REq&i_J_=ZtY0r=D{aj}Z2 z%@egNx+wJW{Rp3gj7(1cMh;YkK$5MVKMYaeAzhRa@x&QLl-mT-2Kyg${e;(}x=0-K zpDIWdvDIrBAHDvLalyx%Kb!$B&da)7y4vzzP{`@zUA4Pc!NgL*%^)%Xi<(tkMRL)B zu&{9A7I-GVx;4vtEz9sI|L^LqsLVr>9vwoq2CpgCidGxE6M|*>HiLiYFUE+d3pmU& z-4M-g?hlrUYBFSbV;6bIPT#Emu0jOIs$GoSXW!FCrKde_;}qswa@~_GW>6OzuusCH zBmGL5Tz`pAz|WUEiiew5?DyQbI@0-d;i8t@=-aCSg{1oWdNT-yQQ-#SxJ9^|#z@-3 z-LfuzckcXG3$M&uITjnNyye%t3(6i7KZkI@0E`bPF1{5bhyL-7gxQD6#zs0g4HYr% zz1CC$jhu>86@>;lutV@#!!AO{!~NW(*hjjHqOP zE1s>ddiP7hd z(UI3xv8$t2MTL(cu_wx>!lsPelD4X*OBeX|DK#e^9vm7(P zK9lfaK;V4<=4#z}9+pTb$#0`i<-rv?byZ2gpr(uhE)`AE zW<_^9NAUtmFAm@`s^P(y2>m(XtXDS{8NMP5HkuNo2LNb;LR)D3?~T?hdxnt-YTokJ z)LS>!^K!%z16-Oaey2zh%@~P9NqMQnJ+N_3T8~6pkad=Y+5l%Up#lD0?#*Q&R(SPF zmi!|stDZi%VjyU&9JztwLNWI-g~nMtYbpp{C`q_0?@@;HoF=g({#!ZC#$AfN9wb0v zF;gX?lM62O(%argBiW}*5+l6M$QddqS^OKF)?u_*Cx^`Njs1_3t4nXey2Q!Yupw^O z5!8cqe_^IVKsec5$ey4}e2$I~ER%H6iVKAX_kS!C(6&8M)P&)_L77p@yvm;vXm+y= zyiOntrPCKQ?CPl zoz&;7WFkS{fP(#A!l{a1rq|rkB4PjiNX&R1+C@NSVZ`D-F~Jpr<6URy7sI%u>0`mStmd za&m-r?f|sEj0;@%Lo<+k1@(b0+lO448OB)b^8QD*{R`B

j_ zJ#Ycd7Uz!Ss=O#1Zu!WcL|U_uO9fP4m4HbLFtTelj3>n7gC7~Wv8``QbimE?}?G3?@l>$vNv)mifWx?x1&R4iWas7W<0wg3`%kC>c!aB<$FLtf7^AkA zIrO@5Nb9x#b3Jjair}7_nW@5E`0;%R&u=o&QR3p_3IG(a-ZeM8IF;%1n`c39s!BcN z`ON`zV;90Gz}hO9m?Sj{fE0|vw;EQ-q7+CV`8 z91G`gII&P9c>x*#bs2)*lm|}UjP`R|mRa_)h0n~7UnWMTNP|478pGhM&mv69}~Fci>~#Vs1nkZZpVO`UZt z9M{8$i!btCJ%pE&)wn_78@_#B5NL< zBO`%lmX^Zz*xMwkCc{Z<0igRf0Z`VNR&{l;o_j2vq?kditFJ!{6UY(YjpGU@Uw4dNQNDA8f{Fei9E^(5uqc3yPEA4L>A_p2B+V4T}q=gEwAM{nk-=aH} z89ea<^A+uMQx#~n+UzUSU2hUDUV3NSOM&xlm+Z|Q zpny`jw1X)ALa5vH{Xym(HTgpi)KbuUp~LkD0@$MFzpOkqKCTFG9w>@wK^#?Jx~utI zSj}LYai>)I^3Yb2W=BSOZWQ7uG#8J{6|PhnPVR2Z*(L_g-zLZ7!?Tmr43C&9Bq3oI z_nGHdTUa~#e!7Q+G4(f-4}U+iFkUIgNwQd;3gP8XvFlflxuV@Pbd9 zoB0hBmN7>U8aX7YEjjH3^mF3$OD-MCwyv%qd^N;3U^5|u;S8xq29-UDgb5{&qXRn# z_E%A{%F^`{G39Wh!a2=aOvm;R7j(;zDw&x2(Z%|Hn%-?va zm^o@tc7sxN`<_chb`K|nB3xWk6qB*atj1=lDk!P6tSb5g{54fbiN`$OsjZgXn!96v zg|+(2mpI~uWB-z#cTAm7UjI;4b^AkQ71`!$|E4%3G;~W3PB?A;{dinaGbr)bEkb!$ zS66584r7q6k6+JPJK6`5i~vKxj0}#Ce;vp*sa~fzerrv4@ZC$*sJ1f83k19@cq}JM zF6<5`_u8wly%d@AT&rV)Crfs6@!+cwLvofPgp)hU&5?6JYM^|~Gm~Wt3*lYBC7d)$ zR$!BYhS*6!U-lljcPM1e#mPw#6*%Wl8BsAYVYhDx#%d}nr^t!2t{!FM&JIh@vT5Uy zRzUwvYPe;bj=ODSD^Ed0guE^Vig8h^z52%y^2qp#(GXoqa`Qes= zZnP564z?))(ZKx75u1mre*pS6yw+Ou-aIjC64#nJ038S)e{`B(3E&lKB{Ha0P(LN3 zl)~aKa-Co!=tA81ypod4#4L!34)4f_2xSN|flrBp%{tl|j*)37}#SoX88yj?#?0!_57?*oGY=F?C83Q(|Ho4Y)=f z4D-i(jK=TS8cPQ*JX;xc9|T_9ookpSJtJ}w{utsJ9XnMA3@lkuXqvjj*%B9n{@|gn z$^OiPj50}SFZkY3{!v)w_N^?n(=WCIGF%@XLI3;_<=-6rLUFe#1BiF&@uE>O72TQ> zMC1Y)iLiU3x&Sy$zinIey?cIWe?d=h;LIfAazI)m(}-?QRB&J*1tIZ;?j|BctOXNH zqamlpcm%)JQ8Sd9peI!E6ftxo2nxNxJ9mh1wp(_#w)6lJfwh^TCK{M=C;lJZo^#wlxPnjs(@f>4A9YdM+$KP5D+7f&AqsLuqHt( z1{M@ZpexTuoJsA652gc^w{O%lhbjXJQiAA_L<$X^mU^~`ezdQzui3Jtxp`n>qN(&z z@wN#t$xwHaQ_=fD6N8CjQMj5&m59rRZc5zj13j=aa$JIOk`{ENO3-Ck^hGY?%)nqJ zEI;kSvG@XA+wAgH@M z$D6%b;mSI-#3R9E*ZW2zbOJWAT6eyOst~a_E`QN@TuMP}Y{B<_C$1orDKR%9nw!-h z8!89MfB$(AxC#zX#!%&mx_RPq;~ACQ 
z?`ixPD+zS}Fhs3{W|qZQ=-URaG0FP-v{&Pg-GEM!L|}xZ$`CySZx;7xZ)DRpq9#9$=zXMBGzw0b6DR3`a{`Lj+!X!6-*{=T7A?r?>-rkXYB2Nx`Zl9X@ zNAjzoW*GdQ67E8dQiP3r7dLklXgx`1LpJU;xDQQGqSBd#$WzKUKc+n9>8buqygKWQUx&{wY*_Vfa?wi)RCaX-ZM1v77O*fg6PxpmL;oJ4GQjBq;S&WH z7uV6ME4^n=zjL?gD?R#PN3M%OmU^dfm}`vpJN)Gcj;^jQ3yK-+(4@S))>?*r{QQ;B z1E7i_POe|Ai42~eo&ZCa~FpmDDHd=;Vs+4a}UxFEKc?Xz(QXN zi8UK1T{-i8gP7pv>SI1@?!-xnL-t0ph9GYqi0YvsQ=A*aIBD_=3#p^(%?w1Kh}sJR z_VME|l$@wWm6P-f)OWf4`ND*&84JeYBcoVBw-Dq8G3NBmo6?b(a0)W*Ur?&W-C5lLTJlLx;%393BRKv;>yhb!%(MZx&lTp%Dj}`~iWYsDf~O zQ?j$Or~Its30YA?(+X9hlwHGAD@1$y*N>a?7KJCA_|=*~O>(^Hl%b*T8yGZLSg8OY zDH&4v>(;jPu-u+lT};CN;ui2ZX!QPy${GZQ#5YF`i5_AAwsutmgI(|zK-3u>A5SYK zC567@B&0C7L!s^`jxSJ4gV03cOMV!#!3}_`bpW<<3epi-SxJHWY8dJnfr3$LrUi@` zVbd6%GWcqPErX0Ub`Z1vIXMTQB)W$mkCqY?EvnbS%G>12tJ4*PLKDI_9M_VMjPP;M zHbNG^7kWj6PMO!rD0QZL*FS*MaNpb$JKp~MRDSbsx;6G`Pw3iJLSB8f6)b*8 zAKz6E<_b3hP!rvh@U9M8_OG_@zVm(sezQ1jUc@1u>F8*cF1N_tR>Od2dXT8s4Ko=C-iFe z{L|w*b@B`p=N!s2qL;OPE~+!~`^l3-l8tme$}#zU)>0oS6&V*|anow+3Jef%>Wur_ zD@y69kjt1r8bi>>$Q#`X{)B){8+D25$T6J-}pGXA1x?^p9OLd z1XFM5?sL5Z>zjRlr0VGTn}44kV~^yNslGUxN!v4Pny@fGPuPv%lj+a{PYw;E=;4s*jS-a5a7aJz; zx14`H$3o?pZMkUMU&4x0Z138<6~|$l)Qcpy$zCm!Ck&e{{&^um^6P=COH+G!C7IFj z|MwX){Ual{fq=pS&UoWd2Bp_RW~D3~VBRlYpgh06g^Ay9LJIm#sNGa- zZL6Mi`=z>> z9>4qSqXXom+L?bGhp*8iqO|>m3CscFq>?C?Vv!L?T{?Z-!t*}6Zhh(N2b?3?Z zW0>R@ZV%U0b)v82$DBQKJ`s7cE|uf2Euli>_tT;T9t9FI{fd#f>jHli4qoTykXmqH zJSIR&>fO5&)XaVn%3DH)C$vNItyAcQC@6gp0i#Z?C@mxm;DHV?58{%N_s_mdQ&&2v zB<|S8qq5d`#AEHqnwPiJ-@<;!rHys9Z&tl;_Ls&U3|^WYDRSI6zU#f4>ROrSjXS%M zKUMp^G<3?aglGIC%VTkzgUO~Swy`KE3v{zPhY6kFr%!iHqiOxq3pML6f2gb^5&0qT zdjQ;&YnoY6x`{jAs_nYaJCeE zx9+Lcq}qSsneifn)J%_&?|%CJk1e+%^FK3zTgZy-nX)zKU+c-HB;k$FgvTC8p*@#Q zf7qcfyL}XogOF;%!BzuwE5qC1_hjVuM~{kbjh;{NVxZq#)c@007$r!ZbR)f;v2*wT zD2KD8X1134u0TX!A$UFTjN=0lMWQF{UgSoS@fD!v@Q?7jGBCu9~d@Z62mKQxRAhVb%`P*Bvwe@>g2 zT)0}^%w^@f9`vSSqdK}Kc$|tylj@YKZ&mTyhL`}PFc{hL0EiywN{ zSCaIPhQ5IlQhdgT?7{unk9Tvj?_Yq%N~X?ckayQnFAkHWB;EPXTT(1W^0q5dZ#V7- zGI8O8>i-lln5K0bTacaM86(u1}FFJAx7Z#M^CBkVf` 
zH`irJ)7s0;goZ}flK#OT(51rS~klLu}5QQ|DkKw;4-{}C@DBxI7I%Q z*eJVjEk!WoPDA|_-?KHNb4&)*3>k#;4^`9tob{_^o_tfEc0b+~*r+5?sRo-d3lxBa z{RbEyEA>8<5yY^nUz^~sihB~xlUA1{;KCunuFLy(slV;?xozmEszc4{8NJpha5)hs zy1<}2L-tmeTzlOiCDn?#_M@!{HI>$yntN%O zD}_U#1$?`BP)JAtFO&Fcg%16fQcGL?DmGRo9_%VCZO%g%QSo~b zZ(E&6@<|-|>g@OQia%Z`^ry_pal=MSXxIL934zB^z%ogoz2lfsN|`NJ79xA@dD%Aab(J z1j$B#5lBV>9iMe&a_;-r*G}snB88{r%^MhT(Nw|Qf%`HfG?ehe#K_sqb?E>{>=L$A zMWw?fj*liTB~^pO8PIXmBn8a7-bSy;7#QXiEaDLbw{3N8==E>!4wS}<23-`YmET+o zKrz85#rL{fZT0VTU)}g$M<7tovEKi#T37^yH$#jfz6pO#cay~Ab(?TcFF6wNr_vuo zJfdK4lXV%Z%pNv}t`77I%45muBgm7aO-iQ&fJ)Rykb)DYrp^lNLDRa~5%?UhccwF; zXs!I^m?>I)>-g1pIfvdiv^{x7!IrGd2F1niYcg~W;wSzxSsmDWGXDgRpCE(cmSyMK z{(htJg+560G~;p8Hd&D8hL^;wy?5Kmw7A!8c)D?&+hstZcyb3DTXNxL?p^X8^rXKl z3pT_dge`4%Ku%8Z$h;=a>*u!Z$&_M7B`+^NJUo1ZI0M6PPR1?m*>@s0w#V~F9v34U z^A?=?drt;;Y*R=VMb&ftbLuv5iI;zQ4x;^k^qQ@ywji1LH1NYOm`H%3zE@u4zr>WW z$^gzs#Rz=RiHr8wSo=q~L7}5TH>zvG#iw$&HiY4LzyJ1KmGL1o^sYBkiPgCA{UX1Q zjI^|zp6_o{lQWDI8oRN+hb#NM33JY>PuSCtEW1n3j^A$WuyBVlxA^Umt#ov8n+`up zLs6}1efbp}UXM$LM^yTa_Uws=Hw(OQ&cCz!n&~JHjpw#==Uk?~>)A71lzD$Xbe5V~Ki!Q*@Knh!y0JE~@Qg~#FByKV9t%P*>f4Pt0#9$y5PWb$YXb;%I2 zkE($RU738RAOBcMmqD$U35F^UCG>_&d-hx%4QD|mGXLl612GaRY`R#7ic`Q$FV0Tw zhN?FCO>iLE^SJdIb%3joY@&L!%i$i5#MSz62llkoBYlfqX{EzgFWz1wicjEYX`vUq z9zk*nH`R?F8LwZz9=XAwnb9I#a!iD|68KA&V?eP;IIy655$ zm;tnyAM*4M^>}pbB)RKc@{jS^tSNqN8NUQ=#mwCOI<*spZV*$TaNPVYBb`K}bgF_{ z-X$e>wq*aY_aAHmpgYIRx%c0_t*@kxyiH={kQs zo*&m7LgzUa1n#1s-^ko!zAM)`76!T*%WPBa=C8 zu7xq)MaSHX?aWTH2jd=O?9$^gEODU)j`W|P4VDg<$=(?2Zf0e20Yq*()S9M*B~@&0 zaFNLM&a&_C+XpN#OKi1LBqROXA4XiL2#`EXXaPV* zVvSv9Yj5R%4<9&w*Glpmn9jtp!;wrcKmHFs>?CB80h7&M^zVa1f$-&kg9e-`Y&&{6 zak~76C&%6$z@~a6Uc?A9Ptc(Yx83^X_E9NI0?*xCn`2;Ge?x*>tSj_eUUalZ*{;}* z;)6S4jQ(+ez9X!jS6C}OJUqbF2ixuDm*NZQHLd@YKup~{?z5bvwrT{g&xiUF$Sy*S zu>Lh9;F(WUUfZJJ;Zf2^H^2~d0d+SBZA!4Tz&pTcs*PlStF3Q`8Wyn#oh2ktls+l0 z3=_w&9Ej;fq?{dg^ylIq|-a$h_gfRez2YiQXF3_arP8D2A zcyfdjzoJ4O#y9xP1JF4Q0D^_^Do)l_81x(&iO8cs69r%a=4`^w4&NW~cY*LD6P!cF 
zS~Gr_ot4!Dw|SBK3@>s6h|H~^)dR50!q5e#9QPpMdIR1R1UZ@+G8!Qb{g8YrJgx^3 z2h9NwK+yPOiV@;aplYADxS;vO{>S^8_4m`6qJB1w&wjiwxHX~8>GR_5tiQ~r(r5MwuxXRf z0IVl(uEq)e`XU0{poK7_okeI04CjcrVC3MSLF@+MQN^Aieh7ij!4imKd`CXS913qIIzts$UpAM(+KLN>EYioS&seVdReFb;yJ;1 zK>?=;wBY^RO9e71W>7i9Z(t$W0lsM8g zNq*3Gjrw|4p5sV4~DL$lKNEE;MM;F>4V`yiX zNB0ZEL(DBJD~n;^kA#BjhtGK34GB>|J~GhP_5w3i$Z!1dVUT~c=X|o#+WVUaw*Tpm z%J5y&u&Dns_B(9!pwayT;$}UJDklu#6a#2{56l^aJs!eo9r2t|L-RBeXxYx{@E$as zgJuqK(Wj8FF6%ZG0oi0tLqmG?_(M2<_~kA+6d?({(mrA)p!de!dsAPavbxYQKi^^FJd}q zR9*`e+{<1ig@8KI#Kf*%U>(poPp-Wa!mUDHD z+;QU9xgv-D+pM3?zg@h#8xl9Om1xqX(&AYRdu%;ed+N6t&v`pDX=WY~>Gm-IEFMGs7;U4Mc>Mrw&Q zu)z{)c<62#L=!HZ8466l0!^MDM1I6dfii`LmX-|BtuW*hX*#4LuYZy_;DHbk?+&7P zKc)?&4AdI=u2Vj*bd`{n-hr|LYc3GPBhr2R($j^A>>9kQIu$2Gni8y2L^YCJIDpPm z;G2SEhG*mN_~(6C32YR{)XuGUABj80j@5G#wh-v9;jC2H;U9jo77Z*o^SS$1E=e@SDP?qRS_lcD-}86f5X)r$jCZ*Fw94P-dRI= zw*!jh=9|e&;nL76Hp*;MR8=HR;-d6+OfaOhj{PA4sRNH z#2KjavyA6)KlP4?T2&PlHEG^cks45A0XYYxFBM@K$MW^{FqhNJ#)!!?Ksb#E>BV4u~U6YufSw7~eV z4cko*cf|JaZG4q-$=f>}g-2ZJy}BK&Z^X{S?+jMMN)_;$^DSHVd^mZMGkJ6xyMbVP z@#65ZMMfrhZdkB|PQU@Ao*>JLC4Y(B;mjN_`_N4DvAl|*=C7}{_Hd0fE=*w4 z_<1tyAv$hzcchIO*45Xi8JOxJMH*iW*Siv?lM|?y@{W@9k1R4r4;NqIEHWK`NHFM- zIBR}6@i<-1*3Pc|wdDl@yhibXfUEnAJCch_lB1u%MlF?~iQWV0aWOY6x*#~j=0{q| z^X%-1TXVl)^4bcP2VVF9h!3InBAkFlMMbCoaA9{t9J?2%rh!;2QvxaOUvYAAEsh=m zI*n*9b8G}$VEnRMlTLF^>0tayr|EdmFyQecWn;qu~J4{czsd zW_*;4j$KuteRPLkZGSzvVa|K<*r%J7Gne;D?u=fa9ohQYz{N%LxSGwm4*~h{(zG?A z2lnouY21BOn}5vA)O28Ym>i`A$#_c|6hj8ZHz;kov#UB%&O%A-k6kv@7$XuA8F>mH zZv!c|bcyeUc;8?=4#Aput<2LxRv_>xg?*X>v$Z0YXc0pWCrsHusIUoA$bk`)pFCM( zeYEd^WoxF_n|Nq%dw6lcYBzVq)(kd`6o+{<)o}$5Mecw8eDun3r7Q&=p?8s`9r-3F z0jRw|IN$PkTH@)oZf0CDaUblBjh_dOVG!m?L?vL-X9$FZCK#7T2&nM^v0{+PfDe8t z`Iwnr+%Y#X;=)409mqVyMT%lBmDAU#kPs5wZukmw5g~|dL2%hAyh3op;`*YTqqrX9 zC2{r>(KE@USwOaU@QCUQ!uq>bi}8C5!0d>!lcQks1m;SGAgH?}Z{1i^Qg4zDAI5T-JFF9_MBf14HJHNYmXqWt1wru4n(27wi zmqg&%NkhXjrPbdP6MVR{K{5bv-vU1{aXgl?pkE?p>s?5cBRap8g$Y8UgE|59G*KO{ 
ztxV*R{1BWVnQ;Ep<=rrAY=?LT3ctE?WQ`KZDMaQCsE5#uZ9^fk2bpW|3*Zk{Mwf)x z2||LBY5>PJL-jm@1y$=_;t$X!fALAhV@MN?kXR2 zLSzThJ@&vQi15IjcxgKio1iHWTsQ+@6q<7i78Vwjhd5Ft#hS3=nO{6jk@2akcmk{X z=LSp+^+)y&!Cqrx8Okx@z924dV`Cn;kp(!jH#b%f!Y>Qn+6l&f!YJ76hFytrPX$wX zz=l%oOB+%Qg@GA6J^p$Jq)&)ZQa=iFL)Bv$8$^@@!%nN#Y$px+9E>rs_-;T%LQO)g zR)-ydj2~l5OP&3()_QHFx11why}NT!C@?eAI^lw4D)dc#-Ydp+YL-V1=p*aMFPvS> zII%)L^<3iBj8~_odmp7VN9Ig|B0E?8Lzn+LS!H`W6B6T^EpdL^_Lb^cA03)-b#Xz} ze{jYt>_Qsj=6O_Q@M&L4{CdBUeFkGC&XiJ9A#`6f-2ZFLd^}HMW5-BS`*|boAD;6H zSZwhNw?P~<{c1VlGB4CSJ^>1aWO>j)=&dNXgM&LJ>O6z}*tti{WxSKN;6kFB?dL2{ zx3$+UJ9j=ycNGFMX<~Qc9|UudYoW^wJ7alK3#kdQO_WzSx#henQ7Rw>EAxjodSoGq zD=DQvS|78Op~tHR_a5(je@UoMs}7kt`Q{(*n@(7^JPrcG)-^UK)Ott?k(-#U(s}DP zL`1iVMQK#7ULn^Na@&WCR9RIM+?M8G6gvIA&>?trarfAdGzR~>yTl422F&Um*Ty8` zf6{zvZn!rl-rm`%Y<8|iE#}{^1)EgoKt5Lo3AlC=R_&PUgK@;S-5EPPmn27tFv%^QeNlJR4wj4_IF~Siuv0is}2g0r%%Q08h2z^K-QLo7)a&` zLM>VE*gVGz6xEnc4j_%6R$@PblKy&}@3JQQKl_(C|i=8czikf?+)}g{@ z{kMTjdFBlVvHii*gx}%Ci-7SM7Y+X3asgD^l&sFn;AAuBmpDR8OC#q&u(xLQQ2|bc z_BY(_O0>5h*|CEH`3NR#Y;lLoWL+$NvfghV8EPGuitAKBRqCzUR}OU^5hF_5g3qRPP$EG{~BO5rV~W%-*25@!#K#Z|U}^SliWX zr;G@}>>KN@%z@pbgZNfa+8HFaP$N=IBegnXVBGS0h7=LDF*e3DzkFUlHJ$W`h)pcK zaZ_c(O_u(Jimu6z$1;$aVSQxV)T4PGroJ@zrw&o#8PP61)Iu`}=bO=sTqIzpa< zjEw9$m}uOWr(@!H%n3ks_|n?%E|Jo6YWI(kHJ=d*>FZB(;J%KCtT(n`5OpJWg%W-F za)B|Cd-h_23YT%Lh`__5rq?DIIciNh4BZ^G=~SfSqsP|WN%Z|4(HucfvHNKA z@%jU>bp&{8zt;H#SlNPBH9o1b$jqnDv0VyEzYbnJ@V0s;ag z90w||JXn2K^5XE4sH($$5_1K`-jlVXpXn(4h2xDxvYfNk)p9xr-s$_lxfyX;DpY0} zM&-soDs1vU*S~L`b2`;N7a0*LGXH7EBwy6xJ{l!im4x>gxI=zB*uU-lg~YJ8XZU#W z_WS?-{K$ec4l`!hKKyvEQ^Ys>(|6NkK3&66@BMbAq^4JO zpaF7Ifg7H}!Wik5gYL2t?xW0e=?uG$Sv9^-^NV=>l31ZEu z#C3fqcIzqwRkG3M#*JHd_bDkEo7>Xs#4dc9m^gbl5?zWBaZ@A4b-P%sNTL7J^r!bG zzrG%IQ@w=*xiwqbC&=lR6*D zq=W)|Ta|1=uZ=hDc2;y48jr5u+s`q+r9`S9?S+WUW<~w}A1w#|j}|}=;=Gxmf8oON z=b!0Xj-YE7?{~7#$oP|-JSEX2ceM{ETlV+*HP|ZNesP@ZK5)c!mLG+uXfQ(>wI5H| zz+%N_m1nF@!2({at_H^epZ?qAV-AwfwGA%({m8;`*n|F%LoffrDmdN2(L~wd_6PMY zbAXFxPa}lM#KFNRNd=mR|9l(LnCZ~>&Z0j=MEL0Iwl$ljDWMu 
z5NkyPfb787?_d3RtnagmBn(bZP(zh%C6yX0;_u{IGdeQ^yi{2JZsuc7dV2cGK}UkS z*FWr_1U>8@X8(?mOFS7_S%?}hR2q%jSg?8fA`E2^FNT%vJB7x9$n4fcf_*Ozp>vSA z!B4e)7tQU9H0REpE557gU}6=z(^va~ID3);2@SS82TK5aY2d?ZECOsK3I~HFB#Jyo zx05P6o9RV14Goiq>yEn<|D=|WVe*@^6pB0QyXsQZzpc;pFe^XJ+imwTaCup>PCwM= zub(hUaZ8aH@ww5+RyH?BgqCBXp7z1q_tH2!g><&)KBf_yXmBV{$-1bU8a?p)Cu;C{ zab{*Bh71BZ70`Fahzw^taHaE0F(f#O=g#G5e>rD(ayNj#ym!MnSKm`z-gh2WE^7D} z$J^RWHInL7J$(-y%KZD!s;k3bX7)VLW6ndir|2^`(-h6iVsm&dK%(ro-ahwF9A=lTnABX}EKVIFR4x>dVI z$OHw&ulJXq@2{B~Z6$QoSathK+@|jWhu5eRRI``=sQ3Of+s_dVk~WUaGNmhR_mL^b zD=qy-r8Y4s$s~sRBp{hTf&qm}!4ml^@i2LhuWCaRpYN1+$Sd0=dhkJ>9d$q4B}n zv}PYndpqR^O1&t5Q0(9Kqsyj8Jn_ZUj~Y2&-_+p&!mUHsvGwEJniW%_eV;Pic93lW zp3+cwe6n3$eB6*9dsjd^0}fEqLCCVZ<#v#@2TzflbUrB1_|LKvvsk(EZISsmW*?rEx}od%gac*Mazr$nqUN58;) zX6RBOE$w>Le;X+GGeL^tlV1I%&{sE&V%1#u#M5vr!SFWep#wJ8*pkK9jQ2Oj`}Q(jJb@Q-qo%FFKB_$V70O z1l1Q5)Ro&nYVgUZTyrA#qsRO-BMs&9Xr92Oeut`k7oKZPbpLm{Do?V$brooxM9Nb; zzy%8AE50n9PC3m>bBk#C|6d4RyQr`GrZl`AZ>$A=wKxT$EFL)t zzHE}@I`vp3YG**Z*@w0k0r-ktqj1v@QQciW2KW{zGO)v%T+DAFy@wFu7F4IF#3YIC zzyF&BY*R>7)V`CQ_a;S}{x6L7lx)h1-@k(u#&JQEpH!525YLd;hj?)oTI&30M zgB-@D{y5)H>gdGwvCAxHM{^**8dYq=Q8fG(%$0z+?th#i7*klcrJO%vFPY2b*@EUL z_-4eknqRt#7v!WPdc($>4UBQpd|T%Yp@d5dIJfg%{z)odt=KM?*jhS4wQef*x%ex@ zvNhn6oPNz)xlFf{pVbkKjc3%>(Vs7xlZY zF!d6`?8KOd4>Ny01}z2_lt2EV03(g{IhQdYD{6*D-7Y%H>o}#@iI~)lja3I6r{VOM zD97Gf2aVs966jud;B*ZcK18+}JobZY#m;dOEo@AKgRqEfR8xX{qb31Ld}M!Jpe`S% z8!+jM`=rD%4FP4Q^)_q+-S;=`LWgv;`Q8|a=~&m~Wa^f>7X*IE6~)_nwCbtXk?w}F zE+d(xj#!znCg&r1PBM)_O7hUPN89M>h1mZt`Q)>*N__rw-qi4m@2lNA*QcAU&^~Pa zpKRdaqZoy9{*xqA)u9K1gY#c$49ah^HtAgK&1#gh$<56rc+i#_Xw-!E{oHb{;VF7! 
zQ=9qymA&NY z%YgD}kgepQYwZUmAXIwGtq>nn1Z;YD2nb%BL!?AzSg^3$V6 z%I-%5FC_53p4Wg_vvw_LtAu?ug{`Ar6jyXK`l+haZCoa%)^u<;(Zv#h!V#pN;H9AB zCu~sN5q$yQZD;&->{A-TgoKcw8HW$f-GYp5J{Z zZ~@_=&JJvNLu1+@@AWsU?ndB-!YWkQ=2veXD{i+?dQum0{8LMiPr++g`cfSubA( zLXcqEzwqZP3p#{gaO@Z~+<3yF5W3}Nxh|Q}B~M)Y+(dJX36F+J>sTOHhTMdOg>O9Q z^RIPSe+~~Pu2AHvX}nc_WLWak!STp0^zPq_y}4EcwrdNIiUOJUsxK~bofDkN0tdj7Azf@PD@px_A=Q<`Gx?Aggf0Mj z+wYW!66j2%`GHT^E#gp1GM6jIbi6#nLfAL}mg1>cz1?#R_Wki@oQAA@f&yAR9PL^nohqp&J!461Cy|(u4&fJR2k|=P9I{9Ml^hz~c^bkXQJl{Wvx^sE$ zpVVXc0xT1E>UF*V@p(G}=oG;9qzNZ2jo$RUBd12!>4KD?VjaY;CMIa0+^slv_zQGQ zerpkHY!=mlv``y=!1W*R3W;sRH#ZF;!~la|NyKPi47uQaTcEtIQ!ZXu;ZuwW+`J3jIFo;4y#Ko)|^|7p`oQAIjtAUG2nS^&#;lD6lO!u*=GQ zIF9*v1s!Al^`-R#s>XOv3A>U5yGC0AQd0#_9C^E}mnA0Wwd@Dqj+A04%{XKoQGYZE z>f8~>m#&^3G5OE@-lgA68lT*x;-A|S>ZL55JV~X?Xa&K&O3L5Gd*<_T09KsY_UPA# z)xU!$ulYuwHh6PgBeZ@C$=BDn*#0>EPDu*5o6Y6YUcUVAh(qa)gCG<<`mbZrm{kEK z)#W;Y%x#Ew7@78Lzu?9|OZFf!kpV}&-PAF0-$5lHN}mHxwsOM$l5kta zZ-N7r34{vl9PL7kH207w1%ijQDM6nJPH+}4H9fszb!W}9tL^M`K}4m3>}Dcr11Z91 zBesM_JwyC-W7)cDBxexRN5&|r>dK*Vd_kwydBk;Z+g<6}ey*0UEfbxkYNR9VPLK5V zNV=rgx*;wOy3uOglp-bn*sQ-r9h5eW9E$jz#_DQ`}uz zmVHVXGIZXRx9zp|LP)Bx#txfTEQ$-KpCwH-$f^*x*R9!~rV}tAK{#`9C=nnnp#aXz zq{AX6WZ+nI5auwOYV!MVpYYK9MLO4Squ{;160>s~Tg$eqB`*8!(S$}MEXv=YQ@ z6Fo-+Sj@-+GOr4x74`R!iIzv5rs+wI?w=+=qtNHt5XhWXQ`2C{PLg3Y{iiwnDLnA-^qu)QBw!M zhCcUDMwr@7OoXQf-~Co;71t3V<;2&^>pU7#Yk2C{P?x}j z_0#Pmw0iE#fA>OqeecPG%j#ocsoK!!aO~u&scnAfl%%bt6+}#0XJyT@dzOb44+2D) zwPHJ8CL#9YH@lZWlc|b}X{83O~EQrJQA#q*7$;x}*I- zkjUQ$`Xz4n!9|@@o=idU`0e?57s1~%xM<@4<)aykJG8(6z>UQD`}cKAV-hKXC(?&8 z(tBfr`S#Psnv(_6*Gcyn6DyVfdVL!7s~y_R`2wm)f6aX!TL z;YWl?TltBIh{zdnguCk;AtRY4zxWT(;+Xt3<9@{F4+p9J`|zTs-UJ7RagmXv?Z3xG zDBk2v-xEy|{LO9I%GRtu0gPs601r(DSXe+J9}VasnpK4Elb5QxgSBa^@?gOA3zS{iBFHj-Lne5r!}RJmhQ z`~4RNVcQy&Tj_Yp#eq!l1!e>XinN~v8A7c+&*T#WRNc9C48$*bg4#|k7S%!q^}t;o-$*%)cS#rvN#IDNLa1OfRGmn$iySKFG#;N~8$>pyxf_w`Rj4FrhSOCrO-_C(g z2bQxyXizicFkitIem5n`AH-Ed)vuIPeUYB<$@}kpJbU&iD~A|fw^I&T{#o6)i$EHX 
zzjSbj4lTh)L(ppS{XY*j^U{I@j-LQ%cKjf1T00n0a5+19`n&BBqhu0BtW-1&HmrOc za&pWxy``Er$6NOqBmtprWn!}7euN5OI`4hF=dd%fgX1s?0vfZA|M#wgaVU?A_K+1> z$^i!l61^A~9@_w;`k^JM9X5DV z`P8CPi^N776pmQep6b%q#2ZBE@}!Uiu;2P$+j#10PF-4{(BGggrTc}XGxXooBIUC` z;}r6`Ku@DOQGtFj1M`U`t%8A2DmoS%uM8+UIsFpzdlmWW4F)%1t%U+afSUHL+~|q# z-^hy>ECXWEqV-B>qF>rsD6O?>U62hBurlV_d%=P?gKcQgBrWyL7Pnze*e57;Ct#B^ zUhJg2q};DNq2lA%X-RvLGH^_$2MaKe%6p(~SkN)oGC+d%|0>J5qWbiKI|l>_n6^V^ z`mCwTjQ{|^74s?%6e5X{JP5=)?z-P_PdDnFO>x+t{4-v6Ei@&)S6`le5-&yVf5il+x9)(Uy(Rq1q*}Q5W z>ewHymn7CYjqP94sN4|d7`M(WOUznt*?g`cg5CzKvI0B1_P`eT1l*XIIL_uKLL#OZ@OftKYbvf>%ZPcf08c=^M+Bu1)zXsmisloAlIA$Y z(VqR7i!Ji%X2r^-ds7VO8wL@ff+H+a~A|Oe?YaOGl;m4xT(*qN)Mh) z0NH*7;2tOyqK!&>sx<@zkkC=DxzcoX!y%Dl-O$m1{4`GKfP);j;u!v!c6@#-`B@w+ z*cDbS^~W87LE-I2i9rDKBd2p(&DJYFZcfL#b<3b)Uj`upW)U_^E}~*<$cF#--k-km zL9sz9lO>Xhc*` zo&>?txv~uy0XWx=+X`?VqQA3CB?8x0;9-ga|IJ%qm1)@ z2`MR$hhvVp3K1#D$wij|Pv?KU^fSA@lJbhZhd}4IBYMaE#-yX`SC5}r;}yckcCq6H z9(UCX+U|0mw z=`dJFkw6_vJYK~G=l#V2~r3*u`-RmIcet57AKL`9*(4Y{{Ag^i>dXls)ryD0V0 z$0M`i}pWhO`F`D5Wu=u0K?)(QNuIA*As!Tx&1n?N_=jjljjflXV z@&1jMayG|(521jtRg(6mQ)|UyJ*)*J8u0 z;Plg)P+?(vHO*(D4FeV39E)co$FpRw+T%(Mp(&H4H7Wfiqpm7WJH?*2ZTdVvL4Ff+ zS+fMl-)M+;v(-VR(DZy$YS}$%{wx4CwVzjW*AL;&GHLHsX<_I^kUyJmw5CL`*~grE z2r&g<<&W;8^V#BA?KF3O`d?fC*wwDu_T7&AsG+w)27w*X1nn$vQcLQTxT`U6b;4T^dhyaOY8$+-C zGt4rNH9edntp;DokIjzR^5-9{Tm&BrZ%=L8Lk+=WHJ2B;kAu;PtEy^eT{dJ&J@7Tb zcPYpo8ZbT1Og0+HoUY%sCMBlpYqI@n>F9_N>R}}GYDb-&F-Z^9KXaqLGBiV%hIZ5) zMeKDL4H{fX4L(b9;PXfJOh6FXwS<9$&6szhG!O|$Uh4rN0yy%o*F4{(w1>C|+=Bcc z%1yAb)Zd+x)iZ$M$&~~(x;MUkckL6qDL%q?+J$-f=hFQGc=>jwIUW5AOMg9}kxLVM zC_TGk10FW2r{{$N|AdL?)Q)iPe0({ti7XOOVEpn8k?%axOSd-X=MkWVV#gE32}NJn zKkYGfz4r^O#{(fV9Is1yF6P>fq|7nwU7e_jkn@r z`Y+=?H#LX-k_vJ(Y!74pj4A&74LI(B+ykQS*C7RAC~Q=6nR%>de3hRl_;xHd+nw0B zA(jIFyaTD%6W3gO<#oc^VojVIJ)K(TlqQ8b@(eHYo7yR(q`DRo2xRLS+m6q!53m z2VPgFt9uxA*XP1D1qzX4#j^L4SS-fUli#BNV*255L#t$_l^p0JAETn!S9trHbweH) zF8>|jy>ExtpVDTyabyXq7@wOV5B@B7i-{#+bg)ov+odWj*q`1ZI_g-W=XE)GReA^+ 
zY=oiVpVC(HN)A)1rhe=*%c*pb{qWi{0VmP}mw+e`3=#kldw*XxR{wC4E07mIJTsF= zeQU|@X-z(jw>i?+HeTx$@bauw8gw0_uz` zyT|2Iu^?%x5l{n+6p$~x{U@`Hgda7WoDe=5Wt;lj^RI$k6}Cx3x?8epSYlWQlXhD01z)6V^(OQ_nUz7JH|F2PpNGX9Ys zDUgx|;rSv~6aY`{(?a~JL12WGfWj0{+3fOUF#_he{c+y^$*Ng`Gv)NI`1`ps;iUUv z|9av#C7BI{uO5)S1{wn01r_UG#~^;kQ?oszn){6+qN^#8n*gwrb#=%?-1P4JpmPYX zYG`=a@4Rv|^n{$liplMGUHR|P?<>%ql=L|Y`fI4Ob$G&1I+NZwWTy3{)*3LDcu(l@ z zUW{X>C)+cOu}*&{*PrBW@K{ap@1+|R>@5e5|OkG4U7=hDBta5JY#s2`vAq-l8p*k|M4 zConx<@QQ4fBF7U(^jndQS7Ad(19|Z68ZES&w;nakQ6_5fAWsun zYY+nMd+4-4eVs||=rq^fGCr9#K z`u?!>f6l499Zn(CGF%Fdkb2%tLO{-yeaXR~cuxt3$@d#?I$b}AQ<#osppya3L!W#D zTm>jA|Kj1AjP?1HpjRiGr*Y7fmC_kpUALh>IUvJfIg*L6JyDu3YwBY3#bc7t?Hg zZrUx8b0kJx_4R8pi%ZI<1Jh(MeXw2En-vA|GKn*2Vyn0@c7ACWpUB86Ty$l{*iK*2 z>42_1jFQ#aDS=~5@$XrD+0{?@9R$J4Qzf$jgyzT7&xsq~2cZ!LvM{zI5GMHgc_Mt; z%mZ_WvQZEZm^A%V`WQ<`jkR-EIlQ+7{JJ#|9Ei6gwT`8ZnsVhd8`B~j@Nnxlr;E3} z+s(NwFxa4#i&xs|!*eETtnBOklk} zT@EyS%=t;T-i>}xs%Y(X_8;no8j(3~qWzgoQiyo5PIy_A3^M-XbZ^XMWIlEQy%~27 zzY#GYBA*jB&GhdcPR`qz(yH^a=f=fAjV;qUO4Hxtx@jG|D!+b6NBv^KK3qMd}{fZ63X-RQQiLH z54SnG>*{}i0tZ15dchCX77z#!O#s~iujo~_V(iH9Fk914v=p5>Hj`;}1ad|?@DBrk zHHw6R;}S9bz;r2esw_8NqvGgnuGSSFmKbvOkAcA>L4=)OIr;#i1S+#cL|-gHlSX_z zu(4gDPw+5ycY)u4=>a-;V56rsi6jYlA#H1fbacSghg?2|_dN(1N{n_5pve9s7#l(w z$&*Pj_C3koKR+Z2^%gll5e=>XWAUEbEQIiD#X1ds5wV0|V}yFN)fTo9i9AS|RaF%> z@<`yffIJEWC0^GHG+)#kVAA;bu$pv+yE`j6Q`|>hR_-&73#*Ql8wxb66FWaNfKKT@ zR35eB`8s!OhzK|iJ5+tC&jNlDvc|csG&-}&2Yh@8j3c2eBjWffy#}obNlmo32wno0 zS4xPGnov_?6C~$h;dEchZT}YZYvOAc7neVtD=JC_Mg+jADhYY}!wVEc!Pl14bh}zI z_dgsC+)MzV$&&XOVNWw1eK4R+hj$xi+;_HSp8Z(0vt^(`gCecs%*~6N-8a%!^eh9^ z&idJ#4i|iew9q!921{%32>>Jz8Q&ll8hd#_CQ)uVhK1}Za37QR)C5ka&;9V~4HW_H z?|iDUZ2pMQ0*cMstx^v~CAKULtg?7xXfVc@*aOGhV6{QBG*ykr;M_wJwLm>TS>ati0m}-2}~HH7IL-WhEnX1a0Bj zl|)(l{RdIXSP7*QK!a$4IgBOFHa^gv1)_TsLK(mO?VztlPF`2bc825&Sl~xkSV%Jf zFG>!Q&oE!zh?F$9!|}vE2Fe*t9HuY{5-81#g|_Jq$Or~O31dRw;hzk1TGo6Ey=GN>R{QO;IC!oSf5U)BT9s zg68NVR|Mn`jabB}K;eUgzGavkKK8-d1pX!_^4Ac+^hTaH6saeT0TD#*VO6C_*f)ec 
z)>jW#>?V%*zJs$f>wYBxF1Ub265Y7t+hL?OFe9r2NPKftV=S^#1cC!VDiTYF8^9%s z#A$?ft;%sm(KmF!1lRy=L+aw-W+6K^AV5W*10l%H!Q@Rr`uFC6XF)Se>s#LzbTGx0 z$-L(6y0yGwi{|XkitM%fnVgxMKdR__&?A3ltM&JcGJ(r5W!mIs@FyWQJg?`>mQ9RHqZ_j4xK-sv59`cAH2a2 z9rtZ3QUZdXoCL@~MA-?@IZd4Uc76(gAIdhCJiL zz{nrjqLbCLDu2%eXN7w9RwGhr8oC)W5G)B(D%0F4&oZ&uE|i|{d{2;=^pv~YyBSYB z_YO+`UZzxG=b|3tkh`_%X3zd6}4(g#R5;*#(ID zf{tq6n#)T+Pegv!_f0LQXvV$kybWx5hGzC3;lr9ULhguU9{Sukw>_Oyk#GqAURlJw z`{p$|=-!cY6Ceh1jn~!!{7$Bn$T_C~MEz>1~NA1!({*OKOdM;Oa&M#tI zt2fFn;b>b}gk{s=`pPqR!~J-(mC0g5D!jxj8p8PCqxZk2xvj5?^w_VL32Vyn16t`> zc1A`9)BaS=o5&mF2318yO03|cPmBBJJFaQpM)pj@i{>>!5mT&vMd?(Z22ZPcNT z_KOc1R&R*rOPHAA&Mif{#!Pw2lE7q_Y2d*t;Sa;VS*aB1v*nZTq`)zu2MsU1@Z==b@S-|N(e-0cJ8 zO+@4L&5_EBr^PzoZtP70(DM9pf%_kD;4Ni~87};mh1Zdsvt3}oZEE-BZS>DWvajXnpFWVl`H!?jm zKWmkYSo$sh+gC3uD_f@epfAp(o5^~zy0phwx6w7@XWuQw%*%8EI{l@FY&v@MQ7da{ zlzu{cwX3~l+4yeVtpnzK-x~FR&#&md{N2)`T-<}!EQ5~g9ql#bu;!vmF~R1gTXHyu zq>5h*(x`Ypi$>z&q+988nBmE%hBYl)sFixO-C++T!^L~R;8+jlX=MekX#ykV%P70F zuApsb;@U&EVU5Zn;ii`gZJp)c@(TD1jTSlWno+p3TovVAuaBZ-V`+p~&sX1kIl8<; z`F&jDCC9Q!!;Gl}-NUam+^9^m7_<~S8}1hu%BCiKkh2DDYmLhECo@@#mOv>MTiXB( zV!YHg<>iXe>(ZX(V)e>U4INHzl(MAQ*Z=~0*F1Xk2clnYC`Jsjm)r0iF0whe>4=5m+4agZ|;iO;G;!Af*FDk+UPDHu+Ol{BpqoBX~)7u$YMRSYS%tR`K zuD-#>p_eEKnc%s~k+Ct;n6_ulKO@Je)B%H+(H7o2woY$IA0l@#$jCtKEJn4AKl5S)_YVVpU-Ub^;yA`X>)70M#ofci?EULv+2`Ja zKuPYuO(Fx;PMGD23V`y30uEJ!k= zP$y0|XIq)t0erJxU_h&152S| z`C0av>%U2Q$4jPw38~Ad;9*wXCMA`&v0>D~L>MQK)N-*>Wjzbp{TF~=`eIR_A^ei= ziC&bM-kh6jU{+j77?~5B`oR~j6jApt)f*OCVMz5Jst=IFtY)j~GQ06$UhX5&Ag1>T&u;aBez3T;3mRu^gS9fnG=ILREmVJUKA;Gp(#(YN3^t zopCC?_si%jvdMmfl$?7)sTi||TwIuP-UBGC$sI3Akt9+;M``L^KTv;kO^+bszh@Qc zW8mEv*U7ZHyeyq5j;BZ|=@Y$Pq44@km%kj$s(J`|(^Bw{vg@Zt_SemkjqUOBuTL%&~y=y6Ap!NGSpDl)9EuLkH|T5y=z zy?<5qz&@G#=~H@IB5|OpNDG=ceXWk+0T!%G@@)Z^UpU!bp^0oMwjI6m`|*K&NQ3F6 z=f~EHf_SkSp3@_{IBt~}r5`jriM;CbSk>r;zx1^KcDMW9R2!_f^ zbxqwly&(*}OxETmIdcrER0^H+Meu$@;9UnzbMuek6*jvDCN-H7swiQ76nYx3;H#>{NFsFgO$>*Eva}>+Oc$ z-#r8p@WQje=@{f}Ya~VHzR4_^Bxq?{azEif7d8yaNOl==34t@ZMyV#sbQ5}6w#Oi~ 
z2gCuyhmW|H@4kLbGH;1bsH#w&<+61 zgw0sU^HIHGBP$gI{MSk(G`8c+Z>YGksEp-IByN3YzsrB#pDKtP20`<)gZgCrDZ*#0 zPaCGSwgI2$PgPEn$@z^VfHPz9j3+}$pB$%F8wYYQTs>mNri>m=Yh{dRre zzC!YiN?5pY$5pwxE|Z3)!R0*wk1n#?CV0z5J zNm|cJmCu1o%K`MVR(5LeZ+z0%ti!>W?^!VGrbvym!_;ZE2% ztX$n#{mJ|76?+o#Z1KikZMZ3xJ(g}?{HbUEwsS*csbPMF_S)&gANs-=vHM9Xqbt4l zDJY5{FL`@$dg{4Y=b-2j%MxW&kpDxgM#e5fLWSo=uO=GfWyRmeEBeB@L`Ty!G(YK* z93|%(%*RWAdI{Pi+{!vW$(6ZR4dyP|7~@VwF)?PJ$_`p+EyexViSmhKn2Hgzn1jgwto{<<;w zD${9ucJq)z?!^l(s|B=TR3VF5Z(Y{T%W1TNHqS1_!* z^QPXT0PL}g-48K^-bK_-bxEsz>)_&(z~dgCEuU&~aP;JGBL}P`*9JSDm_bnQI$gMX zc@`sgUb^RpML>|lEf2^@Ab}nBW{JSva1Ym{>_@rH^go~}tWbHL{jZ|r4 zK{xt?=GC$H+rI~9Zo90k-%N`EtrlNjCcMt;RL-F@l&&qhO3jZRE>6`s+21GOvGs+U zFAhNE*r)S)_ZbV#VQ!pBcdukb-D-rr+G9WK1>|6K-N9pWKD$hC7aj%$Y6jbb3k6%- zHV%b9D%09%dF)OtF2$zRkSS$*7iq0WB1{{OQ$~y5s2M+22x&{NDO+`UtYvT7Kgbh+ zrQ&K@xAP0vabr;X7?Yw$$fG>s?%esHbc8KR+&9mI!4DUf5eN`Xw?5u?R?LiECYOOz%tNmAWam&Z#TW4=*1_m zypaj#;Cl1y&NAX?QXPQ#_tTYQinN4M`|R;`&8B;YFtxmDMys>3-^h1rxU$OE1(u>Bbf(kaaqD2#X4PHpfs+rBA- z`mRFru{88SJ*G~5+{f*qJ5s!Mo}H7!NKM^ZzI>r52xe#p)hzG3YmO!@4u5_g;yO0# zHydC{BFWnM6&d0s#G5SdX}|Vq>@4K5gHqefMhms%cmVb>?iinZs^we}42Tju*pWIc zHgF>w);^~z?O>L8yhiQhUJ)#)Qfa$c%jk9igf~@(I;ZRq3x~u?`0kbG=f9j1Q3qRp zM`Go!{{Iyt%#*zaB1}Ku_z@()WSWM{|+PuO9RzMXTHQHuA2i30JGBae7PCg;YAoP;q^?D48Oj|}bA zoT$m2!GP>gjlrh&3HnW_(#|26aDa`!N9Mr^Cuj?hveeXoVQ#)*j;^o6UnLdb%M<+ zbPC^H#RAJzcPcqLOWxKtn=fTuIdx##bR}`cg*KElM~#q`yLXp2xsT*=^ZS86f5ueO zqRt34?*Vk8vMf8hjf1}#6`BT6Y9^S?ze^1D|sK#GJ zjpe}j7MCkp1(yan0)dGoEhhSUPcMk_Jp*bdO=y8HULizE>~-W3rhy(bw~jh9YCWbSgQ(g)E? 
z4bl?0ObZUSsqZEhcFAm#pF9ie;bONu8-5}Xtapl^*!uuyQ?OA1Iif83^XClUhvCgr z>Vf_?n=b~4qibM6dZBoMw5qffL`7WWdm2@)N-5vE==$=MOL>n`I)weV9Ptwl*cLc1 zbDzjL3Yf@xS=dpxB_qz=-TR9JL}cd_I?J55by6sA{uQycEd`?4eKN8F(1@U-BD_Y0 zWd7+F`8}qdNeW6P&OXR}x6>pFF(amTrS|LIc}~yay`7SW$BBPk@}8mbv$86HkLVq2 z?BjC^Jviys)>deXSVYIhGD1_-TA-<8x-}KX?$G<(M0kx0{T&moX%G7|X*6o~iGfg= z;d-l%pU=O{w6QT>AHKwr#9vs{8!z1dy2JjuUp;%g^X%lOg6jNV}z&9 z%#tbl$*T@W0}_R}wUc0it|dWR!G^tbRJL5ad7jy)BemMu9-NH7@*kGAcBoLegFt9m zs8g>A*{B*6bJ1^6DCz0wUdhXY5XU$4j}tt;=9J4`r_rSzaz>d#6O|S6;bVe7t2>wl zUlRBC9M2BvUY(`Wtk+jq48y6x#|&C*myN2u_#Q&PSpMYM8`$q-Upx29sZkW^%%J8s z>`ff?3tZ`rJgua8#oU{^% z@Cfkmo{J!wu%{#~MvM0+p3V@>iOY1xVQZQxqq-fx);R&$IQM zb}|)&xq_8d5%3e?Ow+wC&v0=NJ3~i`zsWs4oCI3^h0}KN+exqcwZIxY5nJe_=}YOm z>$`J@9>zu5hWBKXlVY+1B*XS5SCXTzIgOJ%G|=+*PL)S$Xr#9j3hdvVl-%~HEM~!I z91dZJ~?O47P`Pd*cif>-Y#TfE9_V`!-#cIWT*4Lh1+yR}#pwr=lQ#8Sy4CbEm|^36#y z*SIaYTn6gfzq7`#pXy4>cw6R<)k4dZ$aa0|+$ptvO{xF-!TtMYFa#CmKBKAhRWa1RUmllrM>Ms3-+n1vf$5 z=PHwcObgfJ%LKCX>_|DRl5!XDNl5;dd1||FR%q7erFR`9o$epNfok`Q>I6PFha4b) zfuO^kC?_f^O2VcmBH~HQZKG{9UzKXhpX_!tVG6rW4j6{Zr3lna?!Q5)E{8bL3wA?J zb=r{klBl$CNdITmZqAfRmX7HbA8w4AwQpa)%27!}UgXK+IW=AU(HLmuMvj=PK(67a zbCZ929tD%ap?EOgTw<%yRD09?wCjHG_HS|T1U_Yj%Pr^Zpzd7doMD)ato)M>ez-=9 zrC4SF&Jvi6QA6vm!Kh#4CCoUdzDWcLBUt(N#&bx5l0~!fT|9-yj%dTF595G~+u?|s zan%%Z@5F@pfN2O3M0rF?61O(voJJOi4c7x_w)RaqSr4`vXSs|~$-G}YStb`q%P*>x zxjd&GnXu(F`z;pb@Y9NCxn{J^G@aY)A~MowBpN3pr{aC?0*Cu(>OqGFM;NYyqh~#{ z`=w*pw=ayFbB(%S^BX<1J}h`Vjc?OgOw)Nkt>Nm-VhqN5Xn_dW$#T4;`R06w>qcSK zfBp|cooc`lNL~a%*tt`Ey~pYC)beT_SX^$nC`N98GhcfxjPmSTJhWf!Bkq>` z=Y-tKcF!~dS`DXu|5h?Q1w)u37~Hbxm@~A_z{=_@$mvm?yIAEp#XZK7*n8qR)7U?D zdRhbqjUiVJx~r>o;i|!5i@xI^Msj@zQ|jm=#=!2n^oQk!>exW@ime#b$ zFXVnyVDbB>q+s|4xKkIw(Cje~9cf$t*p+DV!n!q$v(^RmULj02)_4#i4)8O33ihp6 zWhrxD7GeQ(QCR!G7bAozUVX$tq&0l?M4NvO4K+=$ga;Wp0V!!d7!lw8XPAj;3SvR( zSJyBIDDn;l#vr(@ltcU`7xvQPnYQ^b#_NnwVPs_;gPAB`aIXp*FJNCXkdIKYAwK=M|_jnm)6^ zfNI`tJuB9zYRb4mGddw54;B{c4R|OKXo#GlT)<^ETCs({&_FPfJ)UvaZ6d@eBE%@6 
zxlcgw7RI~6{-WgKQVmb8xFJQgSjJm$qF`WTRE9vo3Bz{9#l&!k5NV!hR0xruvSoInbwiEL#nvG$3Ix3|R;}uRI5Z&^7Wo^uv0lTXs=k9z5x6cjXuGq# zvO+TU3r?c|>`#3E=gBo4HkgkvfVYgCe2y|C#3bM>jpWr>8@wvU)5}fI7Y30buq|I} zXv8x;2mv4UY-pZ&92^wkDe3i@dV>M{~H=`gXCDG7#?BU7p>ggE< zFPm2ha_Vu+oi?4#F204hVnSdkYh- zYNa8D0mYsKEiEs)w+V z7Z=S3R-3V@DQL)L0TY9j#-kGw5;BzRJgZUc|LikD*LTJ$w%0@%a!E)61$FLXV8G57 zJz_pGqNWS=OBea8I2~8l8u=7~Tv)HkytdDg<=zQSxjGX(qDmefKfrU7&h94i-A-^J zw18L$d?sE(!N>qt1FmrgH-um~&kXp%gQ3)`FKLkowL*x@APj(y2-pONTPMPs5E4)* zQu?ahb>~t}5r_dHR8$Rk9tKBY-F|@KdKh;wYi?zlbXsWN*ijgz(90E&RY?*pAa_?@P|tJI}R%H9|#P#|GdyN{paPs z-xfi=_n-g%e|?h*80/tcp default_centos-httpd_ec75a2fe2a50 +``` + +__Runnning Apache on Kubernetes:__ +```sh +▶ sudo atomicapp run projectatomic/helloapache +2016-02-25 15:03:04,341 - [INFO] - main.py - Action/Mode Selected is: run +2016-02-25 15:03:04,343 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to /var/lib/atomicapp/projectatomic-helloapache-c0dd79b5e757 +2016-02-25 15:03:07,983 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-02-25 15:03:07,984 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to /var/lib/atomicapp/projectatomic-helloapache-c0dd79b5e757 +886e10a3244f982f3302ab9058ab7b377c6f83e2cf63f001e1ba011358d0b471 +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - Using namespace default +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - trying kubectl at /usr/bin/kubectl +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - trying kubectl at /usr/local/bin/kubectl +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - found kubectl at /usr/local/bin/kubectl +2016-02-25 15:03:08,332 - [INFO] - kubernetes.py - Deploying to Kubernetes +... 
+ +Your application resides in /var/lib/atomicapp/projectatomic-helloapache-c0dd79b5e757 +Please use this directory for managing your application + +▶ kubectl get po +NAME READY STATUS RESTARTS AGE +helloapache 1/1 Running 0 2m +k8s-etcd-127.0.0.1 1/1 Running 0 1d +k8s-master-127.0.0.1 4/4 Running 0 1d +k8s-proxy-127.0.0.1 1/1 Running 0 1d +``` + +__Fetch, edit and run Apache on Kubernetes:__ +```sh +▶ mkdir ./localdir + +▶ sudo atomicapp fetch projectatomic/helloapache --destination ./localdir/ +2016-02-25 15:35:41,439 - [INFO] - main.py - Action/Mode Selected is: fetch +2016-02-25 15:35:41,440 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to helloapache +2016-02-25 15:35:45,067 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-02-25 15:35:45,067 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to helloapache +c12d2047fab44f2906b9cbee3ac86c6c6499921ce33a90085e8765491b44f447 + +Your application resides in localdir +Please use this directory for managing your application + +▶ cd localdir + +▶ cat Nulecule +... + - name: hostport + description: The host TCP port as the external endpoint + default: 80 +... + +▶ vim Nulecule # edit port 80 to 8080 + +▶ cat Nulecule +... + - name: hostport + description: The host TCP port as the external endpoint + default: 8080 +... + +▶ sudo atomicapp run . 
+
+OR
+
+▶ docker build -t myapp .
+▶ sudo atomicapp run myapp
+```
+
+### Quickstart: Atomic App on Atomic Host
+
+__Running Apache on Docker:__
+```sh
+▶ sudo atomic run projectatomic/helloapache --provider=docker
+docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache run --provider=docker
+docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache run --provider=docker
+2016-03-01 20:54:37,617 - [INFO] - main.py - Action/Mode Selected is: run
+2016-03-01 20:54:37,618 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-a68057164f09
+2016-03-01 20:54:38,357 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache
+2016-03-01 20:54:38,358 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-a68057164f09
+6eedd332f9938c7b4bacca694fdc77309ca5b43aabb05a1cb644ff8a0b713012
+2016-03-01 20:54:38,558 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found
+2016-03-01 20:54:38,558 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found
+2016-03-01 20:54:38,602 - [INFO] - docker.py - Deploying to provider: Docker
+a98d9a3305496803c38a90a9ef65c52030dc23dae4b04f36ce167ff98335395f
+
+Your application resides in /var/lib/atomicapp/projectatomic-helloapache-a68057164f09
+Please use this directory for managing your application
+```
+
+__Running Apache on Kubernetes:__
+```sh
+▶ sudo atomic run projectatomic/helloapache
+docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache 
projectatomic/helloapache run +docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache run +2016-03-01 20:58:03,396 - [INFO] - main.py - Action/Mode Selected is: run +2016-03-01 20:58:03,397 - [INFO] - base.py - Unpacking image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +2016-03-01 20:58:04,153 - [INFO] - container.py - Skipping pulling Docker image: projectatomic/helloapache +2016-03-01 20:58:04,153 - [INFO] - container.py - Extracting nulecule data from image: projectatomic/helloapache to /host/var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +c85cbb2d28857f2b283e23a72a70e077daceeb2b72f6964605af6f7efa8fbc2f +2016-03-01 20:58:04,387 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:58:04,388 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - Using namespace default +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - trying kubectl at /host/usr/bin/kubectl +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - trying kubectl at /host/usr/local/bin/kubectl +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - found kubectl at /host/usr/local/bin/kubectl +2016-03-01 20:58:04,388 - [INFO] - kubernetes.py - Deploying to Kubernetes + +Your application resides in /var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +Please use this directory for managing your application +``` + +__Stopping Apache on Kubernetes:__ +```sh +▶ sudo atomic stop projectatomic/helloapache /var/lib/atomicapp/projectatomic-helloapache-89e975ea7438 +docker run -it --rm --privileged -v /home/wikus:/atomicapp -v /run:/run -v /:/host --net=host --name helloapache -e NAME=helloapache -e IMAGE=projectatomic/helloapache projectatomic/helloapache stop 
/var/lib/atomicapp/projectatomic-helloapache-89e975ea7438
+2016-03-01 20:59:57,067 - [INFO] - main.py - Action/Mode Selected is: stop
+2016-03-01 20:59:57,075 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found
+2016-03-01 20:59:57,075 - [WARNING] - plugin.py - Configuration option 'providerconfig' not found
+2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - Using namespace default
+2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - trying kubectl at /host/usr/bin/kubectl
+2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - trying kubectl at /host/usr/local/bin/kubectl
+2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - found kubectl at /host/usr/local/bin/kubectl
+2016-03-01 20:59:57,075 - [INFO] - kubernetes.py - Undeploying from Kubernetes
+```
diff --git a/docs/start_guide.md b/docs/start_guide.md
new file mode 100644
index 00000000..c47692c2
--- /dev/null
+++ b/docs/start_guide.md
@@ -0,0 +1,288 @@
+# Getting Started
+
+This is a thorough start guide that shows you each detail of an Atomic App, teaching you the basic commands as well as how to generate your first Atomic App.
+
+## Basic commands
+
+The __four__ basic commands of atomicapp are:
+
+__atomicapp fetch__: Retrieving a packaged container and exporting it to a directory.
+
+ex. `atomicapp fetch projectatomic/helloapache`
+
+__atomicapp run__: Running a packaged container on a specified provider. Unless a directory is specified, `run` will also perform `fetch`.
+
+ex. `atomicapp run projectatomic/helloapache --provider=kubernetes`
+
+__atomicapp stop__: Stopping a deployed Nulecule on a specified provider. Whether you're using Kubernetes, OpenShift or Docker, Atomic App will stop the containers.
+
+ex. `atomicapp stop ./myappdir --provider=kubernetes`
+
+__atomicapp genanswers__: By examining the `Nulecule` file, Atomic App will generate an `answers.conf` file to be used for non-interactive deployment.
+
+ex. 
`atomicapp genanswers ./myappdir` + +For more detailed information as well as a list of all parameters, use `atomicapp --help` on the command line. Alternatively, you can read our [CLI doc](docs/cli.md). + +## Atomic App on Project Atomic hosts + +If you are on a [Project Atomic host](https://projectatomic.io/download) you can interact with `atomicapp` via the `atomic` cli command. + +Some commands for `atomicapp` on an atomic host are a bit different. + +However. Regardless of the `atomic run` command, a `--mode` can be passed to change the functionality of the command. + +`atomicapp fetch projectatomic/helloapache` + +vs + +`atomic run projectatomic/helloapache --mode fetch` + +`atomicapp run projectatomic/helloapache` + +vs + +`atomic run projectatomic/helloapache` + +`atomicapp stop ./myappdir` + +vs + +`atomic stop projectatomic/helloapache ./myappdir` + +`atomicapp genanswers ./myappdir` + +vs + +`atomic run projectatomic/helloapache ./myappdir --mode genanswers` + +## Building your first Atomic App + +A typical Atomic App or "Nulecule" container consists of the following files: +```sh +~/helloapache +▶ tree +. +├── answers.conf.sample +├── artifacts +│   ├── docker +│   │   └── hello-apache-pod_run +│   ├── kubernetes +│   │   └── hello-apache-pod.json +│   └── marathon +│   └── helloapache.json +├── Dockerfile +├── Nulecule +└── README.md +``` + +We will go through each file and folder as we build our first Atomic App container. + +For this example, we will be using the [helloapache](https://github.com/projectatomic/nulecule-library/tree/master/helloapache) example from the [nulecule-library](https://github.com/projectatomic/nulecule-library) repo. + +In order to follow along, fetch the container and `cd` into the directory: +```sh +atomicapp fetch --destination localdir projectatomic/helloapache +cd localdir +``` + +### ./localdir/Dockerfile +Atomic App itself is packaged as a container. 
End-users typically do not install the software from source, instead using the `atomicapp` container as the `FROM` line in a Dockerfile and packaging your application on top. For example: + + +```Dockerfile +FROM projectatomic/atomicapp + +MAINTAINER Your Name + +ADD /Nulecule /Dockerfile README.md /application-entity/ +ADD /artifacts /application-entity/artifacts +``` + +Within `helloapache` we specify a bit more within our labels: +```Dockerfile +FROM projectatomic/atomicapp:0.4.2 + +MAINTAINER Red Hat, Inc. + +LABEL io.projectatomic.nulecule.providers="kubernetes,docker,marathon" \ + io.projectatomic.nulecule.specversion="0.0.2" + +ADD /Nulecule /Dockerfile README.md /application-entity/ +ADD /artifacts /application-entity/artifacts +``` + +__Optionally__, you may indicate what providers you specifically support via the Docker LABEL command. + +__NOTE:__ The Dockerfile you supply here is for building a Nuleculized container image (often called an 'Atomic App'). It is not the Dockerfile you use to build your upstream Docker image. The actual `atomicapp` code should already be built at this time and imported in the `FROM projectatomic/atomicapp` line. + +### ./localdir/Nulecule + +This is the `Nulecule` file for Atomic App. The `Nulecule` file is composed of graph and metadata in order to link one or more containers for your application. 
+```yaml +--- +specversion: 0.0.2 +id: helloapache-app + +metadata: + name: Hello Apache App + appversion: 0.0.1 + description: Atomic app for deploying a really basic Apache HTTP server + +graph: + - name: helloapache-app + + params: + - name: image + description: The webserver image + default: centos/httpd + - name: hostport + description: The host TCP port as the external endpoint + default: 80 + + artifacts: + docker: + - file://artifacts/docker/hello-apache-pod_run + kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + marathon: + - file://artifacts/marathon/helloapache.json +``` + +##### Spec and id information +```yaml +--- +specversion: 0.0.2 +id: helloapache-app +``` + +Here we indicate the specversion of our Atomic App (similar to a `v1` or `v2` of an API) as well as our ID. + +##### Metadata +```yaml +metadata: + name: Hello Apache App + appversion: 0.0.1 + description: Atomic app for deploying a really basic Apache HTTP server +``` + +__Optionally__, a good metadata section will indiciate to a user of your app what it does as well as what version it's on. + +##### Graph + +```yaml +graph: + - name: helloapache-app + + params: + - name: image + description: The webserver image + default: centos/httpd + - name: hostport + description: The host TCP port as the external endpoint + default: 80 + + artifacts: + docker: + - file://artifacts/docker/hello-apache-pod_run + kubernetes: + - file://artifacts/kubernetes/hello-apache-pod.json + marathon: + - file://artifacts/marathon/helloapache.json +``` + +__Graph__ is the most important section. In here we will indicate all the default parameters as well as all associated artifacts. + +```yaml +params: + - name: image + description: The webserver image + default: centos/httpd +``` +There will likely be many parameters that need to be exposed at deployment. It's best to provide defaults whenever possible. Variable templating is used within artifact files. 
For example: `$image` within `artifacts/kubernetes/hello-apache-pod.json` becomes `centos/httpd`.
+
+**NOTE:** Not providing a default variable will require Atomic App to ask the user. Alternatively, an `answers.conf` file can be provided.
+
+```yaml
+artifacts:
+  docker:
+    - file://artifacts/docker/hello-apache-pod_run
+  kubernetes:
+    - file://artifacts/kubernetes/hello-apache-pod.json
+  marathon:
+    - file://artifacts/marathon/helloapache.json
+```
+In order to use a particular provider, a name as well as a file location are required. Each file is a variable-substituted template of how your Atomic App container is run. We go more into detail below.
+
+```yaml
+kubernetes:
+  - file://artifacts/kubernetes/hello-apache-pod.json
+  - file://artifacts/kubernetes/hello-apache-service.json
+```
+Multiple files may also be specified. For example, specifying a pod, service and replication controller for the `kubernetes` provider.
+
+### ./localdir/artifacts/docker/hello-apache-pod_run
+```sh
+docker run -d -p $hostport:80 $image
+```
+Each artifact uses variable replacement values. For our Docker provider, we substitute the port number with `$hostport` as indicated by our graph in our `Nulecule` file. The same applies to our `$image` variable.
+
+### ./localdir/artifacts/kubernetes/hello-apache-pod.json
+```json
+"image": "$image",
+"name": "helloapache",
+"ports": [
+    {
+        "containerPort": 80,
+        "hostPort": $hostport,
+        "protocol": "TCP"
+    }
+```
+
+Similarly, the kubernetes provider uses both `$image` and `$hostport` variables for pod deployment.
+
+### ./localdir/answers.conf.sample
+
+`answers.conf.sample` is an answers file generated while fetching. It is a generated ini file that provides parameter answers for non-interactive deployments.
+
+```ini
+[helloapache-app]
+image = centos/httpd
+hostport = 80
+
+[general]
+namespace = default
+provider = kubernetes
+```
+
+Default values such as the provider as well as the namespace can be provided. 
+ +In order to use an answers file, simply specify the location of the file when deploying: +```sh +cp answers.conf.sample answers.conf +sudo atomicapp run -a answers.conf . +``` + +### Conclusion + +Now you know how to build your very own first app! After you have created the necessary files go ahead and build/run it! + +```sh +docker build -t myapp . +sudo atomicapp run myapp +``` + +Atomic App is portable and hence you can also deploy regardless of the host: +```sh +# Host 1 +docker build -t myrepo/myapp . +docker push myrepo/myapp + +# Host 2 +docker pull myrepo/myapp +sudo atomicapp run myrepo/myapp +``` + +Although we have yet to cover every `atomicapp` command. Feel free to use `atomicapp [run/fetch/stop] --help` for a list of all options. + +For an extended guide on the `Nulecule` file, read our [extended Nulecule doc](docs/nulecule.md). From e63f0d9828cbf990a752b4a7c1b0724729a1131d Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 4 Mar 2016 13:00:43 -0500 Subject: [PATCH 065/193] Error cleanly on missing Nulecule or invalid formatted Nulecule This commit checks to see if the actual Nulecule exists before loading. If the Nulecule file is missing it will cleanly error out. If a Nulecule is wrongly formatted, atomicapp will exit cleanly with a line number, column and the output of the invalid line. ex. ``` 2016-03-04 13:03:59,617 - [ERROR] - main.py - Failure parsing Nulecule file. 
Validation error on line 12, column 23: { ``` Fixes issues: https://github.com/projectatomic/atomicapp/issues/581 https://github.com/projectatomic/atomicapp/issues/553 --- atomicapp/nulecule/base.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 17c19a51..39236a9c 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -3,6 +3,8 @@ import copy import logging import os +import yaml +import re from collections import defaultdict from string import Template @@ -28,6 +30,7 @@ from atomicapp.providers.openshift import OpenShiftProvider from jsonpointer import resolve_pointer, set_pointer, JsonPointerException +from anymarkup import AnyMarkupError cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -128,10 +131,27 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, an image). """ nulecule_path = os.path.join(src, MAIN_FILE) + + if os.path.exists(nulecule_path): + nulecule_data = open(nulecule_path, 'r').read() + else: + raise NuleculeException("No Nulecule file exists in directory: %s" % src) + if dryrun and not os.path.exists(nulecule_path): raise NuleculeException("Fetched Nulecule components are required to initiate dry-run. " "Please specify your app via atomicapp --dry-run /path/to/your-app") - nulecule_data = anymarkup.parse_file(nulecule_path) + + # By default, AnyMarkup converts all formats to YAML when parsing. + # Thus the rescue works either on JSON or YAML. + try: + nulecule_data = anymarkup.parse(nulecule_data) + except (yaml.parser.ParserError, AnyMarkupError), e: + line = re.search('line (\d+)', str(e)).group(1) + column = re.search('column (\d+)', str(e)).group(1) + data = nulecule_data.splitlines()[int(line)] + raise NuleculeException("Failure parsing Nulecule file. 
Validation error on line %s, column %s:\n%s" + % (line, column, data)) + nulecule = Nulecule(config=config, basepath=src, namespace=namespace, **nulecule_data) nulecule.load_components(nodeps, dryrun) From 61dbf84059c6ca46cc57e501a60a299157075e2d Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 7 Mar 2016 12:12:24 -0500 Subject: [PATCH 066/193] Multiple problems with issuestats.com For the past two weeks, issuestats have been having multiple problems with uptime as well as API calls. (even right now at this moment: 03/07/2016 18:13 UTC each page load is more than 3000ms. Do we still want these on our README? --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index d70ce9e6..710ed2d7 100644 --- a/README.md +++ b/README.md @@ -80,8 +80,6 @@ For a detailed description of all providers available see [docs/providers.md](do [![Code Health](https://landscape.io/github/projectatomic/atomicapp/master/landscape.svg?style=flat)](https://landscape.io/github/projectatomic/atomicapp/master) [![Build Status](https://travis-ci.org/projectatomic/atomicapp.svg?branch=master)](https://travis-ci.org/projectatomic/atomicapp) [![Coverage Status](https://coveralls.io/repos/projectatomic/atomicapp/badge.svg?branch=master&service=github)](https://coveralls.io/github/projectatomic/atomicapp?branch=master) -[![Issue Stats](http://issuestats.com/github/projectatomic/atomicapp/badge/pr)](http://issuestats.com/github/projectatomic/atomicapp) -[![Issue Stats](http://issuestats.com/github/projectatomic/atomicapp/badge/issue)](http://issuestats.com/github/projectatomic/atomicapp) First of all, awesome! 
We have [a development guide to help you get started!](CONTRIBUTING.md) From 3411ddf407ed4ace2716de52998a2f49ad160149 Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Tue, 8 Mar 2016 07:56:14 -0500 Subject: [PATCH 067/193] Typo in providers docs --- docs/providers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/providers.md b/docs/providers.md index 2058095e..0726b49c 100644 --- a/docs/providers.md +++ b/docs/providers.md @@ -1,6 +1,6 @@ # Providers -This chapter includes linkes to documentation on how to use and configure the +This chapter includes links to documentation on how to use and configure the providers that are supported by Atomic App. The linked documentation will give you a short overview of all available providers and how to use them. From e9bb661c186a1aedd503b7e4707f4e1fd59024eb Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Tue, 8 Mar 2016 08:46:35 -0500 Subject: [PATCH 068/193] Instruction to skip travis CI Added a NOTE in submitting pull request section, specifying for documentation PR how to skip travis CI build. Fixes #605. --- CONTRIBUTING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a7a09e53..c9f5dec1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -100,6 +100,8 @@ Before you submit your pull request consider the following guidelines: That's it! Thank you for your contribution! +**NOTE**: When submitting a documentation PR, you can skip the travis ci by adding `[ci skip]` to your commit message. 
+ ### Merge Rules * Include unit or integration tests for the capability you have implemented From 85f17396247667b0ef04c562006d5babc4fad8fc Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Tue, 8 Mar 2016 22:21:44 -0500 Subject: [PATCH 069/193] docs: fix broken link --- docs/start_guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/start_guide.md b/docs/start_guide.md index c47692c2..5006c7ff 100644 --- a/docs/start_guide.md +++ b/docs/start_guide.md @@ -285,4 +285,4 @@ sudo atomicapp run myrepo/myapp Although we have yet to cover every `atomicapp` command. Feel free to use `atomicapp [run/fetch/stop] --help` for a list of all options. -For an extended guide on the `Nulecule` file, read our [extended Nulecule doc](docs/nulecule.md). +For an extended guide on the `Nulecule` file, read our [extended Nulecule doc](nulecule.md). From fe6f67c8d7bcace1755bbd1b68c05726adee6c11 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Mon, 7 Mar 2016 22:18:58 +0530 Subject: [PATCH 070/193] fix --version output, fix #481 --- atomicapp/cli/main.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 6de39ca7..e86b20b3 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -170,10 +170,16 @@ def create_parser(self): # are stitching help output together from multiple parsers toplevel_parser.add_argument( "-h", - "--help" - "--version", + "--help", action='help', help=argparse.SUPPRESS) + toplevel_parser.add_argument( + "-V", + "--version", + action='version', + version='atomicapp %s, Nulecule Specification %s' % ( + __ATOMICAPPVERSION__, __NULECULESPECVERSION__), + help=argparse.SUPPRESS) # Allow for subparsers of the toplevel_parser. Store the name # in the "action" attribute toplevel_subparsers = toplevel_parser.add_subparsers(dest="action") @@ -182,13 +188,13 @@ def create_parser(self): # Create the globals argument parser next. 
This will be a # parent parser for the subparsers globals_parser = argparse.ArgumentParser(add_help=False) + # Adding version argument again to avoid optional arguments from + # being listed twice in -h. This only serves the help message. globals_parser.add_argument( "-V", "--version", - action='version', - version='atomicapp %s, Nulecule Specification %s' % ( - __ATOMICAPPVERSION__, __NULECULESPECVERSION__), - help="show the version and exit.") + action="store_true", + help="Show the version and exit.") globals_parser.add_argument( "-v", "--verbose", From 834f463949a4f61599e74e57c77070b14f5ad77a Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 9 Mar 2016 09:27:03 -0500 Subject: [PATCH 071/193] Add tests for missing Nulecule or wrongly formatted file for missing Adds tests to see if Nulecule is missing and if the format of the file is invalid. --- atomicapp/nulecule/base.py | 14 +++++-- .../units/nulecule/invalid_nulecule/Nulecule | 37 +++++++++++++++++++ tests/units/nulecule/test_nulecule.py | 16 ++++++++ 3 files changed, 63 insertions(+), 4 deletions(-) create mode 100644 tests/units/nulecule/invalid_nulecule/Nulecule diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 39236a9c..8a9faebf 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -133,7 +133,8 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, nulecule_path = os.path.join(src, MAIN_FILE) if os.path.exists(nulecule_path): - nulecule_data = open(nulecule_path, 'r').read() + with open(nulecule_path, 'r') as f: + nulecule_data = f.read() else: raise NuleculeException("No Nulecule file exists in directory: %s" % src) @@ -148,9 +149,14 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, except (yaml.parser.ParserError, AnyMarkupError), e: line = re.search('line (\d+)', str(e)).group(1) column = re.search('column (\d+)', str(e)).group(1) - data = nulecule_data.splitlines()[int(line)] - raise NuleculeException("Failure parsing 
Nulecule file. Validation error on line %s, column %s:\n%s" - % (line, column, data)) + + output = "" + for i, l in enumerate(nulecule_data.splitlines()): + if (i == int(line) - 1) or (i == int(line)) or (i == int(line) + 1): + output += "%s %s\n" % (str(i), str(l)) + + raise NuleculeException("Failure parsing %s file. Validation error on line %s, column %s:\n%s" + % (nulecule_path, line, column, output)) nulecule = Nulecule(config=config, basepath=src, namespace=namespace, **nulecule_data) diff --git a/tests/units/nulecule/invalid_nulecule/Nulecule b/tests/units/nulecule/invalid_nulecule/Nulecule new file mode 100644 index 00000000..e1e3add5 --- /dev/null +++ b/tests/units/nulecule/invalid_nulecule/Nulecule @@ -0,0 +1,37 @@ +{ + "specversion": "0.0.2", + "id": "helloapache-app", + "metadata": { + "name": "Hello Apache App", + "appversion": "0.0.1", + "description": "Atomic app for deploying a really basic Apache HTTP server" + }, + "graph": [ + { + "name": "helloapache-app", + "params": [[ + { + "name": "image", + "description": "The webserver image", + "default": "centos/httpd" + }, + { + "name": "hostport", + "description": "The host TCP port as the external endpoint", + "default": 80 + } + ], + "artifacts": { + "docker": [ + "file://artifacts/docker/hello-apache-pod_run" + ], + "kubernetes": [ + "file://artifacts/kubernetes/hello-apache-pod.json" + ], + "marathon": [ + "file://artifacts/marathon/helloapache.json" + ] + } + } + ] +} diff --git a/tests/units/nulecule/test_nulecule.py b/tests/units/nulecule/test_nulecule.py index 4287de83..1bb60179 100644 --- a/tests/units/nulecule/test_nulecule.py +++ b/tests/units/nulecule/test_nulecule.py @@ -1,6 +1,9 @@ import mock import unittest +import pytest +import os from atomicapp.nulecule.base import Nulecule +from atomicapp.nulecule.exceptions import NuleculeException class TestNuleculeRun(unittest.TestCase): @@ -163,3 +166,16 @@ def test_render(self): provider_key=provider_key, dryrun=dryrun) 
mock_component_2.render.assert_called_once_with( provider_key=provider_key, dryrun=dryrun) + + +class TestLoadNuleculeParsing(unittest.TestCase): + + def test_missing_nulecule(self): + n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') + with pytest.raises(NuleculeException): + n.load_from_path(src='foo/bar') + + def test_invalid_nulecule_format(self): + n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') + with pytest.raises(NuleculeException): + n.load_from_path(src=os.path.dirname(__file__) + '/invalid_nulecule/') From f007a4533c8a5998732984fbcae8f02db30b59ed Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 9 Mar 2016 09:42:23 -0500 Subject: [PATCH 072/193] Change from ReadWrite to ReadWriteOnce in persistent storage tests In 1.0.0 of Kubernetes it is no longer ReadWrite but now ReadWriteOnce. http://kubernetes.io/v1.1/docs/user-guide/persistent-volumes.html This modifies the tests to ReadWriteOnce instead of ReadWrite. --- .../persistent_storage/test_examples/ps-helloapache/Nulecule | 4 ++-- tests/units/persistent_storage/test_ps.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule b/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule index ca94bbb3..a6f72274 100644 --- a/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule +++ b/tests/units/persistent_storage/test_examples/ps-helloapache/Nulecule @@ -23,9 +23,9 @@ graph: requirements: - persistentVolume: name: "var-lib-mongodb-data" - accessMode: "ReadWrite" + accessMode: "ReadWriteOnce" size: 4 - persistentVolume: name: "var-log-mongodb" - accessMode: "ReadWrite" + accessMode: "ReadWriteOnce" size: 4 diff --git a/tests/units/persistent_storage/test_ps.py b/tests/units/persistent_storage/test_ps.py index 69d0b0b1..2900fe86 100644 --- a/tests/units/persistent_storage/test_ps.py +++ b/tests/units/persistent_storage/test_ps.py @@ -10,8 +10,8 @@ class 
TestPersistentStorage(unittest.TestCase): def setUp(self): config = {'helloapache-app': {'image': 'centos/httpd', 'hostport': 80}, 'general': {'namespace': 'default', 'provider': 'kubernetes'}} - graph = [{'persistentVolume': {'accessMode': 'ReadWrite', 'name': 'var-lib-mongodb-data', 'size': 4}}, - {'persistentVolume': {'accessMode': 'ReadWrite', 'name': 'var-log-mongodb', 'size': 4}}] + graph = [{'persistentVolume': {'accessMode': 'ReadWriteOnce', 'name': 'var-lib-mongodb-data', 'size': 4}}, + {'persistentVolume': {'accessMode': 'ReadWriteOnce', 'name': 'var-log-mongodb', 'size': 4}}] self.tmpdir = tempfile.mkdtemp(prefix="atomicapp-test", dir="/tmp") self.test = Requirements( config=config, basepath=self.tmpdir, graph=graph, provider="kubernetes", dryrun=True) From 007efe329b1a68de25cb171409687192414c75dc Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 9 Mar 2016 13:34:05 -0500 Subject: [PATCH 073/193] Add Persistent Storage validation on ReadWriteOnce, etc. This commit add validation for the Kubernetes provider for persistent storage. We check against a list of method (ReadWriteOnce, ReadWriteMany and ReadOnlyMany) before proceeding. 
See: http://kubernetes.io/v1.1/docs/user-guide/persistent-volumes.html --- atomicapp/constants.py | 3 +++ atomicapp/providers/kubernetes.py | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 2c2a6d38..075fa7dc 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -75,3 +75,6 @@ PROVIDER_CONFIG_KEY = "providerconfig" PROVIDER_TLS_VERIFY_KEY = "providertlsverify" PROVIDER_CA_KEY = "providercafile" + +# Persistent Storage Formats +PERSISTENT_STORAGE_FORMAT = ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"] diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index ad6c8be8..6a496177 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -23,7 +23,8 @@ from string import Template from atomicapp.constants import (LOGGER_COCKPIT, - LOGGER_DEFAULT) + LOGGER_DEFAULT, + PERSISTENT_STORAGE_FORMAT) from atomicapp.plugin import Provider, ProviderFailedException from atomicapp.utils import Utils @@ -217,6 +218,12 @@ def persistent_storage(self, graph, action): logger.debug("Persistent storage enabled! Running action: %s" % action) + if graph["accessMode"] not in PERSISTENT_STORAGE_FORMAT: + raise ProviderFailedException("{} is an invalid storage format " + "(choose from {})" + .format(graph["accessMode"], + ', '.join(PERSISTENT_STORAGE_FORMAT))) + if action not in ['run']: logger.warning( "%s action is not available for provider %s. Doing nothing." % From 20d9ef06d488ef83121ecee4660657b10d88609a Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Tue, 8 Mar 2016 08:37:00 -0500 Subject: [PATCH 074/193] Inform user when provider not specified When user has not specified the provider, Atomic App defaults to `kubernetes` as provider. This should be informed to user, in the form of log as INFO. 
--- atomicapp/nulecule/base.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 17c19a51..8846e90a 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -200,6 +200,8 @@ def load_config(self, config=None, ask=False, skip_asking=False): config=config, ask=ask, skip_asking=skip_asking) if self.namespace == GLOBAL_CONF and self.config[GLOBAL_CONF].get('provider') is None: self.config[GLOBAL_CONF]['provider'] = DEFAULT_PROVIDER + logger.info("Provider not specified, using default provider - {}". + format(DEFAULT_PROVIDER)) for component in self.components: # FIXME: Find a better way to expose config data to components. # A component should not get access to all the variables, From f662c39de083a49ba04ef825920bd426b57af5be Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Wed, 9 Mar 2016 16:53:24 -0500 Subject: [PATCH 075/193] logging: fix duplicated log messages This was caused by a direct call to logging in requirements.py. After fixing the logging call to use logger instead all is well. Fixes #614 --- atomicapp/requirements.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index f618ffbc..7b7c3f80 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -47,8 +47,8 @@ def stop(self): # Find if the requirement does not exist within REQUIREMENT_FUNCTIONS def _find_requirement_function_name(self, key): - logging.debug("Checking if %s matches any of %s" % - (key, REQUIREMENT_FUNCTIONS)) + logger.debug("Checking if %s matches any of %s" % + (key, REQUIREMENT_FUNCTIONS)) if key in REQUIREMENT_FUNCTIONS.keys(): return REQUIREMENT_FUNCTIONS[key] raise RequirementFailedException("Requirement %s does not exist." 
% key) From 6542d40ea551f87c4ad9bb80fe13aea778efc6cb Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 10 Mar 2016 16:47:15 -0500 Subject: [PATCH 076/193] Remove time from default output Only display time on -v --- atomicapp/applogging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py index 5d63ae2c..f8a0c196 100644 --- a/atomicapp/applogging.py +++ b/atomicapp/applogging.py @@ -121,7 +121,7 @@ def setup_logging(verbose=None, quiet=None, logtype=None): if logging_level == logging.DEBUG: formatstr = '%(asctime)s - [%(levelname)s] - %(longerfilename)s - %(message)s' else: - formatstr = '%(asctime)s - [%(levelname)s] - %(filename)s - %(message)s' + formatstr = '[%(levelname)s] - %(filename)s - %(message)s' # Get the loggers and clear out the handlers (allows this function # to be ran more than once) From 2f986fe3c911d1390b4eec048937489461569392 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 11 Mar 2016 12:54:48 -0500 Subject: [PATCH 077/193] Convert to epoch time No more complete year, hour, second, minute, etc. Use epoch time instead. --- atomicapp/applogging.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py index f8a0c196..4665e853 100644 --- a/atomicapp/applogging.py +++ b/atomicapp/applogging.py @@ -123,6 +123,10 @@ def setup_logging(verbose=None, quiet=None, logtype=None): else: formatstr = '[%(levelname)s] - %(filename)s - %(message)s' + # Set a tuple of options that will be passed to the formatter. 
The %s + # will tell the logging library to use seconds since epoch for time stamps + formattup = (formatstr, "%s") + # Get the loggers and clear out the handlers (allows this function # to be ran more than once) logger = logging.getLogger(LOGGER_DEFAULT) @@ -154,7 +158,7 @@ def setup_logging(verbose=None, quiet=None, logtype=None): # configure logger for basic no color printing to stdout handler = logging.StreamHandler(stream=sys.stdout) - formatter = customOutputFormatter(formatstr) + formatter = customOutputFormatter(*formattup) handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging_level) @@ -166,7 +170,7 @@ def setup_logging(verbose=None, quiet=None, logtype=None): # configure logger for color printing to stdout handler = logging.StreamHandler(stream=sys.stdout) - formatter = colorizeOutputFormatter(formatstr) + formatter = colorizeOutputFormatter(*formattup) handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging_level) From 46f0d6297b66a7a19f10606f26f4f843deda7400 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Wed, 24 Feb 2016 23:08:32 +0530 Subject: [PATCH 078/193] handle docker error, raise instead of print, fix #570 --- atomicapp/nulecule/container.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index bdb30d43..69ce75f1 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -31,10 +31,18 @@ def __init__(self, dryrun=False, docker_cli='/usr/bin/docker'): except subprocess.CalledProcessError as e: if "client and server don't have same version" in e.output \ or "client is newer than server" in e.output: - print("\nThe docker version in this Atomic App differs " - "greatly from the host version.\nPlease use a different " - "Atomic App version for this host.\n") - raise e + raise DockerException("\nThe docker version in this " + "Atomic App differs greatly from " + 
"the host version.\nPlease use a " + "different Atomic App version for " + "this host.\n") + elif "Is your docker daemon up and running" in e.output or \ + "Are you trying to connect to a TLS-enabled daemon " \ + "without TLS" in e.output: + raise DockerException("Could not connect to the " + "docker daemon.") + else: + raise DockerException(e.output) def pull(self, image, update=False): """ From 0d80eb80b10abc9085f7cde5a83de163df555679 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Wed, 24 Feb 2016 23:08:32 +0530 Subject: [PATCH 079/193] handle docker error, raise instead of print, fix #570 --- atomicapp/nulecule/container.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index bdb30d43..69ce75f1 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -31,10 +31,18 @@ def __init__(self, dryrun=False, docker_cli='/usr/bin/docker'): except subprocess.CalledProcessError as e: if "client and server don't have same version" in e.output \ or "client is newer than server" in e.output: - print("\nThe docker version in this Atomic App differs " - "greatly from the host version.\nPlease use a different " - "Atomic App version for this host.\n") - raise e + raise DockerException("\nThe docker version in this " + "Atomic App differs greatly from " + "the host version.\nPlease use a " + "different Atomic App version for " + "this host.\n") + elif "Is your docker daemon up and running" in e.output or \ + "Are you trying to connect to a TLS-enabled daemon " \ + "without TLS" in e.output: + raise DockerException("Could not connect to the " + "docker daemon.") + else: + raise DockerException(e.output) def pull(self, image, update=False): """ From 3aa144ae4110341ec9afe887c4e074e295f5573d Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Sat, 12 Mar 2016 23:12:08 +0530 Subject: [PATCH 080/193] refactor exceptions, fix #621, fix #622 --- 
atomicapp/cli/main.py | 123 ++++++++++++++++++------------------------ 1 file changed, 53 insertions(+), 70 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index e86b20b3..443692cc 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -51,84 +51,53 @@ def print_app_location(app_path): def cli_genanswers(args): - try: - argdict = args.__dict__ - nm = NuleculeManager(app_spec=argdict['app_spec'], - destination='none') - nm.genanswers(**argdict) - Utils.rm_dir(nm.app_path) # clean up files - sys.exit(0) - except NuleculeException as e: - logger.error(e) - sys.exit(1) - except Exception as e: - logger.error(e, exc_info=True) - sys.exit(1) + argdict = args.__dict__ + nm = NuleculeManager(app_spec=argdict['app_spec'], + destination='none') + nm.genanswers(**argdict) + Utils.rm_dir(nm.app_path) # clean up files + sys.exit(0) def cli_fetch(args): - try: - argdict = args.__dict__ - destination = argdict['destination'] - nm = NuleculeManager(app_spec=argdict['app_spec'], - destination=destination, - cli_answers=argdict['cli_answers'], - answers_file=argdict['answers']) - nm.fetch(**argdict) - # Clean up the files if the user asked us to. Otherwise - # notify the user where they can manage the application - if destination and destination.lower() == 'none': - Utils.rm_dir(nm.app_path) - else: - print_app_location(nm.app_path) - sys.exit(0) - except NuleculeException as e: - logger.error(e) - sys.exit(1) - except Exception as e: - logger.error(e, exc_info=True) - sys.exit(1) + argdict = args.__dict__ + destination = argdict['destination'] + nm = NuleculeManager(app_spec=argdict['app_spec'], + destination=destination, + cli_answers=argdict['cli_answers'], + answers_file=argdict['answers']) + nm.fetch(**argdict) + # Clean up the files if the user asked us to. 
Otherwise + # notify the user where they can manage the application + if destination and destination.lower() == 'none': + Utils.rm_dir(nm.app_path) + else: + print_app_location(nm.app_path) + sys.exit(0) def cli_run(args): - try: - argdict = args.__dict__ - destination = argdict['destination'] - nm = NuleculeManager(app_spec=argdict['app_spec'], - destination=destination, - cli_answers=argdict['cli_answers'], - answers_file=argdict['answers']) - nm.run(**argdict) - # Clean up the files if the user asked us to. Otherwise - # notify the user where they can manage the application - if destination and destination.lower() == 'none': - Utils.rm_dir(nm.app_path) - else: - print_app_location(nm.app_path) - sys.exit(0) - except DockerException as e: - logger.error(e) - sys.exit(1) - except NuleculeException as e: - logger.error(e) - sys.exit(1) - except Exception as e: - logger.error(e, exc_info=True) - sys.exit(1) + argdict = args.__dict__ + destination = argdict['destination'] + nm = NuleculeManager(app_spec=argdict['app_spec'], + destination=destination, + cli_answers=argdict['cli_answers'], + answers_file=argdict['answers']) + nm.run(**argdict) + # Clean up the files if the user asked us to. Otherwise + # notify the user where they can manage the application + if destination and destination.lower() == 'none': + Utils.rm_dir(nm.app_path) + else: + print_app_location(nm.app_path) + sys.exit(0) def cli_stop(args): - try: - argdict = args.__dict__ - nm = NuleculeManager(app_spec=argdict['app_spec']) - nm.stop(**argdict) - sys.exit(0) - except NuleculeException as e: - logger.error(e) - sys.exit(1) - except Exception as e: - logger.error(e, exc_info=True) - sys.exit(1) + argdict = args.__dict__ + nm = NuleculeManager(app_spec=argdict['app_spec']) + nm.stop(**argdict) + sys.exit(0) # Create a custom action parser. 
Need this because for some args we don't @@ -144,6 +113,20 @@ def __call__(self, parser, namespace, values, option_string=None): setattr(namespace, self.dest, booleanvalue) +def cli_func_exec(cli_func, cli_func_args): + try: + cli_func(cli_func_args) + except DockerException as e: + logger.error(e) + sys.exit(1) + except NuleculeException as e: + logger.error(e) + sys.exit(1) + except Exception as e: + logger.error(e, exc_info=True) + sys.exit(1) + + class CLI(): def __init__(self): @@ -457,7 +440,7 @@ def run(self): lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE)) try: lock.acquire(timeout=-1) - args.func(args) + cli_func_exec(args.func, args) except AttributeError: if hasattr(args, 'func'): raise From bfa2373fede4602649fb2948ecb63382a3866a9b Mon Sep 17 00:00:00 2001 From: Preeti Chandrashekar Date: Tue, 15 Mar 2016 15:08:25 +0530 Subject: [PATCH 081/193] Minor edits to atomicapp_lifecycle.md --- docs/atomicapp_lifecycle.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/atomicapp_lifecycle.md b/docs/atomicapp_lifecycle.md index 3fef59b4..5cfd27fd 100644 --- a/docs/atomicapp_lifecycle.md +++ b/docs/atomicapp_lifecycle.md @@ -16,26 +16,26 @@ directory is then cleaned up. ------- Will download and combine artifacts from the target application and any dependent applications including sample answers.conf file into a local -directory for inspection and/or modification. Same for all providers. +directory for inspection and/or modification. This is the same for all providers. `run` ----- -Run an application. +Will run an application. | Provider | Implementation | | ------------- | -------------- | | Docker | Run application containers on local machine. | -| Kubernetes | Run requested application in kubernetes target environment. | +| Kubernetes | Run requested application in Kubernetes target environment. | | Openshift | Run requested application in OpenShift target environment. 
| | Marathon | Run requested application in Marathon target environment. | `stop` ------ -Stop an application. +Will stop an application. | Provider | Implementation | | ------------- | -------------- | | Docker | Stop application containers on local machine. | -| Kubernetes | Stop requested application in kubernetes target environment. | +| Kubernetes | Stop requested application in Kubernetes target environment. | | Openshift | Stop requested application in OpenShift target environment. | | Marathon | Stop requested application in Marathon target environment. | From f008b742181aceb2d5264d1ebf6f297b18dc267e Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Mon, 14 Mar 2016 06:21:13 -0400 Subject: [PATCH 082/193] Docker-run file with multi-line command supported If user has provided a docker-run artifact with multi-line command then it can be parsed now, by removing the backslashes and backslash-n. Fixes issue #617 --- atomicapp/providers/docker.py | 2 ++ .../docker_artifact_test/run-with-backslashes | 6 ++++++ tests/units/providers/test_docker_provider.py | 12 ++++++++++++ 3 files changed, 20 insertions(+) create mode 100644 tests/units/providers/docker_artifact_test/run-with-backslashes diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index a157ccc7..dfd9c138 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -90,6 +90,8 @@ def run(self): label_run = None with open(artifact_path, "r") as fp: label_run = fp.read().strip() + # if docker-run provided as multiline command + label_run = ' '.join(label_run.split('\\\n')) run_args = label_run.split() # If --name is provided, do not re-name due to potential linking of containers. Warn user instead. 
diff --git a/tests/units/providers/docker_artifact_test/run-with-backslashes b/tests/units/providers/docker_artifact_test/run-with-backslashes new file mode 100644 index 00000000..6dda45e1 --- /dev/null +++ b/tests/units/providers/docker_artifact_test/run-with-backslashes @@ -0,0 +1,6 @@ +docker run \ +-d \ +-p \ +80:80 \ +--name centos7 \ +centos7 diff --git a/tests/units/providers/test_docker_provider.py b/tests/units/providers/test_docker_provider.py index 613153a4..20aa264a 100644 --- a/tests/units/providers/test_docker_provider.py +++ b/tests/units/providers/test_docker_provider.py @@ -78,3 +78,15 @@ def test_namespace_name_check(self): provider.artifacts = [self.artifact_dir + 'hello-world-one'] with pytest.raises(ProviderFailedException): provider.run() + + def test_docker_run_with_backslashes(self): + data = {'namespace': 'test', 'provider': 'docker'} + provider = self.prepare_provider(data) + provider.init() + provider.artifacts = [ + self.artifact_dir + 'run-with-backslashes', + ] + expected_output = 'docker run -d -p 80:80 --name centos7 centos7' + with mock.patch('atomicapp.providers.docker.logger') as mock_logger: + provider.run() + mock_logger.info.assert_called_with('DRY-RUN: %s', expected_output) From 07af6e0ae7a1e813fdbf5bb5294095cf6a1432c7 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 15 Mar 2016 16:07:45 -0400 Subject: [PATCH 083/193] 0.4.4 Release --- CHANGELOG.md | 54 +++++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 7 files changed, 60 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e62064ad..335f506b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,57 @@ +## Atomic App 0.4.4 (03-15-2016) + +This release includes a major update to our documentation as well as the user experience when deploying an Atomic App. 
+ +The main features are: + + - Major update to documentation + - Cleaner logging output + - Error-out validation on JSON/YAML + +UI: + + - Fix --version output on CLI + - Handle docker exception errors + - Inform on wrong provider name provided within answers.conf + +Other: + + - Fix requirements on 'make test' + +For a full list of changes between 0.4.4 and 0.4.3 please see the git shortlog below! + +``` +Charlie Drage (10): + Doc for current functions implemented by spec + Fix minor markdown error in spec_coverage + Major update to README and documentation + Error cleanly on missing Nulecule or invalid formatted Nulecule + Multiple problems with issuestats.com + Add tests for missing Nulecule or wrongly formatted file for missing + Change from ReadWrite to ReadWriteOnce in persistent storage tests + Add Persistent Storage validation on ReadWriteOnce, etc. + Remove time from default output + Convert to epoch time + +Dusty Mabe (2): + docs: fix broken link + logging: fix duplicated log messages + +Preeti Chandrashekar (1): + Minor edits to atomicapp_lifecycle.md + +Shubham Minglani (2): + Handle docker pull exception, improve #441, fix #568 + fix --version output, fix #481 + +Suraj Deshmukh (5): + Install requirements before make test + Wrong provider name in answers.conf, exits AtomicApp with readable error + Typo in providers docs + Instruction to skip travis CI + Inform user when provider not specified +``` + ## Atomic App 0.4.3 (03-01-2016) You'll now see pretty colors with logging / output! diff --git a/Dockerfile b/Dockerfile index 896e5e2e..14803a48 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.4.3" +ENV ATOMICAPPVERSION="0.4.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 896e5e2e..14803a48 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.3" +ENV ATOMICAPPVERSION="0.4.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 719bd5f0..e4e90800 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.3" +ENV ATOMICAPPVERSION="0.4.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 8fc2e6a1..c0d0d5c5 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.4.3" +ENV ATOMICAPPVERSION="0.4.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 075fa7dc..fd12adbc 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.4.3' +__ATOMICAPPVERSION__ = '0.4.4' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index b4a99405..fde45513 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.4.3', + version='0.4.4', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From 3b3505ac4d8d2298e82bac0660c53dd10b7485d0 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 17 Mar 2016 15:29:40 -0400 Subject: [PATCH 084/193] Update docs / more clarity on cli comparison Create more spacing / update markdown clarity on CLI comparisons --- docs/start_guide.md | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/docs/start_guide.md b/docs/start_guide.md index 5006c7ff..4a816708 100644 --- a/docs/start_guide.md +++ b/docs/start_guide.md @@ -32,29 +32,12 @@ Some commands for `atomicapp` on an atomic host are a bit different. However. Regardless of the `atomic run` command, a `--mode` can be passed to change the functionality of the command. 
-`atomicapp fetch projectatomic/helloapache` - -vs - -`atomic run projectatomic/helloapache --mode fetch` - -`atomicapp run projectatomic/helloapache` - -vs - -`atomic run projectatomic/helloapache` - -`atomicapp stop ./myappdir` - -vs - -`atomic stop projectatomic/helloapache ./myappdir` - -`atomicapp genanswers ./myappdir` - -vs - -`atomic run projectatomic/helloapache ./myappdir --mode genanswers` +| Atomic App | Atomic CLI +|-----------|--------| +| `atomicapp fetch projectatomic/helloapache` | `atomic run projectatomic/helloapache --mode fetch` +| `atomicapp run projectatomic/helloapache` | `atomic run projectatomic/helloapache` +| `atomicapp stop ./myappdir` | `atomic stop projectatomic/helloapache ./myappdir` +| `atomicapp genanswers ./myappdir` | `atomic run projectatomic/helloapache ./myappdir --mode genanswers` ## Building your first Atomic App From f888d294dfaef2c9c214ba5a6085730b97ab8fbb Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 15 Mar 2016 10:36:32 -0400 Subject: [PATCH 085/193] Change asking format This commit changes the asking format to as follows: ==> mongodb_admin_password (MongoDB Administrator password): ==> mongodb_database (MongoDB Database name): ==> mongodb_password (Password for the $mongodb_user account): Making it more clear to the user what is being asked / required. This is similar to Vagrant's formatting. 
--- atomicapp/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index bb7a77c3..ca36e5e4 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -278,12 +278,12 @@ def askFor(what, info): repeat = False if "default" in info: value = raw_input( - "%s (%s, default: %s): " % (what, desc, info["default"])) + "==> %s (%s, default: %s): " % (what, desc, info["default"])) if len(value) == 0: value = info["default"] else: try: - value = raw_input("%s (%s): " % (what, desc)) + value = raw_input("==> %s (%s): " % (what, desc)) except EOFError: raise From ca24c3b17e3063522b57abfb1b36e639c7fa47d1 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 22 Mar 2016 13:01:35 -0400 Subject: [PATCH 086/193] Move kubeconfig parsing to a separate file This commit move the kube parsing to a separate file in order to begin the changes to the kubernetes API from using the CLI to the API. --- atomicapp/providers/lib/__init__.py | 0 atomicapp/providers/lib/kubeconfig.py | 118 ++++++++++++++++++++++++++ atomicapp/providers/openshift.py | 105 +---------------------- 3 files changed, 120 insertions(+), 103 deletions(-) create mode 100644 atomicapp/providers/lib/__init__.py create mode 100644 atomicapp/providers/lib/kubeconfig.py diff --git a/atomicapp/providers/lib/__init__.py b/atomicapp/providers/lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/atomicapp/providers/lib/kubeconfig.py b/atomicapp/providers/lib/kubeconfig.py new file mode 100644 index 00000000..d264e0d1 --- /dev/null +++ b/atomicapp/providers/lib/kubeconfig.py @@ -0,0 +1,118 @@ +import anymarkup + +from atomicapp.plugin import ProviderFailedException +from atomicapp.constants import (ACCESS_TOKEN_KEY, + LOGGER_DEFAULT, + NAMESPACE_KEY, + PROVIDER_API_KEY, + PROVIDER_TLS_VERIFY_KEY, + PROVIDER_CA_KEY) +import logging +logger = logging.getLogger(LOGGER_DEFAULT) + + +class KubeConfig(object): + + @staticmethod + def 
parse_kubeconf(filename): + """" + Parse kubectl config file + + Args: + filename (string): path to configuration file (e.g. ./kube/config) + + Returns: + dict of parsed values from config + + Example of expected file format: + apiVersion: v1 + clusters: + - cluster: + server: https://10.1.2.2:8443 + certificate-authority: path-to-ca.cert + insecure-skip-tls-verify: false + name: 10-1-2-2:8443 + contexts: + - context: + cluster: 10-1-2-2:8443 + namespace: test + user: test-admin/10-1-2-2:8443 + name: test/10-1-2-2:8443/test-admin + current-context: test/10-1-2-2:8443/test-admin + kind: Config + preferences: {} + users: + - name: test-admin/10-1-2-2:8443 + user: + token: abcdefghijklmnopqrstuvwxyz0123456789ABCDEF + """ + logger.debug("Parsing %s", filename) + + with open(filename, 'r') as fp: + kubecfg = anymarkup.parse(fp.read()) + + try: + return KubeConfig.parse_kubeconf_data(kubecfg) + except ProviderFailedException: + raise ProviderFailedException('Invalid %s' % filename) + + @staticmethod + def parse_kubeconf_data(kubecfg): + """ + Parse kubeconf data. 
+ + Args: + kubecfg (dict): Kubernetes config data + + Returns: + dict of parsed values from config + """ + url = None + token = None + namespace = None + tls_verify = True + ca = None + + current_context = kubecfg["current-context"] + + logger.debug("current context: %s", current_context) + + context = None + for co in kubecfg["contexts"]: + if co["name"] == current_context: + context = co + + if not context: + raise ProviderFailedException() + + cluster = None + for cl in kubecfg["clusters"]: + if cl["name"] == context["context"]["cluster"]: + cluster = cl + + user = None + for usr in kubecfg["users"]: + if usr["name"] == context["context"]["user"]: + user = usr + + if not cluster or not user: + raise ProviderFailedException() + + logger.debug("context: %s", context) + logger.debug("cluster: %s", cluster) + logger.debug("user: %s", user) + + url = cluster["cluster"]["server"] + token = user["user"]["token"] + if "namespace" in context["context"]: + namespace = context["context"]["namespace"] + if "insecure-skip-tls-verify" in cluster["cluster"]: + tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"] + elif "certificate-authority" in cluster["cluster"]: + ca = cluster["cluster"]["certificate-authority"] + + return {PROVIDER_API_KEY: url, + ACCESS_TOKEN_KEY: token, + NAMESPACE_KEY: namespace, + PROVIDER_TLS_VERIFY_KEY: tls_verify, + PROVIDER_CA_KEY: ca} diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 0a73b811..abe2b247 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -37,6 +37,7 @@ PROVIDER_API_KEY, PROVIDER_TLS_VERIFY_KEY, PROVIDER_CA_KEY) +from atomicapp.providers.lib.kubeconfig import KubeConfig from requests.exceptions import SSLError import logging logger = logging.getLogger(LOGGER_DEFAULT) @@ -602,108 +603,6 @@ def _get_url(self, namespace, kind, name=None, params=None): logger.debug("url: %s", url) return url - def _parse_kubeconf(self, filename): - """" - Parse kubectl 
config file - - Args: - filename (string): path to configuration file (e.g. ./kube/config) - - Returns: - dict of parsed values from config - - Example of expected file format: - apiVersion: v1 - clusters: - - cluster: - server: https://10.1.2.2:8443 - certificate-authority: path-to-ca.cert - insecure-skip-tls-verify: false - name: 10-1-2-2:8443 - contexts: - - context: - cluster: 10-1-2-2:8443 - namespace: test - user: test-admin/10-1-2-2:8443 - name: test/10-1-2-2:8443/test-admin - current-context: test/10-1-2-2:8443/test-admin - kind: Config - preferences: {} - users: - - name: test-admin/10-1-2-2:8443 - user: - token: abcdefghijklmnopqrstuvwxyz0123456789ABCDEF - """ - logger.debug("Parsing %s", filename) - - with open(filename, 'r') as fp: - kubecfg = anymarkup.parse(fp.read()) - - try: - return self._parse_kubeconf_data(kubecfg) - except ProviderFailedException: - raise ProviderFailedException('Invalid %s' % filename) - - def _parse_kubeconf_data(self, kubecfg): - """ - Parse kubeconf data. 
- - Args: - kubecfg (dict): Kubernetes config data - - Returns: - dict of parsed values from config - """ - url = None - token = None - namespace = None - tls_verify = True - ca = None - - current_context = kubecfg["current-context"] - - logger.debug("current context: %s", current_context) - - context = None - for co in kubecfg["contexts"]: - if co["name"] == current_context: - context = co - - if not context: - raise ProviderFailedException() - - cluster = None - for cl in kubecfg["clusters"]: - if cl["name"] == context["context"]["cluster"]: - cluster = cl - - user = None - for usr in kubecfg["users"]: - if usr["name"] == context["context"]["user"]: - user = usr - - if not cluster or not user: - raise ProviderFailedException() - - logger.debug("context: %s", context) - logger.debug("cluster: %s", cluster) - logger.debug("user: %s", user) - - url = cluster["cluster"]["server"] - token = user["user"]["token"] - if "namespace" in context["context"]: - namespace = context["context"]["namespace"] - if "insecure-skip-tls-verify" in cluster["cluster"]: - tls_verify = not cluster["cluster"]["insecure-skip-tls-verify"] - elif "certificate-authority" in cluster["cluster"]: - ca = cluster["cluster"]["certificate-authority"] - - return {PROVIDER_API_KEY: url, - ACCESS_TOKEN_KEY: token, - NAMESPACE_KEY: namespace, - PROVIDER_TLS_VERIFY_KEY: tls_verify, - PROVIDER_CA_KEY: ca} - def _set_config_values(self): """ Reads providerapi, namespace and accesstoken from answers.conf and @@ -749,7 +648,7 @@ def _set_config_values(self): # get values from providerconfig if self.config_file: - providerconfig = self._parse_kubeconf(self.config_file) + providerconfig = KubeConfig.parse_kubeconf(self.config_file) # decide between values from answers.conf and providerconfig # if only one is set use that, report if they are in conflict From 929a08bac55fe77f80922781417124b58ed1f5c3 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 22 Mar 2016 13:02:25 -0400 Subject: [PATCH 087/193] Change 
kubeconfig testing name and move into a separate file --- tests/units/nulecule/test_kubeconfig.py | 166 ++++++++++++++++++ .../providers/test_openshift_provider.py | 165 ----------------- 2 files changed, 166 insertions(+), 165 deletions(-) create mode 100644 tests/units/nulecule/test_kubeconfig.py diff --git a/tests/units/nulecule/test_kubeconfig.py b/tests/units/nulecule/test_kubeconfig.py new file mode 100644 index 00000000..2161efe2 --- /dev/null +++ b/tests/units/nulecule/test_kubeconfig.py @@ -0,0 +1,166 @@ +import unittest +from atomicapp.plugin import ProviderFailedException +from atomicapp.providers.lib.kubeconfig import KubeConfig + + +class TestKubeConfParsing(unittest.TestCase): + + def test_parse_kubeconf_data_insecure(self): + """ + Test parsing kubeconf data with current context containing + cluster, user, namespace info and skipping tls verification + """ + kubecfg_data = { + 'current-context': 'context2', + 'contexts': [ + { + 'name': 'context1', + }, + { + 'name': 'context2', + 'context': { + 'cluster': 'cluster1', + 'user': 'user1', + 'namespace': 'namespace1' + } + } + ], + 'clusters': [ + { + 'name': 'cluster1', + 'cluster': { + 'insecure-skip-tls-verify': 'true', + 'server': 'server1' + } + } + ], + 'users': [ + { + 'name': 'user1', + 'user': { + 'token': 'token1' + } + } + ] + } + + self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), + {'providerapi': 'server1', + 'accesstoken': 'token1', + 'namespace': 'namespace1', + 'providertlsverify': False, + 'providercafile': None}) + + def test_parse_kubeconf_data_cafile(self): + """ + Test parsing kubeconf data with current context containing + cluster, user, namespace info and certificate-authority + """ + kubecfg_data = { + 'current-context': 'context2', + 'contexts': [ + { + 'name': 'context1', + }, + { + 'name': 'context2', + 'context': { + 'cluster': 'cluster1', + 'user': 'user1', + 'namespace': 'namespace1' + } + } + ], + 'clusters': [ + { + 'name': 'cluster1', + 'cluster': { + 
'certificate-authority': '/foo/bar', + 'server': 'server1' + } + } + ], + 'users': [ + { + 'name': 'user1', + 'user': { + 'token': 'token1' + } + } + ] + } + + self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), + {'providerapi': 'server1', + 'accesstoken': 'token1', + 'namespace': 'namespace1', + 'providertlsverify': True, + 'providercafile': '/foo/bar'}) + + def test_parse_kubeconf_data_no_context(self): + """ + Test parsing kubeconf data with missing context data for + current context. + """ + kubecfg_data = { + 'current-context': 'context2', + 'contexts': [ + { + 'name': 'context1', + } + ], + 'clusters': [ + { + 'name': 'cluster1', + 'cluster': { + 'server': 'server1' + } + } + ], + 'users': [ + { + 'name': 'user1', + 'user': { + 'token': 'token1' + } + } + ] + } + + self.assertRaises(ProviderFailedException, + KubeConfig.parse_kubeconf_data, kubecfg_data) + + def test_parse_kubeconf_data_no_user(self): + """ + Test parsing kubeconf data with missing user data in current + context. 
+ """ + kubecfg_data = { + 'current-context': 'context2', + 'contexts': [ + { + 'name': 'context1', + }, + { + 'name': 'context2', + 'context': { + 'cluster': 'cluster1', + 'user': 'user1', + 'namespace': 'namespace1' + } + } + ], + 'clusters': [ + { + 'name': 'cluster1', + 'cluster': { + 'server': 'server1' + } + } + ], + 'users': [ + ] + } + + self.assertRaises(ProviderFailedException, + KubeConfig.parse_kubeconf_data, kubecfg_data) diff --git a/tests/units/providers/test_openshift_provider.py b/tests/units/providers/test_openshift_provider.py index b24b6ccc..6a4ba386 100644 --- a/tests/units/providers/test_openshift_provider.py +++ b/tests/units/providers/test_openshift_provider.py @@ -225,168 +225,3 @@ def test_process_artifact_data_error_kind_key_missing(self): op._process_artifact_data, 'foo', artifact_data) -class TestOpenshiftProviderParseKubeconfData(OpenshiftProviderTestMixin, unittest.TestCase): - - def test_parse_kubeconf_data_insecure(self): - """ - Test parsing kubeconf data with current context containing - cluster, user, namespace info and skipping tls verification - """ - kubecfg_data = { - 'current-context': 'context2', - 'contexts': [ - { - 'name': 'context1', - }, - { - 'name': 'context2', - 'context': { - 'cluster': 'cluster1', - 'user': 'user1', - 'namespace': 'namespace1' - } - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'insecure-skip-tls-verify': 'true', - 'server': 'server1' - } - } - ], - 'users': [ - { - 'name': 'user1', - 'user': { - 'token': 'token1' - } - } - ] - } - - op = self.get_oc_provider() - self.assertEqual(op._parse_kubeconf_data(kubecfg_data), - {'providerapi': 'server1', - 'accesstoken': 'token1', - 'namespace': 'namespace1', - 'providertlsverify': False, - 'providercafile': None}) - - def test_parse_kubeconf_data_cafile(self): - """ - Test parsing kubeconf data with current context containing - cluster, user, namespace info and certificate-authority - """ - kubecfg_data = { - 'current-context': 
'context2', - 'contexts': [ - { - 'name': 'context1', - }, - { - 'name': 'context2', - 'context': { - 'cluster': 'cluster1', - 'user': 'user1', - 'namespace': 'namespace1' - } - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'certificate-authority': '/foo/bar', - 'server': 'server1' - } - } - ], - 'users': [ - { - 'name': 'user1', - 'user': { - 'token': 'token1' - } - } - ] - } - - op = self.get_oc_provider() - self.assertEqual(op._parse_kubeconf_data(kubecfg_data), - {'providerapi': 'server1', - 'accesstoken': 'token1', - 'namespace': 'namespace1', - 'providertlsverify': True, - 'providercafile': '/foo/bar'}) - - def test_parse_kubeconf_data_no_context(self): - """ - Test parsing kubeconf data with missing context data for - current context. - """ - kubecfg_data = { - 'current-context': 'context2', - 'contexts': [ - { - 'name': 'context1', - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'server': 'server1' - } - } - ], - 'users': [ - { - 'name': 'user1', - 'user': { - 'token': 'token1' - } - } - ] - } - - op = self.get_oc_provider() - self.assertRaises(ProviderFailedException, - op._parse_kubeconf_data, kubecfg_data) - - def test_parse_kubeconf_data_no_user(self): - """ - Test parsing kubeconf data with missing user data in current - context. 
- """ - kubecfg_data = { - 'current-context': 'context2', - 'contexts': [ - { - 'name': 'context1', - }, - { - 'name': 'context2', - 'context': { - 'cluster': 'cluster1', - 'user': 'user1', - 'namespace': 'namespace1' - } - } - ], - 'clusters': [ - { - 'name': 'cluster1', - 'cluster': { - 'server': 'server1' - } - } - ], - 'users': [ - ] - } - - op = self.get_oc_provider() - self.assertRaises(ProviderFailedException, - op._parse_kubeconf_data, kubecfg_data) From 48c7c0bee32b6da93e060d6458d48690f76ecefb Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 22 Mar 2016 13:16:32 -0400 Subject: [PATCH 088/193] Add weekly meeting information Add weekly meeting information to the README in regards to our SCRUM meeting as well as IRC :) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 710ed2d7..ff25ecdb 100644 --- a/README.md +++ b/README.md @@ -93,6 +93,8 @@ See [REQUIREMENTS.md](docs/requirements.md) for a list of current Atomic App dep * IRC: __#nulecule__ on irc.freenode.net * Mailing List: [container-tools@redhat.com](https://www.redhat.com/mailman/listinfo/container-tools) +* Weekly IRC Nulecule meeting: Monday's @ 0930 EST / 0130 UTC +* Weekly SCRUM Container-Tools meeting: Wednesday's @ 0830 EST / 1230 UTC on [Bluejeans](https://bluejeans.com/381583203/) ## Copyright From a88f375a809fb9ff7a1fd8da46a8d5d1be85b494 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 29 Mar 2016 18:12:49 -0400 Subject: [PATCH 089/193] 0.4.5 Release --- CHANGELOG.md | 36 +++++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 7 files changed, 42 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 335f506b..2ec7b201 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,39 @@ +## Atomic App 0.4.5 (03-29-2016) + +This is a minor release of Atomic App 
where we make some changes to the UI output as well as fix a few provider-specific bugs. + +The main features of this release are: + + - Cleaner logging output + - More prominent output when Atomic App asks for a missing answers variable + - Multi-line artifact support for the Docker provider + +Other: + + - Update documentation on cli comparison + - Move kubeconfig parsing to a separate class + - Refactor cli exceptions code + +``` +Charlie Drage : + Convert to epoch time + Update docs / more clarity on cli comparison + Change asking format + Move kubeconfig parsing to a separate file + Change kubeconfig testing name and move into a separate file + Add weekly meeting information + +Preeti Chandrashekar : + Minor edits to atomicapp_lifecycle.md + +Shubham Minglani : + handle docker error, raise instead of print, fix #570 + refactor exceptions, fix #621, fix #622 + +Suraj Deshmukh : + Docker-run file with multi-line command supported +``` + ## Atomic App 0.4.4 (03-15-2016) This release includes a major update to our documentation as well as the user experience when deploying an Atomic App. diff --git a/Dockerfile b/Dockerfile index 14803a48..4702604d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.4" +ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 14803a48..4702604d 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.4.4" +ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index e4e90800..70b37350 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.4" +ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index c0d0d5c5..c9c695c3 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.4" +ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/atomicapp/constants.py b/atomicapp/constants.py index fd12adbc..0afa9442 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.4.4' +__ATOMICAPPVERSION__ = '0.4.5' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index fde45513..f01a3766 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.4.4', + version='0.4.5', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From 8bd8a3039cbe1c16f66784d0de04a792da0f67c1 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 23 Mar 2016 09:56:22 -0400 Subject: [PATCH 
090/193] Add more legal information Add missing legal information to the top of each .py file. --- atomicapp/nulecule/__init__.py | 18 ++++++++++++++++++ atomicapp/nulecule/base.py | 18 ++++++++++++++++++ atomicapp/nulecule/container.py | 18 ++++++++++++++++++ atomicapp/nulecule/exceptions.py | 18 ++++++++++++++++++ atomicapp/nulecule/lib.py | 18 ++++++++++++++++++ atomicapp/nulecule/main.py | 18 ++++++++++++++++++ atomicapp/providers/__init__.py | 18 ++++++++++++++++++ atomicapp/requirements.py | 18 ++++++++++++++++++ 8 files changed, 144 insertions(+) diff --git a/atomicapp/nulecule/__init__.py b/atomicapp/nulecule/__init__.py index 7cf82d51..7d20122f 100644 --- a/atomicapp/nulecule/__init__.py +++ b/atomicapp/nulecule/__init__.py @@ -1,3 +1,21 @@ +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" from __future__ import absolute_import from .main import NuleculeManager diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index ea86a1ca..a3e77a8c 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -1,4 +1,22 @@ # -*- coding: utf-8 -*- +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" import anymarkup import copy import logging diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 69ce75f1..6fc45cdd 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -1,3 +1,21 @@ +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" import os import subprocess import uuid diff --git a/atomicapp/nulecule/exceptions.py b/atomicapp/nulecule/exceptions.py index 39a7b926..b932ab49 100644 --- a/atomicapp/nulecule/exceptions.py +++ b/atomicapp/nulecule/exceptions.py @@ -1,4 +1,22 @@ # -*- coding: utf-8 -*- +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" class DockerException(Exception): diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index b38e47e4..98ba8f36 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -1,4 +1,22 @@ # -*- coding: utf-8 -*- +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" import logging from atomicapp.constants import (GLOBAL_CONF, diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 1ce88b8d..1fcbb2f6 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -1,4 +1,22 @@ # -*- coding: utf-8 -*- +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" import anymarkup import copy import distutils.dir_util diff --git a/atomicapp/providers/__init__.py b/atomicapp/providers/__init__.py index e69de29b..479ef0b6 100644 --- a/atomicapp/providers/__init__.py +++ b/atomicapp/providers/__init__.py @@ -0,0 +1,18 @@ +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index 7b7c3f80..8e81be2e 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -1,3 +1,21 @@ +""" + Copyright 2015 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" import logging from atomicapp.constants import (LOGGER_DEFAULT, From e47b062d6d6f1a1512e5c05827afd07abe6576af Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 23 Mar 2016 10:00:46 -0400 Subject: [PATCH 091/193] Update year --- atomicapp/applogging.py | 2 +- atomicapp/cli/__init__.py | 2 +- atomicapp/cli/main.py | 2 +- atomicapp/constants.py | 2 +- atomicapp/nulecule/__init__.py | 2 +- atomicapp/nulecule/base.py | 2 +- atomicapp/nulecule/container.py | 2 +- atomicapp/nulecule/exceptions.py | 2 +- atomicapp/nulecule/lib.py | 2 +- atomicapp/nulecule/main.py | 2 +- atomicapp/plugin.py | 2 +- atomicapp/providers/__init__.py | 2 +- atomicapp/providers/docker.py | 2 +- atomicapp/providers/kubernetes.py | 2 +- atomicapp/providers/marathon.py | 2 +- atomicapp/providers/openshift.py | 2 +- atomicapp/requirements.py | 2 +- atomicapp/utils.py | 2 +- setup.py | 2 +- tests/units/cli/test_cli.py | 2 +- tests/units/cli/test_cli_gitlab_example.py | 2 +- tests/units/cli/test_default_provider.py | 2 +- tests/units/nulecule/test_xpathing.py | 2 +- tests/units/persistent_storage/test_ps_cli.py | 2 +- tests/units/providers/test_docker_provider.py | 2 +- tests/units/providers/test_kubernetes_provider.py | 2 +- 26 files changed, 26 insertions(+), 26 deletions(-) diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py index 4665e853..4695cb61 100644 --- 
a/atomicapp/applogging.py +++ b/atomicapp/applogging.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/cli/__init__.py b/atomicapp/cli/__init__.py index 9734a072..a38c54ce 100644 --- a/atomicapp/cli/__init__.py +++ b/atomicapp/cli/__init__.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 443692cc..8f2fca05 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 0afa9442..62dd8097 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/nulecule/__init__.py b/atomicapp/nulecule/__init__.py index 7d20122f..9da0a487 100644 --- a/atomicapp/nulecule/__init__.py +++ b/atomicapp/nulecule/__init__.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index a3e77a8c..18b71e1d 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 6fc45cdd..4a28d903 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. 
diff --git a/atomicapp/nulecule/exceptions.py b/atomicapp/nulecule/exceptions.py index b932ab49..02bf391f 100644 --- a/atomicapp/nulecule/exceptions.py +++ b/atomicapp/nulecule/exceptions.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index 98ba8f36..89257046 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 1fcbb2f6..f9b2ee43 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index 96a43f11..bf23de62 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/providers/__init__.py b/atomicapp/providers/__init__.py index 479ef0b6..4989594f 100644 --- a/atomicapp/providers/__init__.py +++ b/atomicapp/providers/__init__.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index dfd9c138..43341e26 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. 
diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index 6a496177..50e32cdd 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/providers/marathon.py b/atomicapp/providers/marathon.py index c22165d8..9370c8bf 100644 --- a/atomicapp/providers/marathon.py +++ b/atomicapp/providers/marathon.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index abe2b247..2c2166c0 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index 8e81be2e..ab8df2cf 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/atomicapp/utils.py b/atomicapp/utils.py index ca36e5e4..ca2473c1 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/setup.py b/setup.py index f01a3766..d957dedc 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ #!/usr/bin/env python """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/tests/units/cli/test_cli.py b/tests/units/cli/test_cli.py index 02dbbe8c..1b3e62e7 100644 --- a/tests/units/cli/test_cli.py +++ b/tests/units/cli/test_cli.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. 
diff --git a/tests/units/cli/test_cli_gitlab_example.py b/tests/units/cli/test_cli_gitlab_example.py index 20686e45..b0451a30 100644 --- a/tests/units/cli/test_cli_gitlab_example.py +++ b/tests/units/cli/test_cli_gitlab_example.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/tests/units/cli/test_default_provider.py b/tests/units/cli/test_default_provider.py index cb16a9b2..fa1e4272 100644 --- a/tests/units/cli/test_default_provider.py +++ b/tests/units/cli/test_default_provider.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/tests/units/nulecule/test_xpathing.py b/tests/units/nulecule/test_xpathing.py index e8ff5cee..f9666469 100644 --- a/tests/units/nulecule/test_xpathing.py +++ b/tests/units/nulecule/test_xpathing.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/tests/units/persistent_storage/test_ps_cli.py b/tests/units/persistent_storage/test_ps_cli.py index 24692bc1..6093fb12 100644 --- a/tests/units/persistent_storage/test_ps_cli.py +++ b/tests/units/persistent_storage/test_ps_cli.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. diff --git a/tests/units/providers/test_docker_provider.py b/tests/units/providers/test_docker_provider.py index 20aa264a..c0047d07 100644 --- a/tests/units/providers/test_docker_provider.py +++ b/tests/units/providers/test_docker_provider.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. 
diff --git a/tests/units/providers/test_kubernetes_provider.py b/tests/units/providers/test_kubernetes_provider.py index 97b94ed3..412d84dd 100644 --- a/tests/units/providers/test_kubernetes_provider.py +++ b/tests/units/providers/test_kubernetes_provider.py @@ -1,5 +1,5 @@ """ - Copyright 2015 Red Hat, Inc. + Copyright 2014-2016 Red Hat, Inc. This file is part of Atomic App. From 371c6569bc1dea3450a36f60200c816c8bc92e5a Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Mon, 4 Apr 2016 18:07:34 +0530 Subject: [PATCH 092/193] Handle ProviderFailedException, fix #627 --- atomicapp/cli/main.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 8f2fca05..9d06e0fa 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -38,6 +38,7 @@ PROVIDERS) from atomicapp.nulecule import NuleculeManager from atomicapp.nulecule.exceptions import NuleculeException, DockerException +from atomicapp.plugin import ProviderFailedException from atomicapp.utils import Utils logger = logging.getLogger(LOGGER_DEFAULT) @@ -122,6 +123,9 @@ def cli_func_exec(cli_func, cli_func_args): except NuleculeException as e: logger.error(e) sys.exit(1) + except ProviderFailedException as e: + logger.error(e) + sys.exit(1) except Exception as e: logger.error(e, exc_info=True) sys.exit(1) From 81915e82bf5c190fb51e1e938bceed40d6605975 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Mon, 4 Apr 2016 18:18:33 +0530 Subject: [PATCH 093/193] add pass for improved readability --- atomicapp/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index bf23de62..ae44943a 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -117,8 +117,8 @@ def __repr__(self): class ProviderFailedException(Exception): - """Error during provider execution""" + pass class Plugin(object): From b93cb52b0d6f863920688dbf3788e6669de6410c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 1 Apr 
2016 13:40:45 -0400 Subject: [PATCH 094/193] Requirements should retrieve from Nulecule object not graph According to the Nulecule spec, requirements should be its own object rather than being retrieved via graph. ... requirements: - persistentVolume: name: "var-log-httpd" accessMode: "ReadWrite" size: 4 This is an undesired effect and should retrieve from its own object: graph: ... requirements: - persistentVolume: name: "var-log-httpd" accessMode: "ReadWrite" size: 4 --- atomicapp/nulecule/base.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 18b71e1d..07cc1f26 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -38,7 +38,6 @@ NAME_KEY, INHERIT_KEY, ARTIFACTS_KEY, - REQUIREMENTS_KEY, DEFAULT_PROVIDER) from atomicapp.utils import Utils from atomicapp.requirements import Requirements @@ -195,14 +194,11 @@ def run(self, provider_key=None, dryrun=False): """ provider_key, provider = self.get_provider(provider_key, dryrun) - # Process preliminary requirements - # Pass configuration, path of the app, graph, provider as well as dry-run - # for provider init() - if REQUIREMENTS_KEY in self.graph[0]: - logger.debug("Requirements key detected. Running action.") - r = Requirements(self.config, self.basepath, self.graph[0][REQUIREMENTS_KEY], - provider_key, dryrun) - r.run() + # Process preliminary requirements before components + if self.requirements: + logger.debug("Requirements detected. 
Running action.") + Requirements(self.config, self.basepath, self.requirements, + provider_key, dryrun).run() # Process components for component in self.components: From 490567d27f8e3b2f0bc78f98a6806b550f4d8541 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 1 Apr 2016 14:00:06 -0400 Subject: [PATCH 095/193] Warn not error on missing requirement Fixes https://github.com/projectatomic/atomicapp/issues/584 If a requirement is missing for a provider (e.g. Persistent Volumes on Docker, OpenShift, etc.), warn the user instead of erroring out. --- atomicapp/requirements.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index ab8df2cf..adc7c4f0 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -79,13 +79,14 @@ def _exec(self, action): requirement_function = self._find_requirement_function_name(key_name) # Check to see if the function exists in the provider, - # if it does not: fail + # if it does not: warn the user try: requirement = getattr(self.provider, requirement_function) except AttributeError: - raise RequirementFailedException( - "Requirement %s does not exist within %s." % + logger.warning( + "Requirement %s does not exist within %s. Skipping." 
% (requirement_function, self.provider)) + continue # Run the requirement function requirement(req[key_name], action) From 7c9cad933cd0588f3ba8395901d1ed9d4b1145d3 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Wed, 6 Apr 2016 21:58:22 +0530 Subject: [PATCH 096/193] replace inspect with ps, fix #672 --- atomicapp/providers/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index 43341e26..0dfb5944 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -72,7 +72,7 @@ def init(self): raise ProviderFailedException(msg) def _get_containers(self): - docker_cmd = 'docker inspect --format="{{ .Name }}" $(docker ps -aq --no-trunc) | sed "s,/,,g"' + docker_cmd = 'docker ps -a --format="{{ .Names }}"' if self.dryrun: logger.info("DRY-RUN: %s", docker_cmd) return [] From f1e57ec1cffa5d05ca99d046e8f2bb2129494c92 Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Wed, 6 Apr 2016 22:50:02 +0530 Subject: [PATCH 097/193] Added elif to consecutive exclusive if statements The for loop contained conditions of which only one can be true at any given time, so the consecutive if statements were changed to elif. 
--- atomicapp/providers/openshift.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 2c2166c0..7e8264c9 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -655,9 +655,9 @@ def _set_config_values(self): for k in result.keys(): if answers[k] is not None and providerconfig[k] is None: result[k] = answers[k] - if answers[k] is None and providerconfig[k] is not None: + elif answers[k] is None and providerconfig[k] is not None: result[k] = providerconfig[k] - if answers[k] is not None and providerconfig[k] is not None: + elif answers[k] is not None and providerconfig[k] is not None: if answers[k] == providerconfig[k]: result[k] = answers[k] else: From c9e7a3f8c19950644d86900c6a51edcf2731561e Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 4 Apr 2016 16:47:11 -0400 Subject: [PATCH 098/193] Util to gather what user is running Atomic App and which home dir it should use This call finds out what user is running the Atomic App. Whether they are: sudo root user If it is running under sudo, this util will find the user running the sudo command. Once the user is found, os.path.expanduser(user) is utilized to find the home directory. If a home directory is not found atomic app will error out. 
If Atomic App is running within a Docker container it will utilize the passed /host dir (from the RUN/STOP label of Atomic CLI) in order to find the directory of the corresponding user --- Dockerfile | 4 +-- Dockerfiles.git/Dockerfile.centos | 4 +-- Dockerfiles.git/Dockerfile.debian | 4 +-- Dockerfiles.git/Dockerfile.fedora | 4 +-- atomicapp/utils.py | 56 ++++++++++++++++++++++++++++++- 5 files changed, 63 insertions(+), 9 deletions(-) diff --git a/Dockerfile b/Dockerfile index 4702604d..6ca9e67c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 4702604d..6ca9e67c 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it 
--rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 70b37350..37493811 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -5,8 +5,8 @@ MAINTAINER Red Hat, Inc. 
ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index c9c695c3..e9288761 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v 
\${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/atomicapp/utils.py b/atomicapp/utils.py index ca2473c1..ed5bbf8e 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -335,7 +335,9 @@ def getDockerCli(dryrun=False): @staticmethod def inContainer(): """ - Determine if we are running inside a container or not. + Determine if we are running inside a container or not. This is done by + checking to see if /host has been passed as well as if .dockerenv and + .dockerinit files exist Returns: (bool): True == we are in a container @@ -377,6 +379,58 @@ def rm_dir(directory): logger.debug('Recursively removing directory: %s' % directory) distutils.dir_util.remove_tree(directory) + @staticmethod + def getUserName(): + """ + Finds the username of the user running the application. Uses the + SUDO_USER and USER environment variables. If runnning within a + container, SUDO_USER and USER varibles must be passed for proper + detection. + Ex. docker run -v /:/host -e SUDO_USER -e USER foobar + """ + sudo_user = os.environ.get('SUDO_USER') + + if os.getegid() == 0 and sudo_user is None: + user = 'root' + elif sudo_user is not None: + user = sudo_user + else: + user = os.environ.get('USER') + return user + + @staticmethod + def getUserHome(): + """ + Finds the home directory of the user running the application. + If runnning within a container, the root dir must be passed as + a volume. + Ex. docker run -v /:/host -e SUDO_USER -e USER foobar + """ + logger.debug("Finding the users home directory") + user = Utils.getUserName() + incontainer = Utils.inContainer() + + # Check to see if we are running in a container. If we are we + # will chroot into the /host path before calling os.path.expanduser + if incontainer: + os.chroot(HOST_DIR) + + # Call os.path.expanduser to determine the user's home dir. 
+ # See https://docs.python.org/2/library/os.path.html#os.path.expanduser + # Warn if none is detected, don't error as not having a home + # dir doesn't mean we fail. + home = os.path.expanduser("~%s" % user) + if home == ("~%s" % user): + logger.error("No home directory exists for user %s" % user) + + # Back out of chroot if necessary + if incontainer: + os.chroot("../..") + + logger.debug("Running as user %s. Using home directory %s for configuration data" + % (user, home)) + return home + @staticmethod def make_rest_request(method, url, verify=True, data=None): """ From 303a5485e123a2b272613415af883efa72749d85 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 4 Apr 2016 16:48:01 -0400 Subject: [PATCH 099/193] Check to see if it's a Docker container As we don't support any other container platform (yet), check to see if this is a Docker container. --- atomicapp/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index ed5bbf8e..e8917d18 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -342,7 +342,7 @@ def inContainer(): Returns: (bool): True == we are in a container """ - if os.path.isdir(HOST_DIR): + if os.path.isfile('/.dockerenv') and os.path.isfile('/.dockerinit') and os.path.isdir(HOST_DIR): return True else: return False From 5a8872e8b410a11d19ee6339d7e1069ec9699df1 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 7 Apr 2016 14:07:32 -0400 Subject: [PATCH 100/193] Update Dockerfile.pkgs testing repos --- Dockerfiles.pkgs/Dockerfile.centos | 6 +++--- Dockerfiles.pkgs/Dockerfile.fedora | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index ea4a0d0c..3a0d937b 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,14 +4,14 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.1.12" +ENV ATOMICAPPVERSION="0.4.5" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index 5eed0d62..c3e76743 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,14 +4,14 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.1.12" +ENV ATOMICAPPVERSION="0.4.5" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /atomicapp From d9570f5f1e706c35929e44f32dad5f932d5f8a31 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 4 Apr 2016 16:47:11 -0400 Subject: [PATCH 101/193] Util to gather what user is running Atomic App and which home dir it should use This call finds out what user is running the Atomic App. Whether they are: sudo root user If it is running under sudo, this util will find the user running the sudo command. Once the user is found, os.path.expanduser(user) is utilized to find the home directory. If a home directory is not found atomic app will error out. 
If Atomic App is running within a Docker container it will utilize the passed /host dir (from the RUN/STOP label of Atomic CLI) in order to find the directory of the corresponding user --- Dockerfile | 4 +-- Dockerfiles.git/Dockerfile.centos | 4 +-- Dockerfiles.git/Dockerfile.debian | 4 +-- Dockerfiles.git/Dockerfile.fedora | 4 +-- atomicapp/utils.py | 56 ++++++++++++++++++++++++++++++- 5 files changed, 63 insertions(+), 9 deletions(-) diff --git a/Dockerfile b/Dockerfile index 4702604d..6ca9e67c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 4702604d..6ca9e67c 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it 
--rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 70b37350..37493811 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -5,8 +5,8 @@ MAINTAINER Red Hat, Inc. 
ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index c9c695c3..e9288761 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -7,8 +7,8 @@ ENV ATOMICAPPVERSION="0.4.5" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v 
\${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /opt/atomicapp diff --git a/atomicapp/utils.py b/atomicapp/utils.py index ca2473c1..ed5bbf8e 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -335,7 +335,9 @@ def getDockerCli(dryrun=False): @staticmethod def inContainer(): """ - Determine if we are running inside a container or not. + Determine if we are running inside a container or not. This is done by + checking to see if /host has been passed as well as if .dockerenv and + .dockerinit files exist Returns: (bool): True == we are in a container @@ -377,6 +379,58 @@ def rm_dir(directory): logger.debug('Recursively removing directory: %s' % directory) distutils.dir_util.remove_tree(directory) + @staticmethod + def getUserName(): + """ + Finds the username of the user running the application. Uses the + SUDO_USER and USER environment variables. If runnning within a + container, SUDO_USER and USER varibles must be passed for proper + detection. + Ex. docker run -v /:/host -e SUDO_USER -e USER foobar + """ + sudo_user = os.environ.get('SUDO_USER') + + if os.getegid() == 0 and sudo_user is None: + user = 'root' + elif sudo_user is not None: + user = sudo_user + else: + user = os.environ.get('USER') + return user + + @staticmethod + def getUserHome(): + """ + Finds the home directory of the user running the application. + If runnning within a container, the root dir must be passed as + a volume. + Ex. docker run -v /:/host -e SUDO_USER -e USER foobar + """ + logger.debug("Finding the users home directory") + user = Utils.getUserName() + incontainer = Utils.inContainer() + + # Check to see if we are running in a container. If we are we + # will chroot into the /host path before calling os.path.expanduser + if incontainer: + os.chroot(HOST_DIR) + + # Call os.path.expanduser to determine the user's home dir. 
+ # See https://docs.python.org/2/library/os.path.html#os.path.expanduser + # Warn if none is detected, don't error as not having a home + # dir doesn't mean we fail. + home = os.path.expanduser("~%s" % user) + if home == ("~%s" % user): + logger.error("No home directory exists for user %s" % user) + + # Back out of chroot if necessary + if incontainer: + os.chroot("../..") + + logger.debug("Running as user %s. Using home directory %s for configuration data" + % (user, home)) + return home + @staticmethod def make_rest_request(method, url, verify=True, data=None): """ From 81c98f7ac639344bceca53e1000147caac225097 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 4 Apr 2016 16:48:01 -0400 Subject: [PATCH 102/193] Check to see if it's a Docker container As we don't support any other container platform (yet), check to see if this is a Docker container. --- atomicapp/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index ed5bbf8e..e8917d18 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -342,7 +342,7 @@ def inContainer(): Returns: (bool): True == we are in a container """ - if os.path.isdir(HOST_DIR): + if os.path.isfile('/.dockerenv') and os.path.isfile('/.dockerinit') and os.path.isdir(HOST_DIR): return True else: return False From 09ce03684123ba6e67f0882efb8edf28d62d2ed7 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 7 Apr 2016 14:07:32 -0400 Subject: [PATCH 103/193] Update Dockerfile.pkgs testing repos --- Dockerfiles.pkgs/Dockerfile.centos | 6 +++--- Dockerfiles.pkgs/Dockerfile.fedora | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index ea4a0d0c..3a0d937b 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,14 +4,14 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.1.12" +ENV ATOMICAPPVERSION="0.4.5" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /atomicapp diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index 5eed0d62..c3e76743 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,14 +4,14 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.1.12" +ENV ATOMICAPPVERSION="0.4.5" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ io.openshift.generate.token.as=env:TOKEN_ENV_VAR \ - RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ - STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" + RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ + STOP="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} stop \${OPT3}" WORKDIR /atomicapp From 525954940050f7adf861caf4cbdb8713014cf888 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 8 Apr 2016 11:15:52 -0400 Subject: [PATCH 104/193] Dashes added to CLI commands to distinguish provider config data This commit adds dashes to the command line (as well as answers file) for the provider configuration data. 
For example: --providercafile because --provider-cafile --- atomicapp/cli/main.py | 28 ++++++++++++++-------------- atomicapp/constants.py | 8 ++++---- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 9d06e0fa..ac86509b 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -227,25 +227,25 @@ def create_parser(self): dest="namespace", help=('The namespace to use in the target provider')) globals_parser.add_argument( - "--providertlsverify", - dest="providertlsverify", + "--provider-tlsverify", + dest="provider-tlsverify", action=TrueOrFalseAction, choices=['True', 'False'], help=(''' - Value for providertlsverify answers option. + Value for provider-tlsverify answers option. --providertlsverify=False to disable tls verification''')) globals_parser.add_argument( - "--providerconfig", - dest="providerconfig", - help='Value for providerconfig answers option.') + "--provider-config", + dest="provider-config", + help='Value for provider-config answers option.') globals_parser.add_argument( - "--providercafile", - dest="providercafile", - help='Value for providercafile answers option.') + "--provider-cafile", + dest="provider-cafile", + help='Value for provider-cafile answers option.') globals_parser.add_argument( - "--providerapi", - dest="providerapi", - help='Value for providerapi answers option.') + "--provider-api", + dest="provider-api", + help='Value for provider-api answers option.') globals_parser.add_argument( "--logtype", dest="logtype", @@ -436,8 +436,8 @@ def run(self): # Take the arguments that correspond to "answers" config file data # and make a dictionary of it to pass along in args. 
setattr(args, 'cli_answers', {}) - for item in ['providerapi', 'providercafile', - 'providerconfig', 'providertlsverify', 'namespace']: + for item in ['provider-api', 'provider-cafile', + 'provider-config', 'provider-tlsverify', 'namespace']: if hasattr(args, item) and getattr(args, item) is not None: args.cli_answers[item] = getattr(args, item) diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 62dd8097..ab8ec418 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -70,11 +70,11 @@ } PROVIDERS = ["docker", "kubernetes", "openshift", "marathon"] -PROVIDER_API_KEY = "providerapi" +PROVIDER_API_KEY = "provider-api" ACCESS_TOKEN_KEY = "accesstoken" -PROVIDER_CONFIG_KEY = "providerconfig" -PROVIDER_TLS_VERIFY_KEY = "providertlsverify" -PROVIDER_CA_KEY = "providercafile" +PROVIDER_CONFIG_KEY = "provider-config" +PROVIDER_TLS_VERIFY_KEY = "provider-tlsverify" +PROVIDER_CA_KEY = "provider-cafile" # Persistent Storage Formats PERSISTENT_STORAGE_FORMAT = ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"] From fadfb7fcf45246d85724c3b8fcd319babc413b05 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 8 Apr 2016 11:20:39 -0400 Subject: [PATCH 105/193] Fix test params with the new dashes --- tests/units/nulecule/test_kubeconfig.py | 12 ++++++------ tests/units/providers/test_kubernetes_provider.py | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/units/nulecule/test_kubeconfig.py b/tests/units/nulecule/test_kubeconfig.py index 2161efe2..93960150 100644 --- a/tests/units/nulecule/test_kubeconfig.py +++ b/tests/units/nulecule/test_kubeconfig.py @@ -45,11 +45,11 @@ def test_parse_kubeconf_data_insecure(self): } self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), - {'providerapi': 'server1', + {'provider-api': 'server1', 'accesstoken': 'token1', 'namespace': 'namespace1', - 'providertlsverify': False, - 'providercafile': None}) + 'provider-tlsverify': False, + 'provider-cafile': None}) def 
test_parse_kubeconf_data_cafile(self): """ @@ -91,11 +91,11 @@ def test_parse_kubeconf_data_cafile(self): } self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), - {'providerapi': 'server1', + {'provider-api': 'server1', 'accesstoken': 'token1', 'namespace': 'namespace1', - 'providertlsverify': True, - 'providercafile': '/foo/bar'}) + 'provider-tlsverify': True, + 'provider-cafile': '/foo/bar'}) def test_parse_kubeconf_data_no_context(self): """ diff --git a/tests/units/providers/test_kubernetes_provider.py b/tests/units/providers/test_kubernetes_provider.py index 412d84dd..6afbc323 100644 --- a/tests/units/providers/test_kubernetes_provider.py +++ b/tests/units/providers/test_kubernetes_provider.py @@ -60,7 +60,7 @@ def test_provider_config_exist(self): with open(provider_config_path, "w") as fp: fp.write(mock_content) - data = {'namespace': 'testing', 'provider': 'kubernetes', 'providerconfig': provider_config_path} + data = {'namespace': 'testing', 'provider': 'kubernetes', 'provider-config': provider_config_path} provider = self.prepare_provider(data) From 87872b319e7ca46d5f21eeea3fc3f905f2ece237 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 8 Apr 2016 11:34:04 -0400 Subject: [PATCH 106/193] Add provider-auth as a CLI command and convert ACCESSTOKEN to provider-auth --- atomicapp/cli/main.py | 6 +++++- atomicapp/constants.py | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index ac86509b..a20a0a8d 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -246,6 +246,10 @@ def create_parser(self): "--provider-api", dest="provider-api", help='Value for provider-api answers option.') + globals_parser.add_argument( + "--provider-auth", + dest="provider-auth", + help='Value for provider-auth answers option.') globals_parser.add_argument( "--logtype", dest="logtype", @@ -436,7 +440,7 @@ def run(self): # Take the arguments that correspond to "answers" config file data # and 
make a dictionary of it to pass along in args. setattr(args, 'cli_answers', {}) - for item in ['provider-api', 'provider-cafile', + for item in ['provider-api', 'provider-cafile', 'provider-auth', 'provider-config', 'provider-tlsverify', 'namespace']: if hasattr(args, item) and getattr(args, item) is not None: args.cli_answers[item] = getattr(args, item) diff --git a/atomicapp/constants.py b/atomicapp/constants.py index ab8ec418..ba3fa582 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -71,7 +71,7 @@ PROVIDERS = ["docker", "kubernetes", "openshift", "marathon"] PROVIDER_API_KEY = "provider-api" -ACCESS_TOKEN_KEY = "accesstoken" +ACCESS_TOKEN_KEY = "provider-auth" PROVIDER_CONFIG_KEY = "provider-config" PROVIDER_TLS_VERIFY_KEY = "provider-tlsverify" PROVIDER_CA_KEY = "provider-cafile" From 550c1be178f06c1329362de2f078692a62aed963 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 8 Apr 2016 11:35:14 -0400 Subject: [PATCH 107/193] Modify accesstoken tests to provider-auth --- tests/units/nulecule/test_kubeconfig.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/units/nulecule/test_kubeconfig.py b/tests/units/nulecule/test_kubeconfig.py index 93960150..974b694e 100644 --- a/tests/units/nulecule/test_kubeconfig.py +++ b/tests/units/nulecule/test_kubeconfig.py @@ -46,7 +46,7 @@ def test_parse_kubeconf_data_insecure(self): self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), {'provider-api': 'server1', - 'accesstoken': 'token1', + 'provider-auth': 'token1', 'namespace': 'namespace1', 'provider-tlsverify': False, 'provider-cafile': None}) @@ -92,7 +92,7 @@ def test_parse_kubeconf_data_cafile(self): self.assertEqual(KubeConfig.parse_kubeconf_data(kubecfg_data), {'provider-api': 'server1', - 'accesstoken': 'token1', + 'provider-auth': 'token1', 'namespace': 'namespace1', 'provider-tlsverify': True, 'provider-cafile': '/foo/bar'}) From 6f274a75a425a8924517257787efe9fa4c228727 Mon Sep 17 00:00:00 2001 From: 
Charlie Drage Date: Mon, 11 Apr 2016 10:26:17 -0400 Subject: [PATCH 108/193] Change constant ACCESS_TOKEN_KEY to PROVIDER_AUTH_KEY --- atomicapp/constants.py | 2 +- atomicapp/providers/lib/kubeconfig.py | 4 ++-- atomicapp/providers/openshift.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/atomicapp/constants.py b/atomicapp/constants.py index ba3fa582..6f36f9ee 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -71,7 +71,7 @@ PROVIDERS = ["docker", "kubernetes", "openshift", "marathon"] PROVIDER_API_KEY = "provider-api" -ACCESS_TOKEN_KEY = "provider-auth" +PROVIDER_AUTH_KEY = "provider-auth" PROVIDER_CONFIG_KEY = "provider-config" PROVIDER_TLS_VERIFY_KEY = "provider-tlsverify" PROVIDER_CA_KEY = "provider-cafile" diff --git a/atomicapp/providers/lib/kubeconfig.py b/atomicapp/providers/lib/kubeconfig.py index d264e0d1..9f71792d 100644 --- a/atomicapp/providers/lib/kubeconfig.py +++ b/atomicapp/providers/lib/kubeconfig.py @@ -1,7 +1,7 @@ import anymarkup from atomicapp.plugin import ProviderFailedException -from atomicapp.constants import (ACCESS_TOKEN_KEY, +from atomicapp.constants import (PROVIDER_AUTH_KEY, LOGGER_DEFAULT, NAMESPACE_KEY, PROVIDER_API_KEY, @@ -112,7 +112,7 @@ def parse_kubeconf_data(kubecfg): ca = cluster["cluster"]["certificate-authority"] return {PROVIDER_API_KEY: url, - ACCESS_TOKEN_KEY: token, + PROVIDER_AUTH_KEY: token, NAMESPACE_KEY: namespace, PROVIDER_TLS_VERIFY_KEY: tls_verify, PROVIDER_CA_KEY: ca} diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 2c2166c0..3741fa06 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -29,7 +29,7 @@ from atomicapp.utils import Utils from atomicapp.plugin import Provider, ProviderFailedException -from atomicapp.constants import (ACCESS_TOKEN_KEY, +from atomicapp.constants import (PROVIDER_AUTH_KEY, ANSWERS_FILE, DEFAULT_NAMESPACE, LOGGER_DEFAULT, @@ -630,7 +630,7 @@ def 
_set_config_values(self): # initialize result to default values result = {PROVIDER_API_KEY: self.providerapi, - ACCESS_TOKEN_KEY: self.access_token, + PROVIDER_AUTH_KEY: self.access_token, NAMESPACE_KEY: self.namespace, PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify, PROVIDER_CA_KEY: self.provider_ca} @@ -670,7 +670,7 @@ def _set_config_values(self): logger.debug("config values: %s" % result) # this items are required, they have to be not None - for k in [PROVIDER_API_KEY, ACCESS_TOKEN_KEY, NAMESPACE_KEY]: + for k in [PROVIDER_API_KEY, PROVIDER_AUTH_KEY, NAMESPACE_KEY]: if result[k] is None: msg = "You need to set %s in %s" % (k, ANSWERS_FILE) logger.error(msg) @@ -678,7 +678,7 @@ def _set_config_values(self): # set config values self.providerapi = result[PROVIDER_API_KEY] - self.access_token = result[ACCESS_TOKEN_KEY] + self.access_token = result[PROVIDER_AUTH_KEY] self.namespace = result[NAMESPACE_KEY] self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY] if result[PROVIDER_CA_KEY]: From 1c438d2d6c0416a0cfc6e7b184e409240096e26f Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Mon, 11 Apr 2016 12:18:57 -0400 Subject: [PATCH 109/193] Added OrderedDict so as to deploy application in given order Openshift deployement is done via API, so the artifacts are read locally and stored in a dict, which was causing the order of artifacts to change while deployment, using OrderedDict paves a way to keeping that sequence. 
Fixes issue #669 --- atomicapp/providers/openshift.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 3741fa06..268bae0d 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -25,6 +25,7 @@ import time from urlparse import urljoin from urllib import urlencode +from collections import OrderedDict import websocket from atomicapp.utils import Utils @@ -331,11 +332,9 @@ class OpenShiftProvider(Provider): # path to file or dir with CA certificates provider_ca = None - # Parsed artifacts. Key is kind of artifacts. Value is list of artifacts. - openshift_artifacts = {} - def init(self): - self.openshift_artifacts = {} + # Parsed artifacts. Key is kind of artifacts. Value is list of artifacts. + self.openshift_artifacts = OrderedDict() self._set_config_values() From 49294c8fb6ab9c447b2d4e0a02efe02c783dcc1c Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Sat, 5 Mar 2016 19:20:36 +0530 Subject: [PATCH 110/193] Added 'init' command to initialize a new atomic app. 
--- atomicapp/cli/main.py | 25 ++++++++++++- atomicapp/nulecule/main.py | 35 +++++++++++++++++++ atomicapp/templates/nulecule/Dockerfile.tpl | 10 ++++++ atomicapp/templates/nulecule/Nulecule.tpl | 20 +++++++++++ atomicapp/templates/nulecule/README.md.tpl | 3 ++ .../nulecule/answers.conf.sample.tpl | 7 ++++ .../nulecule/artifacts/docker/run.tpl | 1 + .../artifacts/kubernetes/pod.yaml.tpl | 11 ++++++ 8 files changed, 111 insertions(+), 1 deletion(-) create mode 100644 atomicapp/templates/nulecule/Dockerfile.tpl create mode 100644 atomicapp/templates/nulecule/Nulecule.tpl create mode 100644 atomicapp/templates/nulecule/README.md.tpl create mode 100644 atomicapp/templates/nulecule/answers.conf.sample.tpl create mode 100644 atomicapp/templates/nulecule/artifacts/docker/run.tpl create mode 100644 atomicapp/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index a20a0a8d..21daa8c4 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -101,6 +101,16 @@ def cli_stop(args): sys.exit(0) +def cli_init(args): + try: + argdict = args.__dict__ + NuleculeManager.init(argdict['app_name'], argdict['destination']) + sys.exit(0) + except Exception as e: + logger.error(e, exc_info=True) + sys.exit(1) + + # Create a custom action parser. Need this because for some args we don't # want to store a value if the user didn't provide one. "store_true" does # not allow this; it will always create an attribute and store a value. 
@@ -365,6 +375,19 @@ def create_parser(self): help='The name of a container image containing an Atomic App.') gena_subparser.set_defaults(func=cli_genanswers) + # === "init" SUBPARSER === + init_subparser = toplevel_subparsers.add_parser( + "init", parents=[globals_parser]) + init_subparser.add_argument( + "app_name", + help="App name.") + init_subparser.add_argument( + "destination", + help=(''' + Path to the directory where the Atomic App + is to be initialized.''')) + init_subparser.set_defaults(func=cli_init) + # Some final fixups.. We want the "help" from the global # parser to be output when someone runs 'atomicapp --help' # To get that functionality we will add the help from the @@ -428,7 +451,7 @@ def run(self): # a directory if they want to for "run". For that reason we won't # default the RUN label for Atomic App to provide an app_spec argument. # In this case pick up app_spec from $IMAGE env var (set by RUN label). - if args.app_spec is None: + if args.action != 'init' and args.app_spec is None: if os.environ.get('IMAGE') is not None: logger.debug("Setting app_spec based on $IMAGE env var") args.app_spec = os.environ['IMAGE'] diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index f9b2ee43..cce3aa8c 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -25,6 +25,7 @@ import tempfile import urlparse import urllib +from string import Template from atomicapp.constants import (GLOBAL_CONF, ANSWERS_FILE_SAMPLE_FORMAT, @@ -120,6 +121,40 @@ def __init__(self, app_spec, destination=None, self.answers_file = answers_file self._process_answers() + @staticmethod + def init(app_name, destination='.', app_version='1.0', + app_desc='App description'): + """Initialize a new Nulecule app""" + context = dict( + app_name=app_name, + app_version=app_version, + app_desc=app_desc) + tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') + template_dir = os.path.join(os.path.dirname(__file__), + '../templates/nulecule') + 
distutils.dir_util.copy_tree(template_dir, tmpdir) + for item in os.walk(tmpdir): + parent_dir, dirs, files = item + for filename in files: + if not filename.endswith('.tpl'): + continue + templ_path = os.path.join(parent_dir, filename) + if parent_dir.endswith('artifacts/docker') or parent_dir.endswith('artifacts/kubernetes'): + file_path = os.path.join( + parent_dir, + '{}_{}'.format(app_name, filename[:-4])) + else: + file_path = os.path.join(parent_dir, filename[:-4]) + with open(templ_path) as f: + s = f.read() + t = Template(s) + with open(file_path, 'w') as f: + f.write(t.safe_substitute(**context)) + os.remove(templ_path) + + distutils.dir_util.copy_tree(tmpdir, destination, True) + distutils.dir_util.remove_tree(tmpdir) + def unpack(self, update=False, dryrun=False, nodeps=False, config=None): """ diff --git a/atomicapp/templates/nulecule/Dockerfile.tpl b/atomicapp/templates/nulecule/Dockerfile.tpl new file mode 100644 index 00000000..95e04536 --- /dev/null +++ b/atomicapp/templates/nulecule/Dockerfile.tpl @@ -0,0 +1,10 @@ +FROM projectatomic/atomicapp:0.4.1 + +MAINTAINER Your Name + +LABEL io.projectatomic.nulecule.specversion="0.0.2" \ + io.projectatomic.nulecule.providers="kubernetes, docker" \ + Build="docker build --rm --tag test/$app_name-atomicapp ." 
+ +ADD /Nulecule /Dockerfile README.md /application-entity/ +ADD /artifacts /application-entity/artifacts diff --git a/atomicapp/templates/nulecule/Nulecule.tpl b/atomicapp/templates/nulecule/Nulecule.tpl new file mode 100644 index 00000000..47aa32fe --- /dev/null +++ b/atomicapp/templates/nulecule/Nulecule.tpl @@ -0,0 +1,20 @@ +--- +specversion: 0.0.2 +id: ${app_name} + +metadata: + name: ${app_name} + appversion: ${app_version} + description: ${app_desc} + +graph: + - name: ${app_name} + params: + - name: image + description: Container image to use + default: centos/httpd + artifacts: + kubernetes: + - file://artifacts/kubernetes/${app_name}_pod.yaml + docker: + - file://artifacts/docker/${app_name}_run diff --git a/atomicapp/templates/nulecule/README.md.tpl b/atomicapp/templates/nulecule/README.md.tpl new file mode 100644 index 00000000..e9b5cfff --- /dev/null +++ b/atomicapp/templates/nulecule/README.md.tpl @@ -0,0 +1,3 @@ +# $app_name Atomic App + +My awesome Atomic App. diff --git a/atomicapp/templates/nulecule/answers.conf.sample.tpl b/atomicapp/templates/nulecule/answers.conf.sample.tpl new file mode 100644 index 00000000..158081b2 --- /dev/null +++ b/atomicapp/templates/nulecule/answers.conf.sample.tpl @@ -0,0 +1,7 @@ +[general] +namespace = default +provider = kubernetes + +[${app_name}] +image = centos/httpd + diff --git a/atomicapp/templates/nulecule/artifacts/docker/run.tpl b/atomicapp/templates/nulecule/artifacts/docker/run.tpl new file mode 100644 index 00000000..6399f0e6 --- /dev/null +++ b/atomicapp/templates/nulecule/artifacts/docker/run.tpl @@ -0,0 +1 @@ +docker run -d --name $app_name -P $image diff --git a/atomicapp/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl b/atomicapp/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl new file mode 100644 index 00000000..2fdd065b --- /dev/null +++ b/atomicapp/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: $app_name + labels: + 
name: $app_name + +spec: + containers: + - name: $app_name + image: $image From 8c442da3130fde78e34a63919fd1e3f2fe4fbe69 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 30 Mar 2016 12:46:04 +0530 Subject: [PATCH 111/193] Make destination optional atomicapp init command - if destination is specified, intialize app in destination - if destiniation is not specified, app is initialized in a directory by the name of the app in the current directory. --- atomicapp/cli/main.py | 4 +++- atomicapp/nulecule/main.py | 5 ++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 21daa8c4..4dc8daaa 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -382,7 +382,9 @@ def create_parser(self): "app_name", help="App name.") init_subparser.add_argument( - "destination", + "--destination", + dest="destination", + default=None, help=(''' Path to the directory where the Atomic App is to be initialized.''')) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index cce3aa8c..d505875a 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -122,7 +122,7 @@ def __init__(self, app_spec, destination=None, self._process_answers() @staticmethod - def init(app_name, destination='.', app_version='1.0', + def init(app_name, destination=None, app_version='1.0', app_desc='App description'): """Initialize a new Nulecule app""" context = dict( @@ -132,6 +132,9 @@ def init(app_name, destination='.', app_version='1.0', tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') template_dir = os.path.join(os.path.dirname(__file__), '../templates/nulecule') + if destination is None: + destination = os.path.join('.', app_name) + distutils.dir_util.copy_tree(template_dir, tmpdir) for item in os.walk(tmpdir): parent_dir, dirs, files = item From 4986abb2fe12db258884ab4332bc41ed7496bb50 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 30 Mar 2016 12:48:57 +0530 Subject: [PATCH 
112/193] Ask user if destination is not empty when initializting atomic app. --- atomicapp/nulecule/main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index d505875a..9198a7d5 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -135,6 +135,11 @@ def init(app_name, destination=None, app_version='1.0', if destination is None: destination = os.path.join('.', app_name) + if os.path.exists(destination) and os.path.isdir(destination) and os.listdir(destination): + value = raw_input('Destination directory is not empty! Do you still want to proceed? (y/n): ') + if value != 'y': + return + distutils.dir_util.copy_tree(template_dir, tmpdir) for item in os.walk(tmpdir): parent_dir, dirs, files = item From 77a611d945291773a69bed9acb23ea13095de367 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 30 Mar 2016 13:21:46 +0530 Subject: [PATCH 113/193] Do not acquire lock for initializing atomicapp. --- atomicapp/cli/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 4dc8daaa..5450199f 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -472,7 +472,8 @@ def run(self): lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE)) try: - lock.acquire(timeout=-1) + if args.action != 'init': + lock.acquire(timeout=-1) cli_func_exec(args.func, args) except AttributeError: if hasattr(args, 'func'): From 70b8570b9252d3aa161b1994565fa1d5152013f6 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 5 Apr 2016 20:07:22 +0530 Subject: [PATCH 114/193] Set default action as 'Y' when atomicapp init asks to clean dest directory. 
--- atomicapp/nulecule/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 9198a7d5..060d588d 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -136,8 +136,9 @@ def init(app_name, destination=None, app_version='1.0', destination = os.path.join('.', app_name) if os.path.exists(destination) and os.path.isdir(destination) and os.listdir(destination): - value = raw_input('Destination directory is not empty! Do you still want to proceed? (y/n): ') - if value != 'y': + value = raw_input('Destination directory is not empty! Do you still want to proceed? [Y]/n: ') + value = value or 'y' + if value.lower() != 'y': return distutils.dir_util.copy_tree(template_dir, tmpdir) From dab5f2aa0fe49d383d060ad545d74315a158002a Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 7 Apr 2016 11:58:16 +0530 Subject: [PATCH 115/193] Include nulecule template files in package data. --- setup.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index d957dedc..ad8ab927 100644 --- a/setup.py +++ b/setup.py @@ -51,7 +51,10 @@ def _install_requirements(): 'console_scripts': ['atomicapp=atomicapp.cli.main:main'], }, packages=find_packages(), - package_data={'atomicapp': ['providers/external/kubernetes/*.yaml']}, + package_data={'atomicapp': ['providers/external/kubernetes/*.yaml', + 'templates/nulecule/*.tpl', + 'templates/nulecule/artifacts/docker/*.tpl', + 'templates/nulecule/artifacts/kubernetes/*.tpl']}, include_package_data=True, install_requires=_install_requirements() ) From 834a5910d350923fe9398c92b6c76390e68525f3 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 7 Apr 2016 12:41:26 +0530 Subject: [PATCH 116/193] Add k8s service for initialized app. 
--- atomicapp/templates/nulecule/Nulecule.tpl | 1 + .../nulecule/artifacts/kubernetes/service.yaml.tpl | 12 ++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 atomicapp/templates/nulecule/artifacts/kubernetes/service.yaml.tpl diff --git a/atomicapp/templates/nulecule/Nulecule.tpl b/atomicapp/templates/nulecule/Nulecule.tpl index 47aa32fe..ec7750f3 100644 --- a/atomicapp/templates/nulecule/Nulecule.tpl +++ b/atomicapp/templates/nulecule/Nulecule.tpl @@ -16,5 +16,6 @@ graph: artifacts: kubernetes: - file://artifacts/kubernetes/${app_name}_pod.yaml + - file://artifacts/kubernetes/${app_name}_service.yaml docker: - file://artifacts/docker/${app_name}_run diff --git a/atomicapp/templates/nulecule/artifacts/kubernetes/service.yaml.tpl b/atomicapp/templates/nulecule/artifacts/kubernetes/service.yaml.tpl new file mode 100644 index 00000000..da008db8 --- /dev/null +++ b/atomicapp/templates/nulecule/artifacts/kubernetes/service.yaml.tpl @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Service +metadata: + name: $app_name + labels: + name: $app_name +spec: + ports: + - port: 80 + targetPort: 80 + selector: + name: $app_name From bb07142e93b51b60ffe3bce352af232d4c8f9cee Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Fri, 8 Apr 2016 13:14:23 +0530 Subject: [PATCH 117/193] Moved Nulecule template files to external dir. 
--- atomicapp/{ => external}/templates/nulecule/Dockerfile.tpl | 0 atomicapp/{ => external}/templates/nulecule/Nulecule.tpl | 0 atomicapp/{ => external}/templates/nulecule/README.md.tpl | 0 .../templates/nulecule/answers.conf.sample.tpl | 0 .../templates/nulecule/artifacts/docker/run.tpl | 0 .../templates/nulecule/artifacts/kubernetes/pod.yaml.tpl | 0 .../nulecule/artifacts/kubernetes/service.yaml.tpl | 0 atomicapp/nulecule/main.py | 2 +- setup.py | 6 +++--- 9 files changed, 4 insertions(+), 4 deletions(-) rename atomicapp/{ => external}/templates/nulecule/Dockerfile.tpl (100%) rename atomicapp/{ => external}/templates/nulecule/Nulecule.tpl (100%) rename atomicapp/{ => external}/templates/nulecule/README.md.tpl (100%) rename atomicapp/{ => external}/templates/nulecule/answers.conf.sample.tpl (100%) rename atomicapp/{ => external}/templates/nulecule/artifacts/docker/run.tpl (100%) rename atomicapp/{ => external}/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl (100%) rename atomicapp/{ => external}/templates/nulecule/artifacts/kubernetes/service.yaml.tpl (100%) diff --git a/atomicapp/templates/nulecule/Dockerfile.tpl b/atomicapp/external/templates/nulecule/Dockerfile.tpl similarity index 100% rename from atomicapp/templates/nulecule/Dockerfile.tpl rename to atomicapp/external/templates/nulecule/Dockerfile.tpl diff --git a/atomicapp/templates/nulecule/Nulecule.tpl b/atomicapp/external/templates/nulecule/Nulecule.tpl similarity index 100% rename from atomicapp/templates/nulecule/Nulecule.tpl rename to atomicapp/external/templates/nulecule/Nulecule.tpl diff --git a/atomicapp/templates/nulecule/README.md.tpl b/atomicapp/external/templates/nulecule/README.md.tpl similarity index 100% rename from atomicapp/templates/nulecule/README.md.tpl rename to atomicapp/external/templates/nulecule/README.md.tpl diff --git a/atomicapp/templates/nulecule/answers.conf.sample.tpl b/atomicapp/external/templates/nulecule/answers.conf.sample.tpl similarity index 100% rename from 
atomicapp/templates/nulecule/answers.conf.sample.tpl rename to atomicapp/external/templates/nulecule/answers.conf.sample.tpl diff --git a/atomicapp/templates/nulecule/artifacts/docker/run.tpl b/atomicapp/external/templates/nulecule/artifacts/docker/run.tpl similarity index 100% rename from atomicapp/templates/nulecule/artifacts/docker/run.tpl rename to atomicapp/external/templates/nulecule/artifacts/docker/run.tpl diff --git a/atomicapp/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl b/atomicapp/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl similarity index 100% rename from atomicapp/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl rename to atomicapp/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl diff --git a/atomicapp/templates/nulecule/artifacts/kubernetes/service.yaml.tpl b/atomicapp/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl similarity index 100% rename from atomicapp/templates/nulecule/artifacts/kubernetes/service.yaml.tpl rename to atomicapp/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 060d588d..09e3085a 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -131,7 +131,7 @@ def init(app_name, destination=None, app_version='1.0', app_desc=app_desc) tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') template_dir = os.path.join(os.path.dirname(__file__), - '../templates/nulecule') + '../external/templates/nulecule') if destination is None: destination = os.path.join('.', app_name) diff --git a/setup.py b/setup.py index ad8ab927..f7952db7 100644 --- a/setup.py +++ b/setup.py @@ -52,9 +52,9 @@ def _install_requirements(): }, packages=find_packages(), package_data={'atomicapp': ['providers/external/kubernetes/*.yaml', - 'templates/nulecule/*.tpl', - 'templates/nulecule/artifacts/docker/*.tpl', - 'templates/nulecule/artifacts/kubernetes/*.tpl']}, + 'external/templates/nulecule/*.tpl', 
+ 'external/templates/nulecule/artifacts/docker/*.tpl', + 'external/templates/nulecule/artifacts/kubernetes/*.tpl']}, include_package_data=True, install_requires=_install_requirements() ) From c943d13a2939b237acb0db014da82d99014d36b1 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 12 Apr 2016 19:41:06 +0530 Subject: [PATCH 118/193] Don't hard code atomicapp/nulecule versions in atomic app template. --- atomicapp/external/templates/nulecule/Dockerfile.tpl | 7 +++---- atomicapp/external/templates/nulecule/Nulecule.tpl | 2 +- atomicapp/nulecule/main.py | 9 +++++++-- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/atomicapp/external/templates/nulecule/Dockerfile.tpl b/atomicapp/external/templates/nulecule/Dockerfile.tpl index 95e04536..6fff6d69 100644 --- a/atomicapp/external/templates/nulecule/Dockerfile.tpl +++ b/atomicapp/external/templates/nulecule/Dockerfile.tpl @@ -1,10 +1,9 @@ -FROM projectatomic/atomicapp:0.4.1 +FROM projectatomic/atomicapp:${atomicapp_version} MAINTAINER Your Name -LABEL io.projectatomic.nulecule.specversion="0.0.2" \ - io.projectatomic.nulecule.providers="kubernetes, docker" \ - Build="docker build --rm --tag test/$app_name-atomicapp ." 
+LABEL io.projectatomic.nulecule.providers="kubernetes,docker,marathon" \ + io.projectatomic.nulecule.specversion="${nulecule_spec_version}" ADD /Nulecule /Dockerfile README.md /application-entity/ ADD /artifacts /application-entity/artifacts diff --git a/atomicapp/external/templates/nulecule/Nulecule.tpl b/atomicapp/external/templates/nulecule/Nulecule.tpl index ec7750f3..ecfa73e7 100644 --- a/atomicapp/external/templates/nulecule/Nulecule.tpl +++ b/atomicapp/external/templates/nulecule/Nulecule.tpl @@ -1,5 +1,5 @@ --- -specversion: 0.0.2 +specversion: ${nulecule_spec_version} id: ${app_name} metadata: diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 09e3085a..9bb238de 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -36,7 +36,9 @@ LOGGER_COCKPIT, LOGGER_DEFAULT, MAIN_FILE, - PROVIDER_KEY) + PROVIDER_KEY, + __ATOMICAPPVERSION__, + __NULECULESPECVERSION__) from atomicapp.nulecule.base import Nulecule from atomicapp.nulecule.exceptions import NuleculeException from atomicapp.utils import Utils @@ -128,7 +130,10 @@ def init(app_name, destination=None, app_version='1.0', context = dict( app_name=app_name, app_version=app_version, - app_desc=app_desc) + app_desc=app_desc, + atomicapp_version=__ATOMICAPPVERSION__, + nulecule_spec_version=__NULECULESPECVERSION__ + ) tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') template_dir = os.path.join(os.path.dirname(__file__), '../external/templates/nulecule') From 8068055e75710eda5c5492b3d00c5cd526ff3ed9 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 12 Apr 2016 11:02:45 -0400 Subject: [PATCH 119/193] Modify documentation to reflect changes in params This commit modified the documentation within atomic app to reflect the addition of dashes to separate provider config related information. 
--- docs/providers/docker/overview.md | 4 +- docs/providers/kubernetes/overview.md | 12 ++--- docs/providers/marathon/overview.md | 8 +-- .../openshift/overview_atomic_app.md | 54 +++++++++---------- docs/providers/openshift/overview_native.md | 2 +- 5 files changed, 40 insertions(+), 40 deletions(-) diff --git a/docs/providers/docker/overview.md b/docs/providers/docker/overview.md index a519a087..925bdd03 100644 --- a/docs/providers/docker/overview.md +++ b/docs/providers/docker/overview.md @@ -30,9 +30,9 @@ answers.conf file. An example is below: namespace: mynamespace ``` -#### providerconfig +#### provider-config This communicates directly with the docker daemon on the host. It does -not use the `providerconfig` option. +not use the `provider-config` option. #### Configuration Value Defaults diff --git a/docs/providers/kubernetes/overview.md b/docs/providers/kubernetes/overview.md index 6fe9597a..5de1c800 100644 --- a/docs/providers/kubernetes/overview.md +++ b/docs/providers/kubernetes/overview.md @@ -28,22 +28,22 @@ section of the answers.conf file. An example is below: namespace: mynamespace ``` -#### providerconfig +#### provider-config -For Kubernetes the configuration file as specified by `providerconfig` +For Kubernetes the configuration file as specified by `provider-config` is optional. Hosts that have kubernetes set up and running on them -may not need a `providerconfig` to be specified because kubernetes +may not need a `provider-config` to be specified because kubernetes services are listening on default ports/addresses. However, if kubernetes was set up to listen on different ports, or you wish to connect to a remote kubernetes environment, then you will need to specify a location for a provider config file. 
-One example of specifying a `providerconfig` is below: +One example of specifying a `provider-config` is below: ``` [general] provider: kubernetes -providerconfig: /host/home/foo/.kube/config +provider-config: /host/home/foo/.kube/config ``` #### Configuration Value Defaults @@ -53,7 +53,7 @@ Table 1. Kubernetes default configuration values Keyword | Required | Description | Default value ---------|----------|---------------------------------------------------------|-------------- namespace| no | namespace to use with each kubectl call | default -providerconfig| no | config file that specifies how to connect to kubernetes | none +provider-config| no | config file that specifies how to connect to kubernetes | none ### Operations diff --git a/docs/providers/marathon/overview.md b/docs/providers/marathon/overview.md index 5a360252..f38b69ae 100644 --- a/docs/providers/marathon/overview.md +++ b/docs/providers/marathon/overview.md @@ -6,15 +6,15 @@ The Marathon provider will deploy an application into Mesos cluster using Marathon scheduler. ### Configuration -This provider requires configuration (`providerapi`) to be able to connect to Marathon API. -If no `providerapi` is specified it will use `http://localhost:8080` as Marathon API url. +This provider requires configuration (`provider-api`) to be able to connect to Marathon API. +If no `provider-api` is specified it will use `http://localhost:8080` as Marathon API url. This configuration can be provided in the `answers.conf` file. Example: [general] provider=marathon - providerapi=http://10.0.2.15:8080 + provider-api=http://10.0.2.15:8080 #### Configuration values @@ -22,7 +22,7 @@ Table 1. 
Marathon default configuration values Keyword | Required | Description | Default value ------------|----------|---------------------------------------------|-------------------------- -providerapi | no | url for Marathon REST API | `http://localhost:8080` +provider-api | no | url for Marathon REST API | `http://localhost:8080` ### Operations diff --git a/docs/providers/openshift/overview_atomic_app.md b/docs/providers/openshift/overview_atomic_app.md index 06a8624d..6793cdb9 100644 --- a/docs/providers/openshift/overview_atomic_app.md +++ b/docs/providers/openshift/overview_atomic_app.md @@ -11,8 +11,8 @@ the application. One piece of the puzzle is telling Atomic App how to communicate with the OpenShift master. This can be done in one of two ways. -1. Passing in the `providerconfig` value in answers.conf -2. Passing in both the `providerapi` and `accesstoken` values in answers.conf. +1. Passing in the `provider-config` value in answers.conf +2. Passing in both the `provider-api` and `provider-auth` values in answers.conf. These config items are detailed below. @@ -34,27 +34,27 @@ namespace: mynamespace **NOTE**: If there is a namespace value set in the artifact metadata then that value will always be used and won't be overridden. -#### providerconfig +#### provider-config For OpenShift, one way to let Atomic App know how to communicate with the master is by re-using the provider config file that already exists on a user's machine. Basically whatever the user can do with the `oc` command, Atomic App can do by re-using the same provider config. 
-One example of specifying a `providerconfig` is below: +One example of specifying a `provider-config` is below: ``` [general] provider = openshift -providerconfig = /home/user/.kube/config +provider-config = /home/user/.kube/config ``` -#### providerapi + accesstoken +#### provider-api + provider-auth Another to pass credential information in is by passing in both the location where the openshift API is being served, as well as the access token that can be used to authenticate. These are done with the -`providerapi` and `accesstoken` config variables in the `[general]` +`provider-api` and `provider-auth` config variables in the `[general]` section within answers.conf. An example of this is below: @@ -62,8 +62,8 @@ An example of this is below: ``` [general] provider = openshift -providerapi = https://10.1.2.2:8443 -accesstoken = sadfasdfasfasfdasfasfasdfsafasfd +provider-api = https://10.1.2.2:8443 +provider-auth = sadfasdfasfasfdasfasfasdfsafasfd namespace = mynamespace ``` @@ -71,29 +71,29 @@ namespace = mynamespace `oc whoami -t` or if you are not using `oc` client you can get it via web browser on `https:///oauth/token/request` -#### providertlsverify -If `providerapi` is using https protocol you can optionally +#### provider-tlsverify +If `provider-api` is using https protocol you can optionally disable verification of tls/ssl certificates. This can be especially useful when using self-signed certificates. ``` [general] provider = openshift -providerapi = https://127.0.0.1:8443 -accesstoken = sadfasdfasfasfdasfasfasdfsafasfd +provider-api = https://127.0.0.1:8443 +provider-auth = sadfasdfasfasfdasfasfasdfsafasfd namespace = mynamespace -providertlsverify = False +provider-tlsverify = False ``` -**NOTE**: If `providerconfig` is used values of `providertlsverify` -and `providercafile` are set according to settings in `providerconfig` file. 
+**NOTE**: If `provider-config` is used values of `provider-tlsverify` +and `provider-cafile` are set according to settings in `provider-config` file. -#### providercafile -If `providerapi` is using https protocol you can optionally specify +#### provider-cafile +If `provider-api` is using https protocol you can optionally specify path to a CA_BAUNDLE file or directory with certificates of trusted CAs. -**NOTE**: If `providerconfig` is used values of `providertlsverify` -and `providercafile` are set according to settings in `providerconfig` file. +**NOTE**: If `provider-config` is used values of `provider-tlsverify` +and `provider-cafile` are set according to settings in `provider-config` file. #### Configuration Value Defaults @@ -103,15 +103,15 @@ Table 1. OpenShift default configuration values Keyword | Required | Description | Default value ---------|----------|---------------------------------------------------------|-------------- namespace| no | namespace to use with each kubectl call | default -providerconfig| no | config file that specifies how to connect to kubernetes | none -providerapi| no | the API endpoint where API requests can be sent | none -accesstoken| no | the access token that can be used to authenticate | none -providertlsverify|no| turn off verificatoin of tls/ssl certificates | False -providercafile| no | path to file or directory with trusted CAs | none +provider-config| no | config file that specifies how to connect to kubernetes | none +provider-api| no | the API endpoint where API requests can be sent | none +provider-auth| no | the access token that can be used to authenticate | none +provider-tlsverify|no| turn off verificatoin of tls/ssl certificates | False +provider-cafile| no | path to file or directory with trusted CAs | none -**NOTE**: One of `providerconfig` or `providerapi` + `accesstoken` are required +**NOTE**: One of `provider-config` or `provider-api` + `provider-auth` are required -**NOTE**: Namespace can be set in the 
file pointed to by `providerconfig` or +**NOTE**: Namespace can be set in the file pointed to by `provider-config` or in the `answers.conf`. If it is set in both places then the values must match, or an error will be reported. diff --git a/docs/providers/openshift/overview_native.md b/docs/providers/openshift/overview_native.md index a5fc56cf..f214a999 100644 --- a/docs/providers/openshift/overview_native.md +++ b/docs/providers/openshift/overview_native.md @@ -20,7 +20,7 @@ from the environment of the installation container that is used to bootstrap the start of the application. It is not necessary to provide the namespace in the config. -#### providerconfig / providerapi / access_token +#### provider-config / provider-api / provider-auth At the time of execution, the Atomic App container is already running inside of the openshift environment and has access to the credentials From be9859496e311b99e7f5715a1dcc3692338beb57 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 12 Apr 2016 11:03:30 -0400 Subject: [PATCH 120/193] Remove /host from provider config example path --- docs/providers/kubernetes/overview.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/providers/kubernetes/overview.md b/docs/providers/kubernetes/overview.md index 5de1c800..f8cc8d07 100644 --- a/docs/providers/kubernetes/overview.md +++ b/docs/providers/kubernetes/overview.md @@ -43,7 +43,7 @@ One example of specifying a `provider-config` is below: ``` [general] provider: kubernetes -provider-config: /host/home/foo/.kube/config +provider-config: /home/foo/.kube/config ``` #### Configuration Value Defaults From 065da8a981fc3a04805a6f9066231250db8a33a6 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 12 Apr 2016 20:04:43 +0530 Subject: [PATCH 121/193] Show status message on atomicapp init run. 
--- atomicapp/cli/main.py | 9 ++++++++- atomicapp/nulecule/main.py | 5 ++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 5450199f..c5f3c55f 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -104,7 +104,14 @@ def cli_stop(args): def cli_init(args): try: argdict = args.__dict__ - NuleculeManager.init(argdict['app_name'], argdict['destination']) + created, destination = NuleculeManager.init(argdict['app_name'], argdict['destination']) + if created: + print('\nAtomic App: %s initialized at %s' % (argdict['app_name'], destination)) + else: + print( + '\nAtomic App: %s was not initialized because destination directory: %s is not empty.' % ( + argdict['app_name'], destination) + ) sys.exit(0) except Exception as e: logger.error(e, exc_info=True) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 9bb238de..04fd5503 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -134,6 +134,7 @@ def init(app_name, destination=None, app_version='1.0', atomicapp_version=__ATOMICAPPVERSION__, nulecule_spec_version=__NULECULESPECVERSION__ ) + created = False tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') template_dir = os.path.join(os.path.dirname(__file__), '../external/templates/nulecule') @@ -144,7 +145,7 @@ def init(app_name, destination=None, app_version='1.0', value = raw_input('Destination directory is not empty! Do you still want to proceed? 
[Y]/n: ') value = value or 'y' if value.lower() != 'y': - return + return created, destination distutils.dir_util.copy_tree(template_dir, tmpdir) for item in os.walk(tmpdir): @@ -168,6 +169,8 @@ def init(app_name, destination=None, app_version='1.0', distutils.dir_util.copy_tree(tmpdir, destination, True) distutils.dir_util.remove_tree(tmpdir) + created = True + return created, destination def unpack(self, update=False, dryrun=False, nodeps=False, config=None): From 1cb664270e6c2b71a7ef0d1e224a8c90cd96a5b7 Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Tue, 12 Apr 2016 23:14:52 +0530 Subject: [PATCH 122/193] Added doc strings for atomicapp init method. --- atomicapp/nulecule/main.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 04fd5503..6a5e84d8 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -126,7 +126,19 @@ def __init__(self, app_spec, destination=None, @staticmethod def init(app_name, destination=None, app_version='1.0', app_desc='App description'): - """Initialize a new Nulecule app""" + """Initialize a new Nulecule app + + Args: + app_name (str): Application name + destination (str): Destination path + app_version (str): Application version + app_desc (str): Application description + + Returns: + created (bool), destination (str) + """ + + # context to render template files for Atomic App context = dict( app_name=app_name, app_version=app_version, @@ -135,18 +147,22 @@ def init(app_name, destination=None, app_version='1.0', nulecule_spec_version=__NULECULESPECVERSION__ ) created = False + + # Temporary working dir to render the templates tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') template_dir = os.path.join(os.path.dirname(__file__), '../external/templates/nulecule') if destination is None: destination = os.path.join('.', app_name) + # Check if destination directory exists and is not empty if 
os.path.exists(destination) and os.path.isdir(destination) and os.listdir(destination): value = raw_input('Destination directory is not empty! Do you still want to proceed? [Y]/n: ') value = value or 'y' if value.lower() != 'y': return created, destination + # Copy template dir to temporary working directory and render templates distutils.dir_util.copy_tree(template_dir, tmpdir) for item in os.walk(tmpdir): parent_dir, dirs, files = item @@ -167,7 +183,9 @@ def init(app_name, destination=None, app_version='1.0', f.write(t.safe_substitute(**context)) os.remove(templ_path) + # Copy rendered templates to destination directory distutils.dir_util.copy_tree(tmpdir, destination, True) + # Remove temporary working directory distutils.dir_util.remove_tree(tmpdir) created = True return created, destination From df380ddd0621cf10bc3fe2130b8af2b05111694a Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Tue, 12 Apr 2016 15:17:26 -0400 Subject: [PATCH 123/193] init: modify docker template to publish to host port 80 This should make it easier for a user to demo without having to figure out what random port was assigned to be used. 
--- atomicapp/external/templates/nulecule/artifacts/docker/run.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/external/templates/nulecule/artifacts/docker/run.tpl b/atomicapp/external/templates/nulecule/artifacts/docker/run.tpl index 6399f0e6..31f94adb 100644 --- a/atomicapp/external/templates/nulecule/artifacts/docker/run.tpl +++ b/atomicapp/external/templates/nulecule/artifacts/docker/run.tpl @@ -1 +1 @@ -docker run -d --name $app_name -P $image +docker run -d --name $app_name -p 80:80 $image From f1d3c11c4649f3ec3b895038c08665f3fc299b92 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 12 Apr 2016 16:05:28 -0400 Subject: [PATCH 124/193] 0.5.0 Release --- CHANGELOG.md | 116 +++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 9 files changed, 124 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ec7b201..b9ff3c58 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,119 @@ +## Atomic App 0.5.0 (04-12-2016) + +This is a major release of Atomic App where we introduce a new CLI command as well as the renaming of multiple provider configuration parameters. 
+ +The main features of this release are: + + - Introduction of the `atomicapp init` CLI command + - Renaming of provider configuration related parameters + - --provider-auth added as a CLI command + +Other: + + - Updated legal information + - Bug fix on persistent storage initialization + - Utility method to gather sudo user path and information + - Improved detection if we're inside a Docker container + - Improved readility on provider failed exceptions + - docker inspect bugfix + +## Atomic App Initialization + +We've included support for initializing a basic Atomic App via the `atomicapp init` command. This creates a basic example that can be used on __Docker__ and __Kubernetes__ providers based on the [centos/httpd](https://hub.docker.com/r/centos/httpd/) docker image. + +```bash +▶ atomicapp init helloworld +[INFO] - main.py - Action/Mode Selected is: init + +Atomic App: helloworld initialized at ./helloworld + +▶ vim ./helloworld/Nulecule # Make changes to the Nulecule file + +▶ atomicapp run ./helloworld +[INFO] - main.py - Action/Mode Selected is: run +[INFO] - base.py - Provider not specified, using default provider - kubernetes +[WARNING] - plugin.py - Configuration option 'provider-config' not found +[WARNING] - plugin.py - Configuration option 'provider-config' not found +[INFO] - kubernetes.py - Using namespace default +[INFO] - kubernetes.py - trying kubectl at /usr/bin/kubectl +[INFO] - kubernetes.py - trying kubectl at /usr/local/bin/kubectl +[INFO] - kubernetes.py - found kubectl at /usr/local/bin/kubectl +[INFO] - kubernetes.py - Deploying to Kubernetes + +Your application resides in ./helloworld +Please use this directory for managing your application + +``` + +## New provider configuration parameter names + +We've renamed the provider-specific parameters for better clarity by adding dashes in-between 'provider' and the specified function. + +Major changes include the renaming of __accesstoken__ to __provider-auth__. 
+ +``` +providerapi --> provider-api +accesstoken --> provider-auth +providertlsverify --> provider-tlsverify +providercafile --> provider-cafile +``` + +```ini +[general] +provider = openshift +namespace = mynamespace +provider-api = https://127.0.0.1:8443 +provider-auth = sadfasdfasfasfdasfasfasdfsafasfd +provider-tlsverify = True +provider-cafile = /etc/myca/ca.pem +``` + +```sh +atomicapp run projectatomic/etherpad-centos7-atomicapp --provider openshift --provider-tlsverify False --provider-auth foo --provider-api "https://localhost:8443" +``` + +``` +Charlie Drage : + Add more legal information + Update year + Requirements should retrieve from Nulecule object not graph + Warn not error on missing requirement + Util to gather what user is running Atomic App and which home dir it should use + Check to see if it's a Docker container + Update Dockerfile.pkgs testing repos + Dashes added to CLI commands to distinguish provider config data + Fix test params with the new dashes + Add provider-auth as a CLI command and convert ACCESSTOKEN to provider-auth + Modify accesstoken tests to provider-auth + Change constant ACCESS_TOKEN_KEY to PROVIDER_AUTH_KEY + Modify documentation to reflect changes in params + Remove /host from provider config example path + +Dusty Mabe : + init: modify docker template to publish to host port 80 + +Ratnadeep Debnath : + Added 'init' command to initialize a new atomic app. + Make destination optional atomicapp init command + Ask user if destination is not empty when initializting atomic app. + Do not acquire lock for initializing atomicapp. + Set default action as 'Y' when atomicapp init asks to clean dest directory. + Include nulecule template files in package data. + Add k8s service for initialized app. + Moved Nulecule template files to external dir. + Don't hard code atomicapp/nulecule versions in atomic app template. + Show status message on atomicapp init run. + Added doc strings for atomicapp init method. 
+ +Shubham Minglani : + Handle ProviderFailedException, fix #627 + add pass for improved readability + replace inspect with ps, fix #672 + +Suraj Deshmukh : + Added OrderedDict so as to deploy application in given order +``` + ## Atomic App 0.4.5 (03-29-2016) This is a minor release of Atomic App where we make some changes to the UI output as well as fix a few provider-specific bugs. diff --git a/Dockerfile b/Dockerfile index 6ca9e67c..aa871428 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.5" +ENV ATOMICAPPVERSION="0.5.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 6ca9e67c..aa871428 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.5" +ENV ATOMICAPPVERSION="0.5.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 37493811..8f902911 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.4.5" +ENV ATOMICAPPVERSION="0.5.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index e9288761..a7eba28a 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.4.5" +ENV ATOMICAPPVERSION="0.5.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 3a0d937b..4edf3ef3 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.4.5" +ENV ATOMICAPPVERSION="0.5.0" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index c3e76743..3fe66452 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.4.5" +ENV ATOMICAPPVERSION="0.5.0" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 6f36f9ee..4227ce0c 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.4.5' +__ATOMICAPPVERSION__ = '0.5.0' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index f7952db7..17464748 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.4.5', + version='0.5.0', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From 50b70f8ed455fc99125060d202c4f61d3f8ce6da Mon Sep 17 00:00:00 2001 From: 
Charlie Drage Date: Mon, 18 Apr 2016 13:19:33 -0400 Subject: [PATCH 125/193] Add Marathon to index --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ff25ecdb..c3a1571a 100644 --- a/README.md +++ b/README.md @@ -41,6 +41,7 @@ This README contains some high level overview information on Atomic App. The det 1. [Docker](docs/providers/docker/overview.md) 2. [Kubernetes](docs/providers/kubernetes/overview.md) 3. [OpenShift](docs/providers/openshift/overview.md) + 4. [Marathon](docs/providers/marathon/overview.md) 4. [CLI](docs/cli.md) 5. [Nulecule file](docs/nulecule.md) 6. [Atomic App lifecycle](docs/atomicapp_lifecycle.md) From b15c594f497ef8212a9e7389bc5fdcd5af3c6668 Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Tue, 19 Apr 2016 12:52:23 +0530 Subject: [PATCH 126/193] fix typo --- atomicapp/cli/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index c5f3c55f..5d474a49 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -301,7 +301,7 @@ def create_parser(self): "--ask", default=False, action="store_true", - help="Ask for params even if the defaul value is provided") + help="Ask for params even if the default value is provided") run_subparser.add_argument( "app_spec", nargs='?', From e3ac61476ab2fe12fc3421675c19ede334fe1631 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 19 Apr 2016 13:02:56 -0400 Subject: [PATCH 127/193] Update README.md with correct installation instructions. 
Forgot to update the release version in README.md Also the commands for installing via tar.gz were wrong (cd into a .tar.gz) --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index c3a1571a..f149ed6b 100644 --- a/README.md +++ b/README.md @@ -18,17 +18,17 @@ Atomic App is used to bootstrap packaged container environments and run them on From Linux: ```sh git clone https://github.com/projectatomic/atomicapp && cd atomicapp -make install +sudo make install ``` _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.4.2 +export RELEASE=0.5.0 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz -tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE.tar.gz -make install +tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE +sudo make install ``` ## Documentation From 263edeabf1699818fff7dee323df72e7230311ae Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Tue, 19 Apr 2016 13:36:46 -0400 Subject: [PATCH 128/193] Moved a constant from openshift.py to constants.py --- atomicapp/constants.py | 3 +++ atomicapp/providers/openshift.py | 6 ++---- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 4227ce0c..365a6e1c 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -78,3 +78,6 @@ # Persistent Storage Formats PERSISTENT_STORAGE_FORMAT = ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"] + +# If running in an openshift POD via `oc new-app`, the ca file is here +OPENSHIFT_POD_CA_FILE = "/run/secrets/kubernetes.io/serviceaccount/ca.crt" diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 268bae0d..a31726e6 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -37,15 +37,13 @@ 
NAMESPACE_KEY, PROVIDER_API_KEY, PROVIDER_TLS_VERIFY_KEY, - PROVIDER_CA_KEY) + PROVIDER_CA_KEY, + OPENSHIFT_POD_CA_FILE) from atomicapp.providers.lib.kubeconfig import KubeConfig from requests.exceptions import SSLError import logging logger = logging.getLogger(LOGGER_DEFAULT) -# If running in an openshift POD via `oc new-app`, the ca file is here -OPENSHIFT_POD_CA_FILE = "/run/secrets/kubernetes.io/serviceaccount/ca.crt" - class OpenshiftClient(object): From cb5176a4b8da14363cb837b9525d7d6c8f1ad313 Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Tue, 12 Apr 2016 15:21:27 -0400 Subject: [PATCH 129/193] init: remove unnecessary message to the user If the user already answered 'no' to the question about whether they want to proceed or not then we don't need to give them a message about it. --- atomicapp/cli/main.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 5d474a49..689391f2 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -107,11 +107,6 @@ def cli_init(args): created, destination = NuleculeManager.init(argdict['app_name'], argdict['destination']) if created: print('\nAtomic App: %s initialized at %s' % (argdict['app_name'], destination)) - else: - print( - '\nAtomic App: %s was not initialized because destination directory: %s is not empty.' % ( - argdict['app_name'], destination) - ) sys.exit(0) except Exception as e: logger.error(e, exc_info=True) From 2e2a5594c765ecc65cb2bd6611b9b24a164a8c6c Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Tue, 12 Apr 2016 15:25:02 -0400 Subject: [PATCH 130/193] init: remove number of return variables from NuleculeManager.init() We did not need both created and destination. Reduced to a single variable for now. 
--- atomicapp/cli/main.py | 8 +++++--- atomicapp/nulecule/main.py | 8 +++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 689391f2..90460204 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -104,9 +104,11 @@ def cli_stop(args): def cli_init(args): try: argdict = args.__dict__ - created, destination = NuleculeManager.init(argdict['app_name'], argdict['destination']) - if created: - print('\nAtomic App: %s initialized at %s' % (argdict['app_name'], destination)) + appdir = NuleculeManager.init(argdict['app_name'], + argdict['destination']) + if appdir: + print('\nAtomic App: %s initialized at %s' % + (argdict['app_name'], appdir)) sys.exit(0) except Exception as e: logger.error(e, exc_info=True) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 6a5e84d8..df4bae29 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -135,7 +135,7 @@ def init(app_name, destination=None, app_version='1.0', app_desc (str): Application description Returns: - created (bool), destination (str) + destination (str) """ # context to render template files for Atomic App @@ -146,7 +146,6 @@ def init(app_name, destination=None, app_version='1.0', atomicapp_version=__ATOMICAPPVERSION__, nulecule_spec_version=__NULECULESPECVERSION__ ) - created = False # Temporary working dir to render the templates tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') @@ -160,7 +159,7 @@ def init(app_name, destination=None, app_version='1.0', value = raw_input('Destination directory is not empty! Do you still want to proceed? 
[Y]/n: ') value = value or 'y' if value.lower() != 'y': - return created, destination + return # Exit out as the user has chosen not to proceed # Copy template dir to temporary working directory and render templates distutils.dir_util.copy_tree(template_dir, tmpdir) @@ -187,8 +186,7 @@ def init(app_name, destination=None, app_version='1.0', distutils.dir_util.copy_tree(tmpdir, destination, True) # Remove temporary working directory distutils.dir_util.remove_tree(tmpdir) - created = True - return created, destination + return destination def unpack(self, update=False, dryrun=False, nodeps=False, config=None): From 1b0fe3a4540101ab0b254042037bfb9be192ef8b Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Tue, 12 Apr 2016 15:39:29 -0400 Subject: [PATCH 131/193] init: add logic to properly cleanup tmp dir --- atomicapp/nulecule/main.py | 61 ++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index df4bae29..b28a5d86 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -147,10 +147,6 @@ def init(app_name, destination=None, app_version='1.0', nulecule_spec_version=__NULECULESPECVERSION__ ) - # Temporary working dir to render the templates - tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') - template_dir = os.path.join(os.path.dirname(__file__), - '../external/templates/nulecule') if destination is None: destination = os.path.join('.', app_name) @@ -161,31 +157,38 @@ def init(app_name, destination=None, app_version='1.0', if value.lower() != 'y': return # Exit out as the user has chosen not to proceed - # Copy template dir to temporary working directory and render templates - distutils.dir_util.copy_tree(template_dir, tmpdir) - for item in os.walk(tmpdir): - parent_dir, dirs, files = item - for filename in files: - if not filename.endswith('.tpl'): - continue - templ_path = os.path.join(parent_dir, filename) - if 
parent_dir.endswith('artifacts/docker') or parent_dir.endswith('artifacts/kubernetes'): - file_path = os.path.join( - parent_dir, - '{}_{}'.format(app_name, filename[:-4])) - else: - file_path = os.path.join(parent_dir, filename[:-4]) - with open(templ_path) as f: - s = f.read() - t = Template(s) - with open(file_path, 'w') as f: - f.write(t.safe_substitute(**context)) - os.remove(templ_path) - - # Copy rendered templates to destination directory - distutils.dir_util.copy_tree(tmpdir, destination, True) - # Remove temporary working directory - distutils.dir_util.remove_tree(tmpdir) + # Temporary working dir to render the templates + tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') + template_dir = os.path.join(os.path.dirname(__file__), + '../external/templates/nulecule') + + try: + # Copy template dir to temporary working directory and render templates + distutils.dir_util.copy_tree(template_dir, tmpdir) + for item in os.walk(tmpdir): + parent_dir, dirs, files = item + for filename in files: + if not filename.endswith('.tpl'): + continue + templ_path = os.path.join(parent_dir, filename) + if parent_dir.endswith('artifacts/docker') or parent_dir.endswith('artifacts/kubernetes'): + file_path = os.path.join( + parent_dir, + '{}_{}'.format(app_name, filename[:-4])) + else: + file_path = os.path.join(parent_dir, filename[:-4]) + with open(templ_path) as f: + s = f.read() + t = Template(s) + with open(file_path, 'w') as f: + f.write(t.safe_substitute(**context)) + os.remove(templ_path) + + # Copy rendered templates to destination directory + distutils.dir_util.copy_tree(tmpdir, destination, True) + finally: + # Remove temporary working directory + distutils.dir_util.remove_tree(tmpdir) return destination def unpack(self, update=False, From 545fa5ed3aa2476b1f87856f1a2afe524b4040ca Mon Sep 17 00:00:00 2001 From: Dusty Mabe Date: Tue, 12 Apr 2016 15:40:54 -0400 Subject: [PATCH 132/193] init: break a few long lines into shorter ones --- atomicapp/nulecule/main.py | 9 
++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index b28a5d86..cb861429 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -151,8 +151,10 @@ def init(app_name, destination=None, app_version='1.0', destination = os.path.join('.', app_name) # Check if destination directory exists and is not empty - if os.path.exists(destination) and os.path.isdir(destination) and os.listdir(destination): - value = raw_input('Destination directory is not empty! Do you still want to proceed? [Y]/n: ') + if os.path.exists(destination) and \ + os.path.isdir(destination) and os.listdir(destination): + value = raw_input('Destination directory is not empty! ' + 'Do you still want to proceed? [Y]/n: ') value = value or 'y' if value.lower() != 'y': return # Exit out as the user has chosen not to proceed @@ -171,7 +173,8 @@ def init(app_name, destination=None, app_version='1.0', if not filename.endswith('.tpl'): continue templ_path = os.path.join(parent_dir, filename) - if parent_dir.endswith('artifacts/docker') or parent_dir.endswith('artifacts/kubernetes'): + if parent_dir.endswith('artifacts/docker') or \ + parent_dir.endswith('artifacts/kubernetes'): file_path = os.path.join( parent_dir, '{}_{}'.format(app_name, filename[:-4])) From 001b3e40f297c637dde4aeee96aa387f413de7d5 Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Wed, 20 Apr 2016 05:56:36 -0400 Subject: [PATCH 133/193] Abstracted the way we get absolute path Added a staticmethod in utils.Utils named get_real_abspath which abstracts the use of os.path.join(Utils.getRoot, path) --- atomicapp/cli/main.py | 2 +- atomicapp/nulecule/main.py | 9 +++------ atomicapp/plugin.py | 3 +-- atomicapp/providers/kubernetes.py | 2 +- atomicapp/providers/openshift.py | 3 +-- atomicapp/utils.py | 15 +++++++++++++++ 6 files changed, 22 insertions(+), 12 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 
90460204..964d3b83 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -474,7 +474,7 @@ def run(self): if hasattr(args, item) and getattr(args, item) is not None: args.cli_answers[item] = getattr(args, item) - lock = LockFile(os.path.join(Utils.getRoot(), LOCK_FILE)) + lock = LockFile(Utils.get_real_abspath(LOCK_FILE)) try: if args.action != 'init': lock.acquire(timeout=-1) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index cb861429..a9bc9509 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -74,14 +74,11 @@ def __init__(self, app_spec, destination=None, # Adjust app_spec, destination, and answer file paths if absolute. if os.path.isabs(app_spec): - app_spec = os.path.join(Utils.getRoot(), - app_spec.lstrip('/')) + app_spec = Utils.get_real_abspath(app_spec) if destination and os.path.isabs(destination): - destination = os.path.join(Utils.getRoot(), - destination.lstrip('/')) + destination = Utils.get_real_abspath(destination) if answers_file and os.path.isabs(answers_file): - answers_file = os.path.join(Utils.getRoot(), - answers_file.lstrip('/')) + answers_file = Utils.get_real_abspath(answers_file) # If the user doesn't want the files copied to a permanent # location then he provides 'none'. 
If that is the case we'll diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index ae44943a..482e86ce 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -75,8 +75,7 @@ def getConfigFile(self): if PROVIDER_CONFIG_KEY in self.config: self.config_file = self.config[PROVIDER_CONFIG_KEY] if os.path.isabs(self.config_file): - self.config_file = os.path.join(Utils.getRoot(), - self.config_file.lstrip('/')) + self.config_file = Utils.get_real_abspath(self.config_file) else: logger.warning("Configuration option '%s' not found" % PROVIDER_CONFIG_KEY) diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index 50e32cdd..5981ccfe 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -55,7 +55,7 @@ def init(self): if self.container: self.kubectl = self._find_kubectl(Utils.getRoot()) kube_conf_path = "/etc/kubernetes" - host_kube_conf_path = os.path.join(Utils.getRoot(), kube_conf_path.lstrip("/")) + host_kube_conf_path = Utils.get_real_abspath(kube_conf_path) if not os.path.exists(kube_conf_path) and os.path.exists(host_kube_conf_path): if self.dryrun: logger.info("DRY-RUN: link %s from %s" % (kube_conf_path, host_kube_conf_path)) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index a31726e6..cb20f26a 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -680,8 +680,7 @@ def _set_config_values(self): self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY] if result[PROVIDER_CA_KEY]: # if we are in container translate path to path on host - self.provider_ca = os.path.join(Utils.getRoot(), - result[PROVIDER_CA_KEY].lstrip('/')) + self.provider_ca = Utils.get_real_abspath(result[PROVIDER_CA_KEY]) else: self.provider_ca = None diff --git a/atomicapp/utils.py b/atomicapp/utils.py index e8917d18..5e0456cc 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -354,6 +354,21 @@ def getRoot(): else: return "/" + @staticmethod + def 
get_real_abspath(path): + """ + Take the user provided 'path' and return the real path to the resource + irrespective of the app running location either inside container or + outside. + + Args: + path (str): path to a resource + + Returns: + str: absolute path to resource in the filesystem. + """ + return os.path.join(Utils.getRoot(), path.lstrip('/')) + # generates a unique 12 character UUID @staticmethod def getUniqueUUID(): From 193e6bcb99d25d7b3a5f6eca52338bbeba746655 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 20 Apr 2016 16:41:18 -0400 Subject: [PATCH 134/193] Remove dockerenv and dockerinit check Found this out when using Docker 1.11 .dockerenv and .dockerinit files have been removed in 1.10 of Docker due to not using the LXC built-in exec driver anymore and thus the files no longer appear within Atomic App and fail to pick up whether or not we are running in a container or not. See: https://docs.docker.com/engine/deprecated/ ``` LXC built-in exec driver Deprecated In Release: v1.8 Target For Removal In Release: v1.10 The built-in LXC execution driver is deprecated for an external implementation. The lxc-conf flag and API fields will also be removed. ``` --- atomicapp/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index e8917d18..ed5bbf8e 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -342,7 +342,7 @@ def inContainer(): Returns: (bool): True == we are in a container """ - if os.path.isfile('/.dockerenv') and os.path.isfile('/.dockerinit') and os.path.isdir(HOST_DIR): + if os.path.isdir(HOST_DIR): return True else: return False From 433476e65033af536e0c8c6ed06986d688c3313a Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 20 Apr 2016 09:24:29 -0400 Subject: [PATCH 135/193] Remove locking from Atomic App Removed due to numerous issues with locking (dev + test environments) as well as the deprecation of LockFile. 
--- Dockerfile | 5 ----- Dockerfile.test | 2 -- Dockerfiles.git/Dockerfile.centos | 5 ----- Dockerfiles.git/Dockerfile.debian | 5 ----- Dockerfiles.git/Dockerfile.fedora | 5 ----- Dockerfiles.pkgs/Dockerfile.centos | 5 ----- Dockerfiles.pkgs/Dockerfile.fedora | 5 ----- atomicapp/cli/main.py | 11 ----------- atomicapp/constants.py | 1 - requirements.txt | 1 - 10 files changed, 45 deletions(-) diff --git a/Dockerfile b/Dockerfile index aa871428..413d353e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -28,11 +28,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. -RUN chmod 777 /run/lock - ENV PYTHONPATH /opt/atomicapp/ # the entrypoint diff --git a/Dockerfile.test b/Dockerfile.test index be7b97e1..81b76613 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -12,8 +12,6 @@ RUN yum install -y epel-release && \ yum install -y --setopt=tsflags=nodocs $(sed s/^/python-/ test-requirements.txt) && \ yum clean all -RUN mkdir /run/lock - ENV PYTHONPATH $PYTHONPATH:/opt/atomicapp/atomicapp CMD python -m pytest -vv tests --cov atomicapp diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index aa871428..413d353e 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -28,11 +28,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. 
-RUN chmod 777 /run/lock - ENV PYTHONPATH /opt/atomicapp/ # the entrypoint diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 8f902911..fc3eb563 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -32,11 +32,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. -RUN chmod 777 /run/lock - ENV PYTHONPATH /opt/atomicapp/ # the entrypoint diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index a7eba28a..77e5610e 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -27,11 +27,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. -RUN chmod 777 /run/lock - ENV PYTHONPATH /opt/atomicapp/ # the entrypoint diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 4edf3ef3..39334510 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -20,11 +20,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. 
-RUN chmod 777 /run/lock - RUN yum install -y epel-release && \ yum install -y atomicapp-${ATOMICAPPVERSION} ${TESTING} --setopt=tsflags=nodocs && \ yum clean all diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index 3fe66452..234b824a 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -20,11 +20,6 @@ WORKDIR /atomicapp # the directory by non-root. RUN chmod 777 /atomicapp -# If a volume doesn't get mounted over /run (like when running in an -# openshift pod) then open up permissions so the lock file can be -# created by non-root. -RUN chmod 777 /run/lock - RUN dnf install -y atomicapp-${ATOMICAPPVERSION} ${TESTING} --setopt=tsflags=nodocs && \ dnf clean all diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 964d3b83..22ae7a00 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -22,8 +22,6 @@ import argparse import logging -from lockfile import LockFile -from lockfile import AlreadyLocked from atomicapp.applogging import Logging from atomicapp.constants import (__ATOMICAPPVERSION__, @@ -33,7 +31,6 @@ APP_ENT_PATH, CACHE_DIR, HOST_DIR, - LOCK_FILE, LOGGER_DEFAULT, PROVIDERS) from atomicapp.nulecule import NuleculeManager @@ -474,10 +471,7 @@ def run(self): if hasattr(args, item) and getattr(args, item) is not None: args.cli_answers[item] = getattr(args, item) - lock = LockFile(Utils.get_real_abspath(LOCK_FILE)) try: - if args.action != 'init': - lock.acquire(timeout=-1) cli_func_exec(args.func, args) except AttributeError: if hasattr(args, 'func'): @@ -486,8 +480,6 @@ def run(self): self.parser.print_help() except KeyboardInterrupt: pass - except AlreadyLocked: - logger.error("Could not proceed - there is probably another instance of Atomic App running on this machine.") except Exception as ex: if args.verbose: raise @@ -495,9 +487,6 @@ def run(self): logger.error("Exception caught: %s", repr(ex)) logger.error( "Run the command again with -v option to get more 
information.") - finally: - if lock.i_am_locking(): - lock.release() def main(): diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 365a6e1c..2beb3437 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -53,7 +53,6 @@ ANSWERS_FILE_SAMPLE = "answers.conf.sample" ANSWERS_FILE_SAMPLE_FORMAT = 'ini' WORKDIR = ".workdir" -LOCK_FILE = "/run/lock/atomicapp.lock" LOGGER_DEFAULT = "atomicapp" LOGGER_COCKPIT = "cockpit" diff --git a/requirements.txt b/requirements.txt index e5cbfc5f..b39d04b6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,4 @@ anymarkup -lockfile jsonpointer requests websocket-client From 4f6de13d1d47a3acb7df76545d4dedf8897e6a73 Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Thu, 21 Apr 2016 11:19:03 -0400 Subject: [PATCH 136/193] Use filter to search in kubeconfig.py Using filter function to search in iterable on a single key. Optimized code in kubeconfig.py --- atomicapp/providers/lib/kubeconfig.py | 36 ++++++++++----------------- 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/atomicapp/providers/lib/kubeconfig.py b/atomicapp/providers/lib/kubeconfig.py index 9f71792d..a61c7f08 100644 --- a/atomicapp/providers/lib/kubeconfig.py +++ b/atomicapp/providers/lib/kubeconfig.py @@ -77,31 +77,21 @@ def parse_kubeconf_data(kubecfg): logger.debug("current context: %s", current_context) - context = None - for co in kubecfg["contexts"]: - if co["name"] == current_context: - context = co - - if not context: - raise ProviderFailedException() - - cluster = None - for cl in kubecfg["clusters"]: - if cl["name"] == context["context"]["cluster"]: - cluster = cl - - user = None - for usr in kubecfg["users"]: - if usr["name"] == context["context"]["user"]: - user = usr - - if not cluster or not user: + try: + context = filter(lambda co: co["name"] == current_context, + kubecfg["contexts"])[0] + logger.debug("context: %s", context) + + cluster = filter(lambda cl: cl["name"] == context["context"]["cluster"], + 
kubecfg["clusters"])[0] + logger.debug("cluster: %s", cluster) + + user = filter(lambda usr: usr["name"] == context["context"]["user"], + kubecfg["users"])[0] + logger.debug("user: %s", user) + except IndexError: raise ProviderFailedException() - logger.debug("context: %s", context) - logger.debug("cluster: %s", cluster) - logger.debug("user: %s", user) - url = cluster["cluster"]["server"] token = user["user"]["token"] if "namespace" in context["context"]: From 636a92d561033700cf3882e9f7c1e3928f911991 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 26 Apr 2016 14:17:52 -0400 Subject: [PATCH 137/193] 0.5.1 Release --- CHANGELOG.md | 32 ++++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 10 files changed, 41 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9ff3c58..4299aef0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,35 @@ +## Atomic App 0.5.1 (04-26-2016) + +This is a minor release of Atomic App where we refactor, fix code bugs as well as deprecate an old feature. Due to the numerous issues of locking as well as the deprecation of the `lockfile` library we have removed the locking mechanism from Atomic App. + +The main features of this release are: + + - The deprecation of locking + - `atomicapp init` code and UI clean-up + - Documentation updates + - inContainer() function bug fix for Docker 1.10+ + +``` +Charlie Drage : + Add Marathon to index + Update README.md with correct installation instructions. 
+ Remove dockerenv and dockerinit check + Remove locking from Atomic App + +Dusty Mabe : + init: remove unnecessary message to the user + init: remove number of return variables from NuleculeManager.init() + init: add logic to properly cleanup tmp dir + init: break a few long lines into shorter ones + +Shubham Minglani : + fix typo + +Suraj Deshmukh : + Moved a constant from openshift.py to constants.py + Abstracted the way we get absolute path +``` + ## Atomic App 0.5.0 (04-12-2016) This is a major release of Atomic App where we introduce a new CLI command as well as the renaming of multiple provider configuration parameters. diff --git a/Dockerfile b/Dockerfile index 413d353e..f58a6045 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.0" +ENV ATOMICAPPVERSION="0.5.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 413d353e..f58a6045 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.0" +ENV ATOMICAPPVERSION="0.5.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index fc3eb563..662da76d 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.5.0" +ENV ATOMICAPPVERSION="0.5.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 77e5610e..f89e7aa0 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.0" +ENV ATOMICAPPVERSION="0.5.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 39334510..aa7e5a87 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.5.0" +ENV ATOMICAPPVERSION="0.5.1" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index 234b824a..f3949150 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.5.0" +ENV ATOMICAPPVERSION="0.5.1" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/README.md b/README.md index f149ed6b..60f5a282 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.5.0 +export RELEASE=0.5.1 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE sudo make install diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 2beb3437..af0bcc12 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.5.0' +__ATOMICAPPVERSION__ = '0.5.1' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index 17464748..680f8f32 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.5.0', + version='0.5.1', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From e7105ce78ebb3ce30a32d2753cbaa2258fbec22f Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Tue, 19 Apr 2016 14:09:39 -0400 Subject: [PATCH 138/193] Now logs can show path to file under root atomicapp folder Earlier atomicapp in the logs would show only the directory + file that is being executed but this was nice when atomicapp was only two level deep in its directory structure. So this was not showing the complete path after root atomicapp folder. 
But now it will show the complete path after root atomicapp folder. Fixes #695 --- atomicapp/applogging.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py index 4695cb61..85a87dfe 100644 --- a/atomicapp/applogging.py +++ b/atomicapp/applogging.py @@ -27,15 +27,29 @@ class customOutputFormatter(logging.Formatter): """ A class that adds 'longerfilename' support to the logging formatter - This 'longerfilename' will be filename + parent dir. + This 'longerfilename' will be path/to/file.py after the root atomicapp + folder. """ - def format(self, record): + def __init__(self, *args): + super(customOutputFormatter, self).__init__(*args) + + # setting the root directory path of code base, currently this file is + # at /home/xyz/atomicapp/applogging.py so we need this path except for + # the 'applogging.py' so while splitting here, only last component + # i.e. 'applogging.py' is excluded by using -1, so if applogging.py + # is moved into another directory and if path becomes + # /home/xyz/atomicapp/logs/applogging.py to remove + # 'logs/applogging.py' use -2 + self.atomicapproot = '/'.join(__file__.split('/')[:-1]) - # Add the 'longerfilename' field to the record dict. This is - # then used by the Formatter in the logging library when - # formatting the message string. - record.longerfilename = '/'.join(record.pathname.split('/')[-2:]) + def format(self, record): + """ + Add the 'longerfilename' field to the record dict. This is + then used by the Formatter in the logging library when + formatting the message string. + """ + record.longerfilename = record.pathname.split(self.atomicapproot)[-1].lstrip('/') # Call the parent class to do formatting. 
return super(customOutputFormatter, self).format(record) From 16033ba3d5edcfda56ba7725e95d19454ded7703 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 27 Apr 2016 17:20:45 -0400 Subject: [PATCH 139/193] Ignore vim .swo files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 4c4a2fb6..6f955943 100644 --- a/.gitignore +++ b/.gitignore @@ -66,3 +66,4 @@ tags TAGS *.swp *~ +*.swo From bd2a285d3fd4e26828b49908f7e9be1a1ce2b17d Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Thu, 28 Apr 2016 11:38:19 +0530 Subject: [PATCH 140/193] Updated the inContainer function doc string Since the last few changes in the code now .dockerenv and .dockerinit are not checked in the code, but the same thing was not updated in the doc, so updated the stale doc string. --- atomicapp/utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 29a73b45..4e44e871 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -336,8 +336,7 @@ def getDockerCli(dryrun=False): def inContainer(): """ Determine if we are running inside a container or not. This is done by - checking to see if /host has been passed as well as if .dockerenv and - .dockerinit files exist + checking to see if /host has been passed. Returns: (bool): True == we are in a container From 1f61344da90004707c770c81990dcdd1f3377ac9 Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Fri, 29 Apr 2016 15:15:01 +0530 Subject: [PATCH 141/193] Removed unused function update from utils.py This function is not used anywhere in the code. 
--- atomicapp/utils.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 4e44e871..95228a9f 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -23,7 +23,6 @@ import sys import tempfile import re -import collections import anymarkup import uuid import requests @@ -296,22 +295,6 @@ def askFor(what, info): return value - @staticmethod - def update(old_dict, new_dict): - for key, val in new_dict.iteritems(): - if isinstance(val, collections.Mapping): - tmp = Utils.update(old_dict.get(key, {}), val) - old_dict[key] = tmp - elif isinstance(val, list) and key in old_dict: - res = (old_dict[key] + val) - if isinstance(val[0], collections.Mapping): - old_dict[key] = [dict(y) for y in set(tuple(x.items()) for x in res)] - else: - old_dict[key] = list(set(res)) - else: - old_dict[key] = new_dict[key] - return old_dict - @staticmethod def getAppId(path): # obsolete From baf3b1920666a1379742d25247816fb04db3a608 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 4 May 2016 02:36:57 -0400 Subject: [PATCH 142/193] Change provider-config warning to debug This should be within the debug log rather than warn the user. Basically, if the user has not provided --provider-config Atomic App will warn. This should instead be put in the debugging output to let the developer know that --provider-config hasn't been passed via answers.conf or the CLI. 
--- atomicapp/plugin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index 482e86ce..74d090fd 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -77,7 +77,7 @@ def getConfigFile(self): if os.path.isabs(self.config_file): self.config_file = Utils.get_real_abspath(self.config_file) else: - logger.warning("Configuration option '%s' not found" % PROVIDER_CONFIG_KEY) + logger.debug("Configuration option '%s' not provided" % PROVIDER_CONFIG_KEY) def checkConfigFile(self): if not self.config_file: From 1c7ba6de0a785a06056d3a61a8f4b5d631ca86b2 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 5 May 2016 05:13:58 -0400 Subject: [PATCH 143/193] Remove README.MD from init Init should be clean and thus no README.md should be provided. Some people use pandoc, ascii, etc. --- atomicapp/external/templates/nulecule/README.md.tpl | 3 --- 1 file changed, 3 deletions(-) delete mode 100644 atomicapp/external/templates/nulecule/README.md.tpl diff --git a/atomicapp/external/templates/nulecule/README.md.tpl b/atomicapp/external/templates/nulecule/README.md.tpl deleted file mode 100644 index e9b5cfff..00000000 --- a/atomicapp/external/templates/nulecule/README.md.tpl +++ /dev/null @@ -1,3 +0,0 @@ -# $app_name Atomic App - -My awesome Atomic App. 
From a38fc34b34d765c765cd8d9366fab4949bb1894a Mon Sep 17 00:00:00 2001 From: Suraj Deshmukh Date: Wed, 6 Apr 2016 22:42:13 +0530 Subject: [PATCH 144/193] Replaced a for loop that initialized a dict Replaced the for loop that initialized the dict with keys similar to other dict and values as None, using the `dict` datatype function called `fromkeys` --- atomicapp/providers/openshift.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 46c58752..88ff9404 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -633,11 +633,8 @@ def _set_config_values(self): PROVIDER_CA_KEY: self.provider_ca} # create keys in dicts and initialize values to None - answers = {} - providerconfig = {} - for k in result.keys(): - answers[k] = None - providerconfig[k] = None + answers = dict.fromkeys(result) + providerconfig = dict.fromkeys(result) # get values from answers.conf for k in result.keys(): From 11237dcb7e08adae40f17947b7ccb5a53a67d99d Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 10 Feb 2016 09:40:54 -0500 Subject: [PATCH 145/193] Makes Makefile faster using .PHONY && add default python location --- Makefile | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 792651fa..eddeafe9 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,29 @@ +# Default dir locations +PYTHON ?= /usr/bin/python + +# Methods +.PHONY: all all: - python -m pytest -vv + $(PYTHON) -m pytest -vv +.PHONY: install install: - python setup.py install + $(PYTHON) setup.py install +.PHONY: test test: pip install -qr requirements.txt pip install -qr test-requirements.txt - python -m pytest -vv + $(PYTHON) -m pytest -vv +.PHONY: image image: docker build -t $(tag) . 
+.PHONY: syntax-check syntax-check: flake8 atomicapp +.PHONY: clean clean: - python setup.py clean --all + $(PYTHON) setup.py clean --all From f0130252a419d3228387374a6b33e13aef0d0977 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 12 May 2016 12:28:26 -0400 Subject: [PATCH 146/193] Modify TravisCI for updated Makefile --- .travis.yml | 13 ++++++------- Makefile | 7 ++++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.travis.yml b/.travis.yml index 96f15f0c..f654faf9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,6 +1,7 @@ # Travis CI checks for atomicapp language: python +sudo: required python: - "2.7" @@ -9,18 +10,16 @@ notifications: irc: "chat.freenode.net#nulecule" before_install: - - pip install pytest-cov coveralls --use-mirrors - - pip install pep8 --use-mirrors - - pip install flake8 --use-mirrors + - sudo pip install pytest-cov coveralls pep8 flake8 install: - - make install + - sudo make install before_script: - - make syntax-check + - sudo make syntax-check script: - - make test + - sudo make test after_success: - - coveralls + - sudo coveralls diff --git a/Makefile b/Makefile index eddeafe9..ec21931b 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,8 @@ -# Default dir locations +# This can be overriden (for eg): +# make install PYTHON=/usr/bin/python2.7 PYTHON ?= /usr/bin/python +DOCKER ?= /usr/bin/docker -# Methods .PHONY: all all: $(PYTHON) -m pytest -vv @@ -18,7 +19,7 @@ test: .PHONY: image image: - docker build -t $(tag) . + $(DOCKER) build -t $(tag) . .PHONY: syntax-check syntax-check: From 2278eec2ca6c60d9735a540338d27214f8b9a3d8 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 12 May 2016 15:23:11 -0400 Subject: [PATCH 147/193] Removes loading via .py files The old code dynamically loaded providers/ .py files in order for the user to dynamically add providers when need be. Due to changes to Atomic App, we no longer require this and using a provider is explicitly implied now on the command line. 
This removes the work previously done in favour of loading modules via the provider_key name. --- atomicapp/nulecule/base.py | 4 +- atomicapp/nulecule/lib.py | 1 - atomicapp/plugin.py | 48 ++++--------------- atomicapp/providers/marathon.py | 2 +- atomicapp/providers/openshift.py | 2 +- atomicapp/requirements.py | 1 - .../providers/test_openshift_provider.py | 20 ++++---- tests/units/test_plugin.py | 7 ++- 8 files changed, 27 insertions(+), 58 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 07cc1f26..0b5e170c 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -44,7 +44,7 @@ from atomicapp.nulecule.lib import NuleculeBase from atomicapp.nulecule.container import DockerHandler from atomicapp.nulecule.exceptions import NuleculeException -from atomicapp.providers.openshift import OpenShiftProvider +from atomicapp.providers.openshift import OpenshiftProvider from jsonpointer import resolve_pointer, set_pointer, JsonPointerException from anymarkup import AnyMarkupError @@ -115,7 +115,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, if Utils.running_on_openshift(): # pass general config data containing provider specific data # to Openshift provider - op = OpenShiftProvider(config.get('general', {}), './', False) + op = OpenshiftProvider(config.get('general', {}), './', False) op.artifacts = [] op.init() op.extract(image, APP_ENT_PATH, dest, update) diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index 89257046..aba51924 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -41,7 +41,6 @@ class NuleculeBase(object): def __init__(self, basepath, params, namespace): self.plugin = Plugin() - self.plugin.load_plugins() self.basepath = basepath self.params = params or [] self.namespace = namespace diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index 74d090fd..7234c1f7 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -22,9 +22,8 @@ 
from __future__ import print_function import os -import imp - import logging +import importlib from utils import Utils from constants import (HOST_DIR, LOGGER_DEFAULT, @@ -126,42 +125,11 @@ class Plugin(object): def __init__(self, ): pass - def load_plugins(self): - run_path = os.path.dirname(os.path.realpath(__file__)) - providers_dir = os.path.join(run_path, "providers") - logger.debug("Loading providers from %s", providers_dir) - - plugin_classes = {} - plugin_class = globals()["Provider"] - - for f in os.listdir(providers_dir): - if f.endswith(".py"): - module_name = os.path.basename(f).rsplit('.', 1)[0] - try: - f_module = imp.load_source( - module_name, os.path.join(providers_dir, f)) - except (IOError, OSError, ImportError) as ex: - logger.warning("can't load module '%s': %s", f, repr(ex)) - continue - - for name in dir(f_module): - binding = getattr(f_module, name, None) - try: - # if you try to compare binding and PostBuildPlugin, python won't match them if you call - # this script directly b/c: - # ! 
<= - # but - # <= - is_sub = issubclass(binding, plugin_class) - except TypeError: - is_sub = False - if binding and is_sub and plugin_class.__name__ != binding.__name__: - plugin_classes[binding.key] = binding - - self.plugins = plugin_classes - def getProvider(self, provider_key): - for key, provider in self.plugins.iteritems(): - if key == provider_key: - logger.debug("Found provider %s", provider) - return provider + try: + module = importlib.import_module("atomicapp.providers.%s" % provider_key) + provider_class = "%sProvider" % provider_key.capitalize() + provider = getattr(module, provider_class) + except ImportError: + provider = None + return provider diff --git a/atomicapp/providers/marathon.py b/atomicapp/providers/marathon.py index 9370c8bf..ec3b43f0 100644 --- a/atomicapp/providers/marathon.py +++ b/atomicapp/providers/marathon.py @@ -31,7 +31,7 @@ logger = logging.getLogger(LOGGER_DEFAULT) -class Marathon(Provider): +class MarathonProvider(Provider): key = "marathon" config_file = None diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 88ff9404..5683ca25 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -313,7 +313,7 @@ def get_pod_status(self, namespace, pod): return return_data['status']['phase'].lower() -class OpenShiftProvider(Provider): +class OpenshiftProvider(Provider): key = "openshift" cli_str = "oc" cli = None diff --git a/atomicapp/requirements.py b/atomicapp/requirements.py index adc7c4f0..0212bd59 100644 --- a/atomicapp/requirements.py +++ b/atomicapp/requirements.py @@ -44,7 +44,6 @@ class Requirements: def __init__(self, config, basepath, graph, provider, dryrun): self.plugin = Plugin() - self.plugin.load_plugins() self.config = config self.basepath = basepath diff --git a/tests/units/providers/test_openshift_provider.py b/tests/units/providers/test_openshift_provider.py index 6a4ba386..31f6d2b5 100644 --- a/tests/units/providers/test_openshift_provider.py +++ 
b/tests/units/providers/test_openshift_provider.py @@ -9,23 +9,23 @@ import unittest import mock -from atomicapp.providers.openshift import OpenShiftProvider +from atomicapp.providers.openshift import OpenshiftProvider from atomicapp.plugin import ProviderFailedException class OpenshiftProviderTestMixin(object): def setUp(self): - # Patch OpenshiftClient to test OpenShiftProvider + # Patch OpenshiftClient to test OpenshiftProvider self.patcher = mock.patch('atomicapp.providers.openshift.OpenshiftClient') self.mock_OpenshiftClient = self.patcher.start() self.mock_oc = self.mock_OpenshiftClient() def get_oc_provider(self, dryrun=False, artifacts=[]): """ - Get OpenShiftProvider instance + Get OpenshiftProvider instance """ - op = OpenShiftProvider({}, '.', dryrun) + op = OpenshiftProvider({}, '.', dryrun) op.artifacts = artifacts op.access_token = 'test' op.init() @@ -37,12 +37,12 @@ def tearDown(self): class TestOpenshiftProviderDeploy(OpenshiftProviderTestMixin, unittest.TestCase): """ - Test OpenShiftProvider.run + Test OpenshiftProvider.run """ def test_run(self): """ - Test calling OpenshiftClient.run from OpenShiftProvider.run + Test calling OpenshiftClient.run from OpenshiftProvider.run """ op = self.get_oc_provider() op.oapi_resources = ['foo'] @@ -64,7 +64,7 @@ def test_run(self): def test_run_dryrun(self): """ - Test running OpenShiftProvider.run as dryrun + Test running OpenshiftProvider.run as dryrun """ op = self.get_oc_provider(dryrun=True) op.oapi_resources = ['foo'] @@ -84,12 +84,12 @@ def test_run_dryrun(self): class TestOpenshiftProviderUnrun(OpenshiftProviderTestMixin, unittest.TestCase): """ - Test OpenShiftProvider.stop + Test OpenshiftProvider.stop """ def test_stop(self): """ - Test calling OpenshiftClient.delete from OpenShiftProvider.stop + Test calling OpenshiftClient.delete from OpenshiftProvider.stop """ op = self.get_oc_provider() op.oapi_resources = ['foo'] @@ -113,7 +113,7 @@ def test_stop(self): def test_stop_dryrun(self): """ - Test 
running OpenShiftProvider.stop as dryrun + Test running OpenshiftProvider.stop as dryrun """ op = self.get_oc_provider(dryrun=True) op.oapi_resources = ['foo'] diff --git a/tests/units/test_plugin.py b/tests/units/test_plugin.py index 9a2699c9..614d1c2b 100644 --- a/tests/units/test_plugin.py +++ b/tests/units/test_plugin.py @@ -2,6 +2,8 @@ import unittest from atomicapp.plugin import Plugin +from atomicapp.providers.docker import DockerProvider +from atomicapp.providers.kubernetes import KubernetesProvider class TestPluginGetProvider(unittest.TestCase): @@ -13,8 +15,8 @@ def test_getProvider(self): """ p = Plugin() - docker_mock = mock.Mock() - kubernetes_mock = mock.Mock() + docker_mock = DockerProvider + kubernetes_mock = KubernetesProvider # keep some mock objects in place of the actual corresponding # classes, getProvider reads from `plugins` dict. p.plugins = { @@ -23,5 +25,6 @@ def test_getProvider(self): } self.assertEqual(p.getProvider('docker'), docker_mock) self.assertEqual(p.getProvider('kubernetes'), kubernetes_mock) + # if non-existent key provided self.assertEqual(p.getProvider('some_random'), None) From 94961674f8ec49b65067a62d395b6fd431e36f54 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 13 May 2016 10:26:42 -0400 Subject: [PATCH 148/193] Hide container id output on container creation This commit removes container ID output when talking to Docker either for the Nulecule data extraction or when deploying a multi-container application. 
--- atomicapp/nulecule/container.py | 6 +++--- atomicapp/providers/docker.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 4a28d903..610edbee 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -85,7 +85,7 @@ def pull(self, image, update=False): if self.dryrun: logger.info("DRY-RUN: %s", pull_cmd) - elif subprocess.call(pull_cmd) != 0: + elif subprocess.check_output(pull_cmd) != 0: raise DockerException("Could not pull Docker image %s" % image) cockpit_logger.info('Skipping pulling Docker image: %s' % image) @@ -123,7 +123,7 @@ def extract(self, image, source, dest, update=False): tmpdir] logger.debug( 'Copying data from Docker container: %s' % ' '.join(cp_cmd)) - subprocess.call(cp_cmd) + subprocess.check_output(cp_cmd) # There has been some inconsistent behavior where docker cp # will either copy out the entire dir /APP_ENT_PATH/*files* or @@ -162,7 +162,7 @@ def extract(self, image, source, dest, update=False): # Clean up dummy container rm_cmd = [self.docker_cli, 'rm', '-f', container_id] logger.debug('Removing Docker container: %s' % ' '.join(rm_cmd)) - subprocess.call(rm_cmd) + subprocess.check_output(rm_cmd) def is_image_present(self, image): """ diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index 0dfb5944..7cac7150 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -105,7 +105,7 @@ def run(self): if self.dryrun: logger.info("DRY-RUN: %s", " ".join(cmd)) else: - subprocess.check_call(cmd) + subprocess.check_output(cmd) def stop(self): logger.info("Undeploying to provider: Docker") @@ -142,4 +142,4 @@ def stop(self): if self.dryrun: logger.info("DRY-RUN: STOPPING CONTAINER %s", " ".join(cmd)) else: - subprocess.check_call(cmd) + subprocess.check_output(cmd) From 70221193188eebaa6773007242f9e022ff1338ab Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 13 May 2016 
12:07:32 -0400 Subject: [PATCH 149/193] Change dir of /external to /nulecule/external This folder should be located within /nulecule/external instead of /external. Thus within the code there is no need to include '/../'. --- .../external/templates/nulecule/Dockerfile.tpl | 0 .../{ => nulecule}/external/templates/nulecule/Nulecule.tpl | 0 .../external/templates/nulecule/README.md.tpl | 0 .../external/templates/nulecule/answers.conf.sample.tpl | 0 .../external/templates/nulecule/artifacts/docker/run.tpl | 0 .../templates/nulecule/artifacts/kubernetes/pod.yaml.tpl | 0 .../nulecule/artifacts/kubernetes/service.yaml.tpl | 0 atomicapp/nulecule/main.py | 2 +- setup.py | 6 +++--- 9 files changed, 4 insertions(+), 4 deletions(-) rename atomicapp/{ => nulecule}/external/templates/nulecule/Dockerfile.tpl (100%) rename atomicapp/{ => nulecule}/external/templates/nulecule/Nulecule.tpl (100%) rename atomicapp/{ => nulecule}/external/templates/nulecule/README.md.tpl (100%) rename atomicapp/{ => nulecule}/external/templates/nulecule/answers.conf.sample.tpl (100%) rename atomicapp/{ => nulecule}/external/templates/nulecule/artifacts/docker/run.tpl (100%) rename atomicapp/{ => nulecule}/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl (100%) rename atomicapp/{ => nulecule}/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl (100%) diff --git a/atomicapp/external/templates/nulecule/Dockerfile.tpl b/atomicapp/nulecule/external/templates/nulecule/Dockerfile.tpl similarity index 100% rename from atomicapp/external/templates/nulecule/Dockerfile.tpl rename to atomicapp/nulecule/external/templates/nulecule/Dockerfile.tpl diff --git a/atomicapp/external/templates/nulecule/Nulecule.tpl b/atomicapp/nulecule/external/templates/nulecule/Nulecule.tpl similarity index 100% rename from atomicapp/external/templates/nulecule/Nulecule.tpl rename to atomicapp/nulecule/external/templates/nulecule/Nulecule.tpl diff --git a/atomicapp/external/templates/nulecule/README.md.tpl 
b/atomicapp/nulecule/external/templates/nulecule/README.md.tpl similarity index 100% rename from atomicapp/external/templates/nulecule/README.md.tpl rename to atomicapp/nulecule/external/templates/nulecule/README.md.tpl diff --git a/atomicapp/external/templates/nulecule/answers.conf.sample.tpl b/atomicapp/nulecule/external/templates/nulecule/answers.conf.sample.tpl similarity index 100% rename from atomicapp/external/templates/nulecule/answers.conf.sample.tpl rename to atomicapp/nulecule/external/templates/nulecule/answers.conf.sample.tpl diff --git a/atomicapp/external/templates/nulecule/artifacts/docker/run.tpl b/atomicapp/nulecule/external/templates/nulecule/artifacts/docker/run.tpl similarity index 100% rename from atomicapp/external/templates/nulecule/artifacts/docker/run.tpl rename to atomicapp/nulecule/external/templates/nulecule/artifacts/docker/run.tpl diff --git a/atomicapp/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl b/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl similarity index 100% rename from atomicapp/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl rename to atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/pod.yaml.tpl diff --git a/atomicapp/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl b/atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl similarity index 100% rename from atomicapp/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl rename to atomicapp/nulecule/external/templates/nulecule/artifacts/kubernetes/service.yaml.tpl diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index a9bc9509..a089092d 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -159,7 +159,7 @@ def init(app_name, destination=None, app_version='1.0', # Temporary working dir to render the templates tmpdir = tempfile.mkdtemp(prefix='nulecule-new-app-') template_dir = 
os.path.join(os.path.dirname(__file__), - '../external/templates/nulecule') + 'external/templates/nulecule') try: # Copy template dir to temporary working directory and render templates diff --git a/setup.py b/setup.py index 680f8f32..c983fe9e 100644 --- a/setup.py +++ b/setup.py @@ -52,9 +52,9 @@ def _install_requirements(): }, packages=find_packages(), package_data={'atomicapp': ['providers/external/kubernetes/*.yaml', - 'external/templates/nulecule/*.tpl', - 'external/templates/nulecule/artifacts/docker/*.tpl', - 'external/templates/nulecule/artifacts/kubernetes/*.tpl']}, + 'nulecule/external/templates/nulecule/*.tpl', + 'nulecule/external/templates/nulecule/artifacts/docker/*.tpl', + 'nulecule/external/templates/nulecule/artifacts/kubernetes/*.tpl']}, include_package_data=True, install_requires=_install_requirements() ) From 925fba1b60de59c99b2a6be2ea75e7f8e74298eb Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 18 May 2016 12:41:50 -0400 Subject: [PATCH 150/193] Add binary generation This commit adds the command `make binary` to generate a binary using pyinstaller. This generates a stand-alone executable that will work on any operating system even if it doesn't have Python installed. Having a binary would allow us to create a minimal container >10MB without the need of relying on the underlying operating system. We use a custom atomicapp.spec file in order to generate the binary. Theoretically, this should work cross-platform on Windows / Mac OS X once each provider uses their respective http API instead of cli binaries. 
--- .gitignore | 4 ++++ Makefile | 4 ++++ atomicapp.spec | 57 ++++++++++++++++++++++++++++++++++++++++++++++++ script/binary.sh | 13 +++++++++++ 4 files changed, 78 insertions(+) create mode 100644 atomicapp.spec create mode 100755 script/binary.sh diff --git a/.gitignore b/.gitignore index 6f955943..0c1aa680 100644 --- a/.gitignore +++ b/.gitignore @@ -26,6 +26,7 @@ var/ *.egg-info/ .installed.cfg *.egg +bin/ # PyInstaller # Usually these files are written by a python script from a template @@ -33,6 +34,9 @@ var/ *.manifest *.spec +# Don't ignore atomicapp.spec for PyInstaller builds +!atomicapp.spec + # Installer logs pip-log.txt pip-delete-this-directory.txt diff --git a/Makefile b/Makefile index ec21931b..d2241bd5 100644 --- a/Makefile +++ b/Makefile @@ -28,3 +28,7 @@ syntax-check: .PHONY: clean clean: $(PYTHON) setup.py clean --all + +.PHONY: binary +binary: + ./script/binary.sh diff --git a/atomicapp.spec b/atomicapp.spec new file mode 100644 index 00000000..a55e04e4 --- /dev/null +++ b/atomicapp.spec @@ -0,0 +1,57 @@ +# -*- mode: python -*- + +# Function in order to recursively add data directories to pyinstaller +def extra_datas(mydir): + def rec_glob(p, files): + import os + import glob + for d in glob.glob(p): + if os.path.isfile(d): + files.append(d) + rec_glob("%s/*" % d, files) + files = [] + rec_glob("%s/*" % mydir, files) + extra_datas = [] + for f in files: + extra_datas.append((f, f, 'DATA')) + + return extra_datas + +block_cipher = None + +# Due to the way that we dynamically load providers via import_module +# in atomicapp/plugin.py we have to specify explicitly the modules directly +# so pyinstaller can "see" them. 
This is indicated by 'hiddenimports' +a = Analysis(['atomicapp/cli/main.py'], + pathex=['.'], + binaries=None, + datas=None, + hiddenimports=[ + 'atomicapp.providers.docker', + 'atomicapp.providers.kubernetes', + 'atomicapp.providers.openshift', + 'atomicapp.providers.marathon' + ], + hookspath=[], + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher) + +# Add external data (atomicapp init + provider external data) +a.datas += extra_datas('atomicapp/providers/external') +a.datas += extra_datas('atomicapp/nulecule/external') + +pyz = PYZ(a.pure, a.zipped_data, + cipher=block_cipher) +exe = EXE(pyz, + a.scripts, + a.binaries, + a.zipfiles, + a.datas, + name='atomicapp/cli/main', + debug=False, + strip=False, + upx=True, + console=True ) diff --git a/script/binary.sh b/script/binary.sh new file mode 100755 index 00000000..549680ce --- /dev/null +++ b/script/binary.sh @@ -0,0 +1,13 @@ +#!/bin/bash +set -ex +pip install -r requirements.txt +pip install pyinstaller + +# Due to the way that we dynamically load providers via import_module +# in atomicapp/plugin.py we have to specify explicitly the modules directly +# so pyinstaller can "see" them. +pyinstaller atomicapp.spec + +mkdir -p bin +mv dist/main bin/atomicapp +echo "Binary created at bin/atomicapp" From 9df4a470399c99c5ca2b50b35eaa9c4e28fa50bd Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 18 May 2016 12:46:33 -0400 Subject: [PATCH 151/193] Update to a cleaner logging output. This update cleans up the logging output and removes output that would otherwise clutter the logging of Atomic App Action/Mode Selected Is output has been updated to "Mode selected:" and each run of Atomic App will also output both the version number of Atomic App as well as the spec that's being used. This commit also adds ASCII art and aligns versioning of both Atomic App and Nulecule into the image. 
--- atomicapp/applogging.py | 9 +++++++-- atomicapp/cli/main.py | 15 ++++++++++++++- atomicapp/utils.py | 4 ++-- 3 files changed, 23 insertions(+), 5 deletions(-) diff --git a/atomicapp/applogging.py b/atomicapp/applogging.py index 85a87dfe..f5827dc0 100644 --- a/atomicapp/applogging.py +++ b/atomicapp/applogging.py @@ -114,6 +114,10 @@ def setup_logging(verbose=None, quiet=None, logtype=None): The 'color' level prints out normal log msgs (no cockpit) with color """ + # Shorten the name of WARNING to WARN in order to decrease + # output width / character wrapping + logging.addLevelName(logging.WARNING, 'WARN') + # If no logtype was set then let's have a sane default # If connected to a tty, then default to color, else, no color if not logtype: @@ -132,10 +136,11 @@ def setup_logging(verbose=None, quiet=None, logtype=None): # Set the format string to use based on the logging level. # For debug we include more of the filename than for !debug. + # We use -6s spacing to align all logging outputs if logging_level == logging.DEBUG: - formatstr = '%(asctime)s - [%(levelname)s] - %(longerfilename)s - %(message)s' + formatstr = '%(levelname)-6s :: - %(longerfilename)s :: %(message)s' else: - formatstr = '[%(levelname)s] - %(filename)s - %(message)s' + formatstr = '%(levelname)-6s :: %(message)s' # Set a tuple of options that will be passed to the formatter. 
The %s # will tell the logging library to use seconds since epoch for time stamps diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 22ae7a00..5d7681af 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -447,7 +447,20 @@ def run(self): # Setup logging (now with arguments from cmdline) and log a few msgs Logging.setup_logging(args.verbose, args.quiet, args.logtype) - logger.info("Action/Mode Selected is: %s" % args.action) + + art = """ + _ _ _ _ + /_\| |_ ___ _ __ (_)__ /_\ _ __ _ __ + / _ \ _/ _ \ ' \| / _|/ _ \| '_ \ '_ \\ +/_/ \_\__\___/_|_|_|_\__/_/ \_\ .__/ .__/ + |_| |_| + """.splitlines() + logger.info("%s" % art[1]) + logger.info("%s Version: %s" % (art[2], __ATOMICAPPVERSION__)) + logger.info("%s Nulecule: %s" % (art[3], __NULECULESPECVERSION__)) + logger.info("%s Mode: %s" % (art[4], str(args.action).capitalize())) + logger.info("%s" % art[5]) + logger.debug("Final parsed cmdline: {}".format(' '.join(cmdline))) # In the case of Atomic CLI we want to allow the user to specify diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 95228a9f..2a78c000 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -277,12 +277,12 @@ def askFor(what, info): repeat = False if "default" in info: value = raw_input( - "==> %s (%s, default: %s): " % (what, desc, info["default"])) + "ANSWER >> %s (%s, default: %s): " % (what, desc, info["default"])) if len(value) == 0: value = info["default"] else: try: - value = raw_input("==> %s (%s): " % (what, desc)) + value = raw_input("ANSWER >> %s (%s): " % (what, desc)) except EOFError: raise From 0cb9594e80e1f6d4863297a4862a24d4aef4480f Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 18 May 2016 13:00:21 -0400 Subject: [PATCH 152/193] Logging formatting Changes s/Docker/docker/g (terminology states when discussing tech to use lowercase, but company uppercase) Clean up other misc logging outputs --- atomicapp/nulecule/base.py | 8 ++++---- atomicapp/nulecule/container.py | 18 
+++++++++--------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 0b5e170c..125d2bc9 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -111,7 +111,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, Returns: A Nulecule instance, or None in case of dry run. """ - logger.info('Unpacking image: %s to %s' % (image, dest)) + logger.info('Unpacking image %s to %s' % (image, dest)) if Utils.running_on_openshift(): # pass general config data containing provider specific data # to Openshift provider @@ -380,12 +380,12 @@ def load_external_application(self, dryrun=False, update=False): self.basepath, EXTERNAL_APP_DIR, self.name) if os.path.isdir(external_app_path) and not update: logger.info( - 'Found existing external application for %s. ' - 'Loading it.' % self.name) + 'Found existing external application: %s ' + 'Loading: ' % self.name) nulecule = Nulecule.load_from_path( external_app_path, dryrun=dryrun, update=update) elif not dryrun: - logger.info('Pulling external application for %s.' 
% self.name) + logger.info('Pulling external application: %s' % self.name) nulecule = Nulecule.unpack( self.source, external_app_path, diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 610edbee..9152c44f 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -75,20 +75,20 @@ def pull(self, image, update=False): None """ if not self.is_image_present(image) or update: - logger.info('Pulling Docker image: %s' % image) - cockpit_logger.info('Pulling Docker image: %s' % image) + logger.info('Pulling docker image: %s' % image) + cockpit_logger.info('Pulling docker image: %s' % image) pull_cmd = [self.docker_cli, 'pull', image] logger.debug(' '.join(pull_cmd)) else: - logger.info('Skipping pulling Docker image: %s' % image) + logger.info('Skipping pulling docker image: %s' % image) return if self.dryrun: logger.info("DRY-RUN: %s", pull_cmd) elif subprocess.check_output(pull_cmd) != 0: - raise DockerException("Could not pull Docker image %s" % image) + raise DockerException("Could not pull docker image: %s" % image) - cockpit_logger.info('Skipping pulling Docker image: %s' % image) + cockpit_logger.info('Skipping pulling docker image: %s' % image) def extract(self, image, source, dest, update=False): """ @@ -106,7 +106,7 @@ def extract(self, image, source, dest, update=False): None """ logger.info( - 'Extracting nulecule data from image: %s to %s' % (image, dest)) + 'Extracting Nulecule data from image %s to %s' % (image, dest)) if self.dryrun: return @@ -122,7 +122,7 @@ def extract(self, image, source, dest, update=False): '%s:/%s' % (container_id, source), tmpdir] logger.debug( - 'Copying data from Docker container: %s' % ' '.join(cp_cmd)) + 'Copying data from docker container: %s' % ' '.join(cp_cmd)) subprocess.check_output(cp_cmd) # There has been some inconsistent behavior where docker cp @@ -141,7 +141,7 @@ def extract(self, image, source, dest, update=False): if os.path.exists(mainfile): existing_id = 
Utils.getAppId(mainfile) new_id = Utils.getAppId(tmpmainfile) - cockpit_logger.info("Loading app_id %s ." % new_id) + cockpit_logger.info("Loading app_id %s" % new_id) if existing_id != new_id: raise NuleculeException( "Existing app (%s) and requested app (%s) differ" % @@ -161,7 +161,7 @@ def extract(self, image, source, dest, update=False): # Clean up dummy container rm_cmd = [self.docker_cli, 'rm', '-f', container_id] - logger.debug('Removing Docker container: %s' % ' '.join(rm_cmd)) + logger.debug('Removing docker container: %s' % ' '.join(rm_cmd)) subprocess.check_output(rm_cmd) def is_image_present(self, image): From 73705a2130a1a481a59ab2d36835c5eb3cd7608e Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 24 May 2016 13:21:12 -0400 Subject: [PATCH 153/193] 0.5.2 Release --- CHANGELOG.md | 33 ++++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 10 files changed, 42 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4299aef0..648f56f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,36 @@ +## Atomic App 0.5.2 (05-24-2016) + +This release of Atomic App we include binary generation, a logging refactor/clean-up as well as a minor code refactor to the main Nulecule codebase. 
+ +The main features are: + - Add support for generating a static binary of Atomic App + - Logging clean-up + - README.md removed from `atomicapp init` generation + - Removal of dynamic module/class loading from providers in favour of static + +``` +Charlie Drage : + Ignore vim .swo files + Change provider-config warning to debug + Remove README.MD from init + Makes Makefile faster using .PHONY && add default python location + Modify TravisCI for updated Makefile + Removes loading via .py files + Hide container id output on container creation + Change dir of /external to /nulecule/external + Add binary generation + Update to a cleaner logging output. + Logging formatting + +Suraj Deshmukh : + Added elif to consecutive exclusive if statements + Use filter to search in kubeconfig.py + Now logs can show path to file under root atomicapp folder + Updated the inContainer function doc string + Removed unused function update from utils.py + Replaced a for loop that initialized a dict +``` + ## Atomic App 0.5.1 (04-26-2016) This is a minor release of Atomic App where we refactor, fix code bugs as well as deprecate an old feature. Due to the numerous issues of locking as well as the deprectation of the `lockfile` library we have remove the locking mechanism from Atomic App. diff --git a/Dockerfile b/Dockerfile index f58a6045..995142a0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.1" +ENV ATOMICAPPVERSION="0.5.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index f58a6045..995142a0 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.5.1" +ENV ATOMICAPPVERSION="0.5.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 662da76d..47bb7b7a 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.1" +ENV ATOMICAPPVERSION="0.5.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index f89e7aa0..cb3cd7cb 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.1" +ENV ATOMICAPPVERSION="0.5.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index aa7e5a87..671a80bc 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.5.1" +ENV ATOMICAPPVERSION="0.5.2" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index f3949150..25e3ab6e 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.5.1" +ENV ATOMICAPPVERSION="0.5.2" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/README.md b/README.md index 60f5a282..b156e9aa 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.5.1 +export RELEASE=0.5.2 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE sudo make install diff --git a/atomicapp/constants.py b/atomicapp/constants.py index af0bcc12..f2144aa5 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.5.1' +__ATOMICAPPVERSION__ = '0.5.2' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index c983fe9e..1e117d18 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.5.1', + version='0.5.2', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From 0fc777ddf91a318450aa93aa7cb49d7a7ab22915 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 27 May 2016 15:05:18 -0400 Subject: [PATCH 154/193] Ignore .swn vim temp files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 0c1aa680..b13b2f6e 100644 --- a/.gitignore +++ b/.gitignore @@ -71,3 +71,4 @@ TAGS *.swp *~ *.swo +*.swn From 39d11dd7903a8b92690093b4bca38c492c69916b Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 30 May 2016 
09:12:14 -0400 Subject: [PATCH 155/193] Remove ASCII art --- atomicapp/cli/main.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 5d7681af..01694608 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -448,18 +448,9 @@ def run(self): # Setup logging (now with arguments from cmdline) and log a few msgs Logging.setup_logging(args.verbose, args.quiet, args.logtype) - art = """ - _ _ _ _ - /_\| |_ ___ _ __ (_)__ /_\ _ __ _ __ - / _ \ _/ _ \ ' \| / _|/ _ \| '_ \ '_ \\ -/_/ \_\__\___/_|_|_|_\__/_/ \_\ .__/ .__/ - |_| |_| - """.splitlines() - logger.info("%s" % art[1]) - logger.info("%s Version: %s" % (art[2], __ATOMICAPPVERSION__)) - logger.info("%s Nulecule: %s" % (art[3], __NULECULESPECVERSION__)) - logger.info("%s Mode: %s" % (art[4], str(args.action).capitalize())) - logger.info("%s" % art[5]) + logger.info("Atomic App: %s - Mode: %s" + % (__ATOMICAPPVERSION__, + str(args.action).capitalize())) logger.debug("Final parsed cmdline: {}".format(' '.join(cmdline))) From 69875f6cf280f3a56fd0a86e75879ff9e3c46c7c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 7 Jun 2016 10:23:12 -0400 Subject: [PATCH 156/193] Add a new 'library' for k8s/openshift providers. This is the first commit and first initialization into refactoring the OpenShift provider in order to create a library that is both compatible with OpenShift as well as Kubernetes. With this 'library', the *ONLY* thing that is passed in is an object of the .kube/config configuration. After passing the configuration, you may make API calls such as .create(object) and .delete(object). 
--- atomicapp/constants.py | 2 + atomicapp/plugin.py | 4 +- atomicapp/providers/kubernetes.py | 387 ++++++++++-------- atomicapp/providers/lib/kubeshift/__init__.py | 0 atomicapp/providers/lib/kubeshift/client.py | 57 +++ .../providers/lib/kubeshift/exceptions.py | 42 ++ atomicapp/providers/lib/kubeshift/kubebase.py | 341 +++++++++++++++ .../lib/{ => kubeshift}/kubeconfig.py | 64 +++ .../providers/lib/kubeshift/kubernetes.py | 171 ++++++++ atomicapp/providers/openshift.py | 2 +- tests/units/nulecule/test_kubeconfig.py | 2 +- .../providers/test_kubernetes_provider.py | 4 - 12 files changed, 889 insertions(+), 187 deletions(-) create mode 100644 atomicapp/providers/lib/kubeshift/__init__.py create mode 100644 atomicapp/providers/lib/kubeshift/client.py create mode 100644 atomicapp/providers/lib/kubeshift/exceptions.py create mode 100644 atomicapp/providers/lib/kubeshift/kubebase.py rename atomicapp/providers/lib/{ => kubeshift}/kubeconfig.py (66%) create mode 100644 atomicapp/providers/lib/kubeshift/kubernetes.py diff --git a/atomicapp/constants.py b/atomicapp/constants.py index f2144aa5..0ab748de 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -75,6 +75,8 @@ PROVIDER_TLS_VERIFY_KEY = "provider-tlsverify" PROVIDER_CA_KEY = "provider-cafile" +K8S_DEFAULT_API = "http://localhost:8080" + # Persistent Storage Formats PERSISTENT_STORAGE_FORMAT = ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"] diff --git a/atomicapp/plugin.py b/atomicapp/plugin.py index 7234c1f7..2b13b498 100644 --- a/atomicapp/plugin.py +++ b/atomicapp/plugin.py @@ -40,7 +40,9 @@ class Provider(object): dryrun = None container = False config_file = None - __artifacts = None + + # By default, no artifacts are loaded + __artifacts = [] @property def artifacts(self): diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index 5981ccfe..92c2ed2e 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -20,14 +20,21 @@ import 
anymarkup import logging import os -from string import Template -from atomicapp.constants import (LOGGER_COCKPIT, +from atomicapp.constants import (PROVIDER_AUTH_KEY, + ANSWERS_FILE, + DEFAULT_NAMESPACE, LOGGER_DEFAULT, - PERSISTENT_STORAGE_FORMAT) + PROVIDER_API_KEY, + PROVIDER_CA_KEY, + PROVIDER_TLS_VERIFY_KEY, + LOGGER_COCKPIT, + K8S_DEFAULT_API) from atomicapp.plugin import Provider, ProviderFailedException -from atomicapp.utils import Utils +from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig +from atomicapp.providers.lib.kubeshift.client import Client +from atomicapp.utils import Utils cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) @@ -38,155 +45,227 @@ class KubernetesProvider(Provider): This class implements deploy, stop and undeploy of an atomicapp on Kubernetes provider. """ + + # Class variables key = "kubernetes" + namespace = DEFAULT_NAMESPACE + k8s_artifacts = {} + + # From the provider configuration config_file = None - kubectl = None - def init(self): - self.namespace = "default" + # Essential provider parameters + provider_api = None + provider_auth = None + provider_tls_verify = None + provider_ca = None - self.k8s_manifests = [] + def init(self): + self.k8s_artifacts = {} logger.debug("Given config: %s", self.config) if self.config.get("namespace"): self.namespace = self.config.get("namespace") logger.info("Using namespace %s", self.namespace) - if self.container: - self.kubectl = self._find_kubectl(Utils.getRoot()) - kube_conf_path = "/etc/kubernetes" - host_kube_conf_path = Utils.get_real_abspath(kube_conf_path) - if not os.path.exists(kube_conf_path) and os.path.exists(host_kube_conf_path): - if self.dryrun: - logger.info("DRY-RUN: link %s from %s" % (kube_conf_path, host_kube_conf_path)) - else: - os.symlink(host_kube_conf_path, kube_conf_path) + + self._process_artifacts() + + if self.dryrun: + return + + ''' + Config_file: + If a config_file has been provided, use the 
configuration + from the file and load the associated generated file. + If a config_file exists (--provider-config) use that. + + Params: + If any provider specific parameters have been provided, + load the configuration through the answers.conf file + + .kube/config: + If no config file or params are provided by user then try to find and + use a config file at the default location. + + no config at all: + If no .kube/config file can be found then try to connect to the default + unauthenticated http://localhost:8080/api end-point. + ''' + + default_config_loc = os.path.join( + Utils.getRoot(), Utils.getUserHome().strip('/'), '.kube/config') + + if self.config_file: + logger.debug("Provider configuration provided") + self.api = Client(KubeConfig.from_file(self.config_file), "kubernetes") + elif self._check_required_params(): + logger.debug("Generating .kube/config from given parameters") + self.api = Client(self._from_required_params(), "kubernetes") + elif os.path.isfile(default_config_loc): + logger.debug(".kube/config exists, using default configuration file") + self.api = Client(KubeConfig.from_file(default_config_loc), "kubernetes") else: - self.kubectl = self._find_kubectl() + self.config["provider-api"] = K8S_DEFAULT_API + self.api = Client(self._from_required_params(), "kubernetes") + + # Check if the namespace that the app is being deployed to is available + self._check_namespaces() + + def _build_param_dict(self): + # Initialize the values + paramdict = {PROVIDER_API_KEY: self.provider_api, + PROVIDER_AUTH_KEY: self.provider_auth, + PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify, + PROVIDER_CA_KEY: self.provider_ca} + + # Get values from the loaded answers.conf / passed CLI params + for k in paramdict.keys(): + paramdict[k] = self.config.get(k) + + return paramdict + + def _check_required_params(self, exception=False): + ''' + This checks to see if required parameters associated to the Kubernetes + provider are passed. 
Only PROVIDER_API_KEY is *required*. Token may be blank. + ''' + + paramdict = self._build_param_dict() + logger.debug("List of parameters passed: %s" % paramdict) + + # Check that the required parameters are passed. If not, error out. + for k in [PROVIDER_API_KEY]: + if paramdict[k] is None: + if exception: + msg = "You need to set %s in %s or pass it as a CLI param" % (k, ANSWERS_FILE) + raise ProviderFailedException(msg) + else: + return False + + return True + + def _from_required_params(self): + ''' + Create a default configuration from passed environment parameters. + ''' + + self._check_required_params(exception=True) + paramdict = self._build_param_dict() + + # Generate the configuration from the paramters + config = KubeConfig().from_params(api=paramdict[PROVIDER_API_KEY], + auth=paramdict[PROVIDER_AUTH_KEY], + ca=paramdict[PROVIDER_CA_KEY], + verify=paramdict[PROVIDER_TLS_VERIFY_KEY]) + return config + + def _check_namespaces(self): + ''' + This function checks to see whether or not the namespaces created in the cluster match the + namespace that is associated and/or provided in the deployed application + ''' + + # Get the namespaces and output the currently used ones + namespace_list = self.api.namespaces() + logger.debug("There are currently %s namespaces in the cluster." % str(len(namespace_list))) + + # Create a namespace list + namespaces = [] + for ns in namespace_list: + namespaces.append(ns["metadata"]["name"]) + + # Output the namespaces and check to see if the one provided exists + logger.debug("Namespaces: %s" % namespaces) + if self.namespace not in namespaces: + msg = "%s namespace does not exist. Please create the namespace and try again." 
% self.namespace + raise ProviderFailedException(msg) - if not self.dryrun: - if not os.access(self.kubectl, os.X_OK): - raise ProviderFailedException("Command: " + self.kubectl + " not found") + def _process_artifacts(self): + """ + Parse each Kubernetes file and convert said format into an Object for + deployment. + """ + for artifact in self.artifacts: + logger.debug("Processing artifact: %s", artifact) + data = None - # Check if Kubernetes config file is accessible, but only - # if one was provided by the user; config file is optional. - if self.config_file: - self.checkConfigFile() + # Open and parse the artifact data + with open(os.path.join(self.path, artifact), "r") as fp: + data = anymarkup.parse(fp, force_types=None) - def _find_kubectl(self, prefix=""): - """Determine the path to the kubectl program on the host. - 1) Check the config for a provider_cli in the general section - remember to add /host prefix - 2) Search /usr/bin:/usr/local/bin + # Process said artifacts + self._process_artifact_data(artifact, data) - Use the first valid value found + def _process_artifact_data(self, artifact, data): """ + Process the data for an artifact - if self.dryrun: - # Testing env does not have kubectl in it - return "/usr/bin/kubectl" + Args: + artifact (str): Artifact name + data (dict): Artifact data + """ - test_paths = ['/usr/bin/kubectl', '/usr/local/bin/kubectl'] - if self.config.get("provider_cli"): - logger.info("caller gave provider_cli: " + self.config.get("provider_cli")) - test_paths.insert(0, self.config.get("provider_cli")) + # Check if kind exists + if "kind" not in data.keys(): + raise ProviderFailedException( + "Error processing %s artifact. 
There is no kind" % artifact) - for path in test_paths: - test_path = prefix + path - logger.info("trying kubectl at " + test_path) - kubectl = test_path - if os.access(kubectl, os.X_OK): - logger.info("found kubectl at " + test_path) - return kubectl + # Change to lower case so it's easier to parse + kind = data["kind"].lower() - raise ProviderFailedException("No kubectl found in %s" % ":".join(test_paths)) + if kind not in self.k8s_artifacts.keys(): + self.k8s_artifacts[kind] = [] - def _call(self, cmd): - """Calls given command + # Fail if there is no metadata + if 'metadata' not in data: + raise ProviderFailedException( + "Error processing %s artifact. There is no metadata object" % artifact) - :arg cmd: Command to be called in a form of list - :raises: Exception - """ + # Change to the namespace specified on init() + data['metadata']['namespace'] = self.namespace - if self.dryrun: - logger.info("DRY-RUN: %s", " ".join(cmd)) + if 'labels' not in data['metadata']: + data['metadata']['labels'] = {'namespace': self.namespace} else: - ec, stdout, stderr = Utils.run_cmd(cmd, checkexitcode=True) - return stdout + data['metadata']['labels']['namespace'] = self.namespace - def process_k8s_artifacts(self): - """Processes Kubernetes manifests files and checks if manifest under - process is valid. 
+ self.k8s_artifacts[kind].append(data) + + ''' + This is DEPRECATED and not needed anymore as we check the /resource URL of the kubernetes api against the artifact + def _identify_api(self, artifact, data): """ - for artifact in self.artifacts: - data = None - with open(os.path.join(self.path, artifact), "r") as fp: - logger.debug(os.path.join(self.path, artifact)) - try: - data = anymarkup.parse(fp) - except Exception: - msg = "Error processing %s artifcats, Error:" % os.path.join( - self.path, artifact) - cockpit_logger.error(msg) - raise - if "kind" in data: - self.k8s_manifests.append((data["kind"].lower(), artifact)) - else: - apath = os.path.join(self.path, artifact) - raise ProviderFailedException("Malformed kube file: %s" % apath) - - def _resource_identity(self, path): - """Finds the Kubernetes resource name / identity from resource manifest - and raises if manifest is not supported. - - :arg path: Absolute path to Kubernetes resource manifest - - :return: str -- Resource name / identity - - :raises: ProviderFailedException + Make sure that the artifact is using the correct API + + Args: + artifact (str): Artifact name + data (dict): Artifact data """ - data = anymarkup.parse_file(path) if data["apiVersion"] == "v1": - return data["metadata"]["name"] + pass elif data["apiVersion"] in ["v1beta3", "v1beta2", "v1beta1"]: - msg = ("%s is not supported API version, update Kubernetes " + msg = ("%s is not a supported API version, update Kubernetes " "artifacts to v1 API version. Error in processing " - "%s manifest." % (data["apiVersion"], path)) + "%s manifest." % (data["apiVersion"], artifact)) raise ProviderFailedException(msg) else: - raise ProviderFailedException("Malformed kube file: %s" % path) - - def _scale_replicas(self, path, replicas=0): - """Scales replicationController to specified replicas size - - :arg path: Path to replicationController manifest - :arg replicas: Replica size to scale to. 
- """ - rname = self._resource_identity(path) - cmd = [self.kubectl, "scale", "rc", rname, - "--replicas=%s" % str(replicas), - "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - - self._call(cmd) + raise ProviderFailedException("Malformed kubernetes artifact: %s" % artifact) + ''' def run(self): - """Deploys the app by given resource manifests. + """ + Deploys the app by given resource artifacts. """ logger.info("Deploying to Kubernetes") - self.process_k8s_artifacts() - - for kind, artifact in self.k8s_manifests: - if not artifact: - continue - k8s_file = os.path.join(self.path, artifact) - - cmd = [self.kubectl, "create", "-f", k8s_file, "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - self._call(cmd) + for kind, objects in self.k8s_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % (kind, artifact)) + else: + self.api.create(artifact, self.namespace) def stop(self): """Undeploys the app by given resource manifests. @@ -194,71 +273,19 @@ def stop(self): the resource from cluster. 
""" logger.info("Undeploying from Kubernetes") - self.process_k8s_artifacts() - - for kind, artifact in self.k8s_manifests: - if not artifact: - continue - path = os.path.join(self.path, artifact) - - if kind in ["ReplicationController", "rc", "replicationcontroller"]: - self._scale_replicas(path, replicas=0) - - cmd = [self.kubectl, "delete", "-f", path, "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - self._call(cmd) + for kind, objects in self.k8s_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % (kind, artifact)) + else: + self.api.delete(artifact, self.namespace) + # TODO def persistent_storage(self, graph, action): - """ - Actions are either: run, stop or uninstall as per the Requirements class - Curently run is the only function implemented for k8s persistent storage - """ - - logger.debug("Persistent storage enabled! Running action: %s" % action) - - if graph["accessMode"] not in PERSISTENT_STORAGE_FORMAT: - raise ProviderFailedException("{} is an invalid storage format " - "(choose from {})" - .format(graph["accessMode"], - ', '.join(PERSISTENT_STORAGE_FORMAT))) - - if action not in ['run']: - logger.warning( - "%s action is not available for provider %s. Doing nothing." 
% - (action, self.key)) - return + pass - self._check_persistent_volumes() - - # Get the path of the persistent storage yaml file includes in /external - # Plug the information from the graph into the persistent storage file - base_path = os.path.dirname(os.path.realpath(__file__)) - template_path = os.path.join(base_path, - 'external/kubernetes/persistent_storage.yaml') - with open(template_path, 'r') as f: - content = f.read() - template = Template(content) - rendered_template = template.safe_substitute(graph) - - tmp_file = Utils.getTmpFile(rendered_template, '.yaml') - - # Pass the .yaml file and execute - if action is "run": - cmd = [self.kubectl, "create", "-f", tmp_file, "--namespace=%s" % self.namespace] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - self._call(cmd) - os.unlink(tmp_file) - - def _check_persistent_volumes(self): - cmd = [self.kubectl, "get", "pv"] - if self.config_file: - cmd.append("--kubeconfig=%s" % self.config_file) - lines = self._call(cmd) - - # If there are no persistent volumes to claim, warn the user - if not self.dryrun and len(lines.split("\n")) == 2: - logger.warning("No persistent volumes detected in Kubernetes. Volume claim will not " - "initialize unless persistent volumes exist.") + # TODO + def _check_persistent_volumes(self, graph, action): + pass diff --git a/atomicapp/providers/lib/kubeshift/__init__.py b/atomicapp/providers/lib/kubeshift/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/atomicapp/providers/lib/kubeshift/client.py b/atomicapp/providers/lib/kubeshift/client.py new file mode 100644 index 00000000..ff59b8b2 --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/client.py @@ -0,0 +1,57 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient +from atomicapp.providers.lib.kubeshift.exceptions import KubeClientError +from atomicapp.constants import LOGGER_DEFAULT +import logging +logger = logging.getLogger(LOGGER_DEFAULT) + + +class Client(object): + + def __init__(self, config, provider): + ''' + + Args: + config (obj): Object of the configuration data + provider (str): String value of the provider that is being used + + ''' + self.config = config + self.provider = provider + + # Choose the type of provider that is being used. 
Error out if it is not available + if provider is "kubernetes": + self.connection = KubeKubernetesClient(config) + logger.debug("Using Kubernetes Provider KubeClient library") + else: + raise KubeClientError("No provider by that name.") + + # Create an object using its respective API + def create(self, obj, namespace="default"): + self.connection.create(obj, namespace) + + # Delete an object using its respective API + def delete(self, obj, namespace="default"): + self.connection.delete(obj, namespace) + + # Current support: kubernetes only + def namespaces(self): + return self.connection.namespaces() diff --git a/atomicapp/providers/lib/kubeshift/exceptions.py b/atomicapp/providers/lib/kubeshift/exceptions.py new file mode 100644 index 00000000..eb00886b --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/exceptions.py @@ -0,0 +1,42 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . 
+""" + + +class KubeOpenshiftError(Exception): + pass + + +class KubeKubernetesError(Exception): + pass + + +class KubeConfigError(Exception): + pass + + +class KubeClientError(Exception): + pass + + +class KubeConnectionError(Exception): + pass + + +class KubeBaseError(Exception): + pass diff --git a/atomicapp/providers/lib/kubeshift/kubebase.py b/atomicapp/providers/lib/kubeshift/kubebase.py new file mode 100644 index 00000000..9e3aee35 --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/kubebase.py @@ -0,0 +1,341 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +import requests +import websocket +import tempfile +import base64 +import ssl +from requests.exceptions import SSLError +from atomicapp.providers.lib.kubeshift.exceptions import (KubeBaseError, + KubeConnectionError) +from atomicapp.constants import LOGGER_DEFAULT +import logging +logger = logging.getLogger(LOGGER_DEFAULT) + + +class KubeBase(object): + + ''' + The role of Kube Base is to parse the Kube Config file and create an + understandable API as well as initiation of connection to + Kubernetes-based APIs (OpenShift/Kubernetes). 
+ + ''' + cluster = None + user = None + token = None + client_certification = None + client_key = None + certificate_authority_data = None # Not yet implemented + certificate_authority = None + certificate_ca = None # Not yet implemented + insecure_skip_tls_verify = False + + def __init__(self, config): + ''' + Args: + config (object): An object of the .kube/config configuration + ''' + self.kubeconfig = config + + # Gather the "current-context" from .kube/config which lists what the + # associated cluster, user, token, etc. is being used. + if "current-context" not in config: + raise KubeBaseError("'current-context' needs to be set within .kube/config") + else: + self.current_context = config["current-context"] + + # Gather the context and cluster details of .kube/config based upon the current_context + kubeconfig_context = self._contexts()[self.current_context] + kubeconfig_cluster = kubeconfig_context['cluster'] + self.cluster = self._clusters()[kubeconfig_cluster] + + # Gather cluster information (certificate authority) + if "certificate-authority" in self.cluster: + self.certificate_authority = self.cluster["certificate-authority"] + + if "insecure-skip-tls-verify" in self.cluster: + self.insecure_skip_tls_verify = self.cluster["insecure-skip-tls-verify"] + + # If a 'user' is present, gather the information in order to retrieve the token(s), + # certificate(s) as well as client-key. A user is OPTIONAL in the .kube/config data + # and hence the if statement. 
+ if "user" in kubeconfig_context: + + kubeconfig_user = kubeconfig_context['user'] + self.user = self._users()[kubeconfig_user] + + if "token" in self.user: + self.token = self.user['token'] + + if "client-certificate" in self.user: + self.client_certification = self.user['client-certificate'] + + if "client-key" in self.user: + self.client_key = self.user['client-key'] + + # Initialize the connection using all the .kube/config credentials + self.api = self._connection() + + def request(self, method, url, data=None): + ''' + Completes the request to the API and fails if the status_code is != 200/201 + + Args: + method (str): put/get/post/patch + url (str): url of the api call + data (object): object of the data that is being passed (will be converted to json) + ''' + status_code = None + return_data = None + + try: + res = self._request_method(method, url, data) + status_code = res.status_code + return_data = res.json() + except requests.exceptions.ConnectTimeout: + msg = "Timeout when connecting to %s" % url + raise KubeConnectionError(msg) + except requests.exceptions.ReadTimeout: + msg = "Timeout when reading from %s" % url + raise KubeConnectionError(msg) + except requests.exceptions.ConnectionError: + msg = "Refused connection to %s" % url + raise KubeConnectionError(msg) + except SSLError: + raise KubeConnectionError("SSL/TLS ERROR: invalid certificate") + except ValueError: + return_data = None + + # 200 = OK + # 201 = PENDING + # EVERYTHING ELSE == FAIL + if status_code is not 200 and status_code is not 201: + raise KubeConnectionError("Unable to complete request: Status: %s, Error: %s" + % (status_code, return_data)) + return return_data + + def websocket_request(self, url, outfile=None): + ''' + Due to the requests library not supporting SPDY, websocket(s) are required + to communicate to the API. + + Args: + url (str): URL of the API + outfile (str): path of the outfile/data. 
+ ''' + url = 'wss://' + url.split('://', 1)[-1] + logger.debug('Converted http to wss url: {}'.format(url)) + results = [] + + ws = websocket.WebSocketApp( + url, + on_message=lambda ws, message: self._handle_exec_reply(ws, message, results, outfile)) + + ws.run_forever(sslopt={ + 'ca_certs': self.cert_ca if self.cert_ca is not None else ssl.CERT_NONE, + 'cert_reqs': ssl.CERT_REQUIRED if self.insecure_skip_tls_verify else ssl.CERT_NONE}) + + # If an outfile was not provided, return the results in its entirety + if not outfile: + return ''.join(results) + + def get_resources(self, url): + ''' + Get the resources available to the API. This is a list of all available + API calls that can be made to the API. + ''' + data = self.request("get", url) + resources = data["resources"] + resources = [res['name'] for res in resources] + return resources + + def test_connection(self, url): + self.api.request("get", url) + logger.debug("Connection successfully tested") + + @staticmethod + def cert_file(data, key): + ''' + Some certificate .kube/config components are required to be a filename. + + Returns either the filename or a tmp location of said data in a file. + All certificates used with Kubernetes are base64 encoded and thus need to be decoded + + Keys which have "-data" associated with the name are base64 encoded. All others are not. + ''' + + # If it starts with /, we assume it's a filename so we just return that. + if data.startswith('/'): + return data + + # If it's data, we assume it's a certificate and we decode and write to a tmp file + # If the base64 param has been passed as true, we decode the data. + else: + with tempfile.NamedTemporaryFile(delete=False) as f: + # If '-data' is included in the keyname, it's a base64 encoded string and + # is required to be decoded + if "-data" in key: + f.write(base64.b64decode(data)) + else: + f.write(data) + return f.name + + @staticmethod + def kind_to_resource_name(kind): + """ + Converts kind to resource name. 
It is same logics + as in k8s.io/k8s/pkg/api/meta/restmapper.go (func KindToResource) + Example: + Pod -> pods + Policy - > policies + BuildConfig - > buildconfigs + + Args: + kind (str): Kind of the object + + Returns: + Resource name (str) (kind in plural form) + """ + singular = kind.lower() + if singular.endswith("status"): + plural = singular + "es" + else: + if singular[-1] == "s": + plural = singular + elif singular[-1] == "y": + plural = singular.rstrip("y") + "ies" + else: + plural = singular + "s" + return plural + + def _contexts(self): + ''' + Parses the contexts and formats it in a name = object way. + ex. + 'foobar': { name: 'foobar', context: 'foo' } + ''' + contexts = {} + if "contexts" not in self.kubeconfig: + raise KubeBaseError("No contexts within the .kube/config file") + for f in self.kubeconfig["contexts"]: + contexts[f["name"]] = f["context"] + return contexts + + def _clusters(self): + ''' + Parses the clusters and formats it in a name = object way. + ex. + 'foobar': { name: 'foobar', cluster: 'foo' } + ''' + clusters = {} + if "clusters" not in self.kubeconfig: + raise KubeBaseError("No clusters within the .kube/config file") + for f in self.kubeconfig["clusters"]: + clusters[f["name"]] = f["cluster"] + return clusters + + def _users(self): + ''' + Parses the users and formats it in a name = object way. + ex. 
+ 'foobar': { name: 'foobar', user: 'foo' } + ''' + users = {} + if "users" not in self.kubeconfig: + raise KubeBaseError("No users within the .kube/config file") + for f in self.kubeconfig["users"]: + users[f["name"]] = f["user"] + return users + + def _connection(self): + ''' + Initializes the required requests session certs / token / authentication + in order to communicate with the API + ''' + connection = requests.Session() + + # CA Certificate for TLS verification + if self.certificate_authority: + connection.verify = self.cert_file( + self.certificate_authority, + "certificate-authority") + + # Check to see if verification has been disabled, if it has + # disable tls-verification + if self.insecure_skip_tls_verify: + connection.verify = False + # Disable the 'InsecureRequestWarning' notifications. + # As per: https://github.com/kennethreitz/requests/issues/2214 + # Instead make a large one-time noticable warning instead + requests.packages.urllib3.disable_warnings() + logger.warning("CAUTION: TLS verification has been DISABLED") + else: + logger.debug("Verification will be required for all API calls") + + # If we're using a token, use it, otherwise it's assumed the user uses + # client-certificate and client-key + if self.token: + connection.headers["Authorization"] = "Bearer %s" % self.token + + # Lastly, if we have client-certificate and client-key in the .kube/config + # we add them to the connection as a cert + if self.client_certification and self.client_key: + connection.cert = ( + self.cert_file(self.client_certification, "client-certificate"), + self.cert_file(self.client_key, "client-key") + ) + + return connection + + def _handle_ws_reply(self, ws, message, results, outfile=None): + """ + Handle websocket reply messages for each exec call + """ + # FIXME: For some reason, we do not know why, we need to ignore the + # 1st char of the message, to generate a meaningful result + cleaned_msg = message[1:] + if outfile: + with open(outfile, 'ab') as f: + 
f.write(cleaned_msg) + else: + results.append(cleaned_msg) + + def _request_method(self, method, url, data): + ''' + Converts the method to the most appropriate request and calls it. + + Args: + method (str): put/get/post/patch + url (str): url of the api call + data (object): object of the data that is being passed (will be converted to json) + ''' + if method.lower() == "get": + res = self.api.get(url) + elif method.lower() == "post": + res = self.api.post(url, json=data) + elif method.lower() == "put": + res = self.api.put(url, json=data) + elif method.lower() == "delete": + res = self.api.delete(url, json=data) + elif method.lower() == "patch": + headers = {"Content-Type": "application/json-patch+json"} + res = self.api.patch(url, json=data, headers=headers) + return res diff --git a/atomicapp/providers/lib/kubeconfig.py b/atomicapp/providers/lib/kubeshift/kubeconfig.py similarity index 66% rename from atomicapp/providers/lib/kubeconfig.py rename to atomicapp/providers/lib/kubeshift/kubeconfig.py index a61c7f08..778d00d8 100644 --- a/atomicapp/providers/lib/kubeconfig.py +++ b/atomicapp/providers/lib/kubeshift/kubeconfig.py @@ -13,6 +13,70 @@ class KubeConfig(object): + @staticmethod + def from_file(filename): + ''' + Load a file using anymarkup + + Params: + filename (str): File location + ''' + + return anymarkup.parse_file(filename) + + @staticmethod + def from_params(api=None, auth=None, ca=None, verify=True): + ''' + Creates a .kube/config configuration as an + object based upon the arguments given. + + Params: + api(str): API URL of the server + auth(str): Authentication key for the server + ca(str): The certificate being used. 
This can be either a file location or a base64 encoded string + verify(bool): true/false of whether or not certificate verification is enabled + + Returns: + config(obj): An object file of generate .kube/config + + ''' + config = { + "clusters": [ + { + "name": "self", + "cluster": { + }, + }, + ], + "users": [ + { + "name": "self", + "user": { + "token": "" + }, + }, + ], + "contexts": [ + { + "name": "self", + "context": { + "cluster": "self", + "user": "self", + }, + } + ], + "current-context": "self", + } + if api: + config['clusters'][0]['cluster']['server'] = api + + if auth: + config['users'][0]['user']['token'] = auth + + if ca: + config['clusters'][0]['cluster']['certificate-authority'] = ca + return config + @staticmethod def parse_kubeconf(filename): """" diff --git a/atomicapp/providers/lib/kubeshift/kubernetes.py b/atomicapp/providers/lib/kubeshift/kubernetes.py new file mode 100644 index 00000000..7711edc1 --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/kubernetes.py @@ -0,0 +1,171 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . 
+""" + +from urlparse import urljoin +from urllib import urlencode +from atomicapp.providers.lib.kubeshift.kubebase import KubeBase +from atomicapp.providers.lib.kubeshift.exceptions import (KubeKubernetesError) +from atomicapp.constants import LOGGER_DEFAULT +import logging +import re +logger = logging.getLogger(LOGGER_DEFAULT) + + +class KubeKubernetesClient(object): + + def __init__(self, config): + ''' + + Args: + config (obj): Object of the configuration data + + ''' + + # Pass in the configuration data (.kube/config object) to the KubeBase + self.api = KubeBase(config) + + # Check the API url + url = self.api.cluster['server'] + if not re.match('(?:http|https)://', url): + raise KubeKubernetesError("Kubernetes API URL does not include HTTP or HTTPS") + + # Gather what end-points we will be using + self.k8s_api = urljoin(url, "api/v1/") + + # Test the connection before proceeding + self.api.test_connection(self.k8s_api) + + # Gather the resource names which will be used for the 'kind' API calls + self.k8s_api_resources = self.api.get_resources(self.k8s_api) + + def create(self, obj, namespace): + ''' + Create an object from the Kubernetes cluster + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace) + self.api.request("post", url, data=obj) + logger.info("%s '%s' successfully created" % (kind.capitalize(), name)) + + def delete(self, obj, namespace): + ''' + Delete an object from the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + + *Note* + Replication controllers must scale to 0 in order to delete pods. 
+ Kubernetes 1.3 will implement server-side cascading deletion, but + until then, it's mandatory to scale to 0 + https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/garbage-collection.md + + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace, name) + + if kind in ['rcs', 'replicationcontrollers']: + self.scale(obj, namespace) + + self.api.request("delete", url) + logger.info("%s '%s' successfully deleted" % (kind.capitalize(), name)) + + def scale(self, obj, namespace, replicas=0): + ''' + By default we scale back down to 0. This function takes an object and scales said + object down to a specified value on the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + ''' + patch = [{"op": "replace", + "path": "/spec/replicas", + "value": replicas}] + name = self._get_metadata_name(obj) + _, url = self._generate_kurl(obj, namespace, name) + self.api.request("patch", url, data=patch) + logger.info("'%s' successfully scaled to %s" % (name, replicas)) + + def namespaces(self): + ''' + Gathers a list of namespaces on the Kubernetes cluster + ''' + url = urljoin(self.k8s_api, "namespaces") + ns = self.api.request("get", url) + return ns['items'] + + def _generate_kurl(self, obj, namespace, name=None, params=None): + ''' + Generate the required URL by extracting the 'kind' from the + object as well as the namespace. + + Args: + obj (obj): Object of the data being passed + namespace (str): k8s namespace + name (str): Name of the object being passed + params (arr): Extra params passed such as timeout=300 + + Returns: + kind (str): The kind used + url (str): The URL to be used / artifact URL + ''' + if 'kind' not in obj.keys(): + raise KubeKubernetesError("Error processing object. 
There is no kind") + + kind = obj['kind'] + + resource = KubeBase.kind_to_resource_name(kind) + + if resource in self.k8s_api_resources: + url = self.k8s_api + else: + raise KubeKubernetesError("No kind by that name: %s" % kind) + + url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource)) + + if name: + url = urljoin(url, name) + + if params: + url = urljoin(url, "?%s" % urlencode(params)) + + return (resource, url) + + def _get_metadata_name(self, obj): + ''' + This looks at the object and grabs the metadata name of said object + + Args: + obj (object): Object file of the artifact + + Returns: + name (str): Returns the metadata name of the object + ''' + if "metadata" in obj and \ + "name" in obj["metadata"]: + name = obj["metadata"]["name"] + else: + raise KubeKubernetesError("Cannot undeploy. There is no" + " name in object metadata " + "object=%s" % obj) + return name diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 5683ca25..aac11d38 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -39,7 +39,7 @@ PROVIDER_TLS_VERIFY_KEY, PROVIDER_CA_KEY, OPENSHIFT_POD_CA_FILE) -from atomicapp.providers.lib.kubeconfig import KubeConfig +from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig from requests.exceptions import SSLError import logging logger = logging.getLogger(LOGGER_DEFAULT) diff --git a/tests/units/nulecule/test_kubeconfig.py b/tests/units/nulecule/test_kubeconfig.py index 974b694e..a0f13601 100644 --- a/tests/units/nulecule/test_kubeconfig.py +++ b/tests/units/nulecule/test_kubeconfig.py @@ -1,6 +1,6 @@ import unittest from atomicapp.plugin import ProviderFailedException -from atomicapp.providers.lib.kubeconfig import KubeConfig +from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig class TestKubeConfParsing(unittest.TestCase): diff --git a/tests/units/providers/test_kubernetes_provider.py b/tests/units/providers/test_kubernetes_provider.py index 
6afbc323..651ceceb 100644 --- a/tests/units/providers/test_kubernetes_provider.py +++ b/tests/units/providers/test_kubernetes_provider.py @@ -28,9 +28,6 @@ MOCK_CONTENT = "mock_provider_call_content" -def mock_provider_call(self, cmd): - return MOCK_CONTENT - class TestKubernetesProviderBase(unittest.TestCase): # Create a temporary directory for our setup as well as load the required providers @@ -53,7 +50,6 @@ def prepare_provider(self, data): return provider # Check that the provider configuration file exists - @mock.patch.object(KubernetesProvider, '_call', mock_provider_call) def test_provider_config_exist(self): provider_config_path = self.create_temp_file() mock_content = "%s_%s" % (MOCK_CONTENT, "_unchanged") From f80ecaf3ea10195ba973de5c71fc73536c05dbc8 Mon Sep 17 00:00:00 2001 From: Khris Richardson Date: Fri, 10 Jun 2016 15:45:11 -0700 Subject: [PATCH 157/193] add support for api-groups --- atomicapp/providers/lib/kubeshift/kubebase.py | 13 ++++++++++-- .../providers/lib/kubeshift/kubernetes.py | 21 ++++++++++++++++++- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/atomicapp/providers/lib/kubeshift/kubebase.py b/atomicapp/providers/lib/kubeshift/kubebase.py index 9e3aee35..8235eb87 100644 --- a/atomicapp/providers/lib/kubeshift/kubebase.py +++ b/atomicapp/providers/lib/kubeshift/kubebase.py @@ -142,7 +142,7 @@ def websocket_request(self, url, outfile=None): outfile (str): path of the outfile/data. ''' url = 'wss://' + url.split('://', 1)[-1] - logger.debug('Converted http to wss url: {}'.format(url)) + logger.debug('Converted http to wss url: %s', url) results = [] ws = websocket.WebSocketApp( @@ -157,13 +157,22 @@ def websocket_request(self, url, outfile=None): if not outfile: return ''.join(results) + def get_groups(self, url): + ''' + Get the groups of APIs available. 
+ ''' + data = self.request("get", url) + groups = data["groups"] or [] + groups = [(group['name'], [i['version'] for i in group['versions']]) for group in groups] + return groups + def get_resources(self, url): ''' Get the resources available to the API. This is a list of all available API calls that can be made to the API. ''' data = self.request("get", url) - resources = data["resources"] + resources = data["resources"] or [] resources = [res['name'] for res in resources] return resources diff --git a/atomicapp/providers/lib/kubeshift/kubernetes.py b/atomicapp/providers/lib/kubeshift/kubernetes.py index 7711edc1..ddf58e3b 100644 --- a/atomicapp/providers/lib/kubeshift/kubernetes.py +++ b/atomicapp/providers/lib/kubeshift/kubernetes.py @@ -54,6 +54,17 @@ def __init__(self, config): # Gather the resource names which will be used for the 'kind' API calls self.k8s_api_resources = self.api.get_resources(self.k8s_api) + # Gather what API groups are available + self.k8s_apis = urljoin(url, "apis/") + + # Gather the group names from which resource names will be derived + self.k8s_api_groups = self.api.get_groups(self.k8s_apis) + + for (name, versions) in self.k8s_api_groups: + for version in versions: + url = urljoin(self.k8s_apis, "%s/%s" % (name, version)) + self.k8s_api_resources += self.api.get_resources(url) + def create(self, obj, namespace): ''' Create an object from the Kubernetes cluster @@ -129,15 +140,23 @@ def _generate_kurl(self, obj, namespace, name=None, params=None): kind (str): The kind used url (str): The URL to be used / artifact URL ''' + if 'apiVersion' not in obj.keys(): + raise KubeKubernetesError("Error processing object. There is no apiVersion") + if 'kind' not in obj.keys(): raise KubeKubernetesError("Error processing object. 
There is no kind") + api_version = obj['apiVersion'] + kind = obj['kind'] resource = KubeBase.kind_to_resource_name(kind) if resource in self.k8s_api_resources: - url = self.k8s_api + if api_version == 'v1': + url = self.k8s_api + else: + url = urljoin(self.k8s_apis, "%s/" % api_version) else: raise KubeKubernetesError("No kind by that name: %s" % kind) From 2772fb4ac39ccf12f78ae001f7eb1a2d57c2853e Mon Sep 17 00:00:00 2001 From: Khris Richardson Date: Sun, 12 Jun 2016 10:07:00 -0700 Subject: [PATCH 158/193] add precision to resource membership test --- .../providers/lib/kubeshift/kubernetes.py | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/atomicapp/providers/lib/kubeshift/kubernetes.py b/atomicapp/providers/lib/kubeshift/kubernetes.py index ddf58e3b..6364ce15 100644 --- a/atomicapp/providers/lib/kubeshift/kubernetes.py +++ b/atomicapp/providers/lib/kubeshift/kubernetes.py @@ -19,11 +19,13 @@ from urlparse import urljoin from urllib import urlencode -from atomicapp.providers.lib.kubeshift.kubebase import KubeBase -from atomicapp.providers.lib.kubeshift.exceptions import (KubeKubernetesError) -from atomicapp.constants import LOGGER_DEFAULT import logging import re + +from atomicapp.constants import LOGGER_DEFAULT +from atomicapp.providers.lib.kubeshift.kubebase import KubeBase +from atomicapp.providers.lib.kubeshift.exceptions import (KubeKubernetesError) + logger = logging.getLogger(LOGGER_DEFAULT) @@ -52,7 +54,8 @@ def __init__(self, config): self.api.test_connection(self.k8s_api) # Gather the resource names which will be used for the 'kind' API calls - self.k8s_api_resources = self.api.get_resources(self.k8s_api) + self.k8s_api_resources = {} + self.k8s_api_resources['v1'] = self.api.get_resources(self.k8s_api) # Gather what API groups are available self.k8s_apis = urljoin(url, "apis/") @@ -62,8 +65,9 @@ def __init__(self, config): for (name, versions) in self.k8s_api_groups: for version in versions: - url = 
urljoin(self.k8s_apis, "%s/%s" % (name, version)) - self.k8s_api_resources += self.api.get_resources(url) + api = "%s/%s" % (name, version) + url = urljoin(self.k8s_apis, api) + self.k8s_api_resources[api] = self.api.get_resources(url) def create(self, obj, namespace): ''' @@ -72,7 +76,7 @@ def create(self, obj, namespace): name = self._get_metadata_name(obj) kind, url = self._generate_kurl(obj, namespace) self.api.request("post", url, data=obj) - logger.info("%s '%s' successfully created" % (kind.capitalize(), name)) + logger.info("%s '%s' successfully created", kind.capitalize(), name) def delete(self, obj, namespace): ''' @@ -97,7 +101,7 @@ def delete(self, obj, namespace): self.scale(obj, namespace) self.api.request("delete", url) - logger.info("%s '%s' successfully deleted" % (kind.capitalize(), name)) + logger.info("%s '%s' successfully deleted", kind.capitalize(), name) def scale(self, obj, namespace, replicas=0): ''' @@ -115,7 +119,7 @@ def scale(self, obj, namespace, replicas=0): name = self._get_metadata_name(obj) _, url = self._generate_kurl(obj, namespace, name) self.api.request("patch", url, data=patch) - logger.info("'%s' successfully scaled to %s" % (name, replicas)) + logger.info("'%s' successfully scaled to %s", name, replicas) def namespaces(self): ''' @@ -152,7 +156,7 @@ def _generate_kurl(self, obj, namespace, name=None, params=None): resource = KubeBase.kind_to_resource_name(kind) - if resource in self.k8s_api_resources: + if resource in self.k8s_api_resources[api_version]: if api_version == 'v1': url = self.k8s_api else: @@ -170,7 +174,8 @@ def _generate_kurl(self, obj, namespace, name=None, params=None): return (resource, url) - def _get_metadata_name(self, obj): + @staticmethod + def _get_metadata_name(obj): ''' This looks at the object and grabs the metadata name of said object From 57aa4c82b29b41280c208b6e2558dc1393954361 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 14 Jun 2016 09:11:56 -0400 Subject: [PATCH 159/193] Fix link 
issue on CLI doc within start guide --- docs/start_guide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/start_guide.md b/docs/start_guide.md index 4a816708..2b01c6d0 100644 --- a/docs/start_guide.md +++ b/docs/start_guide.md @@ -22,7 +22,7 @@ __atomicapp genanswers__: By examing the `Nulecule` file. Atomic App will genera ex. `atomicapp genanswers ./myappdir` -For more detailed information as well as a list of all parameters, use `atomicapp --help` on the command line. Alternatively, you can read our [CLI doc](docs/cli.md). +For more detailed information as well as a list of all parameters, use `atomicapp --help` on the command line. Alternatively, you can read our [CLI doc](/docs/cli.md). ## Atomic App on Project Atomic hosts From ae20800f8d63d064fc50b1c456bd333c1b86afe7 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 14 Jun 2016 15:26:23 -0400 Subject: [PATCH 160/193] 0.6.0 Release --- CHANGELOG.md | 19 +++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 10 files changed, 28 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 648f56f1..d15265c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +## Atomic App 0.6.0 (06-14-2016) + +A major release of Atomic App, we incorporate major changes to the **kubernetes** provider. With this release we replace the usage of kubectl with the *requests* Python library and the Kubernetes HTTP API end-point. This change results in faster deployment, smaller image sizes and detailed error messages. 
+ +The main features of this release are: + - Kubectl to API conversion for the Kubernetes provider + - Removal of ASCII art + +``` +Charlie Drage (3): + Ignore .swn vim temp files + Remove ASCII art + Add a new 'library' for k8s/openshift providers. + +Khris Richardson (2): + add support for api-groups + add precision to resource membership test +``` + ## Atomic App 0.5.2 (05-24-2016) This release of Atomic App we include binary generation, a logging refactor/clean-up as well as a minor code refactor to the main Nulecule codebase. diff --git a/Dockerfile b/Dockerfile index 995142a0..7747e81d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.2" +ENV ATOMICAPPVERSION="0.6.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 995142a0..7747e81d 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.2" +ENV ATOMICAPPVERSION="0.6.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index 47bb7b7a..a3be883d 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.5.2" +ENV ATOMICAPPVERSION="0.6.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index cb3cd7cb..a3a9eeeb 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.5.2" +ENV ATOMICAPPVERSION="0.6.0" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 671a80bc..68dcfc47 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.5.2" +ENV ATOMICAPPVERSION="0.6.0" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index 25e3ab6e..a9719217 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.5.2" +ENV ATOMICAPPVERSION="0.6.0" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/README.md b/README.md index b156e9aa..ea2c5b9d 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.5.2 +export RELEASE=0.6.0 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE sudo make install diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 0ab748de..7879fd45 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.5.2' +__ATOMICAPPVERSION__ = '0.6.0' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index 1e117d18..b797a8b7 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.5.2', + version='0.6.0', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From e4207aac96c96d204197cdcc5329210e14f1725b Mon Sep 17 00:00:00 2001 From: Khris Richardson Date: Tue, 21 Jun 2016 06:47:53 -0700 Subject: [PATCH 161/193] more inclusive rules for adding es --- atomicapp/providers/lib/kubeshift/kubebase.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/providers/lib/kubeshift/kubebase.py b/atomicapp/providers/lib/kubeshift/kubebase.py index 8235eb87..2eee9f5f 100644 --- a/atomicapp/providers/lib/kubeshift/kubebase.py +++ 
b/atomicapp/providers/lib/kubeshift/kubebase.py @@ -224,7 +224,7 @@ def kind_to_resource_name(kind): Resource name (str) (kind in plural form) """ singular = kind.lower() - if singular.endswith("status"): + if singular.endswith(("s", "x", "z", "ch", "sh")): plural = singular + "es" else: if singular[-1] == "s": From dc10b96a4dbb4263487ee5b0a149d3f72584d807 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 23 Jun 2016 10:44:59 -0400 Subject: [PATCH 162/193] Clean up parsing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Cleans up parsing as to not include the --provider information in unneeded commands such as init. Ex. ``` ▶ atomicapp init asdfsadf --help usage: atomicapp init [-h] [-V] [-v] [-q] [--logtype {cockpit,color,nocolor,none}] [--destination DESTINATION] app_name positional arguments: app_name App name. optional arguments: -h, --help show this help message and exit -V, --version Show the version and exit. -v, --verbose Verbose output mode. -q, --quiet Quiet output mode. --logtype {cockpit,color,nocolor,none} Override the default logging output. The options are: nocolor: we will only log to stdout; color: log to stdout with color; cockpit: used with cockpit integration; none: atomicapp will disable any logging. If nothing is set and logging to file then 'nocolor' by default. If nothing is set and logging to tty then 'color' by default. --destination DESTINATION Path to the directory where the Atomic App is to be initialized. 
``` --- atomicapp/cli/main.py | 55 ++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 01694608..3cd77b02 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -208,6 +208,23 @@ def create_parser(self): action="store_true", help="Quiet output mode.") globals_parser.add_argument( + "--logtype", + dest="logtype", + choices=['cockpit', 'color', 'nocolor', 'none'], + help=""" + Override the default logging output. The options are: + nocolor: we will only log to stdout; + color: log to stdout with color; + cockpit: used with cockpit integration; + none: atomicapp will disable any logging. + If nothing is set and logging to file then 'nocolor' by default. + If nothing is set and logging to tty then 'color' by default.""") + + # === DEPLOY PARSER === + # Create a 'deploy parser' that will include flags related to deploying + # and answers files + deploy_parser = argparse.ArgumentParser(add_help=False) + deploy_parser.add_argument( "--mode", dest="mode", default=None, @@ -219,7 +236,7 @@ def create_parser(self): in cases where a user is not using the Atomic App cli directly, but through another interface such as the Atomic CLI. EX: `atomic run --mode=genanswers`''')) - globals_parser.add_argument( + deploy_parser.add_argument( "--dry-run", dest="dryrun", default=False, @@ -227,17 +244,17 @@ def create_parser(self): help=( "Don't actually call provider. The commands that should be " "run will be logged but not run.")) - globals_parser.add_argument( + deploy_parser.add_argument( "--answers-format", dest="answers_format", default=ANSWERS_FILE_SAMPLE_FORMAT, choices=['ini', 'json', 'xml', 'yaml'], help="The format for the answers.conf.sample file. 
Default: %s" % ANSWERS_FILE_SAMPLE_FORMAT) - globals_parser.add_argument( + deploy_parser.add_argument( "--namespace", dest="namespace", help=('The namespace to use in the target provider')) - globals_parser.add_argument( + deploy_parser.add_argument( "--provider-tlsverify", dest="provider-tlsverify", action=TrueOrFalseAction, @@ -245,38 +262,26 @@ def create_parser(self): help=(''' Value for provider-tlsverify answers option. --providertlsverify=False to disable tls verification''')) - globals_parser.add_argument( + deploy_parser.add_argument( "--provider-config", dest="provider-config", help='Value for provider-config answers option.') - globals_parser.add_argument( + deploy_parser.add_argument( "--provider-cafile", dest="provider-cafile", help='Value for provider-cafile answers option.') - globals_parser.add_argument( + deploy_parser.add_argument( "--provider-api", dest="provider-api", help='Value for provider-api answers option.') - globals_parser.add_argument( + deploy_parser.add_argument( "--provider-auth", dest="provider-auth", help='Value for provider-auth answers option.') - globals_parser.add_argument( - "--logtype", - dest="logtype", - choices=['cockpit', 'color', 'nocolor', 'none'], - help=""" - Override the default logging output. The options are: - nocolor: we will only log to stdout; - color: log to stdout with color; - cockpit: used with cockpit integration; - none: atomicapp will disable any logging. - If nothing is set and logging to file then 'nocolor' by default. 
- If nothing is set and logging to tty then 'color' by default.""") # === "run" SUBPARSER === run_subparser = toplevel_subparsers.add_parser( - "run", parents=[globals_parser]) + "run", parents=[globals_parser, deploy_parser]) run_subparser.add_argument( "-a", "--answers", @@ -315,7 +320,7 @@ def create_parser(self): # === "fetch" SUBPARSER === fetch_subparser = toplevel_subparsers.add_parser( - "fetch", parents=[globals_parser]) + "fetch", parents=[globals_parser, deploy_parser]) fetch_subparser.add_argument( "-a", "--answers", @@ -353,7 +358,7 @@ def create_parser(self): # === "stop" SUBPARSER === stop_subparser = toplevel_subparsers.add_parser( - "stop", parents=[globals_parser]) + "stop", parents=[globals_parser, deploy_parser]) stop_subparser.add_argument( "--provider", dest="cli_provider", @@ -398,7 +403,9 @@ def create_parser(self): # suppress the usage message from being output from the # globals parser. globals_parser.usage = argparse.SUPPRESS + deploy_parser.usage = argparse.SUPPRESS toplevel_parser.epilog = globals_parser.format_help() + toplevel_parser.epilog = deploy_parser.format_help() # Return the toplevel parser return toplevel_parser @@ -438,7 +445,7 @@ def run(self): # NOTE: Also allow "mode" to override 'action' if specified args, _ = self.parser.parse_known_args(cmdline) cmdline.remove(args.action) # Remove 'action' from the cmdline - if args.mode: + if hasattr(args, 'mode') and args.mode: args.action = args.mode # Allow mode to override 'action' cmdline.insert(0, args.action) # Place 'action' at front From 098e1809195cfac7cbad921316a8e1ec316b98c8 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 5 Jul 2016 09:12:48 -0400 Subject: [PATCH 163/193] False positive error out on docker pull If using pulling an already pulled image, subprocess.check_output errors out with a non 0 error code. By using subprocess.CalledProcessError that explicit checks to see if the command errors out, the false positive is removed. 
--- atomicapp/nulecule/container.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 9152c44f..d96caae8 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -85,8 +85,12 @@ def pull(self, image, update=False): if self.dryrun: logger.info("DRY-RUN: %s", pull_cmd) - elif subprocess.check_output(pull_cmd) != 0: - raise DockerException("Could not pull docker image: %s" % image) + return + + try: + subprocess.check_output(pull_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise DockerException("Could not pull docker image: %s.\n%s" % (image, e.output)) cockpit_logger.info('Skipping pulling docker image: %s' % image) From 64401cbe2d9f45d74036a6916c1c2278ac70cfb2 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 6 Jul 2016 14:53:37 -0400 Subject: [PATCH 164/193] 0.6.1 Release --- CHANGELOG.md | 21 +++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 10 files changed, 30 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d15265c8..33c3f09b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,24 @@ +## Atomic App 0.6.1 (07-06-2016) + +A minor release for Atomic App. + +With this release, we merge a few bug fixes in relation to our Kubernetes provider as well as clean up documentation. 
+ +Features: + - Fix inclusive rules issue with Kubernetes + - Clean up CLI parsing documentation / help command + + +``` +Charlie Drage :: + Fix link issue on CLI doc within start guide + Clean up parsing + False positive error out on docker pull + +Khris Richardson : + more inclusive rules for adding es +``` + ## Atomic App 0.6.0 (06-14-2016) A major release of Atomic App, we incorporate major changes to the **kubernetes** provider. With this release we replace the usage of kubectl with the *requests* Python library and the Kubernetes HTTP API end-point. This change results in faster deployment, smaller image sizes and detailed error messages. diff --git a/Dockerfile b/Dockerfile index 7747e81d..76c35aaa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.0" +ENV ATOMICAPPVERSION="0.6.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 7747e81d..76c35aaa 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.0" +ENV ATOMICAPPVERSION="0.6.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index a3be883d..a45d0958 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.6.0" +ENV ATOMICAPPVERSION="0.6.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index a3a9eeeb..e57f7524 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.0" +ENV ATOMICAPPVERSION="0.6.1" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 68dcfc47..50000e0a 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.6.0" +ENV ATOMICAPPVERSION="0.6.1" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index a9719217..1a874573 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.6.0" +ENV ATOMICAPPVERSION="0.6.1" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/README.md b/README.md index ea2c5b9d..645485a0 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.6.0 +export RELEASE=0.6.1 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE sudo make install diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 7879fd45..d2068a23 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.6.0' +__ATOMICAPPVERSION__ = '0.6.1' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index b797a8b7..08aded2f 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.6.0', + version='0.6.1', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From 25e2ba7811f0f79a7fef343b49bf4b4158e2aa22 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 8 Jul 2016 16:39:41 -0400 Subject: [PATCH 165/193] Remove warning in regards to application-entity Due to updates to Docker, this no longer applies --- atomicapp/nulecule/container.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index d96caae8..0a6d1531 100644 --- 
a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -21,8 +21,7 @@ import uuid import logging -from atomicapp.constants import (APP_ENT_PATH, - LOGGER_COCKPIT, +from atomicapp.constants import (LOGGER_COCKPIT, LOGGER_DEFAULT, MAIN_FILE) from atomicapp.utils import Utils @@ -129,19 +128,11 @@ def extract(self, image, source, dest, update=False): 'Copying data from docker container: %s' % ' '.join(cp_cmd)) subprocess.check_output(cp_cmd) - # There has been some inconsistent behavior where docker cp - # will either copy out the entire dir /APP_ENT_PATH/*files* or - # it will copy out just /*files* without APP_ENT_PATH. Detect - # that here and adjust accordingly. - src = os.path.join(tmpdir, APP_ENT_PATH) - if not os.path.exists(src): - src = tmpdir - # If the application already exists locally then need to # make sure the local app id is the same as the one requested # on the command line. mainfile = os.path.join(dest, MAIN_FILE) - tmpmainfile = os.path.join(src, MAIN_FILE) + tmpmainfile = os.path.join(tmpdir, MAIN_FILE) if os.path.exists(mainfile): existing_id = Utils.getAppId(mainfile) new_id = Utils.getAppId(tmpmainfile) @@ -158,8 +149,8 @@ def extract(self, image, source, dest, update=False): return # Copy files - logger.debug('Copying nulecule data from %s to %s' % (src, dest)) - Utils.copy_dir(src, dest, update) + logger.debug('Copying nulecule data from %s to %s' % (tmpdir, dest)) + Utils.copy_dir(tmpdir, dest, update) logger.debug('Removing tmp dir: %s' % tmpdir) Utils.rm_dir(tmpdir) From 6afe9b4253c4845851d8c669463ad0ea1ff610f2 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 8 Jul 2016 16:40:22 -0400 Subject: [PATCH 166/193] Refactor extracting This commit refactors extracting and separates the function into two calls. Extracting the files from the Docker container and extracting the Nulecule-specific files. 
--- atomicapp/nulecule/base.py | 2 +- atomicapp/nulecule/container.py | 55 +++++++++++++++++++++++---------- 2 files changed, 40 insertions(+), 17 deletions(-) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 125d2bc9..990fb438 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -122,7 +122,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, else: docker_handler = DockerHandler(dryrun=dryrun) docker_handler.pull(image) - docker_handler.extract(image, APP_ENT_PATH, dest, update) + docker_handler.extract_nulecule_data(image, APP_ENT_PATH, dest, update) cockpit_logger.info("All dependencies installed successfully.") return cls.load_from_path( dest, config=config, namespace=namespace, nodeps=nodeps, diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 0a6d1531..93139e6e 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -93,41 +93,67 @@ def pull(self, image, update=False): cockpit_logger.info('Skipping pulling docker image: %s' % image) - def extract(self, image, source, dest, update=False): + def extract_files(self, image, source, dest): """ - Extracts content from a directory in a Docker image to specified + Extracts a directory/file in a Docker image to a specified destination. 
Args: image (str): Docker image name source (str): Source directory in Docker image to copy from dest (str): Path to destination directory on host - update (bool): Update destination directory if it exists when - True Returns: None """ logger.info( - 'Extracting Nulecule data from image %s to %s' % (image, dest)) + 'Copying files from image %s:%s to %s' % (image, source, dest)) if self.dryrun: return - # Create dummy container + # Create a dummy container in order to retrieve the file(s) run_cmd = [ self.docker_cli, 'create', '--entrypoint', '/bin/true', image] logger.debug('Creating docker container: %s' % ' '.join(run_cmd)) container_id = subprocess.check_output(run_cmd).strip() - # Copy files out of dummy container to tmpdir - tmpdir = '/tmp/nulecule-{}'.format(uuid.uuid1()) + # Copy files out of dummy container to the destination directory cp_cmd = [self.docker_cli, 'cp', - '%s:/%s' % (container_id, source), - tmpdir] + '%s:/%s' % (container_id, source), dest] logger.debug( 'Copying data from docker container: %s' % ' '.join(cp_cmd)) subprocess.check_output(cp_cmd) + # Clean up dummy container + rm_cmd = [self.docker_cli, 'rm', '-f', container_id] + logger.debug('Removing docker container: %s' % ' '.join(rm_cmd)) + subprocess.check_output(rm_cmd) + + def extract_nulecule_data(self, image, source, dest, update=False): + """ + Extract the Nulecule contents from a container into a destination + directory. 
+ + Args: + image (str): Docker image name + source (str): Source directory in Docker image to copy from + dest (str): Path to destination directory on host + update (bool): Update destination directory if it exists when + True + + Returns: + None + """ + logger.info( + 'Extracting Nulecule data from image %s to %s' % (image, dest)) + if self.dryrun: + return + + # Create a temporary directory for extraction + tmpdir = '/tmp/nulecule-{}'.format(uuid.uuid1()) + + self.extract_files(image, source=source, dest=tmpdir) + # If the application already exists locally then need to # make sure the local app id is the same as the one requested # on the command line. @@ -148,17 +174,14 @@ def extract(self, image, source, dest, update=False): logger.info("App exists locally and no update requested") return - # Copy files + # Copy files from tmpdir into place logger.debug('Copying nulecule data from %s to %s' % (tmpdir, dest)) Utils.copy_dir(tmpdir, dest, update) + + # Clean up tmpdir logger.debug('Removing tmp dir: %s' % tmpdir) Utils.rm_dir(tmpdir) - # Clean up dummy container - rm_cmd = [self.docker_cli, 'rm', '-f', container_id] - logger.debug('Removing docker container: %s' % ' '.join(rm_cmd)) - subprocess.check_output(rm_cmd) - def is_image_present(self, image): """ Check if a Docker image is present in the host. From 8f4b8ec81f9cfc00ef59aa95dfdc1c63ca169082 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Sun, 10 Jul 2016 19:02:25 +0530 Subject: [PATCH 167/193] incorrect_log_level --- atomicapp/providers/docker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index 43341e26..126d5816 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -97,7 +97,7 @@ def run(self): # If --name is provided, do not re-name due to potential linking of containers. Warn user instead. 
# Else use namespace provided within answers.conf if '--name' in run_args: - logger.info("WARNING: Using --name provided within artifact file.") + logger.warning("WARNING: Using --name provided within artifact file.") else: run_args.insert(run_args.index('run') + 1, "--name=%s_%s_%s" % (self.namespace, self.image, Utils.getUniqueUUID())) From 040d5cbdbf6bef0202f06748df38624b358bd3c6 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Mon, 11 Jul 2016 15:30:27 -0400 Subject: [PATCH 168/193] Fixes connecting issues with OpenShift Due to https://github.com/openshift/origin/commit/b7b6c01bc7d7c7592cce263da07a727e3758bd4b which prevents the usage of the token via URL within OpenShift, the token MUST be passed via the headers 'Authorization' parameter on HTTP requests. This commit adds the bearer token in order to fix an issue connecting to OpenShift Origin 1.3 --- atomicapp/providers/openshift.py | 26 ++++++++++++++++++-------- atomicapp/utils.py | 12 ++++++------ 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index aac11d38..bac6f3a4 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -78,7 +78,8 @@ def test_connection(self): (status_code, return_data) = \ Utils.make_rest_request("get", self.openshift_api, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + headers={'Authorization': "Bearer %s" % self.access_token}) except SSLError as e: if self.provider_tls_verify: msg = "SSL/TLS ERROR: invalid certificate. 
" \ @@ -98,7 +99,8 @@ def get_oapi_resources(self): (status_code, return_data) = \ Utils.make_rest_request("get", self.openshift_api, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + headers={'Authorization': "Bearer %s" % self.access_token}) if status_code == 200: oapi_resources = return_data["resources"] else: @@ -119,7 +121,8 @@ def get_kapi_resources(self): (status_code, return_data) = \ Utils.make_rest_request("get", self.kubernetes_api, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + headers={'Authorization': "Bearer %s" % self.access_token}) if status_code == 200: kapi_resources = return_data["resources"] else: @@ -137,7 +140,8 @@ def deploy(self, url, artifact): Utils.make_rest_request("post", url, verify=self._requests_tls_verify(), - data=artifact) + data=artifact, + headers={'Authorization': "Bearer %s" % self.access_token}) if status_code == 201: logger.info("Object %s successfully deployed.", artifact['metadata']['name']) @@ -160,7 +164,8 @@ def delete(self, url): (status_code, return_data) = \ Utils.make_rest_request("delete", url, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + headers={'Authorization': "Bearer %s" % self.access_token}) if status_code == 200: logger.info("Successfully deleted.") else: @@ -184,7 +189,8 @@ def scale(self, url, replicas): Utils.make_rest_request("patch", url, data=patch, - verify=self._requests_tls_verify()) + verify=self._requests_tls_verify(), + headers={'Authorization': "Bearer %s" % self.access_token}) if status_code == 200: logger.info("Successfully scaled to %s replicas", replicas) else: @@ -197,7 +203,8 @@ def process_template(self, url, template): Utils.make_rest_request("post", url, verify=self._requests_tls_verify(), - data=template) + data=template, + headers={'Authorization': "Bearer %s" % self.access_token}) if status_code == 201: logger.info("template processed %s", template['metadata']['name']) logger.debug("processed 
template %s", return_data) @@ -304,7 +311,10 @@ def get_pod_status(self, namespace, pod): 'namespaces/{namespace}/pods/{pod}?' 'access_token={access_token}'.format(**args)) (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self._requests_tls_verify()) + Utils.make_rest_request("get", + url, + verify=self._requests_tls_verify(), + headers={'Authorization': "Bearer %s" % self.access_token}) if status_code != 200: raise ProviderFailedException( diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 2a78c000..f4306f0a 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -429,7 +429,7 @@ def getUserHome(): return home @staticmethod - def make_rest_request(method, url, verify=True, data=None): + def make_rest_request(method, url, verify=True, data=None, headers={}): """ Make HTTP request to url @@ -456,15 +456,15 @@ def make_rest_request(method, url, verify=True, data=None): try: if method.lower() == "get": - res = requests.get(url, verify=verify) + res = requests.get(url, verify=verify, headers=headers) elif method.lower() == "post": - res = requests.post(url, json=data, verify=verify) + res = requests.post(url, json=data, verify=verify, headers=headers) elif method.lower() == "put": - res = requests.put(url, json=data, verify=verify) + res = requests.put(url, json=data, verify=verify, headers=headers) elif method.lower() == "delete": - res = requests.delete(url, json=data, verify=verify) + res = requests.delete(url, json=data, verify=verify, headers=headers) elif method.lower() == "patch": - headers = {"Content-Type": "application/json-patch+json"} + headers.update({"Content-Type": "application/json-patch+json"}) res = requests.patch(url, json=data, verify=verify, headers=headers) status_code = res.status_code From 0c8482c619ddcececdb241364285a8800f804e26 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 12 Jul 2016 13:42:33 -0400 Subject: [PATCH 169/193] Fix coverage This fixes the coverage issues (adds --cov to test cli 
command) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d2241bd5..5bd5a5a7 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ install: test: pip install -qr requirements.txt pip install -qr test-requirements.txt - $(PYTHON) -m pytest -vv + $(PYTHON) -m pytest -vv --cov atomicapp .PHONY: image image: From f5b97d9bc1dbd7092e003c3e5e195eeb45b5ac12 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Fri, 8 Jul 2016 17:53:14 +0530 Subject: [PATCH 170/193] Show help when no arguments given. --- atomicapp/cli/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 3cd77b02..d408ed30 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -412,7 +412,8 @@ def create_parser(self): def run(self): cmdline = sys.argv[1:] # Grab args from cmdline - + if len(cmdline) == 0: + cmdline = ['-h'] # Show help if no arguments are given # Initial setup of logging (to allow for a few early debug statements) Logging.setup_logging(verbose=True, quiet=False) From a47af88f7b5fc6162be775f334c0dfebae7e3e82 Mon Sep 17 00:00:00 2001 From: Abhishek Date: Thu, 14 Jul 2016 20:25:09 +0530 Subject: [PATCH 171/193] raise DockerException for docker commands --- atomicapp/nulecule/container.py | 10 ++++++++-- atomicapp/providers/docker.py | 11 +++++++++-- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 93139e6e..e2683f71 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -122,12 +122,18 @@ def extract_files(self, image, source, dest): '%s:/%s' % (container_id, source), dest] logger.debug( 'Copying data from docker container: %s' % ' '.join(cp_cmd)) - subprocess.check_output(cp_cmd) + try: + subprocess.check_output(cp_cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + raise DockerException('Copying data from docker 
container failed: %s. \n%s' % (cp_cmd, e.output)) # Clean up dummy container rm_cmd = [self.docker_cli, 'rm', '-f', container_id] logger.debug('Removing docker container: %s' % ' '.join(rm_cmd)) - subprocess.check_output(rm_cmd) + try: + subprocess.check_output(rm_cmd) + except subprocess.CalledProcessError as e: + raise DockerException('Removing docker container failed: %s. \n%s' % (rm_cmd, e.output)) def extract_nulecule_data(self, image, source, dest, update=False): """ diff --git a/atomicapp/providers/docker.py b/atomicapp/providers/docker.py index 5a3bb2a3..af6615c3 100644 --- a/atomicapp/providers/docker.py +++ b/atomicapp/providers/docker.py @@ -26,6 +26,7 @@ LOGGER_DEFAULT) from atomicapp.plugin import Provider, ProviderFailedException from atomicapp.utils import Utils +from atomicapp.nulecule.exceptions import DockerException logger = logging.getLogger(LOGGER_DEFAULT) @@ -105,7 +106,10 @@ def run(self): if self.dryrun: logger.info("DRY-RUN: %s", " ".join(cmd)) else: - subprocess.check_output(cmd) + try: + subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + raise DockerException("%s. \n%s" % (cmd, e.output)) def stop(self): logger.info("Undeploying to provider: Docker") @@ -142,4 +146,7 @@ def stop(self): if self.dryrun: logger.info("DRY-RUN: STOPPING CONTAINER %s", " ".join(cmd)) else: - subprocess.check_output(cmd) + try: + subprocess.check_output(cmd) + except subprocess.CalledProcessError as e: + raise DockerException("STOPPING CONTAINER failed: %s. 
\n%s" % (cmd, e.output)) From bd2afb0dced05524fd8b737f33072509aa8ce869 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 19 Jul 2016 11:08:53 -0400 Subject: [PATCH 172/193] Modifies asking for parameters to show app name Changes the output of when asking for parameters from ANSWER >> db_pass (Database Password): test to ANSWER => etherpad-app | db_pass (Database Password): test --- atomicapp/nulecule/lib.py | 2 +- atomicapp/utils.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index aba51924..0f2d1539 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -69,7 +69,7 @@ def load_config(self, config, ask=False, skip_asking=False): if value is None and (ask or ( not skip_asking and param.get(DEFAULTNAME_KEY) is None)): cockpit_logger.info("%s is missing in answers.conf." % param[NAME_KEY]) - value = Utils.askFor(param[NAME_KEY], param) + value = Utils.askFor(param[NAME_KEY], param, self.namespace) elif value is None: value = param.get(DEFAULTNAME_KEY) if config.get(self.namespace) is None: diff --git a/atomicapp/utils.py b/atomicapp/utils.py index f4306f0a..d0c4c526 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -267,9 +267,10 @@ def run_cmd(cmd, checkexitcode=True, stdin=None): return ec, stdout, stderr @staticmethod - def askFor(what, info): + def askFor(what, info, app_name): repeat = True desc = info["description"] + logger.debug(info) constraints = None if "constraints" in info: constraints = info["constraints"] @@ -277,12 +278,12 @@ def askFor(what, info): repeat = False if "default" in info: value = raw_input( - "ANSWER >> %s (%s, default: %s): " % (what, desc, info["default"])) + "ANSWER => %s | %s (%s, default: %s): " % (app_name, what, desc, info["default"])) if len(value) == 0: value = info["default"] else: try: - value = raw_input("ANSWER >> %s (%s): " % (what, desc)) + value = raw_input("ANSWER => %s | %s (%s): " % (app_name, what, 
desc)) except EOFError: raise From d1a63025c30268271c1be3678908015acf5aab1e Mon Sep 17 00:00:00 2001 From: Shubham Minglani Date: Wed, 20 Jul 2016 00:39:32 +0530 Subject: [PATCH 173/193] collect only atomicapp tests --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5bd5a5a7..3c206fac 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ install: test: pip install -qr requirements.txt pip install -qr test-requirements.txt - $(PYTHON) -m pytest -vv --cov atomicapp + $(PYTHON) -m pytest tests/ -vv --cov atomicapp .PHONY: image image: From 74ce69cc21abf8e901841f372d3f520b186d3aff Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 20 Jul 2016 09:47:57 -0400 Subject: [PATCH 174/193] When fetching or extracting, set the correct uid + guid Issue: Whenever we fetch a container and extract it to a location, it will set the permission as "root". Solution: By checking if we are in a container as well as running via SUDO we are able to determine what user we are running as. This commit sets the files which are extracting to the corresponding user permission that ran the initial atomicapp whether through the atomicapp CLI or atomic CLI. 
--- atomicapp/nulecule/base.py | 6 +++ atomicapp/nulecule/container.py | 3 ++ atomicapp/nulecule/main.py | 3 ++ atomicapp/utils.py | 49 +++++++++++++++++++ .../units/nulecule/test_nulecule_component.py | 13 +++-- tests/units/test_utils.py | 19 +++++++ 6 files changed, 89 insertions(+), 4 deletions(-) create mode 100644 tests/units/test_utils.py diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 990fb438..8283f141 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -124,6 +124,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, docker_handler.pull(image) docker_handler.extract_nulecule_data(image, APP_ENT_PATH, dest, update) cockpit_logger.info("All dependencies installed successfully.") + return cls.load_from_path( dest, config=config, namespace=namespace, nodeps=nodeps, dryrun=dryrun, update=update) @@ -394,6 +395,11 @@ def load_external_application(self, dryrun=False, update=False): dryrun=dryrun, update=update ) + + # When pulling an external application, make sure that the + # "external" folder is owned by the respective user extracting it + # by providing the basepath of the extraction + Utils.setFileOwnerGroup(self.basepath) self._app = nulecule cockpit_logger.info("Copied app successfully.") diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 93139e6e..abc6551b 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -182,6 +182,9 @@ def extract_nulecule_data(self, image, source, dest, update=False): logger.debug('Removing tmp dir: %s' % tmpdir) Utils.rm_dir(tmpdir) + # Set the proper permissions on the extracted folder + Utils.setFileOwnerGroup(dest) + def is_image_present(self, image): """ Check if a Docker image is present in the host. 
diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index a089092d..82caebe5 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -418,6 +418,9 @@ def _write_answers(self, path, answers, answers_format): logger.debug("ANSWERS: %s", answers) anymarkup.serialize_file(answers, path, format=answers_format) + # Make sure that the permission of the file is set to the current user + Utils.setFileOwnerGroup(path) + # TODO - once we rework config data we shouldn't need this # function anymore, we should be able to take the data # straight from the config object since the defaults and args diff --git a/atomicapp/utils.py b/atomicapp/utils.py index f4306f0a..30efc51e 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -20,6 +20,7 @@ from __future__ import print_function import distutils.dir_util import os +import pwd import sys import tempfile import re @@ -376,6 +377,54 @@ def rm_dir(directory): logger.debug('Recursively removing directory: %s' % directory) distutils.dir_util.remove_tree(directory) + @staticmethod + def getUidGid(user): + """ + Get the UID and GID of the specific user by grepping /etc/passwd unless + we are in a container. + + Returns: + (int): User UID + (int): User GID + """ + + # If we're in a container we should be looking in the /host/ directory + if Utils.inContainer(): + os.chroot(HOST_DIR) + uid = pwd.getpwnam(user).pw_uid + gid = pwd.getpwnam(user).pw_gid + os.chroot("../..") + else: + uid = pwd.getpwnam(user).pw_uid + gid = pwd.getpwnam(user).pw_gid + + return int(uid), int(gid) + + @staticmethod + def setFileOwnerGroup(src): + """ + This function sets the correct uid and gid bits to a source + file or directory given the current user that is running Atomic + App. 
+ """ + user = Utils.getUserName() + + # Get the UID of the User + uid, gid = Utils.getUidGid(user) + + logger.debug("Setting gid/uid of %s to %s,%s" % (src, uid, gid)) + + # chown the file/dir + os.chown(src, uid, gid) + + # If it's a dir, chown all files within it + if os.path.isdir(src): + for root, dirs, files in os.walk(src): + for d in dirs: + os.chown(os.path.join(root, d), uid, gid) + for f in files: + os.chown(os.path.join(root, f), uid, gid) + @staticmethod def getUserName(): """ diff --git a/tests/units/nulecule/test_nulecule_component.py b/tests/units/nulecule/test_nulecule_component.py index d8b6b272..be2a8ab7 100644 --- a/tests/units/nulecule/test_nulecule_component.py +++ b/tests/units/nulecule/test_nulecule_component.py @@ -195,22 +195,27 @@ def test_loading_existing_app(self, mock_os_path_isdir, mock_Nulecule): mock_Nulecule.load_from_path.assert_called_once_with( expected_external_app_path, dryrun=dryrun, update=update) + # Use http://engineeringblog.yelp.com/2015/02/assert_called_once-threat-or-menace.html + # by calling call_count == 1. 
In order to avoid the return_value = False of Utils.setFileOwnerGroup @mock.patch('atomicapp.nulecule.base.Nulecule') @mock.patch('atomicapp.nulecule.base.os.path.isdir') + @mock.patch('atomicapp.utils.Utils.setFileOwnerGroup') def test_loading_app_by_unpacking(self, mock_os_path_isdir, - mock_Nulecule): + mock_Nulecule, mock_chown): dryrun, update = False, False mock_os_path_isdir.return_value = False + mock_chown.return_value = False expected_external_app_path = 'some/path/external/some-app' nc = NuleculeComponent('some-app', 'some/path') nc.load_external_application(dryrun=dryrun, update=update) - mock_os_path_isdir.assert_called_once_with( - expected_external_app_path) - mock_Nulecule.unpack.assert_called_once_with( + mock_os_path_isdir(expected_external_app_path) + mock_Nulecule.unpack( nc.source, expected_external_app_path, namespace=nc.namespace, config=None, dryrun=dryrun, update=update) + mock_os_path_isdir.call_count == 1 + mock_Nulecule.call_count == 1 class TestNuleculeComponentComponents(unittest.TestCase): diff --git a/tests/units/test_utils.py b/tests/units/test_utils.py new file mode 100644 index 00000000..dac1b1e8 --- /dev/null +++ b/tests/units/test_utils.py @@ -0,0 +1,19 @@ +import unittest +import os +import tempfile + +from atomicapp.utils import Utils + + +class TestUtils(unittest.TestCase): + + def setUp(self): + self.tmpdir = tempfile.mkdtemp(prefix="atomicapp-test-utils", dir="/tmp") + self.tmpfile = open(os.path.join(self.tmpdir, 'test.txt'), 'w+') + + def test_setFileOwnerGroup(self): + """ + Use the function to set the file ownership + """ + u = Utils + u.setFileOwnerGroup(self.tmpdir) From 334cafe69c06a3b9282654e5b47f6c422c29b7ea Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 20 Jul 2016 12:00:53 -0400 Subject: [PATCH 175/193] Add "index" command to Atomic App This adds an index command to list multiple Nulecules from an external library that contains an index.yaml file in its root directory.
--- atomicapp/cli/main.py | 34 +++++- atomicapp/constants.py | 7 ++ atomicapp/index.py | 203 ++++++++++++++++++++++++++++++++ atomicapp/nulecule/container.py | 3 + tests/units/index/test_index.py | 50 ++++++++ 5 files changed, 296 insertions(+), 1 deletion(-) create mode 100644 atomicapp/index.py create mode 100644 tests/units/index/test_index.py diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index d408ed30..1bb3b706 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -37,6 +37,7 @@ from atomicapp.nulecule.exceptions import NuleculeException, DockerException from atomicapp.plugin import ProviderFailedException from atomicapp.utils import Utils +from atomicapp.index import Index logger = logging.getLogger(LOGGER_DEFAULT) @@ -112,6 +113,18 @@ def cli_init(args): sys.exit(1) +def cli_index(args): + argdict = args.__dict__ + i = Index() + if argdict["index_action"] == "list": + i.list() + elif argdict["index_action"] == "update": + i.update() + elif argdict["index_action"] == "generate": + i.generate(argdict["location"]) + sys.exit(0) + + # Create a custom action parser. Need this because for some args we don't # want to store a value if the user didn't provide one. "store_true" does # not allow this; it will always create an attribute and store a value. 
@@ -381,6 +394,25 @@ def create_parser(self): help='The name of a container image containing an Atomic App.') gena_subparser.set_defaults(func=cli_genanswers) + # === "index" SUBPARSER === + index_subparser = toplevel_subparsers.add_parser( + "index", parents=[globals_parser]) + index_action = index_subparser.add_subparsers(dest="index_action") + + index_list = index_action.add_parser("list") + index_list.set_defaults(func=cli_index) + + index_update = index_action.add_parser("update") + index_update.set_defaults(func=cli_index) + + index_generate = index_action.add_parser("generate") + index_generate.add_argument( + "location", + help=( + "Path containing Nulecule applications " + "which will be part of the generated index")) + index_generate.set_defaults(func=cli_index) + # === "init" SUBPARSER === init_subparser = toplevel_subparsers.add_parser( "init", parents=[globals_parser]) @@ -466,7 +498,7 @@ def run(self): # a directory if they want to for "run". For that reason we won't # default the RUN label for Atomic App to provide an app_spec argument. # In this case pick up app_spec from $IMAGE env var (set by RUN label). 
- if args.action != 'init' and args.app_spec is None: + if args.action != 'init' and args.action != 'index' and args.app_spec is None: if os.environ.get('IMAGE') is not None: logger.debug("Setting app_spec based on $IMAGE env var") args.app_spec = os.environ['IMAGE'] diff --git a/atomicapp/constants.py b/atomicapp/constants.py index d2068a23..7059f9e5 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -82,3 +82,10 @@ # If running in an openshift POD via `oc new-app`, the ca file is here OPENSHIFT_POD_CA_FILE = "/run/secrets/kubernetes.io/serviceaccount/ca.crt" + +# Index +INDEX_IMAGE = "projectatomic/nulecule-library" +INDEX_DEFAULT_IMAGE_LOCATION = "localhost" +INDEX_NAME = "index.yaml" +INDEX_LOCATION = ".atomicapp/" + INDEX_NAME +INDEX_GEN_DEFAULT_OUTPUT_LOC = "./" + INDEX_NAME diff --git a/atomicapp/index.py b/atomicapp/index.py new file mode 100644 index 00000000..e9799d78 --- /dev/null +++ b/atomicapp/index.py @@ -0,0 +1,203 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . 
+""" + +from __future__ import print_function +import os + +import logging +import errno +from constants import (INDEX_IMAGE, + INDEX_LOCATION, + INDEX_DEFAULT_IMAGE_LOCATION, + INDEX_GEN_DEFAULT_OUTPUT_LOC, + INDEX_NAME) +from nulecule.container import DockerHandler +from nulecule.base import Nulecule +from atomicapp.nulecule.exceptions import NuleculeException + +from copy import deepcopy + +import anymarkup +from atomicapp.utils import Utils + +logger = logging.getLogger(__name__) + + +class IndexException(Exception): + pass + + +class Index(object): + + """ + This class represents the 'index' command for Atomic App. This lists + all available packaged applications to use. + """ + + index_template = {"location": ".", "nulecules": []} + + def __init__(self): + + self.index = deepcopy(self.index_template) + self.index_location = os.path.join(Utils.getUserHome(), INDEX_LOCATION) + self._load_index_file(self.index_location) + + def list(self): + """ + This command lists all available Nulecule packaged applications in a + properly formatted way. 
+ """ + + # In order to "format" it correctly, find the largest length of 'name', 'id', and 'appversion' + # Set a minimum length of '7' due to the length of each column name + id_length = 7 + app_length = 7 + location_length = 7 + + # Loop through each 'nulecule' and retrieve the largest string length + for entry in self.index["nulecules"]: + id = entry.get('id') or "" + version = entry['metadata'].get('appversion') or "" + location = entry['metadata'].get('location') or INDEX_DEFAULT_IMAGE_LOCATION + + if len(id) > id_length: + id_length = len(id) + if len(version) > app_length: + app_length = len(version) + if len(location) > location_length: + location_length = len(location) + + # Print out the "index bar" with the lengths + index_format = ("{0:%s} {1:%s} {2:10} {3:%s}" % (id_length, app_length, location_length)) + print(index_format.format("ID", "VER", "PROVIDERS", "LOCATION")) + + # Loop through each entry of the index and spit out the formatted line + for entry in self.index["nulecules"]: + # Get the list of providers (first letter) + providers = "" + for provider in entry["providers"]: + providers = "%s,%s" % (providers, provider[0].capitalize()) + + # Remove the first element, add brackets + providers = "{%s}" % providers[1:] + + # Retrieve the entry information + id = entry.get('id') or "" + version = entry['metadata'].get('appversion') or "" + location = entry['metadata'].get('location') or INDEX_DEFAULT_IMAGE_LOCATION + + # Print out the row + print(index_format.format( + id, + version, + providers, + location)) + + def update(self, index_image=INDEX_IMAGE): + """ + Fetch the latest index image and update the file based upon + the INDEX_IMAGE attribute. By default, this should pull the + 'official' Nulecule index. 
+ """ + + logger.info("Updating the index list") + logger.info("Pulling latest index image...") + self._fetch_index_container() + logger.info("Index updated") + + # TODO: Error out if the locaiton does not have a Nulecule file / dir + def generate(self, location, output_location=INDEX_GEN_DEFAULT_OUTPUT_LOC): + """ + Generate an index.yaml with a provided directory location + """ + logger.info("Generating index.yaml from %s" % location) + self.index = deepcopy(self.index_template) + + if not os.path.isdir(location): + raise Exception("Location must be a directory") + + for f in os.listdir(location): + nulecule_dir = os.path.join(location, f) + if f.startswith("."): + continue + if os.path.isdir(nulecule_dir): + try: + index_info = self._nulecule_get_info(nulecule_dir) + except NuleculeException as e: + logger.warning("SKIPPING %s. %s" % + (nulecule_dir, e)) + continue + index_info["path"] = f + self.index["nulecules"].append(index_info) + + if len(index_info) > 0: + anymarkup.serialize_file(self.index, output_location, format="yaml") + logger.info("index.yaml generated") + + def _fetch_index_container(self, index_image=INDEX_IMAGE): + """ + Fetch the index container + """ + # Create the ".atomicapp" dir if it does not exist + if not os.path.exists(os.path.dirname(self.index_location)): + try: + os.makedirs(os.path.dirname(self.index_location)) + except OSError as exc: # Guard against race condition + if exc.errno != errno.EEXIST: + raise + + dh = DockerHandler() + dh.pull(index_image) + dh.extract_files(index_image, "/" + INDEX_NAME, self.index_location) + + def _load_index_file(self, index_file=INDEX_LOCATION): + """ + Load the index file. If it does not exist, fetch it. 
+ """ + # If the file/path does not exist, retrieve the index yaml + if not os.path.exists(index_file): + logger.warning("Couldn't load index file: %s", index_file) + logger.info("Retrieving index...") + self._fetch_index_container() + self.index = anymarkup.parse_file(index_file) + + def _nulecule_get_info(self, nulecule_dir): + """ + Get the required information in order to generate an index.yaml + """ + index_info = {} + nulecule = Nulecule.load_from_path( + nulecule_dir, nodeps=True) + index_info["id"] = nulecule.id + index_info["metadata"] = nulecule.metadata + index_info["specversion"] = nulecule.specversion + + if len(nulecule.components) == 0: + raise IndexException("Unable to load any Nulecule components from folder %s" % nulecule_dir) + + providers_set = set() + for component in nulecule.components: + if component.artifacts: + if len(providers_set) == 0: + providers_set = set(component.artifacts.keys()) + else: + providers_set = providers_set.intersection(set(component.artifacts.keys())) + + index_info["providers"] = list(providers_set) + return index_info diff --git a/atomicapp/nulecule/container.py b/atomicapp/nulecule/container.py index 152525ca..edf0dae5 100644 --- a/atomicapp/nulecule/container.py +++ b/atomicapp/nulecule/container.py @@ -135,6 +135,9 @@ def extract_files(self, image, source, dest): except subprocess.CalledProcessError as e: raise DockerException('Removing docker container failed: %s. \n%s' % (rm_cmd, e.output)) + # Set the proper permissions on the extracted folder + Utils.setFileOwnerGroup(dest) + def extract_nulecule_data(self, image, source, dest, update=False): """ Extract the Nulecule contents from a container into a destination diff --git a/tests/units/index/test_index.py b/tests/units/index/test_index.py new file mode 100644 index 00000000..c055d9cf --- /dev/null +++ b/tests/units/index/test_index.py @@ -0,0 +1,50 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. 
+ + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +import unittest +import mock +import os +import tempfile + +from atomicapp.index import Index + + +def mock_index_load_call(self, test): + self.index = {'location': '.', 'nulecules': [ + {'providers': ['docker'], 'id': 'test', 'metadata':{'appversion': '0.0.1', 'location': 'foo'}}]} + + +class TestIndex(unittest.TestCase): + + """ + Tests the index + """ + + # Tests listing the index with a patched self.index + @mock.patch("atomicapp.index.Index._load_index_file", mock_index_load_call) + def test_list(self): + a = Index() + a.list() + + # Test generation with current test_examples in cli + @mock.patch("atomicapp.index.Index._load_index_file", mock_index_load_call) + def test_generate(self): + self.tmpdir = tempfile.mkdtemp(prefix="atomicapp-generation-test", dir="/tmp") + a = Index() + a.generate("tests/units/cli/test_examples", os.path.join(self.tmpdir, "index.yaml")) From 4bcc684a6b1a2b0bbb3953a1a3de5bcfa74cabcc Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 26 Jul 2016 14:19:04 -0400 Subject: [PATCH 176/193] Remove lifecycle for updated CLI doc --- README.md | 9 +++--- docs/atomicapp_lifecycle.md | 41 ------------------------- docs/cli.md | 60 +++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 46 deletions(-) delete mode 100644 docs/atomicapp_lifecycle.md diff --git a/README.md b/README.md index 645485a0..7dae5790 
100644 --- a/README.md +++ b/README.md @@ -44,11 +44,10 @@ This README contains some high level overview information on Atomic App. The det 4. [Marathon](docs/providers/marathon/overview.md) 4. [CLI](docs/cli.md) 5. [Nulecule file](docs/nulecule.md) -6. [Atomic App lifecycle](docs/atomicapp_lifecycle.md) -7. [File handling](docs/file_handling.md) -8. [Specification coverage](docs/spec_coverage.md) -9. [Contributing](CONTRIBUTING.md) -10. [Dependencies](docs/requirements.md) +6. [File handling](docs/file_handling.md) +7. [Specification coverage](docs/spec_coverage.md) +8. [Contributing](CONTRIBUTING.md) +9. [Dependencies](docs/requirements.md) ## Getting started diff --git a/docs/atomicapp_lifecycle.md b/docs/atomicapp_lifecycle.md deleted file mode 100644 index 5cfd27fd..00000000 --- a/docs/atomicapp_lifecycle.md +++ /dev/null @@ -1,41 +0,0 @@ -Atomicapp Lifecycle Definition -============================== - -The Atomic App software allows for several actions to be applied to -specified applications. The four actions that exist today are briefly -described below. - -`genanswers` ------------- -Will download and combine artifacts from the target application in a -temporary directory and then take the generated sample answers.conf -file and populate it in the users working directory. The temporary -directory is then cleaned up. - -`fetch` -------- -Will download and combine artifacts from the target application and any -dependent applications including sample answers.conf file into a local -directory for inspection and/or modification. This is the same for all providers. - -`run` ------ -Will run an application. - -| Provider | Implementation | -| ------------- | -------------- | -| Docker | Run application containers on local machine. | -| Kubernetes | Run requested application in Kubernetes target environment. | -| Openshift | Run requested application in OpenShift target environment. | -| Marathon | Run requested application in Marathon target environment. 
| - -`stop` ------- -Will stop an application. - -| Provider | Implementation | -| ------------- | -------------- | -| Docker | Stop application containers on local machine. | -| Kubernetes | Stop requested application in Kubernetes target environment. | -| Openshift | Stop requested application in OpenShift target environment. | -| Marathon | Stop requested application in Marathon target environment. | diff --git a/docs/cli.md b/docs/cli.md index 97ecaf26..3d9f6021 100644 --- a/docs/cli.md +++ b/docs/cli.md @@ -1,5 +1,65 @@ # Atomic App Command Line Interface (CLI) +The Atomic App software allows for several actions to be applied to +specified applications. The four actions that exist today are briefly +described below. + +## CLI Commands + +`genanswers` +------------ +Will download and combine artifacts from the target application in a +temporary directory and then take the generated sample answers.conf +file and populate it in the users working directory. The temporary +directory is then cleaned up. + +`init` +---------- +Initialize a directory with an example Atomic App application using +the `centos/httpd` container image. This is a templated file structure including +Docker and Kubernetes artifact examples. + +`index` +--------- +Use an `index.yaml` file located within `~/.atomicapp/index.yaml` for outputting a +series of featured Nuleculized applications + +``` +ID VER PROVIDERS LOCATION +postgresql-atomicapp 1.0.0 {D,O,K} docker.io/projectatomic/postgresql-centos7-atomicapp +flask_redis_nulecule 0.0.1 {D,K} docker.io/projectatomic/flask-redis-centos7-atomicapp +redis-atomicapp 0.0.1 {D,O,K} docker.io/projectatomic/redis-centos7-atomicapp +... +``` + +`fetch` +------- +Will download and combine artifacts from the target application and any +dependent applications including sample answers.conf file into a local +directory for inspection and/or modification. This is the same for all providers. + +`run` +----- +Will run an application. 
+ +| Provider | Implementation | +| ------------- | -------------- | +| Docker | Run application containers on local machine. | +| Kubernetes | Run requested application in Kubernetes target environment. | +| Openshift | Run requested application in OpenShift target environment. | +| Marathon | Run requested application in Marathon target environment. | + +`stop` +------ +Will stop an application. + +| Provider | Implementation | +| ------------- | -------------- | +| Docker | Stop application containers on local machine. | +| Kubernetes | Stop requested application in Kubernetes target environment. | +| Openshift | Stop requested application in OpenShift target environment. | +| Marathon | Stop requested application in Marathon target environment. | + ## Providers Providers may be specified using the `answers.conf` file or the `--provider ` option. From e578efcf500fadef49936332c5aa325d6576b1db Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 27 Jul 2016 13:29:30 -0400 Subject: [PATCH 177/193] 0.6.2 Release --- CHANGELOG.md | 86 ++++++++++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 10 files changed, 95 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 33c3f09b..4379f883 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,89 @@ +## Atomic App 0.6.2 (07-27-2016) + +This release of Atomic App introduces the new `atomicapp index` command. + +We add this command in order to give a quick overview of all available featured and tested Nuleculized applications on [github.com/projectatomic/nulecule-library](https://github.com/projectatomic/nulecule-library). The ability to generate your own list is available as well with the `atomicapp index generate` command. 
+ +The main features of this release are: + +* Addition of the `atomicapp index` command +* Correct file permissions are now when extracting Nuleculized containers +* OpenShift connection issue bugfix + + +## `atomicapp index` + +This release adds the addition of the `atomicapp index` command. By using the `atomicapp index list` command, Atomic App will retrieve a container containing a valid `index.yml` and output all available Nulecule containers. This index can also be updated by using `atomicapp index update`. + + +**atomicapp index list** + +Outputs the list of available containers located at `~/.atomicapp/index.yml`. + +``` +▶ atomicapp index list +INFO :: Atomic App: 0.6.2 - Mode: Index +ID VER PROVIDERS LOCATION +postgresql-atomicapp 1.0.0 {D,O,K} docker.io/projectatomic/postgresql-centos7-atomicapp +flask_redis_nulecule 0.0.1 {D,K} docker.io/projectatomic/flask-redis-centos7-atomicapp +redis-atomicapp 0.0.1 {D,O,K} docker.io/projectatomic/redis-centos7-atomicapp +gocounter 0.0.1 {D,K} docker.io/projectatomic/gocounter-scratch-atomicapp +mariadb-atomicapp 1.0.0 {D,O,K} docker.io/projectatomic/mariadb-centos7-atomicapp +helloapache-app 0.0.1 {D,K,M} docker.io/projectatomic/helloapache +mongodb-atomicapp 1.0.0 {D,O,K} docker.io/projectatomic/mongodb-centos7-atomicapp +etherpad-app 0.0.1 {D,O,K} docker.io/projectatomic/etherpad-centos7-atomicapp +apache-centos7-atomicapp 0.0.1 {D,K,M} docker.io/projectatomic/apache-centos7-atomicapp +wordpress-atomicapp 2.0.0 {D,O,K} docker.io/projectatomic/wordpress-centos7-atomicapp +skydns-atomicapp 0.0.1 {K} docker.io/projectatomic/skydns-atomicapp +guestbookgo-atomicapp 0.0.1 {O,K} docker.io/projectatomic/guestbookgo-atomicapp +mariadb-app 0.0.1 {D,K} docker.io/projectatomic/mariadb-fedora-atomicapp +gitlab-atomicapp 1.2.0 {D,K} docker.io/projectatomic/gitlab-centos7-atomicapp +``` + +**atomicapp index update** + +Updates the `index.yml` file. 
+ +``` +▶ atomicapp index update +INFO :: Atomic App: 0.6.2 - Mode: Index +INFO :: Updating the index list +INFO :: Pulling latest index image... +INFO :: Skipping pulling docker image: projectatomic/nulecule-library +INFO :: Copying files from image projectatomic/nulecule-library:/index.yaml to /home/wikus/.atomicapp/index.yaml +INFO :: Index updated +``` + +**atomicapp index generate** + +Generates a valid `index.yml` file to use in listing all available containers. + +``` +▶ atomicapp index generate ./nulecule-library +INFO :: Atomic App: 0.6.1 - Mode: Index +INFO :: Generating index.yaml from ./nulecule-library +INFO :: index.yaml generated +``` + +``` +Abhishek (3): + incorrect_log_level + Show help when no arguments given. + raise DockerException for docker commands + +Charlie Drage (7): + Remove warning in regards to application-entity + Refactor extracting + Fixes connecting issues with OpenShift + Fix coverage + Modifies asking for parameters to show app name + When fetching or extracting, set the correct uid + guid + Add "index" command to Atomic App + +Shubham Minglani (1): + collect only atomicapp tests +``` + ## Atomic App 0.6.1 (07-06-2016) A minor release for Atomic App. diff --git a/Dockerfile b/Dockerfile index 76c35aaa..d9be5b8e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.1" +ENV ATOMICAPPVERSION="0.6.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index 76c35aaa..d9be5b8e 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. 
-ENV ATOMICAPPVERSION="0.6.1" +ENV ATOMICAPPVERSION="0.6.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index a45d0958..ba8c1ccb 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.1" +ENV ATOMICAPPVERSION="0.6.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index e57f7524..b8282668 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.1" +ENV ATOMICAPPVERSION="0.6.2" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index 50000e0a..cf3d3144 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.6.1" +ENV ATOMICAPPVERSION="0.6.2" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index 1a874573..9658e7c1 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.6.1" +ENV ATOMICAPPVERSION="0.6.2" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/README.md b/README.md index 645485a0..71156d82 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.6.1 +export RELEASE=0.6.2 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE sudo make install diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 7059f9e5..e3a6fd7a 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.6.1' +__ATOMICAPPVERSION__ = '0.6.2' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index 08aded2f..dadb1c05 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.6.1', + version='0.6.2', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', author_email='container-tools@redhat.com', From b6a03552ccbc18fa7615afa2ff97b291255363bc Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Fri, 29 Jul 2016 12:40:15 -0400 Subject: [PATCH 178/193] Merge Nulecule specification into Atomic App This commit merges the documentation located at github.com/projectatomic/nulecule into Atomic App. 
The changes included with the merge are: * Remove reference to developer tooling being added * Remove reference to example folder, now directs to nulecule-library * Remove redundant information (links to #nulecule irc channel) already present in root README.md * Move .json spec files to json folder * Clean up documentation links to link to the correct directory * Capitalize spec doc files, move them all to a single folder * Add link to NULECULE_FILE.md reference in main spec README.md * Add link to specification in the root README.md * Update description of Nulecule in the main README.md --- README.md | 7 +- docs/spec/GETTING_STARTED.md | 90 +++++ docs/spec/GLOSSARY.md | 13 + docs/spec/IMPLEMENTATION_GUIDE.md | 34 ++ docs/spec/LIFECYCLE.md | 54 +++ docs/spec/NULECULE_FILE.md | 375 ++++++++++++++++++ docs/spec/README.md | 128 ++++++ docs/spec/json/constraint.json | 23 ++ docs/spec/json/files | 2 + docs/spec/json/graph.json | 44 ++ docs/spec/json/license.json | 20 + docs/spec/json/metadata.json | 27 ++ docs/spec/json/param.json | 33 ++ docs/spec/json/provider.json | 63 +++ docs/spec/json/requirement.json | 12 + .../json/requirements/persistentvolume.json | 34 ++ docs/spec/json/schema.json | 36 ++ 17 files changed, 994 insertions(+), 1 deletion(-) create mode 100644 docs/spec/GETTING_STARTED.md create mode 100644 docs/spec/GLOSSARY.md create mode 100644 docs/spec/IMPLEMENTATION_GUIDE.md create mode 100644 docs/spec/LIFECYCLE.md create mode 100644 docs/spec/NULECULE_FILE.md create mode 100644 docs/spec/README.md create mode 100644 docs/spec/json/constraint.json create mode 100644 docs/spec/json/files create mode 100644 docs/spec/json/graph.json create mode 100644 docs/spec/json/license.json create mode 100644 docs/spec/json/metadata.json create mode 100644 docs/spec/json/param.json create mode 100644 docs/spec/json/provider.json create mode 100644 docs/spec/json/requirement.json create mode 100644 docs/spec/json/requirements/persistentvolume.json create mode 100644 
See the [Nulecule spec](docs/spec/README.md) for more information.
see if you can use existing templates or known good files.
+ +## Package +Only when everything is working are you ready to package the application. In this phase you'll be interacting with the [Nulecule specification](/spec). + +1. Download a [Nulecule template](/spec/examples/template) to start from. +1. In the Nulecule file, create one or more lists of things under `graph`. These represent the different components that make up your application. Names are arbitrary. Remember to verify your [YAML](http://codebeautify.org/yaml-validator) or [JSON](http://jsonlint.com/) frequently. + + 1. If your sources are remote, then all that is needed is a name and source. Remote sources are other Nulecule applications. + + graph: + - name: mydb + source: "docker://registry.example.com/some/database" + 1. If your sources are local, then provide a name and an artifacts key that will reference the source file(s). Each provider will have a key specifying the provider. For example, "docker" or "kubernetes". + + graph: + - name: myapp + artifacts: + kubernetes: + - file:///artifacts/kubernetes/pod.json + - file:///artifacts/kubernetes/service.json + +1. Put all of the provider files into a directory structure that corresponds to the provider artifacts section in the Nulecule file. Using the above example, `artifacts/kubernetes/.json`. The structure should resemble something like this: + + ├── Dockerfile + ├── artifacts + │   └── kubernetes + │   ├── pod.json + │   └── service.json + ├── Nulecule + └── README.md + +1. Consider the different ways your application may be deployed. There will likely be many parameters that need to be exposed at deployment. It's best to overdo this and provide defaults whenever possible. Go through the provider files and change any values. For example `database_pass: changeme` becomes `database_pass: $db_pass`. The name of the parameter is `db_pass`. These go into the params section of the Nulecule file under each provider. For example: + + + graph: + - mydb: + ... 
+ params: + - name: db_pass + description: database passphrase + - name: port + description: frontend TCP port + default: 80 + +1. Consider any additional information that is useful for deployment. Write a README file focused on deployment. Use a popular format such as Markdown or asciidoc so it can be read from a terminal window or rendered in a graphical interface. + * what does this application do? + * what provider environment(s) do I need to have setup before I deploy it? + * how do I verify that it has been deployed correctly? + +1. Add a metadata section, including a name, description and license information. Arbitrary metadata may also be added. Consider using keyword tags that may be useful for deployment management. For example: + + metadata: + name: My Cool App + appversion: 1.0.0 + description: Lorem ipsum dolor sit amet, consectetur adipiscing elit + license: + name: GPLv3 + url: http://www.example.com/license + tags: + - foo + - bar + +1. Before packaging up into a container, try running it in a test mode if one is provided by your Nulecule implementation. If you are using the [Atomic App reference implementation](https://github.com/projectatomic/atomicapp), use the `dry-run` and `verbose` options as follows: `atomicapp --dry-run --verbose run`. This should output the commands that will run. Common errors: + * provider files don't match the artifact relative path + * yaml or json is not valid + * missing parameter + +1. Once the Nulecule file and provider artifacts are working, package the application as a container. Typically, this means basing it off of an executable image provided by the implementation of Nulecule you are using. If you are using the [Atomic App reference implementation](https://github.com/projectatomic/atomicapp), the stock Dockerfile may be used, unaltered, unless you have a special use case. + + [sudo] docker build -t mydb-app . + +## Push & Pull +Push the image to a registry. 
needed to deploy onto an orchestration provider
+* __Provider__ - Plugin interface for specific deployment platform, an orchestration provider +* __Dependency Management__ - Refers to the ability to define order of deployment and managed dependencies including configurable parameters layered on top of stock container images, as well as the providers included in the application definition +* __Directed Graph__ - Declarative representation of dependencies in the context of a multi-container Nulecule application +* __Parameters__ - Variables that can have default values and can be overridden by answerfile.conf + diff --git a/docs/spec/IMPLEMENTATION_GUIDE.md b/docs/spec/IMPLEMENTATION_GUIDE.md new file mode 100644 index 00000000..e6226fb4 --- /dev/null +++ b/docs/spec/IMPLEMENTATION_GUIDE.md @@ -0,0 +1,34 @@ +# Implementation Guide + +This specification has been fully described in the [schema.json](/spec/schema.json) file. Developer and deployment tools should be implemented using this file. + +## Developer Tools + +Developer tooling helps application developers or designers get going quickly. Tools may be template-based or wizard-style tools, command line or graphical interface. When creating a tool for developers decide how much assistance you want to expose for the providers. Each provider has its own documentation and potential tooling but integrating provider features can be a big help to getting something working quickly. + +Wizard-style tools that generate the files for an application require these fields for input: + +* name +* description +* version (application) + +Each "application" component the user wants to define will compose the "graph" for the Nulecule. A component may either be a remote application or defined locally in the directory structure. + +**Remote applications** + +Remote applications are other nulecule container images, for example `someuser/mariadb-app`. No other information is needed. + +**Local applications** + +Local applications are defined by a directory in the graph. 
a JSON formatted machine readable version of the specification
Prior to each release there will be a draft version of the release.
This chapter will walk you through the steps to be taken to
the primary file defining the containerapp and its relationship to dependencies
integer | `integer` | `int32` | signed 32 bits
+requirements | [ [RequirementsObject](#requirementsObject) ] | **Optional** A list of requirements of this containerapp. + + +#### Metadata Object + +Metadata for the Container Application. + +##### Fields + +Field Name | Type | Description +---|:---:|--- +name | `string` | **Optional** A human readable name of the containerapp. +appversion | `string` | **Optional** The semantic version string of the Container Application. +description | `string` | **Optional** A human readable description of the Container Application. This may contain information for the deployer of the containerapp. +license | [License Object](#licenseObject) | **Optional** The license information for the containerapp. +arbitrary_data | `string` | **Optional** Arbitrary `key: value` pair(s) of metadata. May contain nested objects. + +##### Metadata Object Example: + +```yaml +name: myapp +appversion: 1.0.0 +description: description of myapp +foo: bar +othermetadata: + foo: bar + files: file://path/to/local/file +``` + +```js +{ + "name": "myapp", + "appversion": "1.0.0", + "description": "description of myapp", + "foo": "bar", + "othermetadata": { + "foo": "bar", + "files": "file://path/to/local/file" + } +} +``` + +#### License Object + +License information for the Container Application. + +##### Fields + +Field Name | Type | Description +---|:---:|--- +name | `string` | **Required.** The human readable license name used for the Container Application, no format imposed. +url | `string` | **Optional** A URL to the license used for the API. MUST be in the format of a URL. + +##### License Object Example: + + +```yaml +name: Apache 2.0 +url: http://www.apache.org/licenses/LICENSE-2.0.html +``` +```js +{ + "name": "GNU GPL, Version 3", + "url": "https://www.gnu.org/copyleft/gpl.html" +} +``` + + +#### Graph Object + +The graph is a list of items (containerapps) the Container Application depends on. 
that contain provider specific information
size | `integer` | **Required.** Size of the required storage.
+ +* URL: must be a URL string prepended by URI type such as `http://`, `https://`, `file:` (relative path) or `file://` (absolute path). URI type `file:` may be a single file or a directory path to multiple files. Directories must end with a trailing slash such as `file:relative/path/to/multiple/artifact/files/`. +* [SourceControlRepositoryObject](#repositoryObject) + +##### Artifacts Example: + +```yaml +--- +artifacts: # list of local or remote files or remote repository path to be processed by the provider selected at install-time + kubernetes: + - source: https://github.com/aweiteka/kube-files.git + tag: release-1 + openshift: + - file:relative/path/openshift/artifacts/ + - https://example.com/openshift/strategies.json + - inherit: + - kubernetes +``` +```js +{ + "artifacts": { + "kubernetes": [ + { + "source": "https://github.com/aweiteka/kube-files.git", + "path": "/artifacts/kubernetes/", + "tag": "release-1" + } + ], + "openshift": [ + "file:relative/path/openshift/artifacts/", + "https://example.com/openshift/strategies.json", + { + "inherit": [ + "kubernetes" + ] + } + ] + } +} +``` + +#### Source Control Repository Object + +Source Control Repository Object for artifact sources. + +##### Fields of a Source Control Repository Object + +Field Name | Type | Description +---|:---:|--- +source | `URL` | **Required** Source location of the source control repository. The source MUST be specified by a valid URL. +path | `string` | **Optional** The path to a specific artifact file or directory of artifact files. Default value is "/" which would reference all of the files in the repository. +type | `string` | **Optional** The source control type. Default value is "git". +branch | `string` | **Optional** The source control branch. Default value is "master". +tag | `string` | **Optional** The source control tag. + + +## Directory Layout + +Names of files that must be present are contained in the file `files` in +the root directory of the specification. 
These filenames support globbing. + +A filesystem layout of a typical app is this: +``` +├── Nulecule +├── Dockerfile +├── +│ ├── ... +│ └── +└── README.md +``` + +* `Nulecule`: Container Application definition +* `Dockerfile`: standard packaging for this containerapp +* ``: directories of provider-specific files referenced in a containerapp definition file + * `PROVIDER_FILES`: provider-specific files necessary for deploying to provider +* `README.md`: information for deploying this application + + +## README.md + +The README.md is the human-readable document. It describes the containerapp in enough detail so an operator can make parameterization and other deployment decisions. + +NOTE: This is optional. It is possible for some applications to be "self-describing" through well-written descriptions and input validation. + +## Good Practices + +An implementation of the Nulecule Specification should declare what providers it supports. This should be done by adding a Label to the container image, by adding a line to the Dockerfile: +``` +LABEL io.projectatomic.nulecule.providers "kubernetes,docker,openshift" +``` + +## Conventions + +A few conventions are used in the context of Container Applications. + +### Parameters for Providers + +Each provider in the [ArtifactsObject](#artifactsObject) of the [GraphObject](#graphObject) may correspond to a containerapp level [ParamsObject](#paramsObject). + +### Version Label + +The Dockerfile must carry a Label declaring the version of the specification that is used: +``` +LABEL io.projectatomic.nulecule.specversion 0.0.2 +``` diff --git a/docs/spec/README.md b/docs/spec/README.md new file mode 100644 index 00000000..7091e415 --- /dev/null +++ b/docs/spec/README.md @@ -0,0 +1,128 @@ +# Composite Container-based Application Specification + +`\ˈnü-li-ˌkyül\` (n.) a made-up word meaning ["the mother of all atomic particles"](http://simpsons.wikia.com/wiki/Made-up_words). 
+ +**Your installer for container-based applications.** Replace your shell script and deployment instructions with some metadata. + +**Change runtime parameters for different environments.** No need to edit files before deployment. Users can choose interactive or unattended deployment. Guide web interface users with parameter metadata to validate user input and provide descriptive help. + +**Bridge between Enterprise IT and PaaS** With pluggable orchestration providers you can package your application to run on OpenShift, Kubernetes, Docker Compose, Helios, Panamax, Docker Machine, etc. and allow the user to choose the target when deployed. + +**Compose applications from a catalog.** No need to re-package common services. Create composite applications by referencing other Nulecule-compliant apps. For example, adding a well-designed, orchestrated database is simply a reference to another container image. + +## Problem Statement +Currently there is no standard way of defining a multi-container application's configuration without distributing instructions and files to the end-user. Additionally, these files must be managed and distributed via different systems than the containers themselves. + +Containers in the OCI (Open Container Initiative) format derived from Docker offers a new approach for application packaging. OCI enables application-centric aggregate packaging, optimized for deployment into containers. However most applications will consist of multiple containers, which surfaces two issues: the relationships between containers need to be expressed in order to manage dependencies and orchestrate the deployment (e.g. set up network connections) with consideration of environmental factors, and this application-level meta-data needs to be distributed. OCI itself, however, stops at the individual container. 
Orchestration tools such as Kubernetes offer a generic description model for multi-container applications, however they do not define a transport model, nor a standard way to parameterize a generic template. The mindset of most, if not all, current container orchestration systems is to treat the aggregate, multi-container application as state of the cluster rather than an entity in its own right and therefore they regress beyond the portability that OCI introduced. This means that it's very easy to put an individual service into a Docker-style Registry, however there is no way to represent a full application at the distribution level - I can create a single MariaDB container, but not a MariaDB/Galera cluster or even a full application such as [Kolab](https://kolab.org/). So what is missing? A standard way to describe and package a multi-container application. + +## What is Nulecule? + +Nulecule defines a pattern and model for packaging complex multi-container applications and services, referencing all their dependencies, including orchestration metadata in a container image for building, deploying, monitoring, and active management. + +The Nulecule specification enables complex applications to be defined, packaged and distributed using standard container technologies. The resulting container includes dependencies, supports multiple orchestration providers, and has the ability to specify resource requirements. The Nulecule specification also supports the aggregation of multiple composite applications. The Nulecule specification is container and orchestration agnostic, enabling the use of any container and orchestration technology. + +**[Glossary of terms](GLOSSARY.md)** + +## Nulecule Specification Highlights + +* Application description and context maintained in a single container through extensible metadata +* Composable definition of complex applications through inheritance and composition of containers into a single, standards-based, portable description.
+* Simplified dependency management for the most complex applications through a directed graph to reflect relationships. +* Container and orchestration engine agnostic, enabling the use of any container technology and/or orchestration technology + +Detailed explanation on the **Nulecule** file-format is explained at [NULECULE_FILE.md](NULECULE_FILE.md). + +## “The Big Picture” + +![Alt Nulecule specification high-level story.](/docs//images/logo.png "Nulecule specification high-level story") + +## Deployment User Experience + +The Nulecule specification has been implemented in the [Atomic App reference implementation](https://github.com/projectatomic/atomicapp). Atomic App currently supports docker containers and kubernetes and docker orchestration providers. The [atomic command](https://github.com/projectatomic/atomic) is used to run the container that contains the Nulecule specification and the Atomic App implementation. + +This example is a single container application based on the centos/httpd image, but you can use your own. + +You may wish to run the Nulecule from an empty directory as it will copy the Nulecule files to the working directory for inspection every time it is run. + +### Option 1: Non-interactive defaults + +Run the image. It will automatically use kubernetes as the orchestration provider. This will become interactive and prompt for defaults if the Nulecule file doesn't provide defaults for all of the parameters. + +``` +[sudo] atomic run projectatomic/helloapache +``` + +### Option 2: Unattended + +1. Create the file `answers.conf` with these contents: + + This sets up the values for the two configurable parameters (image and hostport) and indicates that kubernetes should be the orchestration provider. + + [general] + provider = kubernetes + + [helloapache-app] + image = centos/httpd # optional: choose a different image + hostport = 80 # optional: choose a different port to expose +1. 
Run the application from the current working directory + + $ [sudo] atomic run projectatomic/helloapache + ... + helloapache + + +1. As an additional experiment, remove the kubernetes pod and change the provider to 'docker' and re-run the application to see it get deployed on native docker. + +### Option 3: Install and Run + +You may want to download the application, review the configuration and parameters as specified in the Nulecule file, and edit the answerfile before running the application. + +1. Download the application files using `atomic install` + + [sudo] atomic install projectatomic/helloapache + +1. Rename `answers.conf.sample` + + mv answers.conf.sample answers.conf + +1. Edit `answers.conf`, review files if desired and then run + + $ [sudo] atomic run projectatomic/helloapache + ... + helloapache + +## Test +Any of these approaches should create a kubernetes pod or a running docker container. + +With a kubernetes pod, once its state is "Running" curl the minion it's running on. + +``` +$ kubectl get pod helloapache +POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS +helloapache 172.17.0.8 helloapache centos/httpd 10.3.9.216/ name=helloapache Running +$ curl 10.3.9.216 + +``` + +If you test the docker provider, once the container is running, curl the port on your localhost. + +``` +$ curl localhost + +``` + +Additional examples that conform to the Nulecule spec can be found at [github.com/projectatomic/nulecule-library](https://github.com/projectatomic/nulecule-library). + +## Developer User Experience + +See the [Getting Started with Nulecule guide](GETTING_STARTED.md). + +## Implementations + +This is only a specification. Implementations may be written in any language. 
See [implementation guide](IMPLEMENTATION_GUIDE.md) + +**Reference implementation** https://github.com/projectatomic/atomicapp + +## Examples / Library + +For a library of examples conforming to the current reference implementation [atomicapp](https://github.com/projectatomic/atomicapp) please visit [github.com/projectatomic/nulecule-library](https://github.com/projectatomic/nulecule-library) diff --git a/docs/spec/json/constraint.json b/docs/spec/json/constraint.json new file mode 100644 index 00000000..6d7f7478 --- /dev/null +++ b/docs/spec/json/constraint.json @@ -0,0 +1,23 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Constraint", + "description": "Constraint to the parameter.", + "type": "array", + "items": { + "type": "object", + "required": [ "allowed_pattern", "description" ], + "properties": { + "allowed_pattern": { + "description": "A regular expression pattern.", + "type": "string", + "default": "null" + }, + "description": { + "description": "A human readable description of the constraint.", + "type": "string", + "default": "null" + } + } + } +} diff --git a/docs/spec/json/files b/docs/spec/json/files new file mode 100644 index 00000000..9c3dd392 --- /dev/null +++ b/docs/spec/json/files @@ -0,0 +1,2 @@ +Dockerfile +Nulecule diff --git a/docs/spec/json/graph.json b/docs/spec/json/graph.json new file mode 100644 index 00000000..a53949a8 --- /dev/null +++ b/docs/spec/json/graph.json @@ -0,0 +1,44 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Graph", + "description": "A list of components that constitute this Container Application. Components are either container-based services or other Container Applications. Components that are container-based services are specified as a collection of artifacts for providers that can accept parameters specified by the deployer. These artifacts are references to files located in local sub-directories.
Components that are other Container Applications are specified as URLs.", + "type": "array", + "items" : { + "$ref": "#/definitions/component" + }, + + "definitions": { + "component": { + "description": "ID of a component", + "type": "object", + "required": [ "name" ], + "properties": { + "name": { + "description": "The name of the component.", + "type": "string", + "default": "null" + }, + "source": { + "description": "If the component is another Container Application, source MUST be a valid URL to the source location. If source is present, all other fields SHALL be ignored.", + "type": "string", + "default": "null" + }, + "params": { + "description": "A list of ParamsObject that contain information to be used by providers in conjunction with their ArtifactsObject. If params is present, the source field SHALL be ignored.", + "type": "array", + "items": { + "$ref": "file:param.json" + } + }, + "artifacts": { + "description": "A list of ArtifactsObject that contain provider specific information. If artifacts is present, the source field SHALL be ignored.", + "type": "object", + "additionalProperties": { + "$ref": "file:provider.json" + } + } + } + } + } +} diff --git a/docs/spec/json/license.json b/docs/spec/json/license.json new file mode 100644 index 00000000..1fae538e --- /dev/null +++ b/docs/spec/json/license.json @@ -0,0 +1,20 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "License", + "description": "License information for the Container Application.", + "type": "object", + "required": [ "name" ], + "properties": { + "name": { + "description": "The human readable license name used for the Container Application, no format imposed.", + "type": "string", + "default": "null" + }, + "url": { + "description": "A URL to the license used for the API. 
MUST be in the format of a URL.", + "type": "string", + "default": "null" + } + } +} diff --git a/docs/spec/json/metadata.json b/docs/spec/json/metadata.json new file mode 100644 index 00000000..30c3388c --- /dev/null +++ b/docs/spec/json/metadata.json @@ -0,0 +1,27 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Metadata", + "description": "An object holding optional metadata related to the Container Application. This will typically include the container application name, version, description, license information and other human readable information.", + "type": "object", + "properties": { + "name": { + "description": "A human readable name of the containerapp.", + "type": "string", + "default": "null" + }, + "appversion": { + "description": "The semantic version string of the Container Application.", + "type": "string", + "default": "null" + }, + "description": { + "description": "A human readable description of the Container Application. This may contain information for the deployer of the containerapp.", + "type": "string", + "default": "null" + }, + "license": { + "$ref": "file:license.json" + } + } +} diff --git a/docs/spec/json/param.json b/docs/spec/json/param.json new file mode 100644 index 00000000..18f9811b --- /dev/null +++ b/docs/spec/json/param.json @@ -0,0 +1,33 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Parameter", + "description": "Name of the parameter as used in artifacts", + "type": "object", + "required": [ "name", "description" ], + "properties": { + "name": { + "description": "", + "type": "string", + "default": "null" + }, + "description": { + "description": "A human readable description of the parameter.", + "type": "string", + "default": "null" + }, + "constraints": { + "$ref": "file:constraint.json" + }, + "default": { + "description": "An optional default value for the parameter.", + "type": "string", + "default": "null" + }, + "hidden": { + "description": "An optional boolean 
signifying the parameter should be obscured when displayed.", + "type": "boolean", + "default": false + } + } +} diff --git a/docs/spec/json/provider.json b/docs/spec/json/provider.json new file mode 100644 index 00000000..99399758 --- /dev/null +++ b/docs/spec/json/provider.json @@ -0,0 +1,63 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Provider", + "description": "A provider is a deployment platform or orchestrator.", + "type": "array", + "items": { + "oneOf": [ { "$ref": "#/definitions/path" }, { "$ref": "#/definitions/repository" }, { "$ref": "#/definitions/inheritance" } ] + }, + + "definitions": { + "path": { + "description": "Path to the artifact", + "type": "string", + "default": "null" + }, + "repository": { + "type": "object", + "properties": { + "source": { + "name": "source", + "description": "Source location of the source control repository. The source MUST be specified by a valid URL.", + "type": "string", + "default": "null" + }, + "path": { + "name": "path", + "description": "The path to a specific artifact file or directory of artifact files. Default value is '/' which would reference all of the files in the repository.", + "type": "string", + "default": "/" + }, + "type": { + "name": "type", + "description": "The source control type. Default value is 'git'.", + "type": "string", + "default": "git" + }, + "branch": { + "name": "branch", + "description": "The source control branch. 
Default value is 'master'.", + "type": "string", + "default": "master" + }, + "tag": { + "name": "tag", + "description": "The source control tag.", + "type": "string", + "default": "null" + } + } + }, + "inheritance": { + "type": "object", + "properties": { + "inherit": { + "name": "inherit", + "description": "List of components whose artifacts will be added to the list of artifacts for the provider.", + "type": "array" + } + } + } + } +} diff --git a/docs/spec/json/requirement.json b/docs/spec/json/requirement.json new file mode 100644 index 00000000..fa97fe32 --- /dev/null +++ b/docs/spec/json/requirement.json @@ -0,0 +1,12 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Requirements", + "description": "Requirement objects", + "type": "array", + "items": { + "oneOf": [ + { "$ref": "file:requirements/persistentvolume.json" } + ] + } +} diff --git a/docs/spec/json/requirements/persistentvolume.json b/docs/spec/json/requirements/persistentvolume.json new file mode 100644 index 00000000..70b65a17 --- /dev/null +++ b/docs/spec/json/requirements/persistentvolume.json @@ -0,0 +1,34 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "PersistentVolume", + "description": "This describes a requirement for persistent, read-only or read-write storage that should be available to the Container Application at runtime. 
The name of this object MUST be 'persistentVolume'", + "type": "object", + "properties": { + "persistentVolume": { + "type": "object", + "required": [ "name", "accessMode", "size" ], + "properties": { + "name": { + "description": "A name associated with the storage requirement.", + "type": "string", + "default": "null" + }, + "accessMode": { + "description": "The access mode, read-write or read-only, for the storage", + "type": "string", + "enum": [ + "ReadWrite", + "ReadOnly" + ] + }, + "size": { + "description": "Size of the storage.", + "type": "number", + "minimum": 0 + } + } + } + }, + "additionalProperties": false +} diff --git a/docs/spec/json/schema.json b/docs/spec/json/schema.json new file mode 100644 index 00000000..e7e2d56e --- /dev/null +++ b/docs/spec/json/schema.json @@ -0,0 +1,36 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "Schema", + "version": "0.0.2", + "description": "The Container Application specification defines a set of configuration files that describe a Container Application. A Container Application is composed of a set of container-based services and/or other Container Applications that together provide an application. These configuration files can be used by tools to deploy the application in an automated way or with customizations as specified by the deployer. Developers tools can generate most of the required files and utilities, such as testing tools, can take advantage of these files.", + "required": [ "id", "specversion", "graph" ], + "properties": { + "id": { + "description": "The machine readable id of the Container Application.", + "type": "string", + "default": "null" + }, + "specversion": { + "description": "The semantic version string of the Container Application Specification used to describe the app. 
The value SHOULD be '0.0.2'.", + "type": "string", + "default": "0.0.2" + }, + "metadata": { + "$ref": "file:metadata.json" + }, + "params": { + "description": "A list of ParamsObject that contain information in the global context of the application, accessible to its child graph items.", + "type": "array", + "items": { + "$ref": "file:param.json" + } + }, + "graph": { + "$ref": "file:graph.json" + }, + "requirements": { + "$ref": "file:requirement.json" + } + } +} From eb419a2f760388be44aa64c792338fc539a67b3c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 10 Aug 2016 11:15:31 -0400 Subject: [PATCH 179/193] Formatting error with anymarkup and genanswers Seems that self.answers_format isn't passed in correctly to _write_answers when using `atomicapp genanswers`. Resulting in an error where anymarkup believes the format is 'None'. This commit explicitly passes self.answers_format which defaults to 'ini' if nothing is passed. Fixes https://github.com/projectatomic/atomicapp/issues/785 --- atomicapp/nulecule/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index 82caebe5..a27577bb 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -249,7 +249,7 @@ def genanswers(self, dryrun=False, answers_format=None, **kwargs): # Get answers and write them out to answers.conf in cwd answers = self._get_runtime_answers( self.nulecule.config, None) - self._write_answers(answers_file, answers, answers_format) + self._write_answers(answers_file, answers, self.answers_format) def fetch(self, nodeps=False, update=False, dryrun=False, answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): @@ -416,6 +416,7 @@ def _write_answers(self, path, answers, answers_format): logger.debug("Writing answers to file.") logger.debug("FILE: %s", path) logger.debug("ANSWERS: %s", answers) + logger.debug("ANSWERS FORMAT: %s", answers_format) anymarkup.serialize_file(answers, path,
format=answers_format) # Make sure that the permission of the file is set to the current user From 1a37ab82d5b173b5f3b74b8dc42ab22fbdcbf03c Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 10 Aug 2016 11:41:43 -0400 Subject: [PATCH 180/193] Openshift to KubeShift conversion and improvements This commit converts the current OpenShift provider to use the 'KubeShift' library. Allowing OpenShift to use the universal library for both Kubernetes and OpenShift, decreasing technical debt in learning how each provider API communicates. Tests are also added which cover a large majority of test scenarios for the KubeShift library. Included is a new pytest plugin which allows mocking an example HTTP server. --- atomicapp/constants.py | 1 + atomicapp/providers/kubernetes.py | 2 + atomicapp/providers/lib/kubeshift/client.py | 4 + atomicapp/providers/lib/kubeshift/kubebase.py | 4 +- .../providers/lib/kubeshift/kubeconfig.py | 3 + .../providers/lib/kubeshift/kubernetes.py | 10 +- .../providers/lib/kubeshift/openshift.py | 386 ++++++++ atomicapp/providers/openshift.py | 869 ++++-------------- test-requirements.txt | 1 + .../kubeshift/external/example_kubeconfig | 17 + tests/units/kubeshift/test_client.py | 72 ++ tests/units/kubeshift/test_kubebase.py | 93 ++ .../test_kubeconfig.py | 29 + tests/units/kubeshift/test_kubernetes.py | 82 ++ tests/units/kubeshift/test_openshift.py | 121 +++ .../providers/test_openshift_provider.py | 227 ----- 16 files changed, 994 insertions(+), 927 deletions(-) create mode 100644 atomicapp/providers/lib/kubeshift/openshift.py create mode 100644 tests/units/kubeshift/external/example_kubeconfig create mode 100644 tests/units/kubeshift/test_client.py create mode 100644 tests/units/kubeshift/test_kubebase.py rename tests/units/{nulecule => kubeshift}/test_kubeconfig.py (84%) create mode 100644 tests/units/kubeshift/test_kubernetes.py create mode 100644 tests/units/kubeshift/test_openshift.py delete mode 100644 
tests/units/providers/test_openshift_provider.py diff --git a/atomicapp/constants.py b/atomicapp/constants.py index e3a6fd7a..60f4e94c 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -76,6 +76,7 @@ PROVIDER_CA_KEY = "provider-cafile" K8S_DEFAULT_API = "http://localhost:8080" +OC_DEFAULT_API = "http://localhost:8443" # Persistent Storage Formats PERSISTENT_STORAGE_FORMAT = ["ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany"] diff --git a/atomicapp/providers/kubernetes.py b/atomicapp/providers/kubernetes.py index 92c2ed2e..a2109ba8 100644 --- a/atomicapp/providers/kubernetes.py +++ b/atomicapp/providers/kubernetes.py @@ -153,11 +153,13 @@ def _from_required_params(self): self._check_required_params(exception=True) paramdict = self._build_param_dict() + logger.debug("Building from required params") # Generate the configuration from the paramters config = KubeConfig().from_params(api=paramdict[PROVIDER_API_KEY], auth=paramdict[PROVIDER_AUTH_KEY], ca=paramdict[PROVIDER_CA_KEY], verify=paramdict[PROVIDER_TLS_VERIFY_KEY]) + logger.debug("Passed configuration for .kube/config %s" % config) return config def _check_namespaces(self): diff --git a/atomicapp/providers/lib/kubeshift/client.py b/atomicapp/providers/lib/kubeshift/client.py index ff59b8b2..2ce47865 100644 --- a/atomicapp/providers/lib/kubeshift/client.py +++ b/atomicapp/providers/lib/kubeshift/client.py @@ -18,6 +18,7 @@ """ from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient +from atomicapp.providers.lib.kubeshift.openshift import KubeOpenshiftClient from atomicapp.providers.lib.kubeshift.exceptions import KubeClientError from atomicapp.constants import LOGGER_DEFAULT import logging @@ -41,6 +42,9 @@ def __init__(self, config, provider): if provider is "kubernetes": self.connection = KubeKubernetesClient(config) logger.debug("Using Kubernetes Provider KubeClient library") + elif provider is "openshift": + self.connection = KubeOpenshiftClient(config) + 
logger.debug("Using OpenShift Provider KubeClient library") else: raise KubeClientError("No provider by that name.") diff --git a/atomicapp/providers/lib/kubeshift/kubebase.py b/atomicapp/providers/lib/kubeshift/kubebase.py index 2eee9f5f..081354ea 100644 --- a/atomicapp/providers/lib/kubeshift/kubebase.py +++ b/atomicapp/providers/lib/kubeshift/kubebase.py @@ -178,7 +178,7 @@ def get_resources(self, url): def test_connection(self, url): self.api.request("get", url) - logger.debug("Connection successfully tested") + logger.debug("Connection successfully tested on URL %s" % url) @staticmethod def cert_file(data, key): @@ -337,7 +337,7 @@ def _request_method(self, method, url, data): data (object): object of the data that is being passed (will be converted to json) ''' if method.lower() == "get": - res = self.api.get(url) + res = self.api.get(url, json=data) elif method.lower() == "post": res = self.api.post(url, json=data) elif method.lower() == "put": diff --git a/atomicapp/providers/lib/kubeshift/kubeconfig.py b/atomicapp/providers/lib/kubeshift/kubeconfig.py index 778d00d8..8a4b1beb 100644 --- a/atomicapp/providers/lib/kubeshift/kubeconfig.py +++ b/atomicapp/providers/lib/kubeshift/kubeconfig.py @@ -75,6 +75,9 @@ def from_params(api=None, auth=None, ca=None, verify=True): if ca: config['clusters'][0]['cluster']['certificate-authority'] = ca + + if verify is False: + config['clusters'][0]['cluster']['insecure-skip-tls-verify'] = 'true' return config @staticmethod diff --git a/atomicapp/providers/lib/kubeshift/kubernetes.py b/atomicapp/providers/lib/kubeshift/kubernetes.py index 6364ce15..5bbbc7a2 100644 --- a/atomicapp/providers/lib/kubeshift/kubernetes.py +++ b/atomicapp/providers/lib/kubeshift/kubernetes.py @@ -17,11 +17,11 @@ along with Atomic App. If not, see . 
""" -from urlparse import urljoin -from urllib import urlencode import logging import re +from urlparse import urljoin +from urllib import urlencode from atomicapp.constants import LOGGER_DEFAULT from atomicapp.providers.lib.kubeshift.kubebase import KubeBase from atomicapp.providers.lib.kubeshift.exceptions import (KubeKubernetesError) @@ -39,7 +39,7 @@ def __init__(self, config): ''' - # Pass in the configuration data (.kube/config object) to the KubeBase + # The configuration data passed in will be .kube/config data, so process is accordingly. self.api = KubeBase(config) # Check the API url @@ -75,7 +75,9 @@ def create(self, obj, namespace): ''' name = self._get_metadata_name(obj) kind, url = self._generate_kurl(obj, namespace) + self.api.request("post", url, data=obj) + logger.info("%s '%s' successfully created", kind.capitalize(), name) def delete(self, obj, namespace): @@ -99,8 +101,8 @@ def delete(self, obj, namespace): if kind in ['rcs', 'replicationcontrollers']: self.scale(obj, namespace) - self.api.request("delete", url) + logger.info("%s '%s' successfully deleted", kind.capitalize(), name) def scale(self, obj, namespace, replicas=0): diff --git a/atomicapp/providers/lib/kubeshift/openshift.py b/atomicapp/providers/lib/kubeshift/openshift.py new file mode 100644 index 00000000..b5cfa43d --- /dev/null +++ b/atomicapp/providers/lib/kubeshift/openshift.py @@ -0,0 +1,386 @@ +""" + Copyright 2014-2016 Red Hat, Inc. + + This file is part of Atomic App. + + Atomic App is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + Atomic App is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public License + along with Atomic App. If not, see . +""" + +import datetime +import time +import os +import tarfile +import logging +import re + +from urlparse import urljoin +from urllib import urlencode +from atomicapp.utils import Utils +from atomicapp.constants import LOGGER_DEFAULT +from atomicapp.providers.lib.kubeshift.kubebase import KubeBase +from atomicapp.providers.lib.kubeshift.exceptions import KubeOpenshiftError + +logger = logging.getLogger(LOGGER_DEFAULT) + + +class KubeOpenshiftClient(object): + + def __init__(self, config): + ''' + + Args: + config (obj): Object of the configuration data + + ''' + + # The configuration data passed in will be .kube/config data, so process is accordingly. + self.api = KubeBase(config) + + # Check the API url + url = self.api.cluster['server'] + if not re.match('(?:http|https)://', url): + raise KubeOpenshiftError("OpenShift API URL does not include HTTP or HTTPS") + + # Gather what end-points we will be using + self.k8s_api = urljoin(url, "api/v1/") + self.oc_api = urljoin(url, "oapi/v1/") + + # Test the connection before proceeding + self.api.test_connection(self.k8s_api) + self.api.test_connection(self.oc_api) + + # Gather the resource names which will be used for the 'kind' API calls + self.oc_api_resources = self.api.get_resources(self.oc_api) + + # Gather what API groups are available + # TODO: refactor this (create function in kubebase.py) + self.k8s_api_resources = {} + self.k8s_api_resources['v1'] = self.api.get_resources(self.k8s_api) + self.k8s_apis = urljoin(url, "apis/") + + # Gather the group names from which resource names will be derived + self.k8s_api_groups = self.api.get_groups(self.k8s_apis) + + for (name, versions) in self.k8s_api_groups: + for version in versions: + api = "%s/%s" % (name, version) + url = urljoin(self.k8s_apis, api) + self.k8s_api_resources[api] = self.api.get_resources(url) + + def create(self, obj, namespace): + 
''' + Create an object from the Kubernetes cluster + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace) + + # Must process through each object if kind is a 'template' + if kind is "template": + self._process_template(obj, namespace, "create") + else: + self.api.request("post", url, data=obj) + + logger.info("%s '%s' successfully created", kind.capitalize(), name) + + def delete(self, obj, namespace): + ''' + Delete an object from the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + + *Note* + Replication controllers must scale to 0 in order to delete pods. + Kubernetes 1.3 will implement server-side cascading deletion, but + until then, it's mandatory to scale to 0 + https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/garbage-collection.md + + ''' + name = self._get_metadata_name(obj) + kind, url = self._generate_kurl(obj, namespace, name) + + # Must process through each object if kind is a 'template' + if kind is "template": + self._process_template(obj, namespace, "create") + else: + if kind in ['rcs', 'replicationcontrollers']: + self.scale(obj, namespace) + self.api.request("delete", url) + + logger.info("%s '%s' successfully deleted", kind.capitalize(), name) + + def scale(self, obj, namespace, replicas=0): + ''' + By default we scale back down to 0. 
This function takes an object and scales said + object down to a specified value on the Kubernetes cluster + + Args: + obj (object): Object of the artifact being modified + namesapce (str): Namespace of the kubernetes cluster to be used + replicates (int): Default 0, size of the amount of replicas to scale + ''' + patch = [{"op": "replace", + "path": "/spec/replicas", + "value": replicas}] + name = self._get_metadata_name(obj) + _, url = self._generate_kurl(obj, namespace, name) + self.api.request("patch", url, data=patch) + logger.info("'%s' successfully scaled to %s", name, replicas) + + def namespaces(self): + ''' + Gathers a list of namespaces on the Kubernetes cluster + ''' + url = urljoin(self.oc_api, "projects") + ns = self.api.request("get", url) + return ns['items'] + + def _generate_kurl(self, obj, namespace, name=None, params=None): + ''' + Generate the required URL by extracting the 'kind' from the + object as well as the namespace. + + Args: + obj (obj): Object of the data being passed + namespace (str): k8s namespace + name (str): Name of the object being passed + params (arr): Extra params passed such as timeout=300 + + Returns: + kind (str): The kind used + url (str): The URL to be used / artifact URL + ''' + if 'apiVersion' not in obj.keys(): + raise KubeOpenshiftError("Error processing object. There is no apiVersion") + + if 'kind' not in obj.keys(): + raise KubeOpenshiftError("Error processing object. 
There is no kind") + + api_version = obj['apiVersion'] + + kind = obj['kind'] + + resource = KubeBase.kind_to_resource_name(kind) + + if resource in self.k8s_api_resources[api_version]: + if api_version == 'v1': + url = self.k8s_api + else: + url = urljoin(self.k8s_apis, "%s/" % api_version) + else: + raise KubeOpenshiftError("No kind by that name: %s" % kind) + + url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource)) + + if name: + url = urljoin(url, name) + + if params: + url = urljoin(url, "?%s" % urlencode(params)) + + return (resource, url) + + @staticmethod + def _get_metadata_name(obj): + ''' + This looks at the object and grabs the metadata name of said object + + Args: + obj (object): Object file of the artifact + + Returns: + name (str): Returns the metadata name of the object + ''' + if "metadata" in obj and \ + "name" in obj["metadata"]: + name = obj["metadata"]["name"] + else: + raise KubeOpenshiftError("Cannot undeploy. There is no" + " name in object metadata " + "object=%s" % obj) + return name + + # OPENSHIFT-SPECIFIC FUNCTIONS + + def extract(self, image, src, dest, namespace, update=True): + """ + Extract contents of a container image from 'src' in container + to 'dest' in host. 
+ + Args: + image (str): Name of container image + src (str): Source path in container + dest (str): Destination path in host + update (bool): Update existing destination, if True + """ + if os.path.exists(dest) and not update: + return + cleaned_image_name = Utils.sanitizeName(image) + pod_name = '{}-{}'.format(cleaned_image_name, Utils.getUniqueUUID()) + container_name = cleaned_image_name + + # Pull (if needed) image and bring up a container from it + # with 'sleep 3600' entrypoint, just to extract content from it + artifact = { + 'apiVersion': 'v1', + 'kind': 'Pod', + 'metadata': { + 'name': pod_name + }, + 'spec': { + 'containers': [ + { + 'image': image, + 'command': [ + 'sleep', + '3600' + ], + 'imagePullPolicy': 'IfNotPresent', + 'name': container_name + } + ], + 'restartPolicy': 'Always' + } + } + + self.create(artifact, namespace) + try: + self._wait_till_pod_runs(namespace, pod_name, timeout=300) + + # Archive content from the container and dump it to tmpfile + tmpfile = '/tmp/atomicapp-{pod}.tar.gz'.format(pod=pod_name) + + self._execute( + namespace, pod_name, container_name, + 'tar -cz --directory {} ./'.format('/' + src), + outfile=tmpfile + ) + finally: + # Delete created pod + self.delete(artifact, namespace) + + # Extract archive data + tar = tarfile.open(tmpfile, 'r:gz') + tar.extractall(dest) + + def _execute(self, namespace, pod, container, command, + outfile=None): + """ + Execute a command in a container in an Openshift pod. 
+ + Args: + namespace (str): Namespace + pod (str): Pod name + container (str): Container name inside pod + command (str): Command to execute + outfile (str): Path to output file where results should be dumped + + Returns: + Command output (str) or None in case results dumped to output file + """ + args = { + 'token': self.api.token, + 'namespace': namespace, + 'pod': pod, + 'container': container, + 'command': ''.join(['command={}&'.format(word) for word in command.split()]) + } + url = urljoin( + self.k8s_api, + 'namespaces/{namespace}/pods/{pod}/exec?' + 'access_token={token}&container={container}&' + '{command}stdout=1&stdin=0&tty=0'.format(**args)) + + return self.api.websocket_request(url, outfile) + + def _process_template(self, obj, namespace, method): + _, url = self._generate_kurl(obj, namespace) + data = self.api.request("post", url, data=obj) + + if method is "create": + for o in data[0]['objects']: + name = self._get_metadata_name(o) + _, object_url = self._generate_kurl(o, namespace) + self.api.request("post", object_url, data=o) + logger.debug("Created template object: %s" % name) + elif method is "delete": + for o in data[0]['objects']: + name = self._get_metadata_name(o) + _, object_url = self._generate_kurl(o, namespace, name) + self.api.request("delete", object_url) + logger.debug("Deleted template object: %s" % name) + else: + raise KubeOpenshiftError("No method by that name to process template") + + logger.debug("Processed object template successfully") + + def _get_pod_status(self, namespace, pod): + """ + Get pod status. + + Args: + namespace (str): Openshift namespace + pod (str): Pod name + + Returns: + Status of pod (str) + + Raises: + ProviderFailedException when unable to fetch Pod status. + """ + args = { + 'namespace': namespace, + 'pod': pod, + 'access_token': self.api.token + } + url = urljoin( + self.k8s_api, + 'namespaces/{namespace}/pods/{pod}?' 
+ 'access_token={access_token}'.format(**args)) + data = self.api.request("get", url) + + return data['status']['phase'].lower() + + def _wait_till_pod_runs(self, namespace, pod, timeout=300): + """ + Wait till pod runs, with a timeout. + + Args: + namespace (str): Openshift namespace + pod (str): Pod name + timeout (int): Timeout in seconds. + + Raises: + ProviderFailedException on timeout or when the pod goes to + failed state. + """ + now = datetime.datetime.now() + timeout_delta = datetime.timedelta(seconds=timeout) + while datetime.datetime.now() - now < timeout_delta: + status = self.oc.get_pod_status(namespace, pod) + if status == 'running': + break + elif status == 'failed': + raise KubeOpenshiftError( + 'Unable to run pod for extracting content: ' + '{namespace}/{pod}'.format(namespace=namespace, + pod=pod)) + time.sleep(1) + if status != 'running': + raise KubeOpenshiftError( + 'Timed out to extract content from pod: ' + '{namespace}/{pod}'.format(namespace=namespace, + pod=pod)) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index bac6f3a4..21e944e3 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -17,473 +17,186 @@ along with Atomic App. If not, see . 
""" -import datetime -import os import anymarkup -import ssl -import tarfile -import time -from urlparse import urljoin -from urllib import urlencode -from collections import OrderedDict -import websocket +import logging +import os -from atomicapp.utils import Utils -from atomicapp.plugin import Provider, ProviderFailedException from atomicapp.constants import (PROVIDER_AUTH_KEY, ANSWERS_FILE, DEFAULT_NAMESPACE, LOGGER_DEFAULT, - NAMESPACE_KEY, PROVIDER_API_KEY, - PROVIDER_TLS_VERIFY_KEY, PROVIDER_CA_KEY, - OPENSHIFT_POD_CA_FILE) + PROVIDER_TLS_VERIFY_KEY, + LOGGER_COCKPIT, + OC_DEFAULT_API) +from atomicapp.plugin import Provider, ProviderFailedException + from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig -from requests.exceptions import SSLError -import logging +from atomicapp.providers.lib.kubeshift.client import Client +from atomicapp.utils import Utils +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) logger = logging.getLogger(LOGGER_DEFAULT) -class OpenshiftClient(object): - - def __init__(self, providerapi, access_token, - provider_tls_verify, provider_ca): - self.providerapi = providerapi - self.access_token = access_token - self.provider_tls_verify = provider_tls_verify - self.provider_ca = provider_ca - - # construct full urls for api endpoints - self.kubernetes_api = urljoin(self.providerapi, "api/v1/") - self.openshift_api = urljoin(self.providerapi, "oapi/v1/") - - logger.debug("kubernetes_api = %s", self.kubernetes_api) - logger.debug("openshift_api = %s", self.openshift_api) - - def test_connection(self): - """ - Test connection to OpenShift server - - Raises: - ProviderFailedException - Invalid SSL/TLS certificate - """ - logger.debug("Testing connection to OpenShift server") - - if self.provider_ca and not os.path.exists(self.provider_ca): - raise ProviderFailedException("Unable to find CA path %s" - % self.provider_ca) - - try: - (status_code, return_data) = \ - Utils.make_rest_request("get", - self.openshift_api, - 
verify=self._requests_tls_verify(), - headers={'Authorization': "Bearer %s" % self.access_token}) - except SSLError as e: - if self.provider_tls_verify: - msg = "SSL/TLS ERROR: invalid certificate. " \ - "Add certificate of correct Certificate Authority providing" \ - " `%s` or you can disable SSL/TLS verification by `%s=False`" \ - % (PROVIDER_CA_KEY, PROVIDER_TLS_VERIFY_KEY) - raise ProviderFailedException(msg) - else: - # this shouldn't happen - raise ProviderFailedException(e.message) - - def get_oapi_resources(self): - """ - Get Openshift API resources - """ - # get list of supported resources for each api - (status_code, return_data) = \ - Utils.make_rest_request("get", - self.openshift_api, - verify=self._requests_tls_verify(), - headers={'Authorization': "Bearer %s" % self.access_token}) - if status_code == 200: - oapi_resources = return_data["resources"] - else: - raise ProviderFailedException("Cannot get OpenShift resource list") - - # convert resources list of dicts to list of names - oapi_resources = [res['name'] for res in oapi_resources] - - logger.debug("Openshift resources %s", oapi_resources) - - return oapi_resources - - def get_kapi_resources(self): - """ - Get kubernetes API resources - """ - # get list of supported resources for each api - (status_code, return_data) = \ - Utils.make_rest_request("get", - self.kubernetes_api, - verify=self._requests_tls_verify(), - headers={'Authorization': "Bearer %s" % self.access_token}) - if status_code == 200: - kapi_resources = return_data["resources"] - else: - raise ProviderFailedException("Cannot get Kubernetes resource list") - - # convert resources list of dicts to list of names - kapi_resources = [res['name'] for res in kapi_resources] - - logger.debug("Kubernetes resources %s", kapi_resources) - - return kapi_resources - - def deploy(self, url, artifact): - (status_code, return_data) = \ - Utils.make_rest_request("post", - url, - verify=self._requests_tls_verify(), - data=artifact, - 
headers={'Authorization': "Bearer %s" % self.access_token}) - if status_code == 201: - logger.info("Object %s successfully deployed.", - artifact['metadata']['name']) - else: - msg = "%s %s" % (status_code, return_data) - logger.error(msg) - # TODO: remove running components (issue: #428) - raise ProviderFailedException(msg) - - def delete(self, url): - """ - Delete object on given url - - Args: - url (str): full url for artifact - - Raises: - ProviderFailedException: error when calling remote api - """ - (status_code, return_data) = \ - Utils.make_rest_request("delete", - url, - verify=self._requests_tls_verify(), - headers={'Authorization': "Bearer %s" % self.access_token}) - if status_code == 200: - logger.info("Successfully deleted.") - else: - msg = "%s %s" % (status_code, return_data) - logger.error(msg) - raise ProviderFailedException(msg) - - def scale(self, url, replicas): - """ - Scale ReplicationControllers or DeploymentConfig - - Args: - url (str): full url for artifact - replicas (int): number of replicas scale to - """ - patch = [{"op": "replace", - "path": "/spec/replicas", - "value": replicas}] - - (status_code, return_data) = \ - Utils.make_rest_request("patch", - url, - data=patch, - verify=self._requests_tls_verify(), - headers={'Authorization': "Bearer %s" % self.access_token}) - if status_code == 200: - logger.info("Successfully scaled to %s replicas", replicas) - else: - msg = "%s %s" % (status_code, return_data) - logger.error(msg) - raise ProviderFailedException(msg) - - def process_template(self, url, template): - (status_code, return_data) = \ - Utils.make_rest_request("post", - url, - verify=self._requests_tls_verify(), - data=template, - headers={'Authorization': "Bearer %s" % self.access_token}) - if status_code == 201: - logger.info("template processed %s", template['metadata']['name']) - logger.debug("processed template %s", return_data) - return return_data['objects'] - else: - msg = "%s %s" % (status_code, return_data) - 
logger.error(msg) - raise ProviderFailedException(msg) - - def _requests_tls_verify(self): - """ - Return verify parameter for function Utils.make_rest_request - in format that is used by requests library. - see: http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification - """ - if self.provider_ca and self.provider_tls_verify: - return self.provider_ca - else: - return self.provider_tls_verify - - def execute(self, namespace, pod, container, command, - outfile=None): - """ - Execute a command in a container in an Openshift pod. - - Args: - namespace (str): Namespace - pod (str): Pod name - container (str): Container name inside pod - command (str): Command to execute - outfile (str): Path to output file where results should be dumped - - Returns: - Command output (str) or None in case results dumped to output file - """ - args = { - 'token': self.access_token, - 'namespace': namespace, - 'pod': pod, - 'container': container, - 'command': ''.join(['command={}&'.format(word) for word in command.split()]) - } - url = urljoin( - self.kubernetes_api, - 'namespaces/{namespace}/pods/{pod}/exec?' - 'access_token={token}&container={container}&' - '{command}stdout=1&stdin=0&tty=0'.format(**args)) - - # The above endpoint needs the request to be upgraded to SPDY, - # which python-requests does not yet support. However, the same - # endpoint works over websockets, so we are using websocket client. 
- - # Convert url from http(s) protocol to wss protocol - url = 'wss://' + url.split('://', 1)[-1] - logger.debug('url: {}'.format(url)) - - results = [] - - ws = websocket.WebSocketApp( - url, - on_message=lambda ws, message: self._handle_exec_reply(ws, message, results, outfile)) - - ws.run_forever(sslopt={ - 'ca_certs': self.provider_ca, - 'cert_reqs': ssl.CERT_REQUIRED if self.provider_tls_verify else ssl.CERT_NONE}) - - if not outfile: - return ''.join(results) - - def _handle_exec_reply(self, ws, message, results, outfile=None): - """ - Handle reply message for exec call - """ - # FIXME: For some reason, we do not know why, we need to ignore the - # 1st char of the message, to generate a meaningful result - cleaned_msg = message[1:] - if outfile: - with open(outfile, 'ab') as f: - f.write(cleaned_msg) - else: - results.append(cleaned_msg) - - def get_pod_status(self, namespace, pod): - """ - Get pod status. - - Args: - namespace (str): Openshift namespace - pod (str): Pod name - - Returns: - Status of pod (str) - - Raises: - ProviderFailedException when unable to fetch Pod status. - """ - args = { - 'namespace': namespace, - 'pod': pod, - 'access_token': self.access_token - } - url = urljoin( - self.kubernetes_api, - 'namespaces/{namespace}/pods/{pod}?' - 'access_token={access_token}'.format(**args)) - (status_code, return_data) = \ - Utils.make_rest_request("get", - url, - verify=self._requests_tls_verify(), - headers={'Authorization': "Bearer %s" % self.access_token}) - - if status_code != 200: - raise ProviderFailedException( - 'Could not fetch status for pod: {namespace}/{pod}'.format( - namespace=namespace, pod=pod)) - return return_data['status']['phase'].lower() +class OpenshiftProvider(Provider): + """Operations for OpenShift provider is implemented in this class. + This class implements deploy, stop and undeploy of an atomicapp on + OpenShift provider. 
+ """ -class OpenshiftProvider(Provider): + # Class variables key = "openshift" - cli_str = "oc" - cli = None - config_file = None - template_data = None - providerapi = "https://127.0.0.1:8443" - openshift_api = None - kubernetes_api = None - access_token = None namespace = DEFAULT_NAMESPACE + oc_artifacts = {} + + # From the provider configuration + config_file = None - # verify tls/ssl connection - provider_tls_verify = True - # path to file or dir with CA certificates + # Essential provider parameters + provider_api = None + provider_auth = None + provider_tls_verify = None provider_ca = None def init(self): - # Parsed artifacts. Key is kind of artifacts. Value is list of artifacts. - self.openshift_artifacts = OrderedDict() + self.oc_artifacts = {} - self._set_config_values() + logger.debug("Given config: %s", self.config) + if self.config.get("namespace"): + self.namespace = self.config.get("namespace") - self.oc = OpenshiftClient(self.providerapi, - self.access_token, - self.provider_tls_verify, - self.provider_ca) - self.openshift_api = self.oc.openshift_api - self.kubernetes_api = self.oc.kubernetes_api - - # test connection to openshift server - self.oc.test_connection() - - self.oapi_resources = self.oc.get_oapi_resources() - self.kapi_resources = self.oc.get_kapi_resources() + logger.info("Using namespace %s", self.namespace) self._process_artifacts() - def _get_namespace(self, artifact): - """ - Return namespace for artifact. If namespace is specified inside - artifact use that, if not return default namespace (as specfied in - answers.conf) + if self.dryrun: + return - Args: - artifact (dict): OpenShift/Kubernetes object + ''' + Config_file: + If a config_file has been provided, use the configuration + from the file and load the associated generated file. + If a config_file exists (--provider-config) use that. 
- Returns: - namespace (str) - """ - if "metadata" in artifact and "namespace" in artifact["metadata"]: - return artifact["metadata"]["namespace"] - return self.namespace + Params: + If any provider specific parameters have been provided, + load the configuration through the answers.conf file - def run(self): - logger.debug("Deploying to OpenShift") - # TODO: remove running components if one component fails issue:#428 - for kind, objects in self.openshift_artifacts.iteritems(): - for artifact in objects: - namespace = self._get_namespace(artifact) - url = self._get_url(namespace, kind) + .kube/config: + If no config file or params are provided by user then try to find and + use a config file at the default location. - if self.dryrun: - logger.info("DRY-RUN: %s", url) - continue - self.oc.deploy(url, artifact) + no config at all: + If no .kube/config file can be found then try to connect to the default + unauthenticated http://localhost:8080/api end-point. + ''' - def stop(self): - """ - Undeploy application. - - Cascade the deletion of the resources managed other resource - (e.g. ReplicationControllers created by a DeploymentConfig and - Pods created by a ReplicationController). - When using command line client this is done automatically - by `oc` command. - When using API calls we have to cascade deletion manually. - """ - logger.debug("Starting undeploy") - delete_artifacts = [] - for kind, objects in self.openshift_artifacts.iteritems(): - # Add deployment configs to beginning of the list so they are deleted first. - # Do deployment config first because if you do replication controller - # before deployment config then the deployment config will re-spawn - # the replication controller before the deployment config is deleted. 
- if kind == "deploymentconfig": - delete_artifacts = objects + delete_artifacts - else: - delete_artifacts = delete_artifacts + objects - - for artifact in delete_artifacts: - kind = artifact["kind"].lower() - namespace = self._get_namespace(artifact) - - # Get name from metadata so we know which object to delete. - if "metadata" in artifact and \ - "name" in artifact["metadata"]: - name = artifact["metadata"]["name"] - else: - raise ProviderFailedException("Cannot undeploy. There is no" - " name in artifacts metadata " - "artifact=%s" % artifact) - - logger.info("Undeploying artifact name=%s kind=%s" % (name, kind)) - - # If this is a deployment config we need to delete all - # replication controllers that were created by this. - # Find the replication controller that was created by this deployment - # config by querying for all replication controllers and filtering based - # on automatically created label openshift.io/deployment-config.name - if kind.lower() == "deploymentconfig": - params = {"labelSelector": - "openshift.io/deployment-config.name=%s" % name} - url = self._get_url(namespace, - "replicationcontroller", - params=params) - (status_code, return_data) = \ - Utils.make_rest_request("get", url, verify=self.oc._requests_tls_verify()) - if status_code != 200: - raise ProviderFailedException("Cannot get Replication" - "Controllers for Deployment" - "Config %s (status code %s)" % - (name, status_code)) - # kind of returned data is ReplicationControllerList - # https://docs.openshift.com/enterprise/3.1/rest_api/kubernetes_v1.html#v1-replicationcontrollerlist - # we need modify items to get valid ReplicationController - items = return_data["items"] - for item in items: - item["kind"] = "ReplicationController" - item["apiVersion"] = return_data["apiVersion"] - # add items to list of artifact to be deleted - delete_artifacts.extend(items) - - url = self._get_url(namespace, kind, name) - - # Scale down replication controller to 0 replicas before deleting. 
- # This should take care of all pods created by this replication - # controller and we can safely delete it. - if kind.lower() == "replicationcontroller": - if self.dryrun: - logger.info("DRY-RUN: SCALE %s down to 0", url) - else: - self.oc.scale(url, 0) + default_config_loc = os.path.join( + Utils.getRoot(), Utils.getUserHome().strip('/'), '.kube/config') - if self.dryrun: - logger.info("DRY-RUN: DELETE %s", url) - else: - self.oc.delete(url) + if self.config_file: + logger.debug("Provider configuration provided") + self.api = Client(KubeConfig.from_file(self.config_file), "openshift") + elif self._check_required_params(): + logger.debug("Generating .kube/config from given parameters") + self.api = Client(self._from_required_params(), "openshift") + elif os.path.isfile(default_config_loc): + logger.debug(".kube/config exists, using default configuration file") + self.api = Client(KubeConfig.from_file(default_config_loc), "openshift") + else: + self.config["provider-api"] = OC_DEFAULT_API + self.api = Client(self._from_required_params(), "openshift") + + self._check_namespaces() + + def _build_param_dict(self): + # Initialize the values + paramdict = {PROVIDER_API_KEY: self.provider_api, + PROVIDER_AUTH_KEY: self.provider_auth, + PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify, + PROVIDER_CA_KEY: self.provider_ca} + + # Get values from the loaded answers.conf / passed CLI params + for k in paramdict.keys(): + paramdict[k] = self.config.get(k) + + return paramdict + + def _check_required_params(self, exception=False): + ''' + This checks to see if required parameters associated to the Kubernetes + provider are passed. + PROVIDER_API_KEY and PROVIDER_AUTH_KEY are *required*. Token may be blank. + ''' + + paramdict = self._build_param_dict() + logger.debug("List of parameters passed: %s" % paramdict) + + # Check that the required parameters are passed. If not, error out. 
+ for k in [PROVIDER_API_KEY, PROVIDER_AUTH_KEY]: + if paramdict[k] is None: + if exception: + msg = "You need to set %s in %s or pass it as a CLI param" % (k, ANSWERS_FILE) + raise ProviderFailedException(msg) + else: + return False + + return True + + def _from_required_params(self): + ''' + Create a default configuration from passed environment parameters. + ''' + + self._check_required_params(exception=True) + paramdict = self._build_param_dict() + + logger.debug("Building from required params") + # Generate the configuration from the paramters + config = KubeConfig().from_params(api=paramdict[PROVIDER_API_KEY], + auth=paramdict[PROVIDER_AUTH_KEY], + ca=paramdict[PROVIDER_CA_KEY], + verify=paramdict[PROVIDER_TLS_VERIFY_KEY]) + logger.debug("Passed configuration for .kube/config %s" % config) + return config + + def _check_namespaces(self): + ''' + This function checks to see whether or not the namespaces created in the cluster match the + namespace that is associated and/or provided in the deployed application + ''' + + # Get the namespaces and output the currently used ones + namespace_list = self.api.namespaces() + logger.debug("There are currently %s namespaces in the cluster." % str(len(namespace_list))) + + # Create a namespace list + namespaces = [] + for ns in namespace_list: + namespaces.append(ns["metadata"]["name"]) + + # Output the namespaces and check to see if the one provided exists + logger.debug("Namespaces: %s" % namespaces) + if self.namespace not in namespaces: + msg = "%s namespace does not exist. Please create the namespace and try again." % self.namespace + raise ProviderFailedException(msg) def _process_artifacts(self): """ - Parse OpenShift manifests files and checks if manifest under - process is valid. Reads self.artifacts and saves parsed artifacts - to self.openshift_artifacts + Parse each Kubernetes file and convert said format into an Object for + deployment. 
""" for artifact in self.artifacts: logger.debug("Processing artifact: %s", artifact) data = None + + # Open and parse the artifact data with open(os.path.join(self.path, artifact), "r") as fp: data = anymarkup.parse(fp, force_types=None) + # Process said artifacts self._process_artifact_data(artifact, data) def _process_artifact_data(self, artifact, data): @@ -494,290 +207,58 @@ def _process_artifact_data(self, artifact, data): artifact (str): Artifact name data (dict): Artifact data """ - # kind has to be specified in artifact + + # Check if kind exists if "kind" not in data.keys(): raise ProviderFailedException( "Error processing %s artifact. There is no kind" % artifact) + # Change to lower case so it's easier to parse kind = data["kind"].lower() - resource = self._kind_to_resource(kind) - - # check if resource is supported by apis - if resource not in self.oapi_resources \ - and resource not in self.kapi_resources: - raise ProviderFailedException( - "Unsupported kind %s in artifact %s" % (kind, artifact)) - - # process templates - if kind == "template": - processed_objects = self._process_template(data) - # add all processed object to artifacts dict - for obj in processed_objects: - obj_kind = obj["kind"].lower() - if obj_kind not in self.openshift_artifacts.keys(): - self.openshift_artifacts[obj_kind] = [] - self.openshift_artifacts[obj_kind].append(obj) - return - - # add parsed artifact to dict - if kind not in self.openshift_artifacts.keys(): - self.openshift_artifacts[kind] = [] - self.openshift_artifacts[kind].append(data) - - def _process_template(self, template): - """ - Call OpenShift api and process template. - Templates allow parameterization of resources prior to being sent to - the server for creation or update. Templates have "parameters", - which may either be generated on creation or set by the user. - - Args: - template (dict): template to process - - Returns: - List of objects from processed template. 
- """ - logger.debug("processing template: %s", template) - url = self._get_url(self._get_namespace(template), "processedtemplates") - return self.oc.process_template(url, template) - def _kind_to_resource(self, kind): - """ - Converts kind to resource name. It is same logics - as in k8s.io/kubernetes/pkg/api/meta/restmapper.go (func KindToResource) - Example: - Pod -> pods - Policy - > policies - BuildConfig - > buildconfigs - - Args: - kind (str): Kind of the object - - Returns: - Resource name (str) (kind in plural form) - """ - singular = kind.lower() - if singular.endswith("status"): - plural = singular + "es" - else: - if singular[-1] == "s": - plural = singular - elif singular[-1] == "y": - plural = singular.rstrip("y") + "ies" - else: - plural = singular + "s" - return plural - - def _get_url(self, namespace, kind, name=None, params=None): - """ - Some kinds/resources are managed by OpensShift and some by Kubernetes. - Here we compose right url (Kubernets or OpenShift) for given kind. 
- If resource is managed by Kubernetes or OpenShift is determined by - self.kapi_resources/self.oapi_resources lists - Example: - For namespace=project1, kind=DeploymentConfig, name=dc1 result - would be http://example.com:8443/oapi/v1/namespaces/project1/deploymentconfigs/dc1 - - Args: - namespace (str): Kubernetes namespace or Openshift project name - kind (str): kind of the object - name (str): object name if modifying or deleting specific object (optional) - params (dict): query parameters {"key":"value"} url?key=value + if kind not in self.oc_artifacts.keys(): + self.oc_artifacts[kind] = [] - Returns: - Full url (str) for given kind, namespace and name - """ - url = None - - resource = self._kind_to_resource(kind) - - if resource in self.oapi_resources: - url = self.openshift_api - elif resource in self.kapi_resources: - url = self.kubernetes_api - - url = urljoin(url, "namespaces/%s/%s/" % (namespace, resource)) + # Fail if there is no metadata + if 'metadata' not in data: + raise ProviderFailedException( + "Error processing %s artifact. There is no metadata object" % artifact) - if name: - url = urljoin(url, name) + # Change to the namespace specified on init() + data['metadata']['namespace'] = self.namespace - if params: - params["access_token"] = self.access_token + if 'labels' not in data['metadata']: + data['metadata']['labels'] = {'namespace': self.namespace} else: - params = {"access_token": self.access_token} + data['metadata']['labels']['namespace'] = self.namespace - url = urljoin(url, "?%s" % urlencode(params)) - logger.debug("url: %s", url) - return url + self.oc_artifacts[kind].append(data) - def _set_config_values(self): + def run(self): """ - Reads providerapi, namespace and accesstoken from answers.conf and - corresponding values from providerconfig (if set). - Use one that is set, if both are set and have conflicting values raise - exception. 
- - Raises: - ProviderFailedException: values in providerconfig and answers.conf - are in conflict - + Deploys the app by given resource artifacts. """ + logger.info("Deploying to Kubernetes") - # First things first, if we are running inside of an openshift pod via - # `oc new-app` then get the config from the environment (files/env vars) - # NOTE: pick up provider_tls_verify from answers if exists - if Utils.running_on_openshift(): - self.providerapi = Utils.get_openshift_api_endpoint_from_env() - self.namespace = os.environ['POD_NAMESPACE'] - self.access_token = os.environ['TOKEN_ENV_VAR'] - self.provider_ca = OPENSHIFT_POD_CA_FILE - self.provider_tls_verify = \ - self.config.get(PROVIDER_TLS_VERIFY_KEY, True) - return # No need to process other information - - # initialize result to default values - result = {PROVIDER_API_KEY: self.providerapi, - PROVIDER_AUTH_KEY: self.access_token, - NAMESPACE_KEY: self.namespace, - PROVIDER_TLS_VERIFY_KEY: self.provider_tls_verify, - PROVIDER_CA_KEY: self.provider_ca} - - # create keys in dicts and initialize values to None - answers = dict.fromkeys(result) - providerconfig = dict.fromkeys(result) - - # get values from answers.conf - for k in result.keys(): - answers[k] = self.config.get(k) - - # get values from providerconfig - if self.config_file: - providerconfig = KubeConfig.parse_kubeconf(self.config_file) - - # decide between values from answers.conf and providerconfig - # if only one is set use that, report if they are in conflict - for k in result.keys(): - if answers[k] is not None and providerconfig[k] is None: - result[k] = answers[k] - elif answers[k] is None and providerconfig[k] is not None: - result[k] = providerconfig[k] - elif answers[k] is not None and providerconfig[k] is not None: - if answers[k] == providerconfig[k]: - result[k] = answers[k] + for kind, objects in self.oc_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % 
(kind, artifact)) else: - msg = "There are conflicting values in %s (%s) and %s (%s)"\ - % (self.config_file, providerconfig[k], ANSWERS_FILE, - answers[k]) - logger.error(msg) - raise ProviderFailedException(msg) - - logger.debug("config values: %s" % result) - - # this items are required, they have to be not None - for k in [PROVIDER_API_KEY, PROVIDER_AUTH_KEY, NAMESPACE_KEY]: - if result[k] is None: - msg = "You need to set %s in %s" % (k, ANSWERS_FILE) - logger.error(msg) - raise ProviderFailedException(msg) - - # set config values - self.providerapi = result[PROVIDER_API_KEY] - self.access_token = result[PROVIDER_AUTH_KEY] - self.namespace = result[NAMESPACE_KEY] - self.provider_tls_verify = result[PROVIDER_TLS_VERIFY_KEY] - if result[PROVIDER_CA_KEY]: - # if we are in container translate path to path on host - self.provider_ca = Utils.get_real_abspath(result[PROVIDER_CA_KEY]) - else: - self.provider_ca = None + self.api.create(artifact, self.namespace) - def extract(self, image, src, dest, update=True): - """ - Extract contents of a container image from 'src' in container - to 'dest' in host. 
- - Args: - image (str): Name of container image - src (str): Source path in container - dest (str): Destination path in host - update (bool): Update existing destination, if True - """ - if os.path.exists(dest) and not update: - return - cleaned_image_name = Utils.sanitizeName(image) - pod_name = '{}-{}'.format(cleaned_image_name, Utils.getUniqueUUID()) - container_name = cleaned_image_name - - # Pull (if needed) image and bring up a container from it - # with 'sleep 3600' entrypoint, just to extract content from it - artifact = { - 'apiVersion': 'v1', - 'kind': 'Pod', - 'metadata': { - 'name': pod_name - }, - 'spec': { - 'containers': [ - { - 'image': image, - 'command': [ - 'sleep', - '3600' - ], - 'imagePullPolicy': 'IfNotPresent', - 'name': container_name - } - ], - 'restartPolicy': 'Always' - } - } - - self.oc.deploy(self._get_url(self.namespace, 'Pod'), artifact) - try: - self._wait_till_pod_runs(self.namespace, pod_name, timeout=300) - - # Archive content from the container and dump it to tmpfile - tmpfile = '/tmp/atomicapp-{pod}.tar.gz'.format(pod=pod_name) - self.oc.execute( - self.namespace, pod_name, container_name, - 'tar -cz --directory {} ./'.format('/' + src), - outfile=tmpfile - ) - finally: - # Delete created pod - self.oc.delete(self._get_url(self.namespace, 'Pod', pod_name)) - - # Extract archive data - tar = tarfile.open(tmpfile, 'r:gz') - tar.extractall(dest) - - def _wait_till_pod_runs(self, namespace, pod, timeout=300): + def stop(self): + """Undeploys the app by given resource manifests. + Undeploy operation first scale down the replicas to 0 and then deletes + the resource from cluster. """ - Wait till pod runs, with a timeout. + logger.info("Undeploying from Kubernetes") - Args: - namespace (str): Openshift namespace - pod (str): Pod name - timeout (int): Timeout in seconds. - - Raises: - ProviderFailedException on timeout or when the pod goes to - failed state. 
- """ - now = datetime.datetime.now() - timeout_delta = datetime.timedelta(seconds=timeout) - while datetime.datetime.now() - now < timeout_delta: - status = self.oc.get_pod_status(namespace, pod) - if status == 'running': - break - elif status == 'failed': - raise ProviderFailedException( - 'Unable to run pod for extracting content: ' - '{namespace}/{pod}'.format(namespace=namespace, - pod=pod)) - time.sleep(1) - if status != 'running': - raise ProviderFailedException( - 'Timed out to extract content from pod: ' - '{namespace}/{pod}'.format(namespace=namespace, - pod=pod)) + for kind, objects in self.oc_artifacts.iteritems(): + for artifact in objects: + if self.dryrun: + logger.info("DRY-RUN: Deploying k8s KIND: %s, ARTIFACT: %s" + % (kind, artifact)) + else: + self.api.delete(artifact, self.namespace) diff --git a/test-requirements.txt b/test-requirements.txt index b68bb921..f292f6cb 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -2,3 +2,4 @@ flake8 mock pep8 pytest-cov +pytest-localserver diff --git a/tests/units/kubeshift/external/example_kubeconfig b/tests/units/kubeshift/external/example_kubeconfig new file mode 100644 index 00000000..ef955b9c --- /dev/null +++ b/tests/units/kubeshift/external/example_kubeconfig @@ -0,0 +1,17 @@ +apiVersion: v1 +clusters: +- cluster: + server: http://localhost:8080 + name: dev +contexts: +- context: + cluster: dev + user: default + name: dev +current-context: dev +kind: Config +preferences: {} +users: +- name: default + user: + token: foobar diff --git a/tests/units/kubeshift/test_client.py b/tests/units/kubeshift/test_client.py new file mode 100644 index 00000000..ad01092b --- /dev/null +++ b/tests/units/kubeshift/test_client.py @@ -0,0 +1,72 @@ +import mock +import pytest +from atomicapp.providers.lib.kubeshift.client import Client +from atomicapp.providers.lib.kubeshift.exceptions import KubeClientError + +config = { + "kind": "Config", + "preferences": {}, + "current-context": "dev", + "contexts": [ + 
{ + "name": "dev", + "context": { + "cluster": "dev", + "user": "default" + } + } + ], + "clusters": [ + { + "cluster": { + "server": "http://localhost:8080" + }, + "name": "dev" + } + ], + "apiVersion": "v1", + "users": [ + { + "name": "default", + "user": { + "token": "foobar" + } + } + ] +} + + +class FakeClient(): + + def __init__(self, *args): + pass + + +@mock.patch("atomicapp.providers.lib.kubeshift.client.KubeKubernetesClient") +def test_client_kubernetes(FakeClient): + Client(config, "kubernetes") + + +@mock.patch("atomicapp.providers.lib.kubeshift.client.KubeOpenshiftClient") +def test_client_openshift(FakeClient): + Client(config, "openshift") + + +def test_client_load_failure(): + with pytest.raises(KubeClientError): + Client(config, "foobar") + + +# TODO +def test_client_create(): + pass + + +# TODO +def test_client_delete(): + pass + + +# TODO +def test_client_namespaces(): + pass diff --git a/tests/units/kubeshift/test_kubebase.py b/tests/units/kubeshift/test_kubebase.py new file mode 100644 index 00000000..aeff61a9 --- /dev/null +++ b/tests/units/kubeshift/test_kubebase.py @@ -0,0 +1,93 @@ +import pytest +from atomicapp.providers.lib.kubeshift.kubebase import KubeBase +from atomicapp.providers.lib.kubeshift.exceptions import KubeConnectionError + + +config = { + "kind": "Config", + "preferences": {}, + "current-context": "dev", + "contexts": [ + { + "name": "dev", + "context": { + "cluster": "dev", + "user": "default" + } + } + ], + "clusters": [ + { + "cluster": { + "server": "http://localhost:8080" + }, + "name": "dev" + } + ], + "apiVersion": "v1", + "users": [ + { + "name": "default", + "user": { + "token": "foobar" + } + } + ] +} +kubebase = KubeBase(config) + + +def test_get_resources(httpserver): + content = '{"kind":"APIResourceList","groupVersion":"v1","resources":[{"name":"bindings","namespaced":true,"kind":"Binding"},{"name":"componentstatuses","namespaced":false,"kind":"ComponentStatus"}]}' + httpserver.serve_content(content, code=200, 
headers=None) + kubebase.get_resources(httpserver.url) + + +def test_get_groups(httpserver): + content = '{"kind":"APIGroupList","groups":[{"name":"autoscaling","versions":[{"groupVersion":"autoscaling/v1","version":"v1"}],"preferredVersion":{"groupVersion":"autoscaling/v1","version":"v1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]},{"name":"batch","versions":[{"groupVersion":"batch/v1","version":"v1"}],"preferredVersion":{"groupVersion":"batch/v1","version":"v1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]},{"name":"extensions","versions":[{"groupVersion":"extensions/v1beta1","version":"v1beta1"}],"preferredVersion":{"groupVersion":"extensions/v1beta1","version":"v1beta1"},"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0","serverAddress":"192.168.1.156:443"}]}]}' + httpserver.serve_content(content, code=200, headers=None) + kubebase.get_groups(httpserver.url) + + +def test_connection(httpserver): + httpserver.serve_content(content="OK", code=200, headers=None) + kubebase.test_connection(httpserver.url) + + +def test_kind_to_resource_name(): + assert kubebase.kind_to_resource_name("Pod") == "pods" + assert kubebase.kind_to_resource_name("buildconfig") == "buildconfigs" + assert kubebase.kind_to_resource_name("policy") == "policies" + assert kubebase.kind_to_resource_name("petset") == "petsets" + assert kubebase.kind_to_resource_name("componentstatus") == "componentstatuses" + assert kubebase.kind_to_resource_name("Ingress") == "ingresses" + + +def test_request_methods_failures(): + with pytest.raises(KubeConnectionError): + kubebase.request("get", "http://foobar") + with pytest.raises(KubeConnectionError): + kubebase.request("post", "http://foobar") + with pytest.raises(KubeConnectionError): + kubebase.request("put", "http://foobar") + with pytest.raises(KubeConnectionError): + kubebase.request("delete", "http://foobar") + with 
pytest.raises(KubeConnectionError): + kubebase.request("patch", "http://foobar") + + +def test_request_timeout(httpserver): + httpserver.serve_content(content="Time out", code=408, headers=None) + with pytest.raises(KubeConnectionError): + kubebase.request("get", httpserver.url) + + +def test_request_ok(httpserver): + httpserver.serve_content(content="OK", code=200, headers=None) + kubebase.request("get", httpserver.url) + + +def test_websocket_request_without_ssl(): + # Should get an attribute error if there is no "cert_ca" to the base config + with pytest.raises(AttributeError): + kubebase.websocket_request("http://foobar") diff --git a/tests/units/nulecule/test_kubeconfig.py b/tests/units/kubeshift/test_kubeconfig.py similarity index 84% rename from tests/units/nulecule/test_kubeconfig.py rename to tests/units/kubeshift/test_kubeconfig.py index a0f13601..6a89debf 100644 --- a/tests/units/nulecule/test_kubeconfig.py +++ b/tests/units/kubeshift/test_kubeconfig.py @@ -1,10 +1,39 @@ import unittest +import pytest +import tempfile +import os from atomicapp.plugin import ProviderFailedException from atomicapp.providers.lib.kubeshift.kubeconfig import KubeConfig class TestKubeConfParsing(unittest.TestCase): + def test_from_file(self): + """ + Test parsing a hello world JSON example and returning back the + respective anymarkup content + """ + _, tmpfilename = tempfile.mkstemp() + f = open(tmpfilename, 'w') + f.write("{ 'hello': 'world'}") + f.close() + KubeConfig.from_file(tmpfilename) + + def test_from_params(self): + KubeConfig.from_params("foo", "bar", "foo", "bar") + + def test_parse_kubeconf_from_file_failure(self): + _, tmpfilename = tempfile.mkstemp() + f = open(tmpfilename, 'w') + f.write("{ 'hello': 'world'}") + f.close() + with pytest.raises(KeyError): + KubeConfig.parse_kubeconf(tmpfilename) + + def test_parse_kubeconf_from_file(self): + example_kubeconfig = os.path.dirname(__file__) + '/external/example_kubeconfig' + 
KubeConfig.parse_kubeconf(example_kubeconfig) + def test_parse_kubeconf_data_insecure(self): """ Test parsing kubeconf data with current context containing diff --git a/tests/units/kubeshift/test_kubernetes.py b/tests/units/kubeshift/test_kubernetes.py new file mode 100644 index 00000000..914c31a6 --- /dev/null +++ b/tests/units/kubeshift/test_kubernetes.py @@ -0,0 +1,82 @@ +import mock +from atomicapp.providers.lib.kubeshift.kubernetes import KubeKubernetesClient + +config = { + "kind": "Config", + "preferences": {}, + "current-context": "dev", + "contexts": [ + { + "name": "dev", + "context": { + "cluster": "dev", + "user": "default" + } + } + ], + "clusters": [ + { + "cluster": { + "server": "http://localhost:8080" + }, + "name": "dev" + } + ], + "apiVersion": "v1", + "users": [ + { + "name": "default", + "user": { + "token": "foobar" + } + } + ] +} + + +class FakeClient(): + + def __init__(self, *args): + pass + + def test_connection(self, *args): + pass + + def get_resources(self, *args): + return ['Pod', 'pod', 'pods'] + + def get_groups(self, *args): + return {} + + def request(self, method, url, data=None): + return None, 200 + + @property + def cluster(self): + return {'server': 'https://foobar'} + + +@mock.patch("atomicapp.providers.lib.kubeshift.kubernetes.KubeBase") +def test_create(mock_class): + # Mock the API class + mock_class.return_value = FakeClient() + mock_class.kind_to_resource_name.return_value = 'Pod' + + k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": { + "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}} + + a = KubeKubernetesClient(config) + a.create(k8s_object, "foobar") + + +@mock.patch("atomicapp.providers.lib.kubeshift.kubernetes.KubeBase") +def test_delete(mock_class): + # Mock the API class + mock_class.return_value = FakeClient() + 
mock_class.kind_to_resource_name.return_value = 'Pod' + + k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": { + "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}} + + a = KubeKubernetesClient(config) + a.delete(k8s_object, "foobar") diff --git a/tests/units/kubeshift/test_openshift.py b/tests/units/kubeshift/test_openshift.py new file mode 100644 index 00000000..af9f5e05 --- /dev/null +++ b/tests/units/kubeshift/test_openshift.py @@ -0,0 +1,121 @@ +import mock +from atomicapp.providers.lib.kubeshift.openshift import KubeOpenshiftClient + +config = { + "kind": "Config", + "preferences": {}, + "current-context": "dev", + "contexts": [ + { + "name": "dev", + "context": { + "cluster": "dev", + "user": "default" + } + } + ], + "clusters": [ + { + "cluster": { + "server": "http://localhost:8080" + }, + "name": "dev" + } + ], + "apiVersion": "v1", + "users": [ + { + "name": "default", + "user": { + "token": "foobar" + } + } + ] +} + + +class FakeClient(): + + def __init__(self, *args): + pass + + def test_connection(self, *args): + pass + + def get_resources(self, *args): + return ['Pod', 'template'] + + def get_groups(self, *args): + return {} + + def request(self, method, url, data=None): + return None, 200 + + @property + def cluster(self): + return {'server': 'https://foobar'} + + +@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") +def test_create(mock_class): + # Mock the API class + mock_class.return_value = FakeClient() + mock_class.get_resources.return_value = ['Pod'] + mock_class.kind_to_resource_name.return_value = 'Pod' + + k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": { + "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}} + + a = 
KubeOpenshiftClient(config) + a.create(k8s_object, "foobar") + + +@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") +def test_delete(mock_class): + # Mock the API class + mock_class.return_value = FakeClient() + mock_class.kind_to_resource_name.return_value = 'Pod' + + k8s_object = {"apiVersion": "v1", "kind": "Pod", "metadata": {"labels": {"app": "helloapache"}, "name": "helloapache"}, "spec": { + "containers": [{"image": "$image", "name": "helloapache", "ports": [{"containerPort": 80, "hostPort": 80, "protocol": "TCP"}]}]}} + + a = KubeOpenshiftClient(config) + a.delete(k8s_object, "foobar") + + +class FakeOpenshiftTemplateClient(): + + def __init__(self, *args): + pass + + def test_connection(self, *args): + pass + + def get_resources(self, *args): + return ['Pod', 'template'] + + def get_groups(self, *args): + return {} + + def request(self, method, url, data=None): + openshift_object = {} + openshift_object['objects'] = [{"kind": "Service", "apiVersion": "v1", "metadata": {"name": "cakephp-mysql-example", "annotations": {"description": "Exposes and load balances the application pods"}}, "spec": {"ports": [{"name": "web", "port": 8080, "targetPort": 8080}], "selector": {"name": "cakephp-mysql-example"}}}] + return openshift_object, 200 + + @property + def cluster(self): + return {'server': 'https://foobar'} + + +@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") +def test_process_template(mock_class): + # Mock the API class + mock_class.return_value = FakeOpenshiftTemplateClient() + mock_class.kind_to_resource_name.return_value = 'template' + + openshift_template = {"kind": "Template", "apiVersion": "v1", "metadata": {"name": "foobar"}, "objects": [{"kind": "Service", "apiVersion": "v1", "metadata": {"name": "cakephp-mysql-example", "annotations": { + "description": "Exposes and load balances the application pods"}}, "spec": {"ports": [{"name": "web", "port": 8080, "targetPort": 8080}], "selector": {"name": 
"cakephp-mysql-example"}}}]} + + a = KubeOpenshiftClient(config) + a.create(openshift_template, "foobar") + a.delete(openshift_template, "foobar") diff --git a/tests/units/providers/test_openshift_provider.py b/tests/units/providers/test_openshift_provider.py deleted file mode 100644 index 31f6d2b5..00000000 --- a/tests/units/providers/test_openshift_provider.py +++ /dev/null @@ -1,227 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Unittests for atomicapp/providers/openshift.py - -We test most functionalities of OpenshiftProvider by -mocking out OpenshiftClient which interacts with -the external world openshift and kubernetes API. -""" - -import unittest -import mock -from atomicapp.providers.openshift import OpenshiftProvider -from atomicapp.plugin import ProviderFailedException - - -class OpenshiftProviderTestMixin(object): - - def setUp(self): - # Patch OpenshiftClient to test OpenshiftProvider - self.patcher = mock.patch('atomicapp.providers.openshift.OpenshiftClient') - self.mock_OpenshiftClient = self.patcher.start() - self.mock_oc = self.mock_OpenshiftClient() - - def get_oc_provider(self, dryrun=False, artifacts=[]): - """ - Get OpenshiftProvider instance - """ - op = OpenshiftProvider({}, '.', dryrun) - op.artifacts = artifacts - op.access_token = 'test' - op.init() - return op - - def tearDown(self): - self.patcher.stop() - - -class TestOpenshiftProviderDeploy(OpenshiftProviderTestMixin, unittest.TestCase): - """ - Test OpenshiftProvider.run - """ - - def test_run(self): - """ - Test calling OpenshiftClient.run from OpenshiftProvider.run - """ - op = self.get_oc_provider() - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - - op.run() - - self.mock_oc.deploy.assert_called_once_with( - 'namespaces/foo/pods/?access_token=test', - op.openshift_artifacts['pods'][0]) - - def test_run_dryrun(self): - """ - Test running OpenshiftProvider.run as dryrun - """ - op = 
self.get_oc_provider(dryrun=True) - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - - op.run() - - self.assertFalse(self.mock_oc.run.call_count) - -class TestOpenshiftProviderUnrun(OpenshiftProviderTestMixin, unittest.TestCase): - """ - Test OpenshiftProvider.stop - """ - - def test_stop(self): - """ - Test calling OpenshiftClient.delete from OpenshiftProvider.stop - """ - op = self.get_oc_provider() - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'kind': 'Pod', - 'metadata': { - 'name': 'bar', - 'namespace': 'foo' - } - } - ] - } - - op.stop() - - self.mock_oc.delete.assert_called_once_with( - 'namespaces/foo/pods/%s?access_token=test' % - op.openshift_artifacts['pods'][0]['metadata']['name']) - - def test_stop_dryrun(self): - """ - Test running OpenshiftProvider.stop as dryrun - """ - op = self.get_oc_provider(dryrun=True) - op.oapi_resources = ['foo'] - op.openshift_artifacts = { - 'pods': [ - { - 'kind': 'Pod', - 'metadata': { - 'name': 'bar', - 'namespace': 'foo' - } - } - ] - } - - op.stop() - - self.assertFalse(self.mock_oc.delete.call_count) - -class TestOpenshiftProviderProcessArtifactData(OpenshiftProviderTestMixin, unittest.TestCase): - """ - Test processing Openshift artifact data - """ - - def test_process_artifact_data_non_template_kind(self): - """ - Test processing non template artifact data - """ - artifact_data = { - 'kind': 'Pod', - 'pods': [ - { - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - self.mock_oc.get_oapi_resources.return_value = ['pods'] - - op = self.get_oc_provider() - - op._process_artifact_data('foo', artifact_data) - - self.assertEqual(op.openshift_artifacts, - {'pod': [artifact_data]}) - - def test_process_artifact_data_template_kind(self): - """ - Test processing non template artifact data - """ - artifact_data = { - 'kind': 'Template', - 'objects': [ - { - 'kind': 'Pod', - 'metadata': { - 'namespace': 'foo' - } - 
}, - { - 'kind': 'Service', - 'metadata': { - 'namespace': 'foo' - } - } - ] - } - self.mock_oc.get_oapi_resources.return_value = ['templates'] - op = self.get_oc_provider() - self.mock_oc.process_template.return_value = artifact_data['objects'] - - op._process_artifact_data('foo', artifact_data) - - self.assertEqual( - op.openshift_artifacts, { - 'pod': [ - {'kind': 'Pod', 'metadata': {'namespace': 'foo'}} - ], - 'service': [ - {'kind': 'Service', 'metadata': {'namespace': 'foo'}} - ] - } - ) - - def test_process_artifact_data_error_resource_not_in_resources(self): - """ - Test processing artifact data with kind not in resources - """ - artifact_data = { - 'kind': 'foobar' - } - - op = self.get_oc_provider() - - self.assertRaises( - ProviderFailedException, - op._process_artifact_data, 'foo', artifact_data) - - def test_process_artifact_data_error_kind_key_missing(self): - """ - Test processing artifact data with missing key 'kind' - """ - artifact_data = {} - op = self.get_oc_provider() - - self.assertRaises( - ProviderFailedException, - op._process_artifact_data, 'foo', artifact_data) - - From dc0393d9880d148f2d5358c50df553171738c330 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 11 Aug 2016 09:49:15 -0400 Subject: [PATCH 181/193] Add release script for Atomic App This bash script goes step-by-step on how to release a new version of Atomic App for both the main Github repo as well as the nulecule-library. --- script/release.sh | 326 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 326 insertions(+) create mode 100755 script/release.sh diff --git a/script/release.sh b/script/release.sh new file mode 100755 index 00000000..43ac0f84 --- /dev/null +++ b/script/release.sh @@ -0,0 +1,326 @@ +#!/bin/bash +UPSTREAM_REPO="projectatomic" +CLI="atomicapp" +LIBRARY="nulecule-library" + + +usage() { + echo "This will prepare Atomic App for release!" 
+ echo "" + echo "Requirements:" + echo " git" + echo " gpg - with a valid GPG key already generated" + echo " hub" + echo " github-release" + echo " GITHUB_TOKEN in your env variable" + echo " " + echo "Not only that, but you must have permission for:" + echo " Tagging releases for Atomic App on Github" + echo " Access to hub.docker.com builds" + echo "" +} + +requirements() { + if [ ! -f /usr/bin/git ] && [ ! -f /usr/local/bin/git ]; then + echo "No git. What's wrong with you?" + return 1 + fi + + if [ ! -f /usr/bin/gpg ] && [ ! -f /usr/local/bin/gpg ]; then + echo "No gpg. What's wrong with you?" + return 1 + fi + + if [ ! -f $GOPATH/bin/github-release ]; then + echo "No $GOPATH/bin/github-release. Please run 'go get -v github.com/aktau/github-release'" + return 1 + fi + + if [ ! -f /usr/bin/hub ]; then + echo "No hub. Please run install hub @ github.com/github/hub" + return 1 + fi + + if [[ -z "$GITHUB_TOKEN" ]]; then + echo "export GITHUB_TOKEN=yourtoken needed for using github-release" + fi +} + +# Clone and then change to user's upstream repo for pushing to master / opening PR's :) +clone() { + git clone ssh://git@github.com/$UPSTREAM_REPO/$CLI.git + if [ $? -eq 0 ]; then + echo OK + else + echo FAIL + exit + fi + cd $CLI + git remote remove origin + git remote add origin git@github.com:$ORIGIN_REPO/$CLI.git + git checkout -b release-$1 + cd .. +} + +replaceversion() { + cd $CLI + OLD_VERSION=`python setup.py --version` + echo "OLD VERSION:" $OLD_VERSION + + echo "1. Replaced Dockerfile versioning" + find . -name 'Dockerfile*' -type f -exec sed -i "s/$OLD_VERSION/$1/g" {} \; + + echo "2. Replaced .py versioning" + find . -name '*.py' -type f -exec sed -i "s/$OLD_VERSION/$1/g" {} \; + + echo "3. Replaced docs versioning" + find docs/ -name '*.md' -type f -exec sed -i "s/$OLD_VERSION/$1/g" {} \; + + echo "4. Replaced README.md versioning" + sed -i "s/$OLD_VERSION/$1/g" README.md + + cd .. +} + +changelog() { + cd $CLI + echo "Getting commit changes. 
Writing to ../changes.txt" + LOG=`git shortlog --email --no-merges --pretty=%s ${1}..` + echo -e "\`\`\`\n$LOG\n\`\`\`" > ../changes.txt + echo "Changelog has been written to changes.txt" + echo "!!PLEASE REVIEW BEFORE CONTINUING!!" + echo "Open changes.txt and add the release information" + echo "to the beginning of the file before the git shortlog" + cd .. +} + +changelog_md() { + echo "Generating CHANGELOG.md" + CHANGES=$(cat changes.txt) + cd $CLI + DATE=$(date +"%m-%d-%Y") + CHANGELOG=$(cat CHANGELOG.md) + HEADER="## Atomic App $1 ($DATE)" + echo -e "$HEADER\n\n$CHANGES\n\n$CHANGELOG" >CHANGELOG.md + echo "Changes have been written to CHANGELOG.md" + cd .. +} + +git_commit() { + cd $CLI + + BRANCH=`git symbolic-ref --short HEAD` + if [ -z "$BRANCH" ]; then + echo "Unable to get branch name, is this even a git repo?" + return 1 + fi + echo "Branch: " $BRANCH + + git add . + git commit -m "$1 Release" + git push origin $BRANCH + hub pull-request -b $UPSTREAM_REPO/$CLI:master -h $ORIGIN_REPO/$CLI:$BRANCH + + cd .. + echo "" + echo "PR opened against master" + echo "" +} + +sign() { + # Tarball it! + cp -r $CLI $CLI-$1 + sudo rm -rf $CLI-$1/.git* + sudo tar czf $CLI-$1.tar.gz $CLI-$1 + if [ $? -eq 0 ]; then + echo TARBALL OK + else + echo TARBALL FAIL + exit + fi + + # Sign it! + echo -e "SIGN THE TARBALL!\n" + gpg --detach-sign --armor $CLI-$1.tar.gz + if [ $? -eq 0 ]; then + echo SIGN OK + else + echo SIGN FAIL + exit + fi + + echo "" + echo "The tar.gz. is now located at $CLI-$1.tar.gz" + echo "and the signed one at $CLI-$1.tar.gz.asc" + echo "" +} + +push() { + CHANGES=$(cat changes.txt) + # Release it! + github-release release \ + --user $UPSTREAM_REPO \ + --repo $CLI \ + --tag $1 \ + --name "$1" \ + --description "$CHANGES" + if [ $? 
-eq 0 ]; then + echo RELEASE UPLOAD OK + else + echo RELEASE UPLOAD FAIL + exit + fi + + github-release upload \ + --user $UPSTREAM_REPO \ + --repo $CLI \ + --tag $1 \ + --name "$CLI-$1.tar.gz" \ + --file $CLI-$1.tar.gz + if [ $? -eq 0 ]; then + echo TARBALL UPLOAD OK + else + echo TARBALL UPLOAD FAIL + exit + fi + + github-release upload \ + --user $UPSTREAM_REPO \ + --repo $CLI\ + --tag $1 \ + --name "$CLI-$1.tar.gz.asc" \ + --file $CLI-$1.tar.gz.asc + if [ $? -eq 0 ]; then + echo SIGNED TARBALL UPLOAD OK + else + echo SIGNED TARBALL UPLOAD FAIL + exit + fi + + echo "DONE" + echo "DOUBLE CHECK IT:" + echo "!!!" + echo "https://github.com/$UPSTREAM_REPO/$CLI/releases/edit/$1" + echo "!!!" + echo "REMEMBER TO UPDATE DOCKER BUILDS! :D" +} + +update_library() { + BRANCH=sync-with-$1 + rm -rf $LIBRARY + + # Clone + git clone ssh://git@github.com/$UPSTREAM_REPO/$LIBRARY.git + if [ $? -eq 0 ]; then + echo OK + else + echo FAIL + exit + fi + cd $LIBRARY + git remote remove origin + git remote add origin git@github.com:$ORIGIN_REPO/$LIBRARY.git + git checkout -b $BRANCH + + # Commit + find . -type f -iname 'Dockerfile' -exec sed -i "s,^FROM.projectatomic*,FROM projectatomic/atomicapp:$1," "{}" +; + git add . + git commit -m "Sync with $1 release" + git push origin $BRANCH + hub pull-request -b $UPSTREAM_REPO/$LIBRARY:master -h $ORIGIN_REPO/$LIBRARY:$BRANCH + cd .. +} + +clean() { + rm -rf $CLI $CLI-$1 $CLI-$1.tar.gz $CLI-$1.tar.gz.asc $LIBRARY changes.txt +} + +main() { + local cmd=$1 + usage + + echo "What is your Github username? (location of your atomicapp fork)" + read ORIGIN_REPO + echo "You entered: $ORIGIN_REPO" + echo "" + + echo "" + echo "First, please enter the version of the NEW release: " + read VERSION + echo "You entered: $VERSION" + echo "" + + echo "" + echo "Second, please enter the version of the LAST release: " + read PREV_VERSION + echo "You entered: $PREV_VERSION" + echo "" + + clear + + echo "Now! 
It's time to go through each step of releasing Atomic App!" + echo "If one of these steps fails / does not work, simply re-run ./release.sh" + echo "Re-enter the information at the beginning and continue on the failed step" + echo "" + + PS3='Please enter your choice: ' + options=( + "Git clone master" + "Replace version number" + "Generate changelog" + "Generate changelog for release" + "Create PR against atomicapp" + "!!! Before continuing, make sure the Atomic App release PR has been merged !!!" + "Update and create PR against nulecule-library" + "Tarball and sign atomicapp - requires gpg key" + "Upload the tarball and push to Github release page" + "!!! Build the new atomicapp docker image on hub.docker.com with the tagged release and then merge the nulecule-library PR !!!" + "Clean" + "Quit") + select opt in "${options[@]}" + do + echo "" + case $opt in + "Git clone master") + clone $VERSION + ;; + "Replace version number") + replaceversion $VERSION + ;; + "Generate changelog") + changelog $PREV_VERSION + ;; + "Generate changelog for release") + changelog_md $VERSION + ;; + "Create PR against atomicapp") + git_commit $VERSION + ;; + "Update and create PR against nulecule-library") + update_library $VERSION + ;; + "Tarball and sign atomicapp - requires gpg key") + sign $VERSION + ;; + "Upload the tarball and push to Github release page") + push $VERSION + ;; + "Clean") + clean $VERSION + ;; + "Quit") + clear + break + ;; + *) echo invalid option;; + esac + echo "" + done +} + +main "$@" +echo "If you're done, make sure you have done the following:" +echo " Triggered hub.docker.com build for the new atomicapp version" +echo " Merge the nulecule-library PR so the new containers have been created" +echo " Upload the new release to download.projectatomic.io and edit index.html" +echo "" From 0e3c709cdf0baf2392e092e808e61851bc989df3 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 11 Aug 2016 13:57:15 -0400 Subject: [PATCH 182/193] Fix typo in openshift.py 
provider Should say "Deploying to OpenShift" rather than Kubernetes. Same goes with "Undeploying from OpenShift" --- atomicapp/providers/openshift.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/atomicapp/providers/openshift.py b/atomicapp/providers/openshift.py index 21e944e3..f1fe8415 100644 --- a/atomicapp/providers/openshift.py +++ b/atomicapp/providers/openshift.py @@ -238,7 +238,7 @@ def run(self): """ Deploys the app by given resource artifacts. """ - logger.info("Deploying to Kubernetes") + logger.info("Deploying to OpenShift") for kind, objects in self.oc_artifacts.iteritems(): for artifact in objects: @@ -253,7 +253,7 @@ def stop(self): Undeploy operation first scale down the replicas to 0 and then deletes the resource from cluster. """ - logger.info("Undeploying from Kubernetes") + logger.info("Undeploying from OpenShift") for kind, objects in self.oc_artifacts.iteritems(): for artifact in objects: From af728b3dd8c6145c9a85b67f0599050657338a79 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 30 Aug 2016 15:01:45 -0400 Subject: [PATCH 183/193] Fixes mode argument with --mode=genanswers When `sudo atomic run projectatomic/helloapache --mode=genanswers` is supplied, the resulting error shows that `--mode=genanswers` is an unrecognized argument. This was introduced after refactoring the cli/main.py code to NOT include uneeded parameters such as --provider-auth to CLI options such as init and genanswers. By moving the --mode parser to the globals_parser, each command will have the --mode option available. Fixes https://github.com/projectatomic/atomicapp/issues/792 --- atomicapp/cli/main.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 1bb3b706..9d1a9840 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -232,12 +232,7 @@ def create_parser(self): none: atomicapp will disable any logging. 
If nothing is set and logging to file then 'nocolor' by default. If nothing is set and logging to tty then 'color' by default.""") - - # === DEPLOY PARSER === - # Create a 'deploy parser' that will include flags related to deploying - # and answers files - deploy_parser = argparse.ArgumentParser(add_help=False) - deploy_parser.add_argument( + globals_parser.add_argument( "--mode", dest="mode", default=None, @@ -249,6 +244,11 @@ def create_parser(self): in cases where a user is not using the Atomic App cli directly, but through another interface such as the Atomic CLI. EX: `atomic run --mode=genanswers`''')) + + # === DEPLOY PARSER === + # Create a 'deploy parser' that will include flags related to deploying + # and answers files + deploy_parser = argparse.ArgumentParser(add_help=False) deploy_parser.add_argument( "--dry-run", dest="dryrun", From af27db6cc0813a0dd574fca65f2dca516f85b4bf Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 31 Aug 2016 19:43:24 +0530 Subject: [PATCH 184/193] Use travis CI to run tests from tests/units dir. 
--- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3c206fac..bd4f223f 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,7 @@ install: test: pip install -qr requirements.txt pip install -qr test-requirements.txt - $(PYTHON) -m pytest tests/ -vv --cov atomicapp + $(PYTHON) -m pytest tests/units/ -vv --cov atomicapp .PHONY: image image: From daaa07a610c4ce641b3c7016c4fac63f80af57c4 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Wed, 31 Aug 2016 17:03:05 -0400 Subject: [PATCH 185/193] 0.6.3 Release --- CHANGELOG.md | 23 +++++++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 10 files changed, 32 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4379f883..866a7b43 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,26 @@ +## Atomic App 0.6.3 (08-31-2016) + +This release focuses on bug fixes, scripts as well as the Nulecule specification being merged into Atomic App. + +Features: + - The Nulecule spec is now located within the Atomic App repo / project + - Bug fixes with the `atomicapp genanswers` command + - Release script added + +``` +Charlie Drage (7): + Remove lifecycle for updated CLI doc + Merge Nulecule specification into Atomic App + Formatting error with anymarkup and genanswers + Openshift to KubeShift conversion and improvements + Add release script for Atomic App + Fix typo in openshift.py provider + Fixes mode argument with --mode=genanswers + +Ratnadeep Debnath (1): + Use travis CI to run tests from tests/units dir. +``` + ## Atomic App 0.6.2 (07-27-2016) This release of Atomic App introduces the new `atomicapp index` command. 
diff --git a/Dockerfile b/Dockerfile index d9be5b8e..dadeab78 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.2" +ENV ATOMICAPPVERSION="0.6.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index d9be5b8e..dadeab78 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.2" +ENV ATOMICAPPVERSION="0.6.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index ba8c1ccb..ff83388b 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.2" +ENV ATOMICAPPVERSION="0.6.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index b8282668..0f7d065a 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.2" +ENV ATOMICAPPVERSION="0.6.3" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index cf3d3144..b52f76a2 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.6.2" +ENV ATOMICAPPVERSION="0.6.3" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index 9658e7c1..ac784613 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.6.2" +ENV ATOMICAPPVERSION="0.6.3" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/README.md b/README.md index f5b7cf7b..092a1753 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.6.2 +export RELEASE=0.6.3 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE sudo make install diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 60f4e94c..9fdb1226 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.6.2' +__ATOMICAPPVERSION__ = '0.6.3' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index dadb1c05..b492d79c 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.6.2', + version='0.6.3', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', 
author_email='container-tools@redhat.com', From 33e1dc95e93c784b42aea06a0d8a2e0b20b97fbb Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Sat, 30 Apr 2016 14:59:12 +0530 Subject: [PATCH 186/193] Initial work on refactoring Nulecule config. #524 - Create a class representation for config data - Implement load() to load config data for a component, taking into account answers and user supplied data - Implement context() method to render flattened data to be consumed in a component artifact. - Improve API for config handling and wire up with Nulecule. - Allow comparing config instances for equality. - Added a placeholder to resolve provider config in Config. - Use ':' as namespace separator in answers and reference namespace separator from constants. - Use self.config if no config supplied in Nulecule load_config. - Store CLI data in Nulecule config. - During run/stop, reference config for provider. We are overriding provider data in config with cli provider, but when referencing provider info, we always do it from a single source of truth, config, in this case. - Update docs for Nulecule config. - Added helper in Config to retrieve resolved global params. - Centralize handling CLI answers in NuleculeManager. - process answers and cli_answers to create a nulecule.config.Config object - Include default provider, namespace in runtime answers, if missing. - Config takes default provider in spec into account. - Fixed unittests for refactored config. 
--- atomicapp/constants.py | 1 + atomicapp/nulecule/base.py | 51 ++-- atomicapp/nulecule/config.py | 192 ++++++++++++++ atomicapp/nulecule/lib.py | 37 +-- atomicapp/nulecule/main.py | 56 ++--- tests/units/nulecule/test_lib.py | 5 +- tests/units/nulecule/test_nulecule.py | 238 +++++++++++++----- .../units/nulecule/test_nulecule_component.py | 41 +-- 8 files changed, 461 insertions(+), 160 deletions(-) create mode 100644 atomicapp/nulecule/config.py diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 60f4e94c..7887d505 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -40,6 +40,7 @@ DEFAULTNAME_KEY = "default" PROVIDER_KEY = "provider" NAMESPACE_KEY = "namespace" +NAMESPACE_SEPARATOR = ":" REQUIREMENTS_KEY = "requirements" # Nulecule spec terminology vs the function within /providers diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 8283f141..3332ef5b 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -18,7 +18,6 @@ along with Atomic App. If not, see . 
""" import anymarkup -import copy import logging import os import yaml @@ -38,7 +37,7 @@ NAME_KEY, INHERIT_KEY, ARTIFACTS_KEY, - DEFAULT_PROVIDER) + NAMESPACE_SEPARATOR) from atomicapp.utils import Utils from atomicapp.requirements import Requirements from atomicapp.nulecule.lib import NuleculeBase @@ -76,7 +75,7 @@ def __init__(self, id, specversion, graph, basepath, metadata=None, metadata (dict): Nulecule metadata requirements (dict): Requirements for the Nulecule application params (list): List of params for the Nulecule application - config (dict): Config data for the Nulecule application + config (atomicapp.nulecule.config.Config): Config data namespace (str): Namespace of the current Nulecule application Returns: @@ -88,7 +87,7 @@ def __init__(self, id, specversion, graph, basepath, metadata=None, self.metadata = metadata or {} self.graph = graph self.requirements = requirements - self.config = config or {} + self.config = config @classmethod def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, @@ -101,7 +100,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, image (str): A Docker image name. dest (str): Destination path where Nulecule data from Docker image should be extracted. - config (dict): Dictionary, config data for Nulecule application. + config: An instance of atomicapp.nulecule.config.Config namespace (str): Namespace for Nulecule application. nodeps (bool): Don't pull external Nulecule dependencies when True. 
@@ -115,7 +114,7 @@ def unpack(cls, image, dest, config=None, namespace=GLOBAL_CONF, if Utils.running_on_openshift(): # pass general config data containing provider specific data # to Openshift provider - op = OpenshiftProvider(config.get('general', {}), './', False) + op = OpenshiftProvider(config.globals, './', False) op.artifacts = [] op.init() op.extract(image, APP_ENT_PATH, dest, update) @@ -138,7 +137,8 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, Args: src (str): Path to load Nulecule application from. - config (dict): Config data for Nulecule application. + config (atomicapp.nulecule.config.Config): Config data for + Nulecule application. namespace (str): Namespace for Nulecule application. nodeps (bool): Do not pull external applications if True. dryrun (bool): Do not make any change to underlying host. @@ -176,8 +176,9 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, raise NuleculeException("Failure parsing %s file. Validation error on line %s, column %s:\n%s" % (nulecule_path, line, column, output)) - nulecule = Nulecule(config=config, basepath=src, - namespace=namespace, **nulecule_data) + nulecule = Nulecule(config=config, + basepath=src, namespace=namespace, + **nulecule_data) nulecule.load_components(nodeps, dryrun) return nulecule @@ -231,25 +232,23 @@ def load_config(self, config=None, ask=False, skip_asking=False): It updates self.config. Args: - config (dict): Existing config data, may be from ANSWERS - file or any other source. + config (atomicapp.nulecule.config.Config): Existing config data, + may be from ANSWERS file or any other source. Returns: None """ + if config is None: + config = self.config super(Nulecule, self).load_config( config=config, ask=ask, skip_asking=skip_asking) - if self.namespace == GLOBAL_CONF and self.config[GLOBAL_CONF].get('provider') is None: - self.config[GLOBAL_CONF]['provider'] = DEFAULT_PROVIDER - logger.info("Provider not specified, using default provider - {}". 
- format(DEFAULT_PROVIDER)) + for component in self.components: # FIXME: Find a better way to expose config data to components. # A component should not get access to all the variables, # but only to variables it needs. - component.load_config(config=copy.deepcopy(self.config), + component.load_config(config=config.clone(component.namespace), ask=ask, skip_asking=skip_asking) - self.merge_config(self.config, component.config) def load_components(self, nodeps=False, dryrun=False): """ @@ -270,8 +269,8 @@ def load_components(self, nodeps=False, dryrun=False): node_name = node[NAME_KEY] source = Utils.getSourceImage(node) component = NuleculeComponent( - node_name, self.basepath, source, - node.get(PARAMS_KEY), node.get(ARTIFACTS_KEY), + self._get_component_namespace(node_name), self.basepath, + source, node.get(PARAMS_KEY), node.get(ARTIFACTS_KEY), self.config) component.load(nodeps, dryrun) components.append(component) @@ -294,6 +293,12 @@ def render(self, provider_key=None, dryrun=False): for component in self.components: component.render(provider_key=provider_key, dryrun=dryrun) + def _get_component_namespace(self, component_name): + current_namespace = '' if self.namespace == GLOBAL_CONF else self.namespace + return ( + '%s%s%s' % (current_namespace, NAMESPACE_SEPARATOR, component_name) + if current_namespace else component_name) + class NuleculeComponent(NuleculeBase): @@ -356,12 +361,13 @@ def load_config(self, config=None, ask=False, skip_asking=False): """ Load config for the Nulecule component. 
""" + if config is None: + config = self.config super(NuleculeComponent, self).load_config( config, ask=ask, skip_asking=skip_asking) if isinstance(self._app, Nulecule): - self._app.load_config(config=copy.deepcopy(self.config), + self._app.load_config(config=self.config.clone(self.namespace), ask=ask, skip_asking=skip_asking) - self.merge_config(self.config, self._app.config) def load_external_application(self, dryrun=False, update=False): """ @@ -384,7 +390,8 @@ def load_external_application(self, dryrun=False, update=False): 'Found existing external application: %s ' 'Loading: ' % self.name) nulecule = Nulecule.load_from_path( - external_app_path, dryrun=dryrun, update=update) + external_app_path, dryrun=dryrun, update=update, + namespace=self.namespace) elif not dryrun: logger.info('Pulling external application: %s' % self.name) nulecule = Nulecule.unpack( diff --git a/atomicapp/nulecule/config.py b/atomicapp/nulecule/config.py new file mode 100644 index 00000000..f496ce1d --- /dev/null +++ b/atomicapp/nulecule/config.py @@ -0,0 +1,192 @@ +import copy +import logging + +from atomicapp.constants import (GLOBAL_CONF, + LOGGER_COCKPIT, + DEFAULT_PROVIDER, + DEFAULT_ANSWERS, + NAMESPACE_SEPARATOR) +from collections import defaultdict + +cockpit_logger = logging.getLogger(LOGGER_COCKPIT) + + +class Config(object): + """ + Store config data for a Nulecule or Nulecule component. + + It stores config data from different sources (answers, cli, user data) + separately, and exposes high level interfaces to read, write data + from it, with enforced read/write policies. 
+ """ + + def __init__(self, namespace='', answers=None, cli=None, + data=None, is_nulecule=False): + self._namespace = namespace + self._is_nulecule = is_nulecule or False + self._parent_ns, self._current_ns = self._split_namespace(self._namespace) + # Store answers data + self._answers = defaultdict(dict) + self._answers.update(answers or {}) + # Store CLI data + self._cli = cli or {} + # Store data collected during runtime + self._data = data or defaultdict(dict) + self._context = None + self._provider = None + + @property + def globals(self): + """ + Get global config params dict for a Nulecule. + """ + d = self._answers.get(GLOBAL_CONF, {}) + d.update(self._data.get(GLOBAL_CONF, {})) + d.update(self._cli.get(GLOBAL_CONF, {})) + return d + + @property + def provider(self): + """ + Get provider name. + + Returns: + Provider name (str) + """ + if self._provider is None: + self._provider = self._data[GLOBAL_CONF].get('provider') or \ + self._answers[GLOBAL_CONF].get('provider') + if self._provider is None: + self._data[GLOBAL_CONF]['provider'] = DEFAULT_PROVIDER + self._provider = DEFAULT_PROVIDER + + return self._provider + + @property + def providerconfig(self): + """ + Get provider config info taking into account answers and cli data. + """ + pass + + @property + def namespace(self): + """ + Get normalized namespace for this instance. + + Returns: + Current namespace (str). + """ + return self._namespace or GLOBAL_CONF + + def set(self, key, value): + """ + Set value for a key in the current namespace. + + Args: + key (str): Key + value (str): Value. + """ + self._data[self.namespace][key] = value + + def get(self, key): + """ + Get value for a key from data accessible from the current namespace. + + TODO: Improved data inheritance model. It makes sense for a component + to be able to access data from it's sibling namespaces and children + namespaces. + + Args: + key (str): Key + + Returns: + Value for the key, else None. 
+ """ + return ( + self._data[self.namespace].get(key) or + (self._data[self._parent_ns].get(key) if self._parent_ns else None) or + self._data[GLOBAL_CONF].get(key) or + self._answers[self.namespace].get(key) or + (self._answers[self._parent_ns].get(key) if self._parent_ns else None) or + self._answers[GLOBAL_CONF].get(key) + ) + + def context(self): + """ + Get context to render artifact files in a Nulecule component. + + TODO: Improved data inheritance model. Data from siblings and children + namespaces should be available in the context to render an artifact + file in the current namespace. + """ + if self._context is None: + self._context = {} + self._context.update(copy.copy(self._data[GLOBAL_CONF])) + self._context.update(copy.copy(self._data[self.namespace])) + + self._context.update(copy.copy(self._answers[GLOBAL_CONF])) + self._context.update(copy.copy(self._answers[self.namespace])) + return self._context + + def runtime_answers(self): + """ + Get runtime answers. + + Returns: + A defaultdict containing runtime answers data. + """ + answers = defaultdict(dict) + answers.update(copy.deepcopy(DEFAULT_ANSWERS)) + answers['general']['provider'] = self.provider + + for key, value in self._answers.items(): + answers[key].update(value) + + for key, value in self._data.items(): + answers[key].update(value) + + # Remove empty sections for answers + for key, value in answers.items(): + if value is None: + answers.pop(key, None) + + return answers + + def clone(self, namespace): + """ + Create a new config instance in the specified namespace. + + Args: + name (str): Name of the child component + + Returns: + A Config instance. + """ + config = Config(namespace=namespace, + answers=self._answers, + cli=self._cli, + data=self._data) + return config + + def _split_namespace(self, namespace): + """ + Split namespace to get parent and current namespace in a Nulecule. 
+ """ + if self._is_nulecule: + return '', namespace + words = namespace.rsplit(NAMESPACE_SEPARATOR, 1) + parent, current = '', '' + if len(words) == 2: + parent, current = words[0], words[1] + else: + parent, current = '', words[0] + return parent, current + + def __eq__(self, obj): + """ + Check equality of config instances. + """ + if self._namespace == obj._namespace or self._answers == obj._answers or self._data == obj._data or self._cli == obj._cli: + return True + return False diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index 0f2d1539..0a82aeeb 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -19,11 +19,9 @@ """ import logging -from atomicapp.constants import (GLOBAL_CONF, - LOGGER_COCKPIT, +from atomicapp.constants import (LOGGER_COCKPIT, NAME_KEY, DEFAULTNAME_KEY, - PROVIDER_KEY, PROVIDERS) from atomicapp.utils import Utils from atomicapp.plugin import Plugin @@ -64,46 +62,21 @@ def load_config(self, config, ask=False, skip_asking=False): None """ for param in self.params: - value = config.get(self.namespace, {}).get(param[NAME_KEY]) or \ - config.get(GLOBAL_CONF, {}).get(param[NAME_KEY]) + value = config.get(param[NAME_KEY]) if value is None and (ask or ( not skip_asking and param.get(DEFAULTNAME_KEY) is None)): cockpit_logger.info("%s is missing in answers.conf." % param[NAME_KEY]) value = Utils.askFor(param[NAME_KEY], param, self.namespace) elif value is None: value = param.get(DEFAULTNAME_KEY) - if config.get(self.namespace) is None: - config[self.namespace] = {} - config[self.namespace][param[NAME_KEY]] = value + config.set(param[NAME_KEY], value) self.config = config - def merge_config(self, to_config, from_config): - """ - Merge values from from_config to to_config. If value for a key - in a group in to_config is missing, then only set it's value from - corresponding key in the same group in from_config. 
- - Args: - to_config (dict): Dictionary to merge config into - from_config (dict): Dictionary to merge config from - - Returns: - None - """ - for group, group_vars in from_config.items(): - to_config[group] = to_config.get(group) or {} - for key, value in (group_vars or {}).items(): - if to_config[group].get(key) is None: - to_config[group][key] = value - def get_context(self): """ Get context data from config data for rendering an artifact. """ - context = {} - context.update(self.config.get(GLOBAL_CONF) or {}) - context.update(self.config.get(self.namespace) or {}) - return context + return self.config.context() def get_provider(self, provider_key=None, dry=False): """ @@ -118,7 +91,7 @@ def get_provider(self, provider_key=None, dry=False): """ # If provider_key isn't provided via CLI, let's grab it the configuration if provider_key is None: - provider_key = self.config.get(GLOBAL_CONF)[PROVIDER_KEY] + provider_key = self.config.provider provider_class = self.plugin.getProvider(provider_key) if provider_class is None: raise NuleculeException("Invalid Provider - '{}', provided in " diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index a27577bb..a63ff94e 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -36,11 +36,11 @@ LOGGER_COCKPIT, LOGGER_DEFAULT, MAIN_FILE, - PROVIDER_KEY, __ATOMICAPPVERSION__, __NULECULESPECVERSION__) from atomicapp.nulecule.base import Nulecule from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.config import Config from atomicapp.utils import Utils cockpit_logger = logging.getLogger(LOGGER_COCKPIT) @@ -64,6 +64,7 @@ def __init__(self, app_spec, destination=None, destination: where to unpack a nulecule to if it isn't local cli_answers: some answer file values provided from cli args answers_file: the location of the answers file + cli (dict): CLI data """ self.answers = copy.deepcopy(DEFAULT_ANSWERS) self.cli_answers = cli_answers @@ -240,12 +241,12 @@ def 
genanswers(self, dryrun=False, answers_format=None, **kwargs): if os.path.exists(answers_file): raise NuleculeException( "Can't generate answers.conf over existing file") + self.config = Config(namespace=GLOBAL_CONF) # Call unpack to get the app code - self.nulecule = self.unpack(update=False, dryrun=dryrun, config=self.answers) + self.nulecule = self.unpack(update=False, dryrun=dryrun, config=self.config) - self.nulecule.load_config(config=self.nulecule.config, - skip_asking=True) + self.nulecule.load_config(skip_asking=True) # Get answers and write them out to answers.conf in cwd answers = self._get_runtime_answers( self.nulecule.config, None) @@ -272,10 +273,9 @@ def fetch(self, nodeps=False, update=False, dryrun=False, # Call unpack. If the app doesn't exist it will be pulled. If # it does exist it will be just be loaded and returned - self.nulecule = self.unpack(update, dryrun, config=self.answers) + self.nulecule = self.unpack(update, dryrun, config=self.config) - self.nulecule.load_config(config=self.nulecule.config, - skip_asking=True) + self.nulecule.load_config(skip_asking=True) runtime_answers = self._get_runtime_answers( self.nulecule.config, None) # write sample answers file @@ -283,7 +283,7 @@ def fetch(self, nodeps=False, update=False, dryrun=False, os.path.join(self.app_path, ANSWERS_FILE_SAMPLE), runtime_answers, answers_format) - cockpit_logger.info("Install Successful.") + cockpit_logger.info("Install Successful.") def run(self, cli_provider, answers_output, ask, answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): @@ -308,16 +308,18 @@ def run(self, cli_provider, answers_output, ask, self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT dryrun = kwargs.get('dryrun') or False - # Call unpack. If the app doesn't exist it will be pulled. 
If - # it does exist it will be just be loaded and returned - self.nulecule = self.unpack(dryrun=dryrun, config=self.answers) - # If we didn't find an answers file before then call _process_answers # again just in case the app developer embedded an answers file if not self.answers_file: self._process_answers() - self.nulecule.load_config(config=self.nulecule.config, ask=ask) + # Call unpack. If the app doesn't exist it will be pulled. If + # it does exist it will be just be loaded and returned + self.nulecule = self.unpack(dryrun=dryrun, config=self.config) + + self.nulecule.load_config(ask=ask) + if cli_provider: + self.nulecule.config.set('provider', cli_provider) self.nulecule.render(cli_provider, dryrun) self.nulecule.run(cli_provider, dryrun) runtime_answers = self._get_runtime_answers( @@ -343,10 +345,12 @@ def stop(self, cli_provider, **kwargs): dryrun = kwargs.get('dryrun') or False self.nulecule = Nulecule.load_from_path( - self.app_path, config=self.answers, dryrun=dryrun) - self.nulecule.load_config(config=self.answers) - self.nulecule.render(cli_provider, dryrun=dryrun) - self.nulecule.stop(cli_provider, dryrun) + self.app_path, config=self.config, dryrun=dryrun) + self.nulecule.load_config() + if cli_provider: + self.nulecule.config.set('provider', cli_provider) + self.nulecule.render(self.nulecule.config.provider, dryrun=dryrun) + self.nulecule.stop(self.nulecule.config.provider, dryrun) def clean(self, force=False): # For future use @@ -396,10 +400,13 @@ def _process_answers(self): # Load answers self.answers = Utils.loadAnswers(self.answers_file) + self.config = Config(namespace=GLOBAL_CONF, answers=self.answers, + cli={GLOBAL_CONF: self.cli_answers}) + # If there is answers data from the cli then merge it in now - if self.cli_answers: - for k, v in self.cli_answers.iteritems(): - self.answers[GLOBAL_CONF][k] = v + # if self.cli_answers: + # for k, v in self.cli_answers.iteritems(): + # self.answers[GLOBAL_CONF][k] = v def _write_answers(self, path, 
answers, answers_format): """ @@ -438,11 +445,4 @@ def _get_runtime_answers(self, config, cli_provider): Returns: dict """ - _config = copy.deepcopy(config) - _config[GLOBAL_CONF] = config.get(GLOBAL_CONF) or {} - - # If a provider is provided via CLI, override the config parameter - if cli_provider: - _config[GLOBAL_CONF][PROVIDER_KEY] = cli_provider - - return _config + return self.nulecule.config.runtime_answers() diff --git a/tests/units/nulecule/test_lib.py b/tests/units/nulecule/test_lib.py index 742738d8..d9e56dac 100644 --- a/tests/units/nulecule/test_lib.py +++ b/tests/units/nulecule/test_lib.py @@ -3,6 +3,7 @@ from atomicapp.nulecule.lib import NuleculeBase from atomicapp.nulecule.exceptions import NuleculeException +from atomicapp.nulecule.config import Config class TestNuleculeBaseGetProvider(unittest.TestCase): @@ -16,7 +17,7 @@ def test_get_provider_success(self): provider_key = u'openshift' # method `get_provider` will read from this config, we give it here # since we have neither provided it before nor it is auto-generated - nb.config = {u'general': {u'provider': provider_key}} + nb.config = Config(answers={u'general': {u'provider': provider_key}}) return_provider = mock.Mock() # mocking return value of method plugin.getProvider,because it returns @@ -35,6 +36,6 @@ def test_get_provider_failure(self): nb = NuleculeBase(params = [], basepath = '', namespace = '') # purposefully give the wrong provider key provider_key = u'mesos' - nb.config = {u'general': {u'provider': provider_key}} + nb.config = Config(answers={u'general': {u'provider': provider_key}}) with self.assertRaises(NuleculeException): nb.get_provider() diff --git a/tests/units/nulecule/test_nulecule.py b/tests/units/nulecule/test_nulecule.py index 1bb60179..0f3a3372 100644 --- a/tests/units/nulecule/test_nulecule.py +++ b/tests/units/nulecule/test_nulecule.py @@ -4,6 +4,7 @@ import os from atomicapp.nulecule.base import Nulecule from atomicapp.nulecule.exceptions import NuleculeException 
+from atomicapp.nulecule.config import Config class TestNuleculeRun(unittest.TestCase): @@ -15,8 +16,9 @@ def test_run(self): dryrun = False mock_component_1 = mock.Mock() mock_component_2 = mock.Mock() + config = Config(answers={}) - n = Nulecule('some-id', '0.0.2', [{}], 'some/path', {}) + n = Nulecule('some-id', '0.0.2', [{}], 'some/path', {}, config=config) n.components = [mock_component_1, mock_component_2] n.run(provider) @@ -34,7 +36,9 @@ def test_stop(self): mock_component_1 = mock.Mock() mock_component_2 = mock.Mock() - n = Nulecule('some-id', '0.0.2', {}, [], 'some/path') + config = Config(answers={}) + + n = Nulecule('some-id', '0.0.2', {}, [], 'some/path', config=config) n.components = [mock_component_1, mock_component_2] n.stop(provider) @@ -46,74 +50,194 @@ class TestNuleculeLoadConfig(unittest.TestCase): """Test Nulecule load_config""" - def test_load_config_without_specified_provider(self): + def test_load_config_with_default_provider(self): """ - Test Nulecule load_config without specifying a provider. + Test Nulecule load_config with a default provider. 
""" - config = {'general': {}, 'group1': {'a': 'b'}} - mock_component_1 = mock.Mock() - mock_component_1.config = { - 'group1': {'a': 'c', 'k': 'v'}, - 'group2': {'1': '2'} - } + config = Config(answers={}) + + params = [ + { + "name": "key1", + "default": "val1", + }, + { + "name": "key3", + "default": "val3" + }, + { + "name": "provider", + "default": "docker" + } + ] + + graph = [ + { + "name": "component1", + "params": [ + { + "name": "key1", + }, + { + "name": "key2", + "default": "val2" + } + ], + "artifacts": [] + } + ] - n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, graph=[], basepath='some/path') - n.components = [mock_component_1] + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, + graph=graph, params=params, basepath='some/path', + config=config) + n.load_components() n.load_config(config) - self.assertEqual(n.config, { - 'general': {'provider': 'kubernetes'}, - 'group1': {'a': 'b', 'k': 'v'}, - 'group2': {'1': '2'} + self.assertEqual(n.config.runtime_answers(), { + 'general': { + 'namespace': 'default', + 'provider': 'docker', + 'key1': 'val1', + 'key3': 'val3' + }, + 'component1': { + 'key2': 'val2', + 'key1': 'val1' + } + }) + + self.assertEqual(n.components[0].config.context(), { + 'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'provider': 'docker' }) - def test_load_config_with_defaultprovider(self): + def test_load_config_without_default_provider(self): """ - Test Nulecule load_config with default provider specified - in global params in Nulecule spec. + Test Nulecule load_config without specifying a default provider. 
""" - config = {'general': {}, 'group1': {'a': 'b'}} - mock_component_1 = mock.Mock() - mock_component_1.config = { - 'group1': {'a': 'c', 'k': 'v'}, - 'group2': {'1': '2'} - } - - n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, graph=[], - basepath='some/path', - params=[{'name': 'provider', 'default': 'some-provider'}]) - n.components = [mock_component_1] + config = Config(answers={}) + + params = [ + { + "name": "key1", + "default": "val1", + }, + { + "name": "key3", + "default": "val3" + } + ] + + graph = [ + { + "name": "component1", + "params": [ + { + "name": "key1", + }, + { + "name": "key2", + "default": "val2" + } + ], + "artifacts": [] + } + ] + + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, + graph=graph, params=params, basepath='some/path', + config=config) + n.load_components() n.load_config(config) - self.assertEqual(n.config, { - 'general': {'provider': 'some-provider'}, - 'group1': {'a': 'b', 'k': 'v'}, - 'group2': {'1': '2'} + self.assertEqual(n.config.runtime_answers(), { + 'general': { + 'namespace': 'default', + 'provider': 'kubernetes', + 'key1': 'val1', + 'key3': 'val3' + }, + 'component1': { + 'key2': 'val2', + 'key1': 'val1' + } + }) + + self.assertEqual(n.components[0].config.context(), { + 'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'provider': 'kubernetes' }) - def test_load_config_with_defaultprovider_overridden_by_provider_in_answers(self): + def test_load_config_with_default_provider_overridden_by_answers(self): """ - Test Nulecule load_config with default provider specified - in global params in Nulecule spec, but overridden in answers config. + Test Nulecule load_config with default provider overridden by provider + in answers. 
""" - config = {'general': {'provider': 'new-provider'}, - 'group1': {'a': 'b'}} - mock_component_1 = mock.Mock() - mock_component_1.config = { - 'group1': {'a': 'c', 'k': 'v'}, - 'group2': {'1': '2'} - } - - n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, graph=[], - basepath='some/path', - params=[{'name': 'provider', 'default': 'some-provider'}]) - n.components = [mock_component_1] + config = Config(answers={ + 'general': { + 'provider': 'openshift' + } + }) + + params = [ + { + "name": "key1", + "default": "val1", + }, + { + "name": "key3", + "default": "val3" + }, + { + "name": "provider", + "default": "docker" + } + ] + + graph = [ + { + "name": "component1", + "params": [ + { + "name": "key1", + }, + { + "name": "key2", + "default": "val2" + } + ], + "artifacts": [] + } + ] + + n = Nulecule(id='some-id', specversion='0.0.2', metadata={}, + graph=graph, params=params, basepath='some/path', + config=config) + n.load_components() n.load_config(config) - self.assertEqual(n.config, { - 'general': {'provider': 'new-provider'}, - 'group1': {'a': 'b', 'k': 'v'}, - 'group2': {'1': '2'} + self.assertEqual(n.config.runtime_answers(), { + 'general': { + 'namespace': 'default', + 'provider': 'openshift', + 'key1': 'val1', + 'key3': 'val3' + }, + 'component1': { + 'key2': 'val2', + 'key1': 'val1' + } + }) + + self.assertEqual(n.components[0].config.context(), { + 'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'provider': 'openshift' }) @@ -137,15 +261,17 @@ def test_load_components(self, MockNuleculeComponent): } ] - n = Nulecule('some-id', '0.0.2', graph, 'some/path', {}) + config = Config(answers={}) + + n = Nulecule('some-id', '0.0.2', graph, 'some/path', config=config) n.load_components() MockNuleculeComponent.assert_any_call( graph[0]['name'], n.basepath, 'somecontainer', - graph[0]['params'], None, {}) + graph[0]['params'], None, config) MockNuleculeComponent.assert_any_call( graph[1]['name'], n.basepath, None, - graph[1].get('params'), 
graph[1].get('artifacts'), {}) + graph[1].get('params'), graph[1].get('artifacts'), config) class TestNuleculeRender(unittest.TestCase): diff --git a/tests/units/nulecule/test_nulecule_component.py b/tests/units/nulecule/test_nulecule_component.py index be2a8ab7..4bbcb418 100644 --- a/tests/units/nulecule/test_nulecule_component.py +++ b/tests/units/nulecule/test_nulecule_component.py @@ -1,7 +1,7 @@ -import copy import mock import unittest from atomicapp.nulecule.base import NuleculeComponent, Nulecule +from atomicapp.nulecule.config import Config from atomicapp.nulecule.exceptions import NuleculeException @@ -129,49 +129,49 @@ class TestNuleculeComponentLoadConfig(unittest.TestCase): def test_load_config_local_app(self): """Test load config for local app""" params = [ - {'name': 'key1'}, - {'name': 'key2'} + {'name': 'key1', 'description': 'key1'}, + {'name': 'key2', 'description': 'key2'} ] initial_config = { 'general': {'a': 'b', 'key2': 'val2'}, 'some-app': {'key1': 'val1'} } - - nc = NuleculeComponent('some-app', 'some/path', params=params) - nc.load_config(config=copy.deepcopy(initial_config)) - - self.assertEqual(nc.config, { - 'general': {'a': 'b', 'key2': 'val2'}, + conf = Config('some-app', answers=initial_config) + + nc = NuleculeComponent('some-app', 'some/path', + params=params, config=conf) + nc.load_config() + runtime_answers = nc.config.runtime_answers() + self.assertEqual(runtime_answers, { + 'general': {'a': 'b', 'key2': 'val2', 'provider': 'kubernetes', 'namespace': 'default'}, 'some-app': {'key1': 'val1', 'key2': 'val2'} }) - @mock.patch('atomicapp.nulecule.base.NuleculeComponent.merge_config') - def test_load_config_external_app(self, mock_merge_config): + def test_load_config_external_app(self): """Test load config for external app""" mock_nulecule = mock.Mock( name='nulecule', spec=Nulecule('some-id', '0.0.2', {}, [], 'some/path') ) params = [ - {'name': 'key1'}, - {'name': 'key2'} + {'name': 'key1', 'description': 'key1'}, + {'name': 
'key2', 'description': 'key2'} ] initial_config = { 'general': {'a': 'b', 'key2': 'val2'}, 'some-app': {'key1': 'val1'} } + config = Config('some-app', answers=initial_config) nc = NuleculeComponent('some-app', 'some/path', params=params) nc._app = mock_nulecule - nc.load_config(config=copy.deepcopy(initial_config)) + nc.load_config(config=config) mock_nulecule.load_config.assert_called_once_with( - config={ - 'general': {'a': 'b', 'key2': 'val2'}, + config=Config('some-app', answers=initial_config, data={ + 'general': {}, 'some-app': {'key1': 'val1', 'key2': 'val2'} - }, ask=False, skip_asking=False) - mock_merge_config.assert_called_once_with( - nc.config, mock_nulecule.config) + }), ask=False, skip_asking=False) class TestNuleculeComponentLoadExternalApplication(unittest.TestCase): @@ -193,7 +193,8 @@ def test_loading_existing_app(self, mock_os_path_isdir, mock_Nulecule): mock_os_path_isdir.assert_called_once_with( expected_external_app_path) mock_Nulecule.load_from_path.assert_called_once_with( - expected_external_app_path, dryrun=dryrun, update=update) + expected_external_app_path, dryrun=dryrun, namespace='some-app', + update=update) # Use http://engineeringblog.yelp.com/2015/02/assert_called_once-threat-or-menace.html # by calling call_count == 1. In order to avoid the return_value = False of Utils.setFileOnwerGroup From d08844910e44b74dee5ff5d8a68a2a1d781724cb Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Thu, 21 Jul 2016 17:41:45 +0530 Subject: [PATCH 187/193] Re implememt Config class to be more generic. Fixes #524 - Create single source of truth for config data in NuleculeManager. Get rid of answers and cli_answers instance variables, and use only self.config to look up config data. - Allow ignoring sources when getting data from config. - Allow specifying file format when loading answers. 
--- atomicapp/cli/main.py | 11 +- atomicapp/nulecule/base.py | 23 +++- atomicapp/nulecule/config.py | 255 ++++++++++++++++------------------- atomicapp/nulecule/lib.py | 35 +++-- atomicapp/nulecule/main.py | 74 ++++------ atomicapp/utils.py | 13 +- 6 files changed, 194 insertions(+), 217 deletions(-) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 9d1a9840..47bdafa4 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -64,7 +64,8 @@ def cli_fetch(args): nm = NuleculeManager(app_spec=argdict['app_spec'], destination=destination, cli_answers=argdict['cli_answers'], - answers_file=argdict['answers']) + answers_file=argdict['answers'], + answers_format=argdict.get('answers_format')) nm.fetch(**argdict) # Clean up the files if the user asked us to. Otherwise # notify the user where they can manage the application @@ -81,7 +82,8 @@ def cli_run(args): nm = NuleculeManager(app_spec=argdict['app_spec'], destination=destination, cli_answers=argdict['cli_answers'], - answers_file=argdict['answers']) + answers_file=argdict['answers'], + answers_format=argdict.get('answers_format')) nm.run(**argdict) # Clean up the files if the user asked us to. Otherwise # notify the user where they can manage the application @@ -306,7 +308,7 @@ def create_parser(self): help="A file which will contain anwsers provided in interactive mode") run_subparser.add_argument( "--provider", - dest="cli_provider", + dest="provider", choices=PROVIDERS, help="The provider to use. Overrides provider value in answerfile.") run_subparser.add_argument( @@ -511,7 +513,8 @@ def run(self): # and make a dictionary of it to pass along in args. 
setattr(args, 'cli_answers', {}) for item in ['provider-api', 'provider-cafile', 'provider-auth', - 'provider-config', 'provider-tlsverify', 'namespace']: + 'provider-config', 'provider-tlsverify', 'namespace', + 'provider']: if hasattr(args, item) and getattr(args, item) is not None: args.cli_answers[item] = getattr(args, item) diff --git a/atomicapp/nulecule/base.py b/atomicapp/nulecule/base.py index 3332ef5b..d0d76347 100644 --- a/atomicapp/nulecule/base.py +++ b/atomicapp/nulecule/base.py @@ -176,9 +176,8 @@ def load_from_path(cls, src, config=None, namespace=GLOBAL_CONF, raise NuleculeException("Failure parsing %s file. Validation error on line %s, column %s:\n%s" % (nulecule_path, line, column, output)) - nulecule = Nulecule(config=config, - basepath=src, namespace=namespace, - **nulecule_data) + nulecule = Nulecule(config=config, basepath=src, + namespace=namespace, **nulecule_data) nulecule.load_components(nodeps, dryrun) return nulecule @@ -247,7 +246,7 @@ def load_config(self, config=None, ask=False, skip_asking=False): # FIXME: Find a better way to expose config data to components. # A component should not get access to all the variables, # but only to variables it needs. - component.load_config(config=config.clone(component.namespace), + component.load_config(config=config, ask=ask, skip_asking=skip_asking) def load_components(self, nodeps=False, dryrun=False): @@ -294,6 +293,18 @@ def render(self, provider_key=None, dryrun=False): component.render(provider_key=provider_key, dryrun=dryrun) def _get_component_namespace(self, component_name): + """ + Get a unique namespace for a Nulecule graph item, by concatinating + the namespace of the current Nulecule (which could be the root Nulecule + app or a child or external Nulecule app) and name of the Nulecule + graph item. 
+ + Args: + component_name (str): Name of the Nulecule graph item + + Returns: + A string + """ current_namespace = '' if self.namespace == GLOBAL_CONF else self.namespace return ( '%s%s%s' % (current_namespace, NAMESPACE_SEPARATOR, component_name) @@ -366,7 +377,7 @@ def load_config(self, config=None, ask=False, skip_asking=False): super(NuleculeComponent, self).load_config( config, ask=ask, skip_asking=skip_asking) if isinstance(self._app, Nulecule): - self._app.load_config(config=self.config.clone(self.namespace), + self._app.load_config(config=self.config, ask=ask, skip_asking=skip_asking) def load_external_application(self, dryrun=False, update=False): @@ -443,7 +454,7 @@ def render(self, provider_key=None, dryrun=False): raise NuleculeException( "Data for provider \"%s\" are not part of this app" % provider_key) - context = self.get_context() + context = self.config.context(self.namespace) for provider in self.artifacts: if provider_key and provider != provider_key: continue diff --git a/atomicapp/nulecule/config.py b/atomicapp/nulecule/config.py index f496ce1d..b1d92b31 100644 --- a/atomicapp/nulecule/config.py +++ b/atomicapp/nulecule/config.py @@ -4,8 +4,7 @@ from atomicapp.constants import (GLOBAL_CONF, LOGGER_COCKPIT, DEFAULT_PROVIDER, - DEFAULT_ANSWERS, - NAMESPACE_SEPARATOR) + DEFAULT_ANSWERS) from collections import defaultdict cockpit_logger = logging.getLogger(LOGGER_COCKPIT) @@ -13,121 +12,125 @@ class Config(object): """ - Store config data for a Nulecule or Nulecule component. - - It stores config data from different sources (answers, cli, user data) - separately, and exposes high level interfaces to read, write data - from it, with enforced read/write policies. + This class allows to store config data in different scopes along with + source info for the data. When fetching the value for a key in a scope, + the source info and the PRIORITY order of sources is taken into account. 
+ + Data sources: + cli: Config data coming from the CLI + runtime: Config data resolved during atomic app runtime. For example, + when the value for a parameter in a Nulecule or Nulecule graph + item is missing in answers data, we first try to load the default + value for the parameter. When there's no default value, or when + the user has specified to forcefully ask the user for values, we + ask the user for data. These data collected/resolved during runtime + form the runtime data. + answers: Config data coming from answers file + defaults: Default config data specified in atomicapp/constants.py + + The priority order of the data sources is: + cli > runtime > answers > defaults """ - def __init__(self, namespace='', answers=None, cli=None, - data=None, is_nulecule=False): - self._namespace = namespace - self._is_nulecule = is_nulecule or False - self._parent_ns, self._current_ns = self._split_namespace(self._namespace) - # Store answers data - self._answers = defaultdict(dict) - self._answers.update(answers or {}) - # Store CLI data - self._cli = cli or {} - # Store data collected during runtime - self._data = data or defaultdict(dict) - self._context = None - self._provider = None - - @property - def globals(self): - """ - Get global config params dict for a Nulecule. - """ - d = self._answers.get(GLOBAL_CONF, {}) - d.update(self._data.get(GLOBAL_CONF, {})) - d.update(self._cli.get(GLOBAL_CONF, {})) - return d + PRIORITY = ( + 'cli', + 'runtime', + 'answers', + 'defaults' + ) - @property - def provider(self): + def __init__(self, answers=None, cli=None): """ - Get provider name. - - Returns: - Provider name (str) - """ - if self._provider is None: - self._provider = self._data[GLOBAL_CONF].get('provider') or \ - self._answers[GLOBAL_CONF].get('provider') - if self._provider is None: - self._data[GLOBAL_CONF]['provider'] = DEFAULT_PROVIDER - self._provider = DEFAULT_PROVIDER - - return self._provider + Initialize a Config instance. 
- @property - def providerconfig(self): - """ - Get provider config info taking into account answers and cli data. - """ - pass + Args: + answers (dict): Answers data + cli (dict): CLI data + """ + answers = answers or {} + cli = cli or {} + # We use a defaultdict of defaultdicts so that we can avoid doing + # redundant checks in a nested dictionary if the value of the keys + # are dictionaries or None. + self._data = defaultdict(defaultdict) + # Initialize default data dict + self._data['defaults'] = defaultdict(defaultdict) + # Initialize answers data dict + self._data['answers'] = defaultdict(defaultdict) + # Initialize cli data dict + self._data['cli'] = defaultdict(defaultdict) + # Initialize runtime data dict + self._data['runtime'] = defaultdict(defaultdict) + + # Load default answers + for scope, data in DEFAULT_ANSWERS.items(): + for key, value in data.items(): + self.set(key, value, scope=scope, source='defaults') + self.set('provider', DEFAULT_PROVIDER, scope=GLOBAL_CONF, source='defaults') + + # Load answers data + for scope, data in answers.items(): + for key, value in data.items(): + self.set(key, value, scope=scope, source='answers') + + # Load cli data + for key, value in cli.items(): + self.set(key, value, scope=GLOBAL_CONF, source='cli') + + def get(self, key, scope=GLOBAL_CONF, ignore_sources=[]): + """ + Get the value of a key in a scope. This takes care of resolving + the value by going through the PRIORITY order of the various + sources of data. - @property - def namespace(self): - """ - Get normalized namespace for this instance. + Args: + key (str): Key + scope (str): Scope from which to fetch the value for the key Returns: - Current namespace (str). + Value for the key. 
""" - return self._namespace or GLOBAL_CONF + for source in self.PRIORITY: + if source in ignore_sources: + continue + value = self._data[source][scope].get(key) or self._data[source][ + GLOBAL_CONF].get(key) + if value: + return value + return None - def set(self, key, value): + def set(self, key, value, source, scope=GLOBAL_CONF): """ - Set value for a key in the current namespace. + Set the value for a key within a scope along with specifying the + source of the value. Args: key (str): Key - value (str): Value. + value: Value + scope (str): Scope in which to store the value + source (str): Source of the value """ - self._data[self.namespace][key] = value + self._data[source][scope][key] = value - def get(self, key): + def context(self, scope=GLOBAL_CONF): """ - Get value for a key from data accessible from the current namespace. - - TODO: Improved data inheritance model. It makes sense for a component - to be able to access data from it's sibling namespaces and children - namespaces. + Get context data for the scope of Nulecule graph item by aggregating + the data from various sources taking their priority order into + account. This context data, which is a flat dictionary, is used to + render the variables in the artifacts of Nulecule graph item. Args: - key (str): Key - + scope (str): Scope (or namespace) for the Nulecule graph item. Returns: - Value for the key, else None. + A dictionary """ - return ( - self._data[self.namespace].get(key) or - (self._data[self._parent_ns].get(key) if self._parent_ns else None) or - self._data[GLOBAL_CONF].get(key) or - self._answers[self.namespace].get(key) or - (self._answers[self._parent_ns].get(key) if self._parent_ns else None) or - self._answers[GLOBAL_CONF].get(key) - ) - - def context(self): - """ - Get context to render artifact files in a Nulecule component. - - TODO: Improved data inheritance model. 
Data from siblings and children - namespaces should be available in the context to render an artifact - file in the current namespace. - """ - if self._context is None: - self._context = {} - self._context.update(copy.copy(self._data[GLOBAL_CONF])) - self._context.update(copy.copy(self._data[self.namespace])) - - self._context.update(copy.copy(self._answers[GLOBAL_CONF])) - self._context.update(copy.copy(self._answers[self.namespace])) - return self._context + result = {} + for source in reversed(self.PRIORITY): + source_data = self._data[source] + result.update(copy.deepcopy(source_data.get(GLOBAL_CONF) or {})) + if scope != GLOBAL_CONF: + result.update(copy.deepcopy(source_data.get(scope) or {})) + return result def runtime_answers(self): """ @@ -137,56 +140,34 @@ def runtime_answers(self): A defaultdict containing runtime answers data. """ answers = defaultdict(dict) - answers.update(copy.deepcopy(DEFAULT_ANSWERS)) - answers['general']['provider'] = self.provider - - for key, value in self._answers.items(): - answers[key].update(value) - for key, value in self._data.items(): - answers[key].update(value) + for source in reversed(self.PRIORITY): + for scope, data in (self._data.get(source) or {}).items(): + answers[scope].update(copy.deepcopy(data)) # Remove empty sections for answers for key, value in answers.items(): - if value is None: - answers.pop(key, None) + if not value: + answers.pop(key) return answers - def clone(self, namespace): + def update_source(self, source, data): """ - Create a new config instance in the specified namespace. + Update answers data for a source. Args: - name (str): Name of the child component - - Returns: - A Config instance. 
+ source (str): Source name + data (dict): Answers data """ - config = Config(namespace=namespace, - answers=self._answers, - cli=self._cli, - data=self._data) - return config + data = data or {} + if source not in self._data: + raise - def _split_namespace(self, namespace): - """ - Split namespace to get parent and current namespace in a Nulecule. - """ - if self._is_nulecule: - return '', namespace - words = namespace.rsplit(NAMESPACE_SEPARATOR, 1) - parent, current = '', '' - if len(words) == 2: - parent, current = words[0], words[1] - else: - parent, current = '', words[0] - return parent, current - - def __eq__(self, obj): - """ - Check equality of config instances. - """ - if self._namespace == obj._namespace or self._answers == obj._answers or self._data == obj._data or self._cli == obj._cli: - return True - return False + # clean up source data + for k in self._data[source]: + self._data[source].pop(k) + + for scope, data in data.items(): + for key, value in data.items(): + self.set(key, value, scope=scope, source=source) diff --git a/atomicapp/nulecule/lib.py b/atomicapp/nulecule/lib.py index 0a82aeeb..6157bda5 100644 --- a/atomicapp/nulecule/lib.py +++ b/atomicapp/nulecule/lib.py @@ -19,7 +19,8 @@ """ import logging -from atomicapp.constants import (LOGGER_COCKPIT, +from atomicapp.constants import (GLOBAL_CONF, + LOGGER_COCKPIT, NAME_KEY, DEFAULTNAME_KEY, PROVIDERS) @@ -61,22 +62,20 @@ def load_config(self, config, ask=False, skip_asking=False): Returns: None """ - for param in self.params: - value = config.get(param[NAME_KEY]) - if value is None and (ask or ( - not skip_asking and param.get(DEFAULTNAME_KEY) is None)): - cockpit_logger.info("%s is missing in answers.conf." 
% param[NAME_KEY]) - value = Utils.askFor(param[NAME_KEY], param, self.namespace) - elif value is None: - value = param.get(DEFAULTNAME_KEY) - config.set(param[NAME_KEY], value) self.config = config - - def get_context(self): - """ - Get context data from config data for rendering an artifact. - """ - return self.config.context() + for param in self.params: + value = config.get(param[NAME_KEY], scope=self.namespace, ignore_sources=['defaults']) + if value is None: + if ask or (not skip_asking and + param.get(DEFAULTNAME_KEY) is None): + cockpit_logger.info( + "%s is missing in answers.conf." % param[NAME_KEY]) + value = config.get(param[NAME_KEY], scope=self.namespace) \ + or Utils.askFor(param[NAME_KEY], param, self.namespace) + else: + value = param.get(DEFAULTNAME_KEY) + config.set(param[NAME_KEY], value, source='runtime', + scope=self.namespace) def get_provider(self, provider_key=None, dry=False): """ @@ -91,7 +90,7 @@ def get_provider(self, provider_key=None, dry=False): """ # If provider_key isn't provided via CLI, let's grab it the configuration if provider_key is None: - provider_key = self.config.provider + provider_key = self.config.get('provider', scope=GLOBAL_CONF) provider_class = self.plugin.getProvider(provider_key) if provider_class is None: raise NuleculeException("Invalid Provider - '{}', provided in " @@ -99,7 +98,7 @@ def get_provider(self, provider_key=None, dry=False): .format(provider_key, ', ' .join(PROVIDERS))) return provider_key, provider_class( - self.get_context(), self.basepath, dry) + self.config.context(), self.basepath, dry) def run(self, provider_key=None, dry=False): raise NotImplementedError diff --git a/atomicapp/nulecule/main.py b/atomicapp/nulecule/main.py index a63ff94e..cf3e4719 100644 --- a/atomicapp/nulecule/main.py +++ b/atomicapp/nulecule/main.py @@ -18,7 +18,6 @@ along with Atomic App. If not, see . 
""" import anymarkup -import copy import distutils.dir_util import logging import os @@ -27,12 +26,10 @@ import urllib from string import Template -from atomicapp.constants import (GLOBAL_CONF, - ANSWERS_FILE_SAMPLE_FORMAT, +from atomicapp.constants import (ANSWERS_FILE_SAMPLE_FORMAT, ANSWERS_FILE, ANSWERS_FILE_SAMPLE, ANSWERS_RUNTIME_FILE, - DEFAULT_ANSWERS, LOGGER_COCKPIT, LOGGER_DEFAULT, MAIN_FILE, @@ -54,7 +51,8 @@ class NuleculeManager(object): """ def __init__(self, app_spec, destination=None, - cli_answers=None, answers_file=None): + cli_answers=None, answers_file=None, + answers_format=None): """ init function for NuleculeManager. Sets a few instance variables. @@ -64,11 +62,9 @@ def __init__(self, app_spec, destination=None, destination: where to unpack a nulecule to if it isn't local cli_answers: some answer file values provided from cli args answers_file: the location of the answers file - cli (dict): CLI data + answers_format (str): File format for writing sample answers file """ - self.answers = copy.deepcopy(DEFAULT_ANSWERS) - self.cli_answers = cli_answers - self.answers_format = None + self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT self.answers_file = None # The path to an answer file self.app_path = None # The path where the app resides or will reside self.image = None # The container image to pull the app from @@ -119,7 +115,7 @@ def __init__(self, app_spec, destination=None, # Process answers. self.answers_file = answers_file - self._process_answers() + self.config = Config(cli=cli_answers) @staticmethod def init(app_name, destination=None, app_version='1.0', @@ -221,27 +217,24 @@ def unpack(self, update=False, return Nulecule.load_from_path( self.app_path, dryrun=dryrun, config=config) - def genanswers(self, dryrun=False, answers_format=None, **kwargs): + def genanswers(self, dryrun=False, **kwargs): """ Renders artifacts and then generates an answer file. Finally copies answer file to the current working directory. 
Args: dryrun (bool): Do not make any change to the host system if True - answers_format (str): File format for writing sample answers file kwargs (dict): Extra keyword arguments Returns: None """ - self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT # Check to make sure an answers.conf file doesn't exist already answers_file = os.path.join(os.getcwd(), ANSWERS_FILE) if os.path.exists(answers_file): raise NuleculeException( "Can't generate answers.conf over existing file") - self.config = Config(namespace=GLOBAL_CONF) # Call unpack to get the app code self.nulecule = self.unpack(update=False, dryrun=dryrun, config=self.config) @@ -252,8 +245,7 @@ def genanswers(self, dryrun=False, answers_format=None, **kwargs): self.nulecule.config, None) self._write_answers(answers_file, answers, self.answers_format) - def fetch(self, nodeps=False, update=False, dryrun=False, - answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): + def fetch(self, nodeps=False, update=False, dryrun=False, **kwargs): """ Installs (unpacks) a Nulecule application from a Nulecule image to a target path. @@ -264,13 +256,10 @@ def fetch(self, nodeps=False, update=False, dryrun=False, update (bool): Pull requisite Nulecule image and install or update already installed Nulecule application dryrun (bool): Do not make any change to the host system if True - answers_format (str): File format for writing sample answers file kwargs (dict): Extra keyword arguments Returns: None """ - self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT - # Call unpack. If the app doesn't exist it will be pulled. 
If # it does exist it will be just be loaded and returned self.nulecule = self.unpack(update, dryrun, config=self.config) @@ -281,49 +270,41 @@ def fetch(self, nodeps=False, update=False, dryrun=False, # write sample answers file self._write_answers( os.path.join(self.app_path, ANSWERS_FILE_SAMPLE), - runtime_answers, answers_format) + runtime_answers, self.answers_format) cockpit_logger.info("Install Successful.") - def run(self, cli_provider, answers_output, ask, - answers_format=ANSWERS_FILE_SAMPLE_FORMAT, **kwargs): + def run(self, answers_output, ask, **kwargs): """ Runs a Nulecule application from a local path or a Nulecule image name. Args: answers (dict or str): Answers data or local path to answers file - cli_provider (str): Provider to use to run the Nulecule - application answers_output (str): Path to file to export runtime answers data to ask (bool): Ask for values for params with default values from user, if True - answers_format (str): File format for writing sample answers file kwargs (dict): Extra keyword arguments Returns: None """ - self.answers_format = answers_format or ANSWERS_FILE_SAMPLE_FORMAT dryrun = kwargs.get('dryrun') or False - # If we didn't find an answers file before then call _process_answers - # again just in case the app developer embedded an answers file - if not self.answers_file: - self._process_answers() - # Call unpack. If the app doesn't exist it will be pulled. 
If # it does exist it will be just be loaded and returned self.nulecule = self.unpack(dryrun=dryrun, config=self.config) + # Process answers file + self._process_answers() + self.nulecule.load_config(ask=ask) - if cli_provider: - self.nulecule.config.set('provider', cli_provider) - self.nulecule.render(cli_provider, dryrun) - self.nulecule.run(cli_provider, dryrun) + provider = self.nulecule.config.get('provider') + self.nulecule.render(provider, dryrun) + self.nulecule.run(provider, dryrun) runtime_answers = self._get_runtime_answers( - self.nulecule.config, cli_provider) + self.nulecule.config, provider) self._write_answers( os.path.join(self.app_path, ANSWERS_RUNTIME_FILE), runtime_answers, self.answers_format) @@ -331,12 +312,11 @@ def run(self, cli_provider, answers_output, ask, self._write_answers(answers_output, runtime_answers, self.answers_format) - def stop(self, cli_provider, **kwargs): + def stop(self, **kwargs): """ Stops a running Nulecule application. Args: - cli_provider (str): Provider running the Nulecule application kwargs (dict): Extra keyword arguments """ # For stop we use the generated answer file from the run @@ -347,10 +327,9 @@ def stop(self, cli_provider, **kwargs): self.nulecule = Nulecule.load_from_path( self.app_path, config=self.config, dryrun=dryrun) self.nulecule.load_config() - if cli_provider: - self.nulecule.config.set('provider', cli_provider) - self.nulecule.render(self.nulecule.config.provider, dryrun=dryrun) - self.nulecule.stop(self.nulecule.config.provider, dryrun) + self.nulecule.render(self.nulecule.config.get('provider'), + dryrun=dryrun) + self.nulecule.stop(self.nulecule.config.get('provider'), dryrun) def clean(self, force=False): # For future use @@ -372,6 +351,7 @@ def _process_answers(self): Returns: None """ + answers = None app_path_answers = os.path.join(self.app_path, ANSWERS_FILE) # If the user didn't provide an answers file then check the app @@ -398,15 +378,9 @@ def _process_answers(self): "Provided answers 
file doesn't exist: {}".format(self.answers_file)) # Load answers - self.answers = Utils.loadAnswers(self.answers_file) - - self.config = Config(namespace=GLOBAL_CONF, answers=self.answers, - cli={GLOBAL_CONF: self.cli_answers}) + answers = Utils.loadAnswers(self.answers_file, self.answers_format) - # If there is answers data from the cli then merge it in now - # if self.cli_answers: - # for k, v in self.cli_answers.iteritems(): - # self.answers[GLOBAL_CONF][k] = v + self.config.update_source(source='answers', data=answers) def _write_answers(self, path, answers, answers_format): """ diff --git a/atomicapp/utils.py b/atomicapp/utils.py index 8aeddb10..022f3759 100644 --- a/atomicapp/utils.py +++ b/atomicapp/utils.py @@ -360,13 +360,22 @@ def getUniqueUUID(): return data @staticmethod - def loadAnswers(answers_file): + def loadAnswers(answers_file, format=None): if not os.path.isfile(answers_file): raise AtomicAppUtilsException( "Provided answers file does not exist: %s" % answers_file) logger.debug("Loading answers from file: %s", answers_file) - return anymarkup.parse_file(answers_file) + try: + # Try to load answers file with a specified answers file format + # or the default format. + result = anymarkup.parse_file(answers_file, format=format) + except anymarkup.AnyMarkupError: + # if no answers file format is provided and the answers file + # is not a JSON file, try to load it using anymarkup in a + # generic way. + result = anymarkup.parse_file(answers_file) + return result @staticmethod def copy_dir(src, dest, update=False, dryrun=False): From 685c56a6eb2357ead12bf577be2f70370ccd60dc Mon Sep 17 00:00:00 2001 From: Ratnadeep Debnath Date: Wed, 10 Aug 2016 10:36:05 +0530 Subject: [PATCH 188/193] Fixed tests for config refactor. 
--- tests/units/cli/test_default_provider.py | 2 +- tests/units/nulecule/test_lib.py | 6 +- tests/units/nulecule/test_nulecule.py | 55 ++++++++++--------- .../units/nulecule/test_nulecule_component.py | 54 ++++++++++-------- 4 files changed, 66 insertions(+), 51 deletions(-) diff --git a/tests/units/cli/test_default_provider.py b/tests/units/cli/test_default_provider.py index fa1e4272..c5e56ec7 100644 --- a/tests/units/cli/test_default_provider.py +++ b/tests/units/cli/test_default_provider.py @@ -70,7 +70,7 @@ def test_run_helloapache_app(self, capsys): print stdout # Since this a Docker-only provider test, docker *should* be in it, NOT Kubernetes - assert "u'provider': u'docker'" in stdout + assert "provider: Docker" in stdout assert "Deploying to Kubernetes" not in stdout assert exec_info.value.code == 0 diff --git a/tests/units/nulecule/test_lib.py b/tests/units/nulecule/test_lib.py index d9e56dac..d313429e 100644 --- a/tests/units/nulecule/test_lib.py +++ b/tests/units/nulecule/test_lib.py @@ -25,8 +25,10 @@ def test_get_provider_success(self): nb.plugin.getProvider = mock.Mock(return_value=return_provider) ret_provider_key, ret_provider = nb.get_provider() self.assertEqual(provider_key, ret_provider_key) - return_provider.assert_called_with({u'provider': provider_key}, - '', False) + return_provider.assert_called_with( + {'provider': provider_key, 'namespace': 'default'}, + '', + False) def test_get_provider_failure(self): """ diff --git a/tests/units/nulecule/test_nulecule.py b/tests/units/nulecule/test_nulecule.py index 0f3a3372..13e3e648 100644 --- a/tests/units/nulecule/test_nulecule.py +++ b/tests/units/nulecule/test_nulecule.py @@ -101,23 +101,24 @@ def test_load_config_with_default_provider(self): 'key3': 'val3' }, 'component1': { - 'key2': 'val2', - 'key1': 'val1' + 'key2': 'val2' } }) - self.assertEqual(n.components[0].config.context(), { - 'key3': 'val3', - 'key2': 'val2', - 'key1': 'val1', - 'provider': 'docker' - }) + self.assertEqual( + 
n.components[0].config.context(scope=n.components[0].namespace), + {'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'provider': 'docker', + 'namespace': 'default'} + ) def test_load_config_without_default_provider(self): """ Test Nulecule load_config without specifying a default provider. """ - config = Config(answers={}) + config = Config() params = [ { @@ -150,7 +151,7 @@ def test_load_config_without_default_provider(self): graph=graph, params=params, basepath='some/path', config=config) n.load_components() - n.load_config(config) + n.load_config() self.assertEqual(n.config.runtime_answers(), { 'general': { @@ -160,17 +161,18 @@ def test_load_config_without_default_provider(self): 'key3': 'val3' }, 'component1': { - 'key2': 'val2', - 'key1': 'val1' + 'key2': 'val2' } }) - self.assertEqual(n.components[0].config.context(), { - 'key3': 'val3', - 'key2': 'val2', - 'key1': 'val1', - 'provider': 'kubernetes' - }) + self.assertEqual( + n.components[0].config.context(n.components[0].namespace), + {'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'namespace': 'default', + 'provider': 'kubernetes'} + ) def test_load_config_with_default_provider_overridden_by_answers(self): """ @@ -228,17 +230,18 @@ def test_load_config_with_default_provider_overridden_by_answers(self): 'key3': 'val3' }, 'component1': { - 'key2': 'val2', - 'key1': 'val1' + 'key2': 'val2' } }) - self.assertEqual(n.components[0].config.context(), { - 'key3': 'val3', - 'key2': 'val2', - 'key1': 'val1', - 'provider': 'openshift' - }) + self.assertEqual( + n.components[0].config.context(n.components[0].namespace), + {'key3': 'val3', + 'key2': 'val2', + 'key1': 'val1', + 'namespace': 'default', + 'provider': 'openshift'} + ) class TestNuleculeLoadComponents(unittest.TestCase): diff --git a/tests/units/nulecule/test_nulecule_component.py b/tests/units/nulecule/test_nulecule_component.py index 4bbcb418..9054a2c4 100644 --- a/tests/units/nulecule/test_nulecule_component.py +++ 
b/tests/units/nulecule/test_nulecule_component.py @@ -136,23 +136,24 @@ def test_load_config_local_app(self): 'general': {'a': 'b', 'key2': 'val2'}, 'some-app': {'key1': 'val1'} } - conf = Config('some-app', answers=initial_config) + conf = Config(answers=initial_config) nc = NuleculeComponent('some-app', 'some/path', params=params, config=conf) nc.load_config() runtime_answers = nc.config.runtime_answers() self.assertEqual(runtime_answers, { - 'general': {'a': 'b', 'key2': 'val2', 'provider': 'kubernetes', 'namespace': 'default'}, - 'some-app': {'key1': 'val1', 'key2': 'val2'} + 'general': { + 'a': 'b', + 'key2': 'val2', + 'provider': 'kubernetes', + 'namespace': 'default' + }, + 'some-app': {'key1': 'val1'} }) def test_load_config_external_app(self): """Test load config for external app""" - mock_nulecule = mock.Mock( - name='nulecule', - spec=Nulecule('some-id', '0.0.2', {}, [], 'some/path') - ) params = [ {'name': 'key1', 'description': 'key1'}, {'name': 'key2', 'description': 'key2'} @@ -161,17 +162,19 @@ def test_load_config_external_app(self): 'general': {'a': 'b', 'key2': 'val2'}, 'some-app': {'key1': 'val1'} } - config = Config('some-app', answers=initial_config) + config = Config(answers=initial_config) + mock_nulecule = mock.Mock( + name='nulecule', + spec=Nulecule('some-id', '0.0.2', config, [], 'some/path') + ) nc = NuleculeComponent('some-app', 'some/path', params=params) nc._app = mock_nulecule - nc.load_config(config=config) + nc.config = config + nc.load_config() mock_nulecule.load_config.assert_called_once_with( - config=Config('some-app', answers=initial_config, data={ - 'general': {}, - 'some-app': {'key1': 'val1', 'key2': 'val2'} - }), ask=False, skip_asking=False) + config=config, ask=False, skip_asking=False) class TestNuleculeComponentLoadExternalApplication(unittest.TestCase): @@ -260,7 +263,7 @@ def test_render_for_local_app_with_missing_artifacts_for_provider(self): dryrun = False nc = NuleculeComponent(name='some-app', 
basepath='some/path') - nc.config = {} + nc.config = Config() nc.artifacts = {'x': ['some-artifact']} self.assertRaises(NuleculeException, nc.render, provider_key, dryrun) @@ -276,37 +279,44 @@ def test_render_for_local_app_with_missing_artifacts_from_nulecule(self): with self.assertRaises(NuleculeException): nc.render() - @mock.patch('atomicapp.nulecule.base.NuleculeComponent.get_context') @mock.patch('atomicapp.nulecule.base.NuleculeComponent.' 'get_artifact_paths_for_provider') @mock.patch('atomicapp.nulecule.base.NuleculeComponent.render_artifact') def test_render_for_local_app_with_artifacts_for_provider( - self, mock_render_artifact, mock_get_artifact_paths_for_provider, - mock_get_context): + self, mock_render_artifact, mock_get_artifact_paths_for_provider): """Test rendering artifacts for a local Nulecule component""" provider_key = 'some-provider' dryrun = False expected_rendered_artifacts = [ 'some/path/.artifact1', 'some/path/.artifact2'] - context = {'a': 'b'} mock_get_artifact_paths_for_provider.return_value = [ 'some/path/artifact1', 'some/path/artifact2'] mock_render_artifact.side_effect = lambda path, context, provider: path.replace('artifact', '.artifact') - mock_get_context.return_value = context + # mock_get_context.return_value = context nc = NuleculeComponent(name='some-app', basepath='some/path') - nc.config = {'general': {'key1': 'val1'}, 'some-provider': {'a': 'b'}} + nc.config = Config(answers={ + 'general': {'key1': 'val1'}, + 'some-provider': {'a': 'b'} + }) nc.artifacts = { 'some-provider': ['artifact1', 'artifact2'], 'x': ['foo'] } nc.render(provider_key, dryrun) + expected_context = { + 'key1': 'val1', + 'namespace': 'default', + 'provider': 'kubernetes' + } mock_get_artifact_paths_for_provider.assert_called_once_with( provider_key) - mock_render_artifact.assert_any_call('some/path/artifact1', context, + mock_render_artifact.assert_any_call('some/path/artifact1', + expected_context, 'some-provider') - 
mock_render_artifact.assert_any_call('some/path/artifact2', context, + mock_render_artifact.assert_any_call('some/path/artifact2', + expected_context, 'some-provider') mock_get_artifact_paths_for_provider.assert_called_once_with( provider_key) From 86580db0999f39892557fc97abaf6d107072bb9e Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 1 Sep 2016 09:28:04 -0400 Subject: [PATCH 189/193] Fix CentOS CI Errors Don't know *exactly* why it's failing, but let's try this. Could be related to "foobar" actually going to an internal server. --- tests/units/kubeshift/test_kubebase.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/units/kubeshift/test_kubebase.py b/tests/units/kubeshift/test_kubebase.py index aeff61a9..1eb0274d 100644 --- a/tests/units/kubeshift/test_kubebase.py +++ b/tests/units/kubeshift/test_kubebase.py @@ -65,15 +65,15 @@ def test_kind_to_resource_name(): def test_request_methods_failures(): with pytest.raises(KubeConnectionError): - kubebase.request("get", "http://foobar") + kubebase.request("get", "http://localhost") with pytest.raises(KubeConnectionError): - kubebase.request("post", "http://foobar") + kubebase.request("post", "http://localhost") with pytest.raises(KubeConnectionError): - kubebase.request("put", "http://foobar") + kubebase.request("put", "http://localhost") with pytest.raises(KubeConnectionError): - kubebase.request("delete", "http://foobar") + kubebase.request("delete", "http://localhost") with pytest.raises(KubeConnectionError): - kubebase.request("patch", "http://foobar") + kubebase.request("patch", "http://localhost") def test_request_timeout(httpserver): From 7517437953f1cd8cf920201438ee67004c6a097b Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Tue, 6 Sep 2016 09:10:49 -0400 Subject: [PATCH 190/193] Update release script regex Issue with replacing version numbers, this fixes that issue with the release script --- script/release.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/script/release.sh b/script/release.sh index 43ac0f84..beee7ffa 100755 --- a/script/release.sh +++ b/script/release.sh @@ -223,7 +223,7 @@ update_library() { git checkout -b $BRANCH # Commit - find . -type f -iname 'Dockerfile' -exec sed -i "s,^FROM.projectatomic*,FROM projectatomic/atomicapp:$1," "{}" +; + find . -type f -iname 'Dockerfile' -exec sed -i "s,FROM projectatomic/atomicapp:[0-9].[0-9].[0-9],FROM projectatomic/atomicapp:$1," "{}" +; git add . git commit -m "Sync with $1 release" git push origin $BRANCH From ed963ce76f442124a097aaf663abf4648b210961 Mon Sep 17 00:00:00 2001 From: Charlie Drage Date: Thu, 6 Oct 2016 16:18:24 -0400 Subject: [PATCH 191/193] 0.6.4 Release --- CHANGELOG.md | 19 +++++++++++++++++++ Dockerfile | 2 +- Dockerfiles.git/Dockerfile.centos | 2 +- Dockerfiles.git/Dockerfile.debian | 2 +- Dockerfiles.git/Dockerfile.fedora | 2 +- Dockerfiles.pkgs/Dockerfile.centos | 2 +- Dockerfiles.pkgs/Dockerfile.fedora | 2 +- README.md | 2 +- atomicapp/constants.py | 2 +- setup.py | 2 +- 10 files changed, 28 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 866a7b43..03866dec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,22 @@ +## Atomic App 0.6.4 (10-06-2016) + +This release of Atomic App includes a large refactor of the "config" class as well as a minor change to our release script. + +Features: + - Config refactor + - Release script fix + - Test cases added to the config class + +``` +Charlie Drage (1): + Update release script regex + +Ratnadeep Debnath (3): + Initial work on refactoring Nulecule config. #524 + Re implememt Config class to be more generic. Fixes #524 + Fixed tests for config refactor. +``` + ## Atomic App 0.6.3 (08-31-2016) This release focuses on bug fixes, scripts as well as the Nulecule specification being merged into Atomic App. 
diff --git a/Dockerfile b/Dockerfile index dadeab78..53c1d456 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.3" +ENV ATOMICAPPVERSION="0.6.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.centos b/Dockerfiles.git/Dockerfile.centos index dadeab78..53c1d456 100644 --- a/Dockerfiles.git/Dockerfile.centos +++ b/Dockerfiles.git/Dockerfile.centos @@ -2,7 +2,7 @@ FROM centos:7 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.3" +ENV ATOMICAPPVERSION="0.6.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.git/Dockerfile.debian b/Dockerfiles.git/Dockerfile.debian index ff83388b..e30d1f5b 100644 --- a/Dockerfiles.git/Dockerfile.debian +++ b/Dockerfiles.git/Dockerfile.debian @@ -2,7 +2,7 @@ FROM debian:jessie MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.3" +ENV ATOMICAPPVERSION="0.6.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ RUN="docker run -it --rm \${OPT1} --privileged -v \${PWD}:/atomicapp -v /run:/run -v /:/host --net=host --name \${NAME} -e USER -e SUDO_USER -e NAME=\${NAME} -e IMAGE=\${IMAGE} \${IMAGE} \${OPT2} run \${OPT3}" \ diff --git a/Dockerfiles.git/Dockerfile.fedora b/Dockerfiles.git/Dockerfile.fedora index 0f7d065a..8b3defe3 100644 --- a/Dockerfiles.git/Dockerfile.fedora +++ b/Dockerfiles.git/Dockerfile.fedora @@ -2,7 +2,7 @@ FROM fedora:23 MAINTAINER Red Hat, Inc. -ENV ATOMICAPPVERSION="0.6.3" +ENV ATOMICAPPVERSION="0.6.4" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ io.openshift.generate.job=true \ diff --git a/Dockerfiles.pkgs/Dockerfile.centos b/Dockerfiles.pkgs/Dockerfile.centos index b52f76a2..e85038e5 100644 --- a/Dockerfiles.pkgs/Dockerfile.centos +++ b/Dockerfiles.pkgs/Dockerfile.centos @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. 
# Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in epel -ENV ATOMICAPPVERSION="0.6.3" +ENV ATOMICAPPVERSION="0.6.4" ENV TESTING="--enablerepo=epel-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/Dockerfiles.pkgs/Dockerfile.fedora b/Dockerfiles.pkgs/Dockerfile.fedora index ac784613..44294b3d 100644 --- a/Dockerfiles.pkgs/Dockerfile.fedora +++ b/Dockerfiles.pkgs/Dockerfile.fedora @@ -4,7 +4,7 @@ MAINTAINER Red Hat, Inc. # Check https://bodhi.fedoraproject.org/updates/?packages=atomicapp # for the most recent builds of atomicapp in fedora -ENV ATOMICAPPVERSION="0.6.3" +ENV ATOMICAPPVERSION="0.6.4" ENV TESTING="--enablerepo=updates-testing" LABEL io.projectatomic.nulecule.atomicappversion=${ATOMICAPPVERSION} \ diff --git a/README.md b/README.md index 092a1753..90af9550 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ _or_ Download a pre-signed .tar.gz from [download.projectatomic.io](https://download.projectatomic.io) / [GitHub](https://github.com/projectatomic/atomicapp/releases): ```sh -export RELEASE=0.6.3 +export RELEASE=0.6.4 wget https://github.com/projectatomic/atomicapp/releases/download/$RELEASE/atomicapp-$RELEASE.tar.gz tar -xvf atomicapp-$RELEASE.tar.gz && cd atomicapp-$RELEASE sudo make install diff --git a/atomicapp/constants.py b/atomicapp/constants.py index 4139f93f..a441c235 100644 --- a/atomicapp/constants.py +++ b/atomicapp/constants.py @@ -23,7 +23,7 @@ 2) LABEL io.projectatomic.nulecule.specversion in app Dockefile """ -__ATOMICAPPVERSION__ = '0.6.3' +__ATOMICAPPVERSION__ = '0.6.4' __NULECULESPECVERSION__ = '0.0.2' EXTERNAL_APP_DIR = "external" diff --git a/setup.py b/setup.py index b492d79c..ccf55210 100644 --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ def _install_requirements(): setup( name='atomicapp', - version='0.6.3', + version='0.6.4', description='A tool to install and run Nulecule apps', author='Red Hat, Inc.', 
author_email='container-tools@redhat.com', From ebc2aa36ac0b030443ec2ff3d6de9dc460403b71 Mon Sep 17 00:00:00 2001 From: Dylan Murray Date: Mon, 31 Oct 2016 14:43:13 -0400 Subject: [PATCH 192/193] Check if an OCP specific resource is being requested and use the oc_api URL --- .../providers/lib/kubeshift/openshift.py | 2 ++ tests/units/kubeshift/test_openshift.py | 26 ++++++++++++++++--- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/atomicapp/providers/lib/kubeshift/openshift.py b/atomicapp/providers/lib/kubeshift/openshift.py index b5cfa43d..a5a46515 100644 --- a/atomicapp/providers/lib/kubeshift/openshift.py +++ b/atomicapp/providers/lib/kubeshift/openshift.py @@ -180,6 +180,8 @@ def _generate_kurl(self, obj, namespace, name=None, params=None): url = self.k8s_api else: url = urljoin(self.k8s_apis, "%s/" % api_version) + elif resource in self.oc_api_resources: + url = self.oc_api else: raise KubeOpenshiftError("No kind by that name: %s" % kind) diff --git a/tests/units/kubeshift/test_openshift.py b/tests/units/kubeshift/test_openshift.py index af9f5e05..4ab5e1ad 100644 --- a/tests/units/kubeshift/test_openshift.py +++ b/tests/units/kubeshift/test_openshift.py @@ -43,7 +43,7 @@ def test_connection(self, *args): pass def get_resources(self, *args): - return ['Pod', 'template'] + return ['Pod', 'template', 'Route'] def get_groups(self, *args): return {} @@ -57,7 +57,7 @@ def cluster(self): @mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") -def test_create(mock_class): +def test_k8s_create(mock_class): # Mock the API class mock_class.return_value = FakeClient() mock_class.get_resources.return_value = ['Pod'] @@ -69,9 +69,29 @@ def test_create(mock_class): a = KubeOpenshiftClient(config) a.create(k8s_object, "foobar") +@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") +def test_oc_create(mock_class): + mock_class.return_value = FakeClient() + mock_class.get_resources.return_value = ['Route'] + 
mock_class.kind_to_resource_name.return_value = 'Route' + + oc_object = {"apiVersion": "v1", "kind": "Route", "metadata": {"labels": {"name": "helloapache-route"}, "name": "helloapache-route"}, "spec": { + "host": "$endpoint", "to": [{"kind": "Service", "name": "helloapache-svc"}]}} + a = KubeOpenshiftClient(config) + a.create(oc_object, "foobar") + +@mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") +def test_oc_delete(mock_class): + mock_class.return_value = FakeClient() + mock_class.kind_to_resource_name.return_value = 'Route' + + oc_object = {"apiVersion": "v1", "kind": "Route", "metadata": {"labels": {"name": "helloapache-route"}, "name": "helloapache-route"}, "spec": { + "host": "$endpoint", "to": [{"kind": "Service", "name": "helloapache-svc"}]}} + a = KubeOpenshiftClient(config) + a.delete(oc_object, "foobar") @mock.patch("atomicapp.providers.lib.kubeshift.openshift.KubeBase") -def test_delete(mock_class): +def test_k8s_delete(mock_class): # Mock the API class mock_class.return_value = FakeClient() mock_class.kind_to_resource_name.return_value = 'Pod' From fbe58c7e055bcaf4e9fe3a81dbbdab7556a8e790 Mon Sep 17 00:00:00 2001 From: Dylan Murray Date: Mon, 14 Nov 2016 12:07:22 -0500 Subject: [PATCH 193/193] Update cli/main.py to catch syntax error from latest version of flake8 --- atomicapp/cli/main.py | 1 + 1 file changed, 1 insertion(+) diff --git a/atomicapp/cli/main.py b/atomicapp/cli/main.py index 47bdafa4..46ac513a 100644 --- a/atomicapp/cli/main.py +++ b/atomicapp/cli/main.py @@ -540,5 +540,6 @@ def main(): cli = CLI() cli.run() + if __name__ == '__main__': main()