From 205efe8c0ca63ec967b957da4c80e4533494dd7f Mon Sep 17 00:00:00 2001 From: Jeff Coffler Date: Thu, 17 Mar 2016 09:01:38 -0700 Subject: [PATCH 1/7] Convert configure & Makefile for use with git and Jenkins --- build/Makefile | 12 ++++---- build/configure | 78 +++++-------------------------------------------- 2 files changed, 14 insertions(+), 76 deletions(-) mode change 100644 => 100755 build/configure diff --git a/build/Makefile b/build/Makefile index b3fa3a75f..405a0678d 100644 --- a/build/Makefile +++ b/build/Makefile @@ -14,6 +14,13 @@ ifndef ENABLE_DEBUG $(error "ENABLE_DEBUG is not set. Please re-run configure") endif +# Include the version file +include ../../docker.version + +ifndef CONTAINER_BUILDVERSION_STATUS +$(error "Is Makefile.version missing? Please re-run configure") +endif + SOURCE_DIR := $(BASE_DIR)/source/code TEST_DIR := $(BASE_DIR)/test/code @@ -55,11 +62,6 @@ SHARED_FLAGS := -shared STAGING_DIR := $(TARGET_DIR)/staging -CONTAINER_BUILDVERSION_MAJOR := 1 -CONTAINER_BUILDVERSION_MINOR := 0 -CONTAINER_BUILDVERSION_PATCH := 0 -CONTAINER_BUILDVERSION_BUILDNR := 0 - ifeq ($(ULINUX),1) OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).universal.$(PF_ARCH) else diff --git a/build/configure b/build/configure old mode 100644 new mode 100755 index a44eb9e67..bd3b81c93 --- a/build/configure +++ b/build/configure @@ -1,35 +1,5 @@ #!/bin/bash -#--------------------------------- START OF LICENSE ---------------------------- -# -# MySQL cimprov ver. 1.0 -# -# Copyright (c) Microsoft Corporation -# -# All rights reserved. 
-# -# MIT License -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to deal -# in the Software without restriction, including without limitation the rights to -# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -# of the Software, and to permit persons to whom the Software is furnished to do -# so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# -#---------------------------------- END OF LICENSE ----------------------------- - scxomi_dir=`(cd ../../OMI/Unix; pwd -P)` scxpal_dir=`(cd ../../Pal; pwd -P)` @@ -39,7 +9,6 @@ enable_purify_agent="" enable_purify_server="" enable_omi_tools="" enable_omi_tools_flag=0 -opensource_distro=0 build_type=Release ULINUX=0 NOULINIX=0 @@ -52,6 +21,13 @@ if [ ! -d "$scxomi_dir" ]; then exit 1 fi +# Do we have a version file from the super project? If not, trigger error + +if [ ! -f ../../docker.version ]; then + echo "Docker version file does not exist in superproject" + exit 1 +fi + for opt do @@ -106,10 +82,6 @@ do enable_omi_tools_flag=1 ;; - --enable-open-source) - opensource_distro=1 - ;; - *) echo "configure: invalid option '$opt'" echo "Try configure --help' for more information." 
@@ -141,7 +113,6 @@ OPTIONS: --enable-purify-agent Allow agent to be run with purify (memory leak detection) --enable-purify-server Allow server to be run with purify (memory leak detection) --enable-omi-tools Build to allow use of OMI tools (omischema and omireg) - --enable-open-source Build for open source distribution EOF exit 0 @@ -150,27 +121,6 @@ fi omi_configure_quals="${enable_debug} ${enable_purify_agent} ${enable_purify_server} ${enable_omi_tools} ${omi_configure_quals}" pal_configure_quals="${enable_debug}" -if [ "$ULINUX" -eq 1 -a "$opensource_distro" -eq 1 ]; then - echo "*** ULINUX not permitted for open source distributions; ULINUX disabled ***" - ULINUX=0 -fi - -if [ "$opensource_distro" -eq 1 ]; then - # We really don't want ULINUX, not even defaulted - NOULINUX=1 -fi - -# See if we're running on a SuSE 10 (universal) system; if so, assume ULINUX -if [ "$NOULINUX" != "1" ]; then - if [ -e /etc/SuSE-release ]; then - PF_MAJOR=`head -n 1 /etc/SuSE-release | sed 's/.* \([0-9][0-9]*\)[ \.].*/\1/'` - PF_MINOR=`(head -n 1 /etc/SuSE-release | sed 's/.* [0-9][0-9]*[\.]\([0-9][0-9]*\).*/\1/') | grep -v '[^0-9]' || echo '0'` - if [ "$PF_MAJOR" = "10" -a "$PF_MINOR" = "0" ]; then - ULINUX=1 - fi - fi -fi - # Note: Most of this code came from the PAL configure file set_ulinux_pf() @@ -178,15 +128,6 @@ set_ulinux_pf() PF_MAJOR=1 PF_MINOR=0 PF_DISTRO=ULINUX - PF_DISTRO_ULINUX_D=`[ -e /usr/bin/dpkg ] && echo 1` - if [ -n "$PF_DISTRO_ULINUX_D" ]; then - PF_DISTOR_ULINUX_R= - BUILD_DPKG=1 - PKG_SUFFIX=deb - else - PF_DISTRO_ULINUX_R=`[ -e /usr/bin/rpmbuild ] && echo 1` - BUILD_RPM=1 - fi } uname=`uname` @@ -226,19 +167,15 @@ case $uname in case `uname -m` in *64*) PF_ARCH=x64 - ARCH=x64 ;; *86*) PF_ARCH=x86 - ARCH=ia32 ;; *athlon*) PF_ARCH=x86 - ARCH=ia32 ;; *) PF_ARCH=UNKNOWN - ARCH=UNKNOWN ;; esac @@ -267,7 +204,6 @@ PF=$PF PF_MAJOR=$PF_MAJOR PF_MINOR=$PF_MINOR PF_ARCH=$PF_ARCH -ARCH=$ARCH PF_WIDTH=$PF_WIDTH PF_DISTRO=$PF_DISTRO ULINUX=$ULINUX From 
f5cb633981c843d0443ff820e21124090de47291 Mon Sep 17 00:00:00 2001 From: Jeff Coffler Date: Thu, 17 Mar 2016 09:50:46 -0700 Subject: [PATCH 2/7] Fix some basic build issues due to new build environment --- .gitignore | 6 ++++++ build/configure | 4 ++-- installer/bundle/create_bundle.sh | 0 installer/datafiles/base_container.data | 7 ++++--- 4 files changed, 12 insertions(+), 5 deletions(-) create mode 100644 .gitignore mode change 100644 => 100755 installer/bundle/create_bundle.sh diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..2ddf52a96 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +# git ignore file for the Docker-Provider project + +/intermediate/ +/target/ +build/config.mak + diff --git a/build/configure b/build/configure index bd3b81c93..519a7e7f1 100755 --- a/build/configure +++ b/build/configure @@ -1,7 +1,7 @@ #!/bin/bash -scxomi_dir=`(cd ../../OMI/Unix; pwd -P)` -scxpal_dir=`(cd ../../Pal; pwd -P)` +scxomi_dir=`(cd ../../omi/Unix; pwd -P)` +scxpal_dir=`(cd ../../pal; pwd -P)` enable_debug="" enable_debug_flag=0 diff --git a/installer/bundle/create_bundle.sh b/installer/bundle/create_bundle.sh old mode 100644 new mode 100755 diff --git a/installer/datafiles/base_container.data b/installer/datafiles/base_container.data index 64ed1ebc3..3d992f858 100644 --- a/installer/datafiles/base_container.data +++ b/installer/datafiles/base_container.data @@ -26,8 +26,6 @@ MAINTAINER: 'Microsoft Corporation' /etc/opt/microsoft/docker-cimprov/container.conf; installer/conf/container.conf; 644; root; root -/var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt; installer/LastEventQueryTime.txt; 644; root; root - %Links /opt/omi/lib/libcontainer.${{SHLIB_EXT}}; /opt/microsoft/docker-cimprov/lib/libcontainer.${{SHLIB_EXT}}; 644; root; root @@ -71,6 +69,9 @@ WriteInstallInfo() { } WriteInstallInfo +# Get the state file in place with proper permissions +touch /var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt +chmod 644 
/var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt chown omsagent:omsagent /var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt mv /etc/opt/microsoft/docker-cimprov/container.conf /etc/opt/microsoft/omsagent/conf/omsagent.d/container.conf @@ -97,4 +98,4 @@ ${{SHELL_HEADER}} ${{SHELL_HEADER}} %Postuninstall_0 -${{SHELL_HEADER}} \ No newline at end of file +${{SHELL_HEADER}} From e81c657ecf16d361a2c0741fe61dcad99be856b1 Mon Sep 17 00:00:00 2001 From: Jeff Coffler Date: Thu, 17 Mar 2016 10:12:35 -0700 Subject: [PATCH 3/7] Fix some basic unit test compiler issues due to newer PAL version --- .gitignore | 4 + ...ainerInventory_Class_Provider_UnitTest.cpp | 604 +++++++++--------- ...inerStatistics_Class_Provider_UnitTest.cpp | 230 +++---- ...er_DaemonEvent_Class_Provider_UnitTest.cpp | 276 ++++---- ...ImageInventory_Class_Provider_UnitTest.cpp | 584 ++++++++--------- 5 files changed, 851 insertions(+), 847 deletions(-) diff --git a/.gitignore b/.gitignore index 2ddf52a96..35ad70b8f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,7 @@ /target/ build/config.mak +# Unit test files + +/test/code/providers/TestScriptPath.h +/test/code/providers/providertestutils.cpp diff --git a/test/code/providers/Container_ContainerInventory_Class_Provider_UnitTest.cpp b/test/code/providers/Container_ContainerInventory_Class_Provider_UnitTest.cpp index 2209e3ce0..699b7f3ea 100644 --- a/test/code/providers/Container_ContainerInventory_Class_Provider_UnitTest.cpp +++ b/test/code/providers/Container_ContainerInventory_Class_Provider_UnitTest.cpp @@ -1,302 +1,302 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "Container_ContainerInventory_Class_Provider.h" -#include "cjson/cJSON.h" -#include "TestScriptPath.h" - -using namespace std; -using namespace SCXCoreLib; - -class ContainerInventoryTest : public CppUnit::TestFixture -{ - 
CPPUNIT_TEST_SUITE(ContainerInventoryTest); - CPPUNIT_TEST(TestEnumerateInstances); - CPPUNIT_TEST(TestEnumerateVerifyAllValues); - CPPUNIT_TEST(TestEnumerateDeletedContainer); - CPPUNIT_TEST_SUITE_END(); - -private: - vector containers; - - static string NewGuid() - { - uuid_t uuid; - uuid_generate_random(uuid); - char s[37]; - uuid_unparse(uuid, s); - return s; - } - - static string RunCommand(const char* command) - { - istringstream processInput; - ostringstream processOutput; - ostringstream processErr; - - CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); - CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); - - return processOutput.str(); - } - -public: - void setUp() - { - // Get some images to use - fputc('\n', stdout); - RunCommand("docker pull hello-world"); - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - } - - void tearDown() - { - char command[128]; - - // Remove the containers that were started by the tests - for (unsigned i = 0; i < containers.size(); i++) - { - snprintf(command, 128, "docker rm -f %s", containers[i].c_str()); - RunCommand(command); - } - - containers.clear(); - } - -protected: - void TestEnumerateInstances() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Remove cached state - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - - // Run a container to ensure that there is at lease one result - string containerName = NewGuid(); - containers.push_back(containerName); - char command[128]; - snprintf(command, 128, "docker run --name=%s hello-world", containerName.c_str()); - RunCommand(command); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - // Get containers using command line - there is a Docker bug that causes this to fail if SCXProcess::Run() is used instead of system() - char 
path[128]; - snprintf(path, 128, "/tmp/docker_container_ids_%d.txt", getpid()); - snprintf(command, 128, "docker ps -aq --no-trunc > %s", path); - CPPUNIT_ASSERT_MESSAGE(string(strerror(errno)), !system(command)); - - FILE* idFile = fopen(path, "r"); - CPPUNIT_ASSERT_MESSAGE(string(strerror(errno)), idFile); - - wchar_t id[13]; - set allIds; - - // Full container IDs (one per line) - while (fwscanf(idFile, L"%ls", id) != EOF) - { - allIds.insert(wstring(id)); - } - - fclose(idFile); - CPPUNIT_ASSERT_MESSAGE(string(strerror(errno)), !remove(path)); - - CPPUNIT_ASSERT_EQUAL(allIds.size(), context.Size()); - - for (unsigned i = 0; i < context.Size(); ++i) - { - // Verify the InstanceID - CPPUNIT_ASSERT(allIds.count(context[i].GetKey(L"InstanceID", CALL_LOCATION(errMsg)))); - } - } - - void TestEnumerateVerifyAllValues() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Remove cached state - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - - // Run a container to ensure that there is at lease one result - string containerName = NewGuid(); - containers.push_back(containerName); - char command[256]; - snprintf(command, 256, "docker run --name=%s hello-world", containerName.c_str()); - RunCommand(command); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - // Get container inventory using a script - char path[128]; - snprintf(path, 128, "/tmp/docker_container_inventory_%d.txt", getpid()); - snprintf(command, 256, "python %sContainerInventory.py > %s", TEST_SCRIPT_PATH, path); - CPPUNIT_ASSERT_MESSAGE(string(strerror(errno)), !system(command)); - - FILE* containerFile = fopen(path, "r"); - CPPUNIT_ASSERT_MESSAGE(string(strerror(errno)), containerFile); - - char buffer[1024]; - vector containersList; - - while (fgets(buffer, 1023, containerFile)) - { - containersList.push_back(cJSON_Parse(buffer)); - } - - 
fclose(containerFile); - CPPUNIT_ASSERT_MESSAGE(string(strerror(errno)), !remove(path)); - - // Should have no more current containers than current + deleted containers - CPPUNIT_ASSERT(containersList.size() <= context.Size()); - - wchar_t currentId[66]; - int containerCount = 0; - - for (unsigned i = 0; i < containersList.size(); ++i) - { - bool flag = false; - mbstowcs(currentId, cJSON_GetObjectItem(containersList[i], "InstanceID")->valuestring, 65); - - for (unsigned j = 0; !flag && j < context.Size(); j++) - { - if (!context[j].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(wstring(currentId))) - { - wchar_t temp[512]; - unsigned count = 0; - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "CreatedTime")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"CreatedTime", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ElementName")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ElementName", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "State")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"State", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "StartedTime")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"StartedTime", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "FinishedTime")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"FinishedTime", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ImageId")->valuestring, 511); - 
CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageId", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Image")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Repository")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Repository", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ImageTag")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageTag", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ComposeGroup")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ComposeGroup", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ContainerHostname")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ContainerHostname", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Computer")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Computer", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Command")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Command", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "EnvironmentVar")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"EnvironmentVar", 
CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Ports")->valuestring, 511); - - if (wcslen(temp) > 3) - { - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Ports", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - } - - mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Links")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Links", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - count = cJSON_GetObjectItem(containersList[i], "ExitCode")->valueint; - unsigned providerExitCode = context[j].GetProperty(L"ExitCode", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg)); - CPPUNIT_ASSERT_EQUAL(count, providerExitCode); - CPPUNIT_ASSERT(providerExitCode <= INT_MAX); - - flag = true; - containerCount += 1; - } - } - - cJSON_Delete(containersList[i]); - } - - // Ensure all objects were validated - CPPUNIT_ASSERT_EQUAL(containerCount, containersList.size()); - } - - void TestEnumerateDeletedContainer() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Remove cached state - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - - // Run a container to ensure that there is at lease one result - string containerName = NewGuid(); - char command[128]; - snprintf(command, 128, "docker run --name=%s hello-world", containerName.c_str()); - RunCommand(command); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - // Delete container - snprintf(command, 128, "docker rm -f %s", containerName.c_str()); - RunCommand(command); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - wchar_t wcontainerName[65]; - mbstowcs(wcontainerName, containerName.c_str(), 64); - - for (unsigned i = 0; i < context.Size(); 
++i) - { - if (!context[i].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(wstring(wcontainerName))) - { - CPPUNIT_ASSERT_EQUAL(wstring(L"Deleted"), context[i].GetProperty(L"State", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - } - } - } -}; - -CPPUNIT_TEST_SUITE_REGISTRATION(ContainerInventoryTest); \ No newline at end of file +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "Container_ContainerInventory_Class_Provider.h" +#include "cjson/cJSON.h" +#include "TestScriptPath.h" + +using namespace std; +using namespace SCXCoreLib; + +class ContainerInventoryTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ContainerInventoryTest); + CPPUNIT_TEST(TestEnumerateInstances); + CPPUNIT_TEST(TestEnumerateVerifyAllValues); + CPPUNIT_TEST(TestEnumerateDeletedContainer); + CPPUNIT_TEST_SUITE_END(); + +private: + vector containers; + + static string NewGuid() + { + uuid_t uuid; + uuid_generate_random(uuid); + char s[37]; + uuid_unparse(uuid, s); + return s; + } + + static string RunCommand(const char* command) + { + istringstream processInput; + ostringstream processOutput; + ostringstream processErr; + + CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); + CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); + + return processOutput.str(); + } + +public: + void setUp() + { + // Get some images to use + fputc('\n', stdout); + RunCommand("docker pull hello-world"); + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + } + + void tearDown() + { + char command[128]; + + // Remove the containers that were started by the tests + for (unsigned i = 0; i < containers.size(); i++) + { + snprintf(command, 128, "docker rm -f %s", containers[i].c_str()); + RunCommand(command); + } + + 
containers.clear(); + } + +protected: + void TestEnumerateInstances() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + // Remove cached state + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + + // Run a container to ensure that there is at lease one result + string containerName = NewGuid(); + containers.push_back(containerName); + char command[128]; + snprintf(command, 128, "docker run --name=%s hello-world", containerName.c_str()); + RunCommand(command); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + // Get containers using command line - there is a Docker bug that causes this to fail if SCXProcess::Run() is used instead of system() + char path[128]; + snprintf(path, 128, "/tmp/docker_container_ids_%d.txt", getpid()); + snprintf(command, 128, "docker ps -aq --no-trunc > %s", path); + CPPUNIT_ASSERT_MESSAGE(string(SCXCoreLib::strerror(errno)), !system(command)); + + FILE* idFile = fopen(path, "r"); + CPPUNIT_ASSERT_MESSAGE(string(SCXCoreLib::strerror(errno)), idFile); + + wchar_t id[13]; + set allIds; + + // Full container IDs (one per line) + while (fwscanf(idFile, L"%ls", id) != EOF) + { + allIds.insert(wstring(id)); + } + + fclose(idFile); + CPPUNIT_ASSERT_MESSAGE(string(SCXCoreLib::strerror(errno)), !remove(path)); + + CPPUNIT_ASSERT_EQUAL(allIds.size(), context.Size()); + + for (unsigned i = 0; i < context.Size(); ++i) + { + // Verify the InstanceID + CPPUNIT_ASSERT(allIds.count(context[i].GetKey(L"InstanceID", CALL_LOCATION(errMsg)))); + } + } + + void TestEnumerateVerifyAllValues() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + // Remove cached state + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + + // Run a container to ensure that there is at lease one result + string containerName = 
NewGuid(); + containers.push_back(containerName); + char command[256]; + snprintf(command, 256, "docker run --name=%s hello-world", containerName.c_str()); + RunCommand(command); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + // Get container inventory using a script + char path[128]; + snprintf(path, 128, "/tmp/docker_container_inventory_%d.txt", getpid()); + snprintf(command, 256, "python %sContainerInventory.py > %s", TEST_SCRIPT_PATH, path); + CPPUNIT_ASSERT_MESSAGE(string(SCXCoreLib::strerror(errno)), !system(command)); + + FILE* containerFile = fopen(path, "r"); + CPPUNIT_ASSERT_MESSAGE(string(SCXCoreLib::strerror(errno)), containerFile); + + char buffer[1024]; + vector containersList; + + while (fgets(buffer, 1023, containerFile)) + { + containersList.push_back(cJSON_Parse(buffer)); + } + + fclose(containerFile); + CPPUNIT_ASSERT_MESSAGE(string(SCXCoreLib::strerror(errno)), !remove(path)); + + // Should have no more current containers than current + deleted containers + CPPUNIT_ASSERT(containersList.size() <= context.Size()); + + wchar_t currentId[66]; + int containerCount = 0; + + for (unsigned i = 0; i < containersList.size(); ++i) + { + bool flag = false; + mbstowcs(currentId, cJSON_GetObjectItem(containersList[i], "InstanceID")->valuestring, 65); + + for (unsigned j = 0; !flag && j < context.Size(); j++) + { + if (!context[j].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(wstring(currentId))) + { + wchar_t temp[512]; + unsigned count = 0; + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "CreatedTime")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"CreatedTime", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ElementName")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ElementName", 
CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "State")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"State", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "StartedTime")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"StartedTime", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "FinishedTime")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"FinishedTime", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ImageId")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageId", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Image")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Repository")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Repository", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ImageTag")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageTag", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "ComposeGroup")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ComposeGroup", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], 
"ContainerHostname")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ContainerHostname", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Computer")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Computer", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Command")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Command", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "EnvironmentVar")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"EnvironmentVar", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Ports")->valuestring, 511); + + if (wcslen(temp) > 3) + { + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Ports", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + } + + mbstowcs(temp, cJSON_GetObjectItem(containersList[i], "Links")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Links", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + count = cJSON_GetObjectItem(containersList[i], "ExitCode")->valueint; + unsigned providerExitCode = context[j].GetProperty(L"ExitCode", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg)); + CPPUNIT_ASSERT_EQUAL(count, providerExitCode); + CPPUNIT_ASSERT(providerExitCode <= INT_MAX); + + flag = true; + containerCount += 1; + } + } + + cJSON_Delete(containersList[i]); + } + + // Ensure all objects were validated + CPPUNIT_ASSERT_EQUAL(containerCount, containersList.size()); + } + + void TestEnumerateDeletedContainer() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + 
m_keyNames.push_back(L"InstanceID"); + + // Remove cached state + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + + // Run a container to ensure that there is at lease one result + string containerName = NewGuid(); + char command[128]; + snprintf(command, 128, "docker run --name=%s hello-world", containerName.c_str()); + RunCommand(command); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + // Delete container + snprintf(command, 128, "docker rm -f %s", containerName.c_str()); + RunCommand(command); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + wchar_t wcontainerName[65]; + mbstowcs(wcontainerName, containerName.c_str(), 64); + + for (unsigned i = 0; i < context.Size(); ++i) + { + if (!context[i].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(wstring(wcontainerName))) + { + CPPUNIT_ASSERT_EQUAL(wstring(L"Deleted"), context[i].GetProperty(L"State", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + } + } + } +}; + +CPPUNIT_TEST_SUITE_REGISTRATION(ContainerInventoryTest); diff --git a/test/code/providers/Container_ContainerStatistics_Class_Provider_UnitTest.cpp b/test/code/providers/Container_ContainerStatistics_Class_Provider_UnitTest.cpp index a38b0d8e3..6ce101937 100644 --- a/test/code/providers/Container_ContainerStatistics_Class_Provider_UnitTest.cpp +++ b/test/code/providers/Container_ContainerStatistics_Class_Provider_UnitTest.cpp @@ -1,116 +1,116 @@ -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "Container_ContainerStatistics_Class_Provider.h" -#include "cjson/cJSON.h" - -using namespace std; -using namespace SCXCoreLib; - -class ContainerStatisticsTest : public CppUnit::TestFixture -{ - CPPUNIT_TEST_SUITE(ContainerStatisticsTest); - 
CPPUNIT_TEST(TestEnumerateInstances); - CPPUNIT_TEST_SUITE_END(); - -public: - vector containers; - - static string NewGuid() - { - uuid_t uuid; - uuid_generate_random(uuid); - char s[37]; - uuid_unparse(uuid, s); - return s; - } - - static string RunCommand(const char* command) - { - istringstream processInput; - ostringstream processOutput; - ostringstream processErr; - - CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); - CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); - - return processOutput.str(); - } - -public: - void setUp() - { - // Get some images to use - fputc('\n', stdout); - RunCommand("docker pull centos"); - } - - void tearDown() - { - char command[128]; - - // Remove the containers that were started by the tests - for (unsigned i = 0; i < containers.size(); i++) - { - sprintf(command, "docker rm -f %s", containers[i].c_str()); - RunCommand(command); - } - - containers.clear(); - } - -protected: - void TestEnumerateInstances() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - char containerName[64]; - strcpy(containerName, NewGuid().c_str()); - containers.push_back(string(containerName)); - char command[128]; - sprintf(command, "docker run --name=%s centos sleep 60 &", containerName); - - system(command); - sleep(5); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - CPPUNIT_ASSERT(context.Size()); - - // Only check that the values are present and within the valid range because it is not possible to create a controlled environment - for (unsigned i = 0; i < context.Size(); ++i) - { - wstring instanceId = context[i].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)); - CPPUNIT_ASSERT(instanceId.length()); - - CPPUNIT_ASSERT(context[i].GetProperty(L"NetRXBytes", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 
0); - CPPUNIT_ASSERT(context[i].GetProperty(L"NetTXBytes", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); - CPPUNIT_ASSERT(context[i].GetProperty(L"MemUsedMB", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); - CPPUNIT_ASSERT(context[i].GetProperty(L"CPUTotal", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); - - unsigned short cpuUse = context[i].GetProperty(L"CPUTotalPct", CALL_LOCATION(errMsg)).GetValue_MIUint16(CALL_LOCATION(errMsg)); - CPPUNIT_ASSERT(cpuUse <= 100); - - CPPUNIT_ASSERT(context[i].GetProperty(L"DiskBytesRead", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); - CPPUNIT_ASSERT(context[i].GetProperty(L"DiskBytesWritten", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); - } - } -}; - +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "Container_ContainerStatistics_Class_Provider.h" +#include "cjson/cJSON.h" + +using namespace std; +using namespace SCXCoreLib; + +class ContainerStatisticsTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ContainerStatisticsTest); + CPPUNIT_TEST(TestEnumerateInstances); + CPPUNIT_TEST_SUITE_END(); + +public: + vector containers; + + static string NewGuid() + { + uuid_t uuid; + uuid_generate_random(uuid); + char s[37]; + uuid_unparse(uuid, s); + return s; + } + + static string RunCommand(const char* command) + { + istringstream processInput; + ostringstream processOutput; + ostringstream processErr; + + CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); + CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); + + return processOutput.str(); + } + +public: + void setUp() + { + // Get some images to use + fputc('\n', stdout); + RunCommand("docker pull centos"); + } + + void tearDown() + { + char command[128]; + + // Remove the containers that were started by the 
tests + for (unsigned i = 0; i < containers.size(); i++) + { + sprintf(command, "docker rm -f %s", containers[i].c_str()); + RunCommand(command); + } + + containers.clear(); + } + +protected: + void TestEnumerateInstances() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + char containerName[64]; + strcpy(containerName, NewGuid().c_str()); + containers.push_back(string(containerName)); + char command[128]; + sprintf(command, "docker run --name=%s centos sleep 60 &", containerName); + + system(command); + sleep(5); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + CPPUNIT_ASSERT(context.Size()); + + // Only check that the values are present and within the valid range because it is not possible to create a controlled environment + for (unsigned i = 0; i < context.Size(); ++i) + { + wstring instanceId = context[i].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)); + CPPUNIT_ASSERT(instanceId.length()); + + CPPUNIT_ASSERT(context[i].GetProperty(L"NetRXBytes", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); + CPPUNIT_ASSERT(context[i].GetProperty(L"NetTXBytes", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); + CPPUNIT_ASSERT(context[i].GetProperty(L"MemUsedMB", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); + CPPUNIT_ASSERT(context[i].GetProperty(L"CPUTotal", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); + + unsigned short cpuUse = context[i].GetProperty(L"CPUTotalPct", CALL_LOCATION(errMsg)).GetValue_MIUint16(CALL_LOCATION(errMsg)); + CPPUNIT_ASSERT(cpuUse <= 100); + + CPPUNIT_ASSERT(context[i].GetProperty(L"DiskBytesRead", CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); + CPPUNIT_ASSERT(context[i].GetProperty(L"DiskBytesWritten", 
CALL_LOCATION(errMsg)).GetValue_MIUint64(CALL_LOCATION(errMsg)) >= 0); + } + } +}; + CPPUNIT_TEST_SUITE_REGISTRATION(ContainerStatisticsTest); \ No newline at end of file diff --git a/test/code/providers/Container_DaemonEvent_Class_Provider_UnitTest.cpp b/test/code/providers/Container_DaemonEvent_Class_Provider_UnitTest.cpp index 888b2d060..5da3b879d 100644 --- a/test/code/providers/Container_DaemonEvent_Class_Provider_UnitTest.cpp +++ b/test/code/providers/Container_DaemonEvent_Class_Provider_UnitTest.cpp @@ -1,139 +1,139 @@ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "Container_DaemonEvent_Class_Provider.h" -#include "cjson/cJSON.h" - -#define LASTQUERYTIMEFILE "/var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt" - -using namespace std; -using namespace SCXCoreLib; - -class DaemonEventTest : public CppUnit::TestFixture -{ - CPPUNIT_TEST_SUITE(DaemonEventTest); - CPPUNIT_TEST(TestEnumerateVerifyAllValues); - CPPUNIT_TEST_SUITE_END(); - -private: - vector containers; - - static string NewGuid() - { - uuid_t uuid; - uuid_generate_random(uuid); - char s[37]; - uuid_unparse(uuid, s); - return s; - } - - static string RunCommand(const char* command) - { - istringstream processInput; - ostringstream processOutput; - ostringstream processErr; - - CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); - CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); - - return processOutput.str(); - } - -public: - void setUp() - { - // Get some images to use - fputc('\n', stdout); - RunCommand("docker pull hello-world"); - } - - void tearDown() - { - char command[128]; - - // Remove the containers that were started by the tests - for (unsigned i = 0; i < containers.size(); i++) - { - sprintf(command, "docker rm -f %s", containers[i].c_str()); - RunCommand(command); - } - - containers.clear(); - } - 
-protected: - void TestEnumerateVerifyAllValues() - { - wstring errMsg; - TestableContext context; - - istringstream processInput; - ostringstream processOutput; - ostringstream processErr; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Read the time from disk - FILE* file = fopen(LASTQUERYTIMEFILE, "r"); - int fileTime = time(NULL); - - if (file) - { - fscanf(file, "%d", &fileTime); - } - else - { - file = fopen(LASTQUERYTIMEFILE, "w"); - CPPUNIT_ASSERT(file); - fprintf(file, "%d", fileTime); - } - - fclose(file); - - // Run a container to ensure that there is at lease one result - string containerName = NewGuid(); - containers.push_back(containerName); - char command[128]; - sprintf(command, "docker run --name=%s hello-world", containerName.c_str()); - RunCommand(command); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - wstring allowedCommandsList[] = { L"attach", L"commit", L"copy", L"create", L"destroy", L"die", L"exec_create", L"exec_start", L"export", L"kill", L"oom", L"pause", L"rename", L"resize", L"restart", L"start", L"stop", L"top", L"unpause", L"delete", L"import", L"pull", L"push", L"tag", L"untag" }; - set allowedCommands(allowedCommandsList, allowedCommandsList + 25); - - // Check validity of every field of every object - for (unsigned i = 0; i < context.Size(); i++) - { - // This field is a GUID and the value does not need verification - CPPUNIT_ASSERT(context[i].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).length()); - - // These fields cannot be validated directly against Docker because events cannot be uniquely identified - wstring temp = context[i].GetProperty(L"TimeOfCommand", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)); - int t = -1; - swscanf(temp.c_str(), L"%d", &t); - CPPUNIT_ASSERT(t >= fileTime); - - CPPUNIT_ASSERT(allowedCommands.count(context[i].GetProperty(L"Command", 
CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)))); - CPPUNIT_ASSERT(context[i].GetProperty(L"ElementName", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).length()); - } - } -}; - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "Container_DaemonEvent_Class_Provider.h" +#include "cjson/cJSON.h" + +#define LASTQUERYTIMEFILE "/var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt" + +using namespace std; +using namespace SCXCoreLib; + +class DaemonEventTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(DaemonEventTest); + CPPUNIT_TEST(TestEnumerateVerifyAllValues); + CPPUNIT_TEST_SUITE_END(); + +private: + vector containers; + + static string NewGuid() + { + uuid_t uuid; + uuid_generate_random(uuid); + char s[37]; + uuid_unparse(uuid, s); + return s; + } + + static string RunCommand(const char* command) + { + istringstream processInput; + ostringstream processOutput; + ostringstream processErr; + + CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); + CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); + + return processOutput.str(); + } + +public: + void setUp() + { + // Get some images to use + fputc('\n', stdout); + RunCommand("docker pull hello-world"); + } + + void tearDown() + { + char command[128]; + + // Remove the containers that were started by the tests + for (unsigned i = 0; i < containers.size(); i++) + { + sprintf(command, "docker rm -f %s", containers[i].c_str()); + RunCommand(command); + } + + containers.clear(); + } + +protected: + void TestEnumerateVerifyAllValues() + { + wstring errMsg; + TestableContext context; + + istringstream processInput; + ostringstream processOutput; + ostringstream processErr; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + // Read the time from disk + FILE* file = 
fopen(LASTQUERYTIMEFILE, "r"); + int fileTime = time(NULL); + + if (file) + { + fscanf(file, "%d", &fileTime); + } + else + { + file = fopen(LASTQUERYTIMEFILE, "w"); + CPPUNIT_ASSERT(file); + fprintf(file, "%d", fileTime); + } + + fclose(file); + + // Run a container to ensure that there is at lease one result + string containerName = NewGuid(); + containers.push_back(containerName); + char command[128]; + sprintf(command, "docker run --name=%s hello-world", containerName.c_str()); + RunCommand(command); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + wstring allowedCommandsList[] = { L"attach", L"commit", L"copy", L"create", L"destroy", L"die", L"exec_create", L"exec_start", L"export", L"kill", L"oom", L"pause", L"rename", L"resize", L"restart", L"start", L"stop", L"top", L"unpause", L"delete", L"import", L"pull", L"push", L"tag", L"untag" }; + set allowedCommands(allowedCommandsList, allowedCommandsList + 25); + + // Check validity of every field of every object + for (unsigned i = 0; i < context.Size(); i++) + { + // This field is a GUID and the value does not need verification + CPPUNIT_ASSERT(context[i].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).length()); + + // These fields cannot be validated directly against Docker because events cannot be uniquely identified + wstring temp = context[i].GetProperty(L"TimeOfCommand", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)); + int t = -1; + swscanf(temp.c_str(), L"%d", &t); + CPPUNIT_ASSERT(t >= fileTime); + + CPPUNIT_ASSERT(allowedCommands.count(context[i].GetProperty(L"Command", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)))); + CPPUNIT_ASSERT(context[i].GetProperty(L"ElementName", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).length()); + } + } +}; + CPPUNIT_TEST_SUITE_REGISTRATION(DaemonEventTest); \ No newline at end of file diff --git 
a/test/code/providers/Container_ImageInventory_Class_Provider_UnitTest.cpp b/test/code/providers/Container_ImageInventory_Class_Provider_UnitTest.cpp index d12b50eb0..13fca396d 100644 --- a/test/code/providers/Container_ImageInventory_Class_Provider_UnitTest.cpp +++ b/test/code/providers/Container_ImageInventory_Class_Provider_UnitTest.cpp @@ -1,293 +1,293 @@ -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "Container_ImageInventory_Class_Provider.h" -#include "cjson/cJSON.h" -#include "TestScriptPath.h" - -using namespace std; -using namespace SCXCoreLib; - -class ImageInventoryTest : public CppUnit::TestFixture -{ - CPPUNIT_TEST_SUITE(ImageInventoryTest); - CPPUNIT_TEST(TestEnumerateInstances); - CPPUNIT_TEST(TestEnumerateVerifyAllValues); - CPPUNIT_TEST(TestRunContainer); - CPPUNIT_TEST(TestRunFailedContainer); - CPPUNIT_TEST_SUITE_END(); - -private: - vector containers; - - static string NewGuid() - { - uuid_t uuid; - uuid_generate_random(uuid); - char s[37]; - uuid_unparse(uuid, s); - return s; - } - - static string RunCommand(const char* command) - { - istringstream processInput; - ostringstream processOutput; - ostringstream processErr; - - CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); - CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); - - return processOutput.str(); - } - -public: - void setUp() - { - // Get some images to use - fputc('\n', stdout); - RunCommand("docker pull hello-world"); - RunCommand("docker pull centos"); - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ImageInventory/*"); - } - - void tearDown() - { - char command[128]; - - // Remove the containers that were started by the tests - for (unsigned i = 0; i < containers.size(); i++) - { - sprintf(command, "docker rm -f %s", containers[i].c_str()); - RunCommand(command); - } - - containers.clear(); - } - -protected: - 
void TestEnumerateInstances() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Remove cached state - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - // Get images using command line - CPPUNIT_ASSERT(!system("docker images -q --no-trunc > /tmp/docker_image_ids.txt")); - - FILE* idFile = fopen("/tmp/docker_image_ids.txt", "r"); - CPPUNIT_ASSERT(idFile); - - wchar_t id[65]; - set allIds; - - // Full image IDs (one per line) - while (fwscanf(idFile, L"%ls", id) != EOF) - { - allIds.insert(wstring(id)); - } - - fclose(idFile); - remove("/tmp/docker_image_ids.txt"); - - CPPUNIT_ASSERT_EQUAL(allIds.size(), context.Size()); - - for (unsigned i = 0; i < context.Size(); ++i) - { - // Verify the InstanceID - CPPUNIT_ASSERT(allIds.count(context[i].GetKey(L"InstanceID", CALL_LOCATION(errMsg)))); - } - } - - void TestEnumerateVerifyAllValues() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Remove cached state - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - // Get image inventory using a script - char command[256]; - sprintf(command, "python %sImageInventory.py > /tmp/docker_image_inventory.txt", TEST_SCRIPT_PATH); - CPPUNIT_ASSERT(!system(command)); - - FILE* imageFile = fopen("/tmp/docker_image_inventory.txt", "r"); - CPPUNIT_ASSERT(imageFile); - - char buffer[1024]; - vector images; - - while (fgets(buffer, 1023, imageFile)) - { - images.push_back(cJSON_Parse(buffer)); - } - - fclose(imageFile); - remove("/tmp/docker_image_inventory.txt"); - - // Should have same number of images - CPPUNIT_ASSERT_EQUAL(images.size(), context.Size()); - - wchar_t 
currentId[66]; - int imageCount = 0; - - // Verify every field of every object - for (unsigned i = 0; i < images.size(); i++) - { - bool flag = false; - mbstowcs(currentId, cJSON_GetObjectItem(images[i], "InstanceID")->valuestring, 65); - - for (unsigned j = 0; !flag && j < context.Size(); j++) - { - if (!context[j].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(wstring(currentId))) - { - wchar_t temp[512]; - unsigned count = 0; - - mbstowcs(temp, cJSON_GetObjectItem(images[i], "Image")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(images[i], "Repository")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Repository", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(images[i], "ImageTag")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageTag", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(images[i], "Computer")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Computer", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(images[i], "ImageSize")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageSize", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - mbstowcs(temp, cJSON_GetObjectItem(images[i], "VirtualSize")->valuestring, 511); - CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"VirtualSize", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); - - count = cJSON_GetObjectItem(images[i], "Running")->valueint; - CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Running", 
CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); - - count = cJSON_GetObjectItem(images[i], "Stopped")->valueint; - CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Stopped", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); - - count = cJSON_GetObjectItem(images[i], "Failed")->valueint; - CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Failed", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); - - count = cJSON_GetObjectItem(images[i], "Paused")->valueint; - CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Paused", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); - - count = cJSON_GetObjectItem(images[i], "Total")->valueint; - CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Total", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); - - flag = true; - imageCount += 1; - } - } - - cJSON_Delete(images[i]); - } - - // Ensure all objects were validated - CPPUNIT_ASSERT_EQUAL(imageCount, context.Size()); - } - - void TestRunContainer() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Remove cached state - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - - char containerName[64]; - strcpy(containerName, NewGuid().c_str()); - containers.push_back(string(containerName)); - char command[128]; - sprintf(command, "docker run --name=%s hello-world", containerName); - - CPPUNIT_ASSERT(!system(command)); - sleep(5); - - bool flag = false; - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - for (unsigned i = 0; !flag && i < context.Size(); ++i) - { - if (!context[i].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(L"hello-world")) - { - // Checke that the number of stopped containers is not 0 - CPPUNIT_ASSERT(context[i].GetProperty(L"Stopped", 
CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); - flag = true; - } - } - } - - void TestRunFailedContainer() - { - wstring errMsg; - TestableContext context; - - vector m_keyNames; - m_keyNames.push_back(L"InstanceID"); - - // Remove cached state - RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); - - char containerName[64]; - strcpy(containerName, NewGuid().c_str()); - containers.push_back(string(containerName)); - char command[128]; - sprintf(command, "docker run --name=%s centos false", containerName); - - CPPUNIT_ASSERT(system(command)); - sleep(5); - - bool flag = false; - - // Enumerate provider - StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); - - for (unsigned i = 0; !flag && i < context.Size(); ++i) - { - if (!context[i].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(L"centos")) - { - // Check that the number of failed containers is not 0 - CPPUNIT_ASSERT(context[i].GetProperty(L"Failed", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); - flag = true; - } - } - } - -}; - +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "Container_ImageInventory_Class_Provider.h" +#include "cjson/cJSON.h" +#include "TestScriptPath.h" + +using namespace std; +using namespace SCXCoreLib; + +class ImageInventoryTest : public CppUnit::TestFixture +{ + CPPUNIT_TEST_SUITE(ImageInventoryTest); + CPPUNIT_TEST(TestEnumerateInstances); + CPPUNIT_TEST(TestEnumerateVerifyAllValues); + CPPUNIT_TEST(TestRunContainer); + CPPUNIT_TEST(TestRunFailedContainer); + CPPUNIT_TEST_SUITE_END(); + +private: + vector containers; + + static string NewGuid() + { + uuid_t uuid; + uuid_generate_random(uuid); + char s[37]; + uuid_unparse(uuid, s); + return s; + } + + static string RunCommand(const char* command) + { + istringstream processInput; + ostringstream 
processOutput; + ostringstream processErr; + + CPPUNIT_ASSERT(!SCXProcess::Run(StrFromMultibyte(string(command)), processInput, processOutput, processErr, 0)); + CPPUNIT_ASSERT_EQUAL(processErr.str(), string()); + + return processOutput.str(); + } + +public: + void setUp() + { + // Get some images to use + fputc('\n', stdout); + RunCommand("docker pull hello-world"); + RunCommand("docker pull centos"); + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ImageInventory/*"); + } + + void tearDown() + { + char command[128]; + + // Remove the containers that were started by the tests + for (unsigned i = 0; i < containers.size(); i++) + { + sprintf(command, "docker rm -f %s", containers[i].c_str()); + RunCommand(command); + } + + containers.clear(); + } + +protected: + void TestEnumerateInstances() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + // Remove cached state + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + // Get images using command line + CPPUNIT_ASSERT(!system("docker images -q --no-trunc > /tmp/docker_image_ids.txt")); + + FILE* idFile = fopen("/tmp/docker_image_ids.txt", "r"); + CPPUNIT_ASSERT(idFile); + + wchar_t id[65]; + set allIds; + + // Full image IDs (one per line) + while (fwscanf(idFile, L"%ls", id) != EOF) + { + allIds.insert(wstring(id)); + } + + fclose(idFile); + remove("/tmp/docker_image_ids.txt"); + + CPPUNIT_ASSERT_EQUAL(allIds.size(), context.Size()); + + for (unsigned i = 0; i < context.Size(); ++i) + { + // Verify the InstanceID + CPPUNIT_ASSERT(allIds.count(context[i].GetKey(L"InstanceID", CALL_LOCATION(errMsg)))); + } + } + + void TestEnumerateVerifyAllValues() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + // Remove cached state + RunCommand("rm -f 
/var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + // Get image inventory using a script + char command[256]; + sprintf(command, "python %sImageInventory.py > /tmp/docker_image_inventory.txt", TEST_SCRIPT_PATH); + CPPUNIT_ASSERT(!system(command)); + + FILE* imageFile = fopen("/tmp/docker_image_inventory.txt", "r"); + CPPUNIT_ASSERT(imageFile); + + char buffer[1024]; + vector images; + + while (fgets(buffer, 1023, imageFile)) + { + images.push_back(cJSON_Parse(buffer)); + } + + fclose(imageFile); + remove("/tmp/docker_image_inventory.txt"); + + // Should have same number of images + CPPUNIT_ASSERT_EQUAL(images.size(), context.Size()); + + wchar_t currentId[66]; + int imageCount = 0; + + // Verify every field of every object + for (unsigned i = 0; i < images.size(); i++) + { + bool flag = false; + mbstowcs(currentId, cJSON_GetObjectItem(images[i], "InstanceID")->valuestring, 65); + + for (unsigned j = 0; !flag && j < context.Size(); j++) + { + if (!context[j].GetProperty(L"InstanceID", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(wstring(currentId))) + { + wchar_t temp[512]; + unsigned count = 0; + + mbstowcs(temp, cJSON_GetObjectItem(images[i], "Image")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(images[i], "Repository")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Repository", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(images[i], "ImageTag")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageTag", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(images[i], "Computer")->valuestring, 
511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"Computer", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(images[i], "ImageSize")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"ImageSize", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + mbstowcs(temp, cJSON_GetObjectItem(images[i], "VirtualSize")->valuestring, 511); + CPPUNIT_ASSERT_EQUAL(wstring(temp), context[j].GetProperty(L"VirtualSize", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg))); + + count = cJSON_GetObjectItem(images[i], "Running")->valueint; + CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Running", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); + + count = cJSON_GetObjectItem(images[i], "Stopped")->valueint; + CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Stopped", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); + + count = cJSON_GetObjectItem(images[i], "Failed")->valueint; + CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Failed", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); + + count = cJSON_GetObjectItem(images[i], "Paused")->valueint; + CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Paused", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); + + count = cJSON_GetObjectItem(images[i], "Total")->valueint; + CPPUNIT_ASSERT_EQUAL(count, context[j].GetProperty(L"Total", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); + + flag = true; + imageCount += 1; + } + } + + cJSON_Delete(images[i]); + } + + // Ensure all objects were validated + CPPUNIT_ASSERT_EQUAL(imageCount, context.Size()); + } + + void TestRunContainer() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + // Remove cached state + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + + 
char containerName[64]; + strcpy(containerName, NewGuid().c_str()); + containers.push_back(string(containerName)); + char command[128]; + sprintf(command, "docker run --name=%s hello-world", containerName); + + CPPUNIT_ASSERT(!system(command)); + sleep(5); + + bool flag = false; + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + for (unsigned i = 0; !flag && i < context.Size(); ++i) + { + if (!context[i].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(L"hello-world")) + { + // Check that the number of stopped containers is not 0 + CPPUNIT_ASSERT(context[i].GetProperty(L"Stopped", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); + flag = true; + } + } + } + + void TestRunFailedContainer() + { + wstring errMsg; + TestableContext context; + + vector m_keyNames; + m_keyNames.push_back(L"InstanceID"); + + // Remove cached state + RunCommand("rm -f /var/opt/microsoft/docker-cimprov/state/ContainerInventory/*"); + + char containerName[64]; + strcpy(containerName, NewGuid().c_str()); + containers.push_back(string(containerName)); + char command[128]; + sprintf(command, "docker run --name=%s centos false", containerName); + + CPPUNIT_ASSERT(system(command)); + sleep(5); + + bool flag = false; + + // Enumerate provider + StandardTestEnumerateInstances(m_keyNames, context, CALL_LOCATION(errMsg)); + + for (unsigned i = 0; !flag && i < context.Size(); ++i) + { + if (!context[i].GetProperty(L"Image", CALL_LOCATION(errMsg)).GetValue_MIString(CALL_LOCATION(errMsg)).compare(L"centos")) + { + // Check that the number of failed containers is not 0 + CPPUNIT_ASSERT(context[i].GetProperty(L"Failed", CALL_LOCATION(errMsg)).GetValue_MIUint32(CALL_LOCATION(errMsg))); + flag = true; + } + } + } + +}; + CPPUNIT_TEST_SUITE_REGISTRATION(ImageInventoryTest); \ No newline at end of file From 0d194e044d1136a4f421029ce5e02fcc2cef76bc Mon Sep 17 00:00:00 2001 From: Jeff Coffler 
Date: Thu, 17 Mar 2016 11:34:08 -0700 Subject: [PATCH 4/7] Docker needs root permissions to work, so unit tests do as well --- build/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build/Makefile b/build/Makefile index 405a0678d..a71ee960f 100644 --- a/build/Makefile +++ b/build/Makefile @@ -304,7 +304,7 @@ TEST_STATUS: test : TEST_STATUS $(SCXPAL_INTERMEDIATE_DIR) $(TARGET_DIR)/testrunner @echo "========================= Performing container testrun execution" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(OMI_ROOT)/output/lib; cd $(TARGET_DIR); ./testrunner + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(OMI_ROOT)/output/lib; cd $(TARGET_DIR); sudo ./testrunner #-------------------------------------------------------------------------------- # Build the distribution kit From 9a5c5b7222bf234796ec6711e2e3bd30f8cc5af4 Mon Sep 17 00:00:00 2001 From: Jeff Coffler Date: Thu, 17 Mar 2016 15:31:33 -0700 Subject: [PATCH 5/7] Clean up target directory (only has shell bundle, no temp files) Make create_bundle.sh more robust (consistent with other projects) --- build/Makefile | 69 ++--- installer/bundle/bundle_skel.sh | 338 +++++++++++++++++++++++ installer/bundle/create_bundle.sh | 148 ++++++++--- installer/bundle/primary.skel | 340 ------------------------ installer/datafiles/base_container.data | 6 +- 5 files changed, 485 insertions(+), 416 deletions(-) create mode 100644 installer/bundle/bundle_skel.sh delete mode 100644 installer/bundle/primary.skel diff --git a/build/Makefile b/build/Makefile index a71ee960f..acf07a0db 100644 --- a/build/Makefile +++ b/build/Makefile @@ -29,22 +29,22 @@ PROVIDER_TEST_DIR := $(TEST_DIR)/providers PAL_INCLUDE_DIR := $(SCXPAL_DIR)/source/code/include PAL_TESTUTILS_DIR := $(SCXPAL_DIR)/test/code/testutils -INTERMEDIATE_DIR=$(BASE_DIR)/intermediate/$(BUILD_CONFIGURATION) +INTERMEDIATE_DIR := $(BASE_DIR)/intermediate/$(BUILD_CONFIGURATION) TARGET_DIR := $(BASE_DIR)/target/$(BUILD_CONFIGURATION) -PROVIDER_LIBRARY := 
$(TARGET_DIR)/libcontainer.so +PROVIDER_LIBRARY := $(INTERMEDIATE_DIR)/libcontainer.so -INSTALLER_TMPDIR := $(TARGET_DIR)/installer_tmp +INSTALLER_TMPDIR := $(INTERMEDIATE_DIR)/installer_tmp # Include files -INCLUDE_DEFINES := $(TARGET_DIR)/defines.h +INCLUDE_DEFINES := $(INTERMEDIATE_DIR)/defines.h # Compiler flags OMI_INCLUDE_FLAGS := -I$(OMI_ROOT)/output/include -PROVIDER_INCLUDE_FLAGS := -I$(PAL_INCLUDE_DIR) -I$(TARGET_DIR) +PROVIDER_INCLUDE_FLAGS := -I$(PAL_INCLUDE_DIR) -I$(INTERMEDIATE_DIR) -PROVIDER_TEST_INCLUDE_FLAGS := -Wmissing-include-dirs -Wno-non-virtual-dtor -I$(SCXPAL_DIR)/source/code/include -I$(TARGET_DIR) -I$(SCXPAL_DIR)/test/ext/include -I$(OMI_ROOT)/output/include -I$(OMI_ROOT) -I$(OMI_ROOT)/common -I$(SCXPAL_DIR)/test/code/include $(PROVIDER_INCLUDE_FLAGS) -I$(PROVIDER_DIR) +PROVIDER_TEST_INCLUDE_FLAGS := -Wmissing-include-dirs -Wno-non-virtual-dtor -I$(SCXPAL_DIR)/source/code/include -I$(INTERMEDIATE_DIR) -I$(SCXPAL_DIR)/test/ext/include -I$(OMI_ROOT)/output/include -I$(OMI_ROOT) -I$(OMI_ROOT)/common -I$(SCXPAL_DIR)/test/code/include $(PROVIDER_INCLUDE_FLAGS) -I$(PROVIDER_DIR) ifeq ($(ENABLE_DEBUG),1) PROV_DEBUG_FLAGS := -g @@ -60,10 +60,12 @@ SHARED_FLAGS := -shared # Support for installbuilder -STAGING_DIR := $(TARGET_DIR)/staging +STAGING_DIR := $(INTERMEDIATE_DIR)/staging ifeq ($(ULINUX),1) - OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).universal.$(PF_ARCH) + # For consistency, the architecture should be i686 (for x86) and x86_64 (for x64) + DOCKER_ARCH := $(shell echo $(PF_ARCH) | sed -e 's/x86$$/i686/' -e 's/x64$$/x86_64/') + OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).universal.$(DOCKER_ARCH) else PF_DISTRO_LC := $(shell echo $(PF_DISTRO) | tr A-Z a-z) 
OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).$(PF_DISTRO_LC).$(PF_MAJOR).$(PF_ARCH) @@ -257,7 +259,7 @@ $(PROVIDER_LIBRARY): CFLAGS += $(PROVIDER_COMPILE_FLAGS) $(PROVIDER_LIBRARY): CXXFLAGS += $(PROVIDER_COMPILE_FLAGS) $(PROVIDER_LIBRARY): $(STATIC_PROVIDERLIB_OBJFILES) $(STATIC_PROVIDERLIB_LOGPOLICY) $(INCLUDE_DEFINES) $(PROVIDER_HEADERS) $(MKPATH) $(INTERMEDIATE_DIR) - $(MKPATH) $(TARGET_DIR) + $(MKPATH) $(INTERMEDIATE_DIR) g++ $(PROVIDER_COMPILE_FLAGS) $(SHARED_FLAGS) $(PROVIDER_INCLUDE_FLAGS) -o $@ $(STATIC_PROVIDERLIB_OBJFILES) $(LINK_LIBRARIES) #-------------------------------------------------------------------------------- @@ -288,12 +290,12 @@ STATIC_PROVIDER_PAL_UNITFILES = \ STATIC_PROVIDER_TEST_OBJFILES = $(call src_to_obj,$(STATIC_PROVIDER_UNITFILES)) -$(TARGET_DIR)/testrunner: INCLUDES += $(PROVIDER_TEST_INCLUDE_FLAGS) -I$(PAL_INCLUDE_DIR) -I$(PAL_TESTUTILS_DIR) -I$(PROVIDER_DIR) -I$(SOURCE_DIR) -$(TARGET_DIR)/testrunner: CFLAGS += $(PROVIDER_COMPILE_FLAGS) -$(TARGET_DIR)/testrunner: CXXFLAGS += $(PROVIDER_COMPILE_FLAGS) -$(TARGET_DIR)/testrunner : $(STATIC_PROVIDER_TEST_OBJFILES) $(STATIC_PROVIDERLIB_OBJFILES) $(INCLUDE_DEFINES) $(PROVIDER_HEADERS) +$(INTERMEDIATE_DIR)/testrunner: INCLUDES += $(PROVIDER_TEST_INCLUDE_FLAGS) -I$(PAL_INCLUDE_DIR) -I$(PAL_TESTUTILS_DIR) -I$(PROVIDER_DIR) -I$(SOURCE_DIR) +$(INTERMEDIATE_DIR)/testrunner: CFLAGS += $(PROVIDER_COMPILE_FLAGS) +$(INTERMEDIATE_DIR)/testrunner: CXXFLAGS += $(PROVIDER_COMPILE_FLAGS) +$(INTERMEDIATE_DIR)/testrunner : $(STATIC_PROVIDER_TEST_OBJFILES) $(STATIC_PROVIDERLIB_OBJFILES) $(INCLUDE_DEFINES) $(PROVIDER_HEADERS) + $(MKPATH) $(INTERMEDIATE_DIR) $(MKPATH) $(INTERMEDIATE_DIR) - $(MKPATH) $(TARGET_DIR) g++ $(PROVIDER_COMPILE_FLAGS) $(PROVIDER_TEST_INCLUDE_FLAGS) -o $@ $(STATIC_PROVIDER_PAL_UNITFILES) $(STATIC_PROVIDER_TEST_OBJFILES) $(STATIC_PROVIDERLIB_OBJFILES) 
$(LINK_LIBRARIES) $(PROVIDER_TEST_LINK_LIBRARIES) testrun : test @@ -302,9 +304,9 @@ TEST_STATUS: @echo "========================= Performing Building provider tests" @echo \#define TEST_SCRIPT_PATH \"$(TEST_DIR)/scripts/\" > $(TEST_DIR)/providers/TestScriptPath.h -test : TEST_STATUS $(SCXPAL_INTERMEDIATE_DIR) $(TARGET_DIR)/testrunner +test : TEST_STATUS $(SCXPAL_INTERMEDIATE_DIR) $(INTERMEDIATE_DIR)/testrunner @echo "========================= Performing container testrun execution" - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(OMI_ROOT)/output/lib; cd $(TARGET_DIR); sudo ./testrunner + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(OMI_ROOT)/output/lib; cd $(INTERMEDIATE_DIR); sudo ./testrunner #-------------------------------------------------------------------------------- # Build the distribution kit @@ -322,10 +324,10 @@ ifeq ($(ULINUX),1) @echo "========================= Performing Building RPM and DPKG packages" $(MKPATH) $(INSTALLER_TMPDIR) sudo $(RMDIR) $(STAGING_DIR) - $(MKPATH) $(TARGET_DIR) + $(MKPATH) $(INTERMEDIATE_DIR) python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ --BASE_DIR=$(BASE_DIR) \ - --TARGET_DIR=$(TARGET_DIR) \ + --TARGET_DIR=$(INTERMEDIATE_DIR) \ --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ --STAGING_DIR=$(STAGING_DIR) \ --BUILD_TYPE=$(BUILD_TYPE) \ @@ -334,18 +336,18 @@ ifeq ($(ULINUX),1) --PFDISTRO=$(PF_DISTRO) \ --PFMAJOR=$(PF_MAJOR) \ --PFMINOR=$(PF_MINOR) \ - --VERSION=1.0.0 \ - --RELEASE=1 \ + --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ + --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ base_container.data linux.data linux_rpm.data sudo $(RMDIR) $(STAGING_DIR) - $(MKPATH) $(TARGET_DIR) + $(MKPATH) $(INTERMEDIATE_DIR) python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ --BASE_DIR=$(BASE_DIR) \ - 
--TARGET_DIR=$(TARGET_DIR) \ + --TARGET_DIR=$(INTERMEDIATE_DIR) \ --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ --STAGING_DIR=$(STAGING_DIR) \ --BUILD_TYPE=$(BUILD_TYPE) \ @@ -354,8 +356,8 @@ ifeq ($(ULINUX),1) --PFDISTRO=$(PF_DISTRO) \ --PFMAJOR=$(PF_MAJOR) \ --PFMINOR=$(PF_MINOR) \ - --VERSION=1.0.0 \ - --RELEASE=1 \ + --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ + --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ $(DPKG_LOCATION) \ --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ @@ -363,20 +365,24 @@ ifeq ($(ULINUX),1) base_container.data linux.data linux_dpkg.data # Strip the package extension from the package filename - sed -re 's/.rpm$$|.deb$$//' $(TARGET_DIR)/package_filename > $(TARGET_DIR)/package_file.tmp + sed -re 's/.rpm$$|.deb$$//' $(INTERMEDIATE_DIR)/package_filename > $(INTERMEDIATE_DIR)/package_file.tmp; mv $(INTERMEDIATE_DIR)/package_file.tmp $(INTERMEDIATE_DIR)/package_filename # Build the tar file containing both .rpm and .deb packages - cd $(TARGET_DIR); tar cvf $(OUTPUT_PACKAGE_PREFIX).tar $(OUTPUT_PACKAGE_PREFIX).rpm $(OUTPUT_PACKAGE_PREFIX).deb + cd $(INTERMEDIATE_DIR); tar cvf $(OUTPUT_PACKAGE_PREFIX).tar $(OUTPUT_PACKAGE_PREFIX).rpm $(OUTPUT_PACKAGE_PREFIX).deb + + ../installer/bundle/create_bundle.sh $(PF)_$(PF_DISTRO) $(INTERMEDIATE_DIR) $(OUTPUT_PACKAGE_PREFIX) + # Copy the shell bundle to the target directory + mkdir -p $(TARGET_DIR) + cd $(INTERMEDIATE_DIR); cp `cat $(INTERMEDIATE_DIR)/package_filename`.sh $(TARGET_DIR) - ../installer/bundle/create_bundle.sh $(PF)_$(PF_DISTRO) $(TARGET_DIR) $(OUTPUT_PACKAGE_PREFIX) else @echo "========================= Performing Building RPM and DPKG packages" sudo $(RMDIR) $(STAGING_DIR) - $(MKPATH) $(TARGET_DIR) + $(MKPATH) $(INTERMEDIATE_DIR) python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ --BASE_DIR=$(BASE_DIR) \ - --TARGET_DIR=$(TARGET_DIR) \ + --TARGET_DIR=$(INTERMEDIATE_DIR) \ 
--INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ --STAGING_DIR=$(STAGING_DIR) \ --BUILD_TYPE=$(BUILD_TYPE) \ @@ -385,11 +391,12 @@ else --PFDISTRO=$(PF_DISTRO) \ --PFMAJOR=$(PF_MAJOR) \ --PFMINOR=$(PF_MINOR) \ - --VERSION=1.0.0 \ - --RELEASE=1 \ + --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ + --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ $(DPKG_LOCATION) \ --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ base_container.data linux.data linux_dpkg.data + endif diff --git a/installer/bundle/bundle_skel.sh b/installer/bundle/bundle_skel.sh new file mode 100644 index 000000000..f069ceb5a --- /dev/null +++ b/installer/bundle/bundle_skel.sh @@ -0,0 +1,338 @@ +#!/bin/sh +# +# +# This script is a skeleton bundle file for primary platforms the docker +# project, which only ships in universal form (RPM & DEB installers for the +# Linux platforms). +# +# Use this script by concatenating it with some binary package. +# +# The bundle is created by cat'ing the script in front of the binary, so for +# the gzip'ed tar example, a command like the following will build the bundle: +# +# tar -czvf - | cat sfx.skel - > my.bundle +# +# The bundle can then be copied to a system, made executable (chmod +x) and +# then run. When run without any options it will make any pre-extraction +# calls, extract the binary, and then make any post-extraction calls. +# +# This script has some usefull helper options to split out the script and/or +# binary in place, and to turn on shell debugging. +# +# This script is paired with create_bundle.sh, which will edit constants in +# this script for proper execution at runtime. The "magic", here, is that +# create_bundle.sh encodes the length of this script in the script itself. +# Then the script can use that with 'tail' in order to strip the script from +# the binary package. 
+# +# Developer note: A prior incarnation of this script used 'sed' to strip the +# script from the binary package. That didn't work on AIX 5, where 'sed' did +# strip the binary package - AND null bytes, creating a corrupted stream. +# +# Docker-specific implementation: Unlike CM & OM projects, this bundle does +# not install OMI. Why a bundle, then? Primarily so a single package can +# install either a .DEB file or a .RPM file, whichever is appropriate. + +set -e +PATH=/usr/bin:/usr/sbin:/bin:/sbin +umask 022 + +# Note: Because this is Linux-only, 'readlink' should work +SCRIPT="`readlink -e $0`" + +# These symbols will get replaced during the bundle creation process. +# +# The PLATFORM symbol should contain ONE of the following: +# Linux_REDHAT, Linux_SUSE, Linux_ULINUX +# +# The CONTAINER_PKG symbol should contain something like: +# docker-cimprov-1.0.0-89.rhel.6.x64. (script adds rpm or deb, as appropriate) + +PLATFORM= +CONTAINER_PKG= +SCRIPT_LEN= +SCRIPT_LEN_PLUS_ONE= + +usage() +{ + echo "usage: $1 [OPTIONS]" + echo "Options:" + echo " --extract Extract contents and exit." + echo " --force Force upgrade (override version checks)." + echo " --install Install the package from the system." + echo " --purge Uninstall the package and remove all related data." + echo " --remove Uninstall the package from the system." + echo " --restart-deps Reconfigure and restart dependent services (no-op)." + echo " --upgrade Upgrade the package in the system." + echo " --debug use shell debug mode." + echo " -? | --help shows this usage text." +} + +cleanup_and_exit() +{ + if [ -n "$1" ]; then + exit $1 + else + exit 0 + fi +} + +verifyNoInstallationOption() +{ + if [ -n "${installMode}" ]; then + echo "$0: Conflicting qualifiers, exiting" >&2 + cleanup_and_exit 1 + fi + + return; +} + +ulinux_detect_installer() +{ + INSTALLER= + + # If DPKG lives here, assume we use that. Otherwise we use RPM. + type dpkg > /dev/null 2>&1 + if [ $? 
-eq 0 ]; then + INSTALLER=DPKG + else + INSTALLER=RPM + fi +} + +# $1 - The filename of the package to be installed +pkg_add() { + pkg_filename=$1 + ulinux_detect_installer + + if [ "$INSTALLER" = "DPKG" ]; then + dpkg --install --refuse-downgrade ${pkg_filename}.deb + else + rpm --install ${pkg_filename}.rpm + fi +} + +# $1 - The package name of the package to be uninstalled +# $2 - Optional parameter. Only used when forcibly removing omi on SunOS +pkg_rm() { + ulinux_detect_installer + if [ "$INSTALLER" = "DPKG" ]; then + if [ "$installMode" = "P" ]; then + dpkg --purge $1 + else + dpkg --remove $1 + fi + else + rpm --erase $1 + fi +} + + +# $1 - The filename of the package to be installed +pkg_upd() { + pkg_filename=$1 + ulinux_detect_installer + if [ "$INSTALLER" = "DPKG" ]; then + [ -z "${forceFlag}" ] && FORCE="--refuse-downgrade" + dpkg --install $FORCE ${pkg_filename}.deb + + export PATH=/usr/local/sbin:/usr/sbin:/sbin:$PATH + else + [ -n "${forceFlag}" ] && FORCE="--force" + rpm --upgrade $FORCE ${pkg_filename}.rpm + fi +} + +force_stop_omi_service() { + # For any installation or upgrade, we should be shutting down omiserver (and it will be started after install/upgrade). + if [ -x /usr/sbin/invoke-rc.d ]; then + /usr/sbin/invoke-rc.d omiserverd stop 1> /dev/null 2> /dev/null + elif [ -x /sbin/service ]; then + service omiserverd stop 1> /dev/null 2> /dev/null + fi + + # Catchall for stopping omiserver + /etc/init.d/omiserverd stop 1> /dev/null 2> /dev/null + /sbin/init.d/omiserverd stop 1> /dev/null 2> /dev/null +} + +# +# Executable code follows +# + +while [ $# -ne 0 ]; do + case "$1" in + --extract-script) + # hidden option, not part of usage + # echo " --extract-script FILE extract the script to FILE." + head -${SCRIPT_LEN} "${SCRIPT}" > "$2" + local shouldexit=true + shift 2 + ;; + + --extract-binary) + # hidden option, not part of usage + # echo " --extract-binary FILE extract the binary to FILE." 
+ tail +${SCRIPT_LEN_PLUS_ONE} "${SCRIPT}" > "$2" + local shouldexit=true + shift 2 + ;; + + --extract) + verifyNoInstallationOption + installMode=E + shift 1 + ;; + + --force) + forceFlag=true + shift 1 + ;; + + --install) + verifyNoInstallationOption + installMode=I + shift 1 + ;; + + --purge) + verifyNoInstallationOption + installMode=P + shouldexit=true + shift 1 + ;; + + --remove) + verifyNoInstallationOption + installMode=R + shouldexit=true + shift 1 + ;; + + --restart-deps) + # No-op for Docker, as there are no dependent services + shift 1 + ;; + + --upgrade) + verifyNoInstallationOption + installMode=U + shift 1 + ;; + + --debug) + echo "Starting shell debug mode." >&2 + echo "" >&2 + echo "SCRIPT_INDIRECT: $SCRIPT_INDIRECT" >&2 + echo "SCRIPT_DIR: $SCRIPT_DIR" >&2 + echo "SCRIPT: $SCRIPT" >&2 + echo >&2 + set -x + shift 1 + ;; + + -? | --help) + usage `basename $0` >&2 + cleanup_and_exit 0 + ;; + + *) + usage `basename $0` >&2 + cleanup_and_exit 1 + ;; + esac +done + +if [ -n "${forceFlag}" ]; then + if [ "$installMode" != "I" -a "$installMode" != "U" ]; then + echo "Option --force is only valid with --install or --upgrade" >&2 + cleanup_and_exit 1 + fi +fi + +if [ -z "${installMode}" ]; then + echo "$0: No options specified, specify --help for help" >&2 + cleanup_and_exit 3 +fi + +# Do we need to remove the package? +set +e +if [ "$installMode" = "R" -o "$installMode" = "P" ]; then + pkg_rm docker-cimprov + + if [ "$installMode" = "P" ]; then + echo "Purging all files in container agent ..." + rm -rf /etc/opt/microsoft/docker-cimprov /opt/microsoft/docker-cimprov /var/opt/microsoft/docker-cimprov + fi +fi + +if [ -n "${shouldexit}" ]; then + # when extracting script/tarball don't also install + cleanup_and_exit 0 +fi + +# +# Do stuff before extracting the binary here, for example test [ `id -u` -eq 0 ], +# validate space, platform, uninstall a previous version, backup config data, etc... +# + +# +# Extract the binary here. +# + +echo "Extracting..." 
+ +# $PLATFORM is validated, so we know we're on Linux of some flavor +tail -n +${SCRIPT_LEN_PLUS_ONE} "${SCRIPT}" | tar xzf - +STATUS=$? +if [ ${STATUS} -ne 0 ]; then + echo "Failed: could not extract the install bundle." + cleanup_and_exit ${STATUS} +fi + +# +# Do stuff after extracting the binary here, such as actually installing the package. +# + +EXIT_STATUS=0 + +case "$installMode" in + E) + # Files are extracted, so just exit + cleanup_and_exit ${STATUS} + ;; + + I) + echo "Installing container agent ..." + + force_stop_omi_service + + pkg_add $CONTAINER_PKG + EXIT_STATUS=$? + ;; + + U) + echo "Updating container agent ..." + force_stop_omi_service + + pkg_upd $CONTAINER_PKG + EXIT_STATUS=$? + ;; + + *) + echo "$0: Invalid setting of variable \$installMode ($installMode), exiting" >&2 + cleanup_and_exit 2 +esac + +# Remove the package that was extracted as part of the bundle + +[ -f $CONTAINER_PKG.rpm ] && rm $CONTAINER_PKG.rpm +[ -f $CONTAINER_PKG.deb ] && rm $CONTAINER_PKG.deb + +if [ $? -ne 0 -o "$EXIT_STATUS" -ne "0" ]; then + cleanup_and_exit 1 +fi + +cleanup_and_exit 0 + +#####>>- This must be the last line of this script, followed by a single empty line. -<<##### diff --git a/installer/bundle/create_bundle.sh b/installer/bundle/create_bundle.sh index 3b642e7c6..180ef9a64 100755 --- a/installer/bundle/create_bundle.sh +++ b/installer/bundle/create_bundle.sh @@ -6,60 +6,104 @@ # Parameters: # $1: Platform type # $2: Directory to package file -# $3: Package name for docker package (without extension) +# $3: Package name for Docker package (without extension) # -# We expect this script to run from the BUILD directory (i.e. docker-osm/build). +# We expect this script to run from the BUILD directory (i.e. docker/build). # Directory paths are hard-coded for this location. 
+Notes for file bundle_skel.sh (included here since we don't want +# these comments in shell bundle): +# +# This script is a skeleton bundle file for the Docker project, which only +# ships in universal form (RPM & DEB installers for the Linux platforms). +# +# Use this script by concatenating it with some binary package. +# +# The bundle is created by cat'ing the script in front of the binary, so for +# the gzip'ed tar example, a command like the following will build the bundle: +# +# tar -czvf - | cat sfx.skel - > my.bundle +# +# The bundle can then be copied to a system, made executable (chmod +x) and +# then run. When run without any options it will make any pre-extraction +# calls, extract the binary, and then make any post-extraction calls. +# +# This script has some useful helper options to split out the script and/or +# binary in place, and to turn on shell debugging. +# +# This script is paired with create_bundle.sh, which will edit constants in +# this script for proper execution at runtime. The "magic", here, is that +# create_bundle.sh encodes the length of this script in the script itself. +# Then the script can use that with 'tail' in order to strip the script from +# the binary package. +# +# Developer note: A prior incarnation of this script used 'sed' to strip the +# script from the binary package. That didn't work on AIX 5, where 'sed' did +# strip the binary package - AND null bytes, creating a corrupted stream. +# +# Docker-specific implementation: Unlike CM & OM projects, this bundle does +# not install OMI. Why a bundle, then? Primarily so a single package can +# install either a .DEB file or a .RPM file, whichever is appropriate. 
+ SOURCE_DIR=`(cd ../installer/bundle; pwd -P)` INTERMEDIATE_DIR=`(mkdir -p ../installer/intermediate; cd ../installer/intermediate; pwd -P)` # Exit on error - set -e -set -x + +# Don't display output +set +x usage() { - echo "usage: $0 platform directory mysql-package-name" - echo " where" - echo " platform is one of: Linux_REDHAT, Linux_SUSE, Linux_ULINUX" - echo " directory is directory path to package file" - echo " mysql-package-name is the name of the MySQL installation package" - exit 1 + echo "usage: $0 platform directory Docker-package-name" + echo " where" + echo " platform is one of: Linux_REDHAT, Linux_SUSE, Linux_ULINUX" + echo " directory is directory path to package file" + echo " Docker-package-name is the name of the Docker installation package" + exit 1 } # Validate parameters if [ -z "$1" ]; then - echo "Missing parameter: Platform type" >&2 - echo "" - usage - exit 1 + echo "Missing parameter: Platform type" >&2 + echo "" + usage + exit 1 fi -if [ -z "$2" ]; then - echo "Missing parameter: Directory to platform file" >&2 - echo "" - usage +case "$1" in + Linux_REDHAT|Linux_SUSE|Linux_ULINUX) + ;; + + *) + echo "Invalid platform type specified: $1" >&2 exit 1 +esac + +if [ -z "$2" ]; then + echo "Missing parameter: Directory to platform file" >&2 + echo "" + usage + exit 1 fi if [ ! -d "$2" ]; then - echo "Directory \"$2\" does not exist" >&2 - exit 1 + echo "Directory \"$2\" does not exist" >&2 + exit 1 fi if [ -z "$3" ]; then - echo "Missing parameter: Container-package-name" >&2 - echo "" - usage - exit 1 + echo "Missing parameter: Docker-package-name" >&2 + echo "" + usage + exit 1 fi if [ ! -f "$2/$3".tar ]; then - echo "Tar file \"$2/$3\" does not exist" - exit 1 + echo "Tar file \"$2/$3\" does not exist" + exit 1 fi # Determine the output file name @@ -70,34 +114,54 @@ OUTPUT_DIR=`(cd $2; pwd -P)` cd $INTERMEDIATE_DIR # Fetch the bundle skeleton file -cp $SOURCE_DIR/primary.skel . -chmod u+w primary.skel +cp $SOURCE_DIR/bundle_skel.sh . 
+chmod u+w bundle_skel.sh + +# See if we can resolve git references for output +# (See if we can find the master project) +if [ -f ../../../.gitmodules ]; then + TEMP_FILE=/tmp/create_bundle.$$ + + # Get the git reference hashes in a file + ( + cd ../../.. + echo "Entering 'superproject'" > $TEMP_FILE + git rev-parse HEAD >> $TEMP_FILE + git submodule foreach git rev-parse HEAD >> $TEMP_FILE + ) + + # Change lines like: "Entering 'omi'\n" to "omi: " + perl -i -pe "s/Entering '([^\n]*)'\n/\$1: /" $TEMP_FILE + + # Grab the reference hashes in a variable + SOURCE_REFS=`cat $TEMP_FILE` + rm $TEMP_FILE + + # Update the bundle file w/the ref hash (much easier with perl since multi-line) + perl -i -pe "s/-- Source code references --/${SOURCE_REFS}/" bundle_skel.sh +else + echo "Unable to find git superproject!" >& 2 + exit 1 +fi # Edit the bundle file for hard-coded values -sed -e "s/PLATFORM=/PLATFORM=$1/" < primary.skel > primary.$$ -mv primary.$$ primary.skel +sed -i "s/PLATFORM=/PLATFORM=$1/" bundle_skel.sh +sed -i "s/CONTAINER_PKG=/CONTAINER_PKG=$3/" bundle_skel.sh -sed -e "s/CONTAINER_PKG=/CONTAINER_PKG=$3/" < primary.skel > primary.$$ -mv primary.$$ primary.skel - -SCRIPT_LEN=`wc -l < primary.skel | sed -e 's/ //g'` +SCRIPT_LEN=`wc -l < bundle_skel.sh | sed -e 's/ //g'` SCRIPT_LEN_PLUS_ONE="$((SCRIPT_LEN + 1))" -sed -e "s/SCRIPT_LEN=/SCRIPT_LEN=${SCRIPT_LEN}/" < primary.skel > primary.$$ -mv primary.$$ primary.skel - -sed -e "s/SCRIPT_LEN_PLUS_ONE=/SCRIPT_LEN_PLUS_ONE=${SCRIPT_LEN_PLUS_ONE}/" < primary.skel > primary.$$ -mv primary.$$ primary.skel - +sed -i "s/SCRIPT_LEN=/SCRIPT_LEN=${SCRIPT_LEN}/" bundle_skel.sh +sed -i "s/SCRIPT_LEN_PLUS_ONE=/SCRIPT_LEN_PLUS_ONE=${SCRIPT_LEN_PLUS_ONE}/" bundle_skel.sh # Fetch the kit cp ${OUTPUT_DIR}/${3}.tar . 
# Build the bundle BUNDLE_FILE=${3}.sh -gzip -c ${3}.tar | cat primary.skel - > $BUNDLE_FILE +gzip -c ${3}.tar | cat bundle_skel.sh - > $BUNDLE_FILE chmod +x $BUNDLE_FILE -rm primary.skel +rm bundle_skel.sh # Remove the kit and copy the bundle to the kit location rm ${3}.tar diff --git a/installer/bundle/primary.skel b/installer/bundle/primary.skel deleted file mode 100644 index 3856d98bc..000000000 --- a/installer/bundle/primary.skel +++ /dev/null @@ -1,340 +0,0 @@ -#!/bin/sh -# -# -# This script is a skeleton bundle file for primary platforms the docker -# project, which only ships in universal form (RPM & DEB installers for the -# Linux platforms). -# -# Use this script by concatenating it with some binary package. -# -# The bundle is created by cat'ing the script in front of the binary, so for -# the gzip'ed tar example, a command like the following will build the bundle: -# -# tar -czvf - | cat sfx.skel - > my.bundle -# -# The bundle can then be copied to a system, made executable (chmod +x) and -# then run. When run without any options it will make any pre-extraction -# calls, extract the binary, and then make any post-extraction calls. -# -# This script has some usefull helper options to split out the script and/or -# binary in place, and to turn on shell debugging. -# -# This script is paired with create_bundle.sh, which will edit constants in -# this script for proper execution at runtime. The "magic", here, is that -# create_bundle.sh encodes the length of this script in the script itself. -# Then the script can use that with 'tail' in order to strip the script from -# the binary package. -# -# Developer note: A prior incarnation of this script used 'sed' to strip the -# script from the binary package. That didn't work on AIX 5, where 'sed' did -# strip the binary package - AND null bytes, creating a corrupted stream. -# -# docker-specific implementaiton: Unlike CM & OM projects, this bundle does -# not install OMI. Why a bundle, then? 
Primarily so a single package can -# install either a .DEB file or a .RPM file, whichever is appropraite. This -# significantly simplies the complexity of installation by the Management -# Pack (MP) in the Operations Manager product. - -set -e -PATH=/usr/bin:/usr/sbin:/bin:/sbin -umask 022 - -# Note: Because this is Linux-only, 'readlink' should work -SCRIPT="`readlink -e $0`" - -# These symbols will get replaced during the bundle creation process. -# -# The PLATFORM symbol should contain ONE of the following: -# Linux_REDHAT, Linux_SUSE, Linux_ULINUX -# -# The CONTAINER_PKG symbol should contain something like: -# docker-cimprov-1.0.0-89.rhel.6.x64. (script adds rpm or deb, as appropriate) - -PLATFORM= -CONTAINER_PKG= -SCRIPT_LEN= -SCRIPT_LEN_PLUS_ONE= - -usage() -{ - echo "usage: $1 [OPTIONS]" - echo "Options:" - echo " --extract Extract contents and exit." - echo " --force Force upgrade (override version checks)." - echo " --install Install the package from the system." - echo " --purge Uninstall the package and remove all related data." - echo " --remove Uninstall the package from the system." - echo " --restart-deps Reconfigure and restart dependent services (no-op)." - echo " --upgrade Upgrade the package in the system." - echo " --debug use shell debug mode." - echo " -? | --help shows this usage text." -} - -cleanup_and_exit() -{ - if [ -n "$1" ]; then - exit $1 - else - exit 0 - fi -} - -verifyNoInstallationOption() -{ - if [ -n "${installMode}" ]; then - echo "$0: Conflicting qualifiers, exiting" >&2 - cleanup_and_exit 1 - fi - - return; -} - -ulinux_detect_installer() -{ - INSTALLER= - - # If DPKG lives here, assume we use that. Otherwise we use RPM. - type dpkg > /dev/null 2>&1 - if [ $? 
-eq 0 ]; then - INSTALLER=DPKG - else - INSTALLER=RPM - fi -} - -# $1 - The filename of the package to be installed -pkg_add() { - pkg_filename=$1 - ulinux_detect_installer - - if [ "$INSTALLER" = "DPKG" ]; then - dpkg --install --refuse-downgrade ${pkg_filename}.deb - else - rpm --install ${pkg_filename}.rpm - fi -} - -# $1 - The package name of the package to be uninstalled -# $2 - Optional parameter. Only used when forcibly removing omi on SunOS -pkg_rm() { - ulinux_detect_installer - if [ "$INSTALLER" = "DPKG" ]; then - if [ "$installMode" = "P" ]; then - dpkg --purge $1 - else - dpkg --remove $1 - fi - else - rpm --erase $1 - fi -} - - -# $1 - The filename of the package to be installed -pkg_upd() { - pkg_filename=$1 - ulinux_detect_installer - if [ "$INSTALLER" = "DPKG" ]; then - [ -z "${forceFlag}" ] && FORCE="--refuse-downgrade" - dpkg --install $FORCE ${pkg_filename}.deb - - export PATH=/usr/local/sbin:/usr/sbin:/sbin:$PATH - else - [ -n "${forceFlag}" ] && FORCE="--force" - rpm --upgrade $FORCE ${pkg_filename}.rpm - fi -} - -force_stop_omi_service() { - # For any installation or upgrade, we should be shutting down omiserver (and it will be started after install/upgrade). - if [ -x /usr/sbin/invoke-rc.d ]; then - /usr/sbin/invoke-rc.d omiserverd stop 1> /dev/null 2> /dev/null - elif [ -x /sbin/service ]; then - service omiserverd stop 1> /dev/null 2> /dev/null - fi - - # Catchall for stopping omiserver - /etc/init.d/omiserverd stop 1> /dev/null 2> /dev/null - /sbin/init.d/omiserverd stop 1> /dev/null 2> /dev/null -} - -# -# Executable code follows -# - -while [ $# -ne 0 ]; do - case "$1" in - --extract-script) - # hidden option, not part of usage - # echo " --extract-script FILE extract the script to FILE." - head -${SCRIPT_LEN} "${SCRIPT}" > "$2" - local shouldexit=true - shift 2 - ;; - - --extract-binary) - # hidden option, not part of usage - # echo " --extract-binary FILE extract the binary to FILE." 
- tail +${SCRIPT_LEN_PLUS_ONE} "${SCRIPT}" > "$2" - local shouldexit=true - shift 2 - ;; - - --extract) - verifyNoInstallationOption - installMode=E - shift 1 - ;; - - --force) - forceFlag=true - shift 1 - ;; - - --install) - verifyNoInstallationOption - installMode=I - shift 1 - ;; - - --purge) - verifyNoInstallationOption - installMode=P - shouldexit=true - shift 1 - ;; - - --remove) - verifyNoInstallationOption - installMode=R - shouldexit=true - shift 1 - ;; - - --restart-deps) - # No-op for MySQL, as there are no dependent services - shift 1 - ;; - - --upgrade) - verifyNoInstallationOption - installMode=U - shift 1 - ;; - - --debug) - echo "Starting shell debug mode." >&2 - echo "" >&2 - echo "SCRIPT_INDIRECT: $SCRIPT_INDIRECT" >&2 - echo "SCRIPT_DIR: $SCRIPT_DIR" >&2 - echo "SCRIPT: $SCRIPT" >&2 - echo >&2 - set -x - shift 1 - ;; - - -? | --help) - usage `basename $0` >&2 - cleanup_and_exit 0 - ;; - - *) - usage `basename $0` >&2 - cleanup_and_exit 1 - ;; - esac -done - -if [ -n "${forceFlag}" ]; then - if [ "$installMode" != "I" -a "$installMode" != "U" ]; then - echo "Option --force is only valid with --install or --upgrade" >&2 - cleanup_and_exit 1 - fi -fi - -if [ -z "${installMode}" ]; then - echo "$0: No options specified, specify --help for help" >&2 - cleanup_and_exit 3 -fi - -# Do we need to remove the package? -set +e -if [ "$installMode" = "R" -o "$installMode" = "P" ]; then - pkg_rm docker-cimprov - - if [ "$installMode" = "P" ]; then - echo "Purging all files in container agent ..." - rm -rf /etc/opt/microsoft/docker-cimprov /opt/microsoft/docker-cimprov /var/opt/microsoft/docker-cimprov - fi -fi - -if [ -n "${shouldexit}" ]; then - # when extracting script/tarball don't also install - cleanup_and_exit 0 -fi - -# -# Do stuff before extracting the binary here, for example test [ `id -u` -eq 0 ], -# validate space, platform, uninstall a previous version, backup config data, etc... -# - -# -# Extract the binary here. -# - -echo "Extracting..." 
- -# $PLATFORM is validated, so we know we're on Linux of some flavor -tail -n +${SCRIPT_LEN_PLUS_ONE} "${SCRIPT}" | tar xzf - -STATUS=$? -if [ ${STATUS} -ne 0 ]; then - echo "Failed: could not extract the install bundle." - cleanup_and_exit ${STATUS} -fi - -# -# Do stuff after extracting the binary here, such as actually installing the package. -# - -EXIT_STATUS=0 - -case "$installMode" in - E) - # Files are extracted, so just exit - cleanup_and_exit ${STATUS} - ;; - - I) - echo "Installing container agent ..." - - force_stop_omi_service - - pkg_add $CONTAINER_PKG - EXIT_STATUS=$? - ;; - - U) - echo "Updating container agent ..." - force_stop_omi_service - - pkg_upd $CONTAINER_PKG - EXIT_STATUS=$? - ;; - - *) - echo "$0: Invalid setting of variable \$installMode ($installMode), exiting" >&2 - cleanup_and_exit 2 -esac - -# Remove the package that was extracted as part of the bundle - -[ -f $CONTAINER_PKG.rpm ] && rm $CONTAINER_PKG.rpm -[ -f $CONTAINER_PKG.deb ] && rm $CONTAINER_PKG.deb - -if [ $? -ne 0 -o "$EXIT_STATUS" -ne "0" ]; then - cleanup_and_exit 1 -fi - -cleanup_and_exit 0 - -#####>>- This must be the last line of this script, followed by a single empty line. 
-<<##### diff --git a/installer/datafiles/base_container.data b/installer/datafiles/base_container.data index 3d992f858..3034d58c4 100644 --- a/installer/datafiles/base_container.data +++ b/installer/datafiles/base_container.data @@ -15,16 +15,16 @@ MAINTAINER: 'Microsoft Corporation' %Defines %Files -/opt/microsoft/docker-cimprov/lib/libcontainer.so; target/${{BUILD_CONFIGURATION}}/libcontainer.so; 755; root; root +/opt/microsoft/docker-cimprov/lib/libcontainer.so; intermediate/${{BUILD_CONFIGURATION}}/libcontainer.so; 755; root; root /etc/opt/microsoft/docker-cimprov/conf/installinfo.txt; installer/conf/installinfo.txt; 644; root; root; conffile /etc/opt/omi/conf/omiregister/root-cimv2/container.reg; installer/conf/omi/container.reg; 755; root; root /opt/microsoft/omsagent/plugin/filter_docker_log.rb; source/code/plugin/filter_docker_log.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_container.rb; source/code/plugin/filter_container.rb; 644; root; root +/opt/microsoft/omsagent/plugin/filter_container.rb; source/code/plugin/filter_container.rb; 644; root; root -/etc/opt/microsoft/docker-cimprov/container.conf; installer/conf/container.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/container.conf; installer/conf/container.conf; 644; root; root %Links /opt/omi/lib/libcontainer.${{SHLIB_EXT}}; /opt/microsoft/docker-cimprov/lib/libcontainer.${{SHLIB_EXT}}; 644; root; root From de20fc4b3af9dcf3da2d02baa1f81dd027ff38ae Mon Sep 17 00:00:00 2001 From: Jeff Coffler Date: Thu, 17 Mar 2016 16:03:54 -0700 Subject: [PATCH 6/7] Modify to not upgrade to same version of kit already installed --- installer/bundle/bundle_skel.sh | 523 +++++++++++++++++++++----------- installer/datafiles/linux.data | 33 +- 2 files changed, 362 insertions(+), 194 deletions(-) diff --git a/installer/bundle/bundle_skel.sh b/installer/bundle/bundle_skel.sh index f069ceb5a..859d51dcb 100644 --- a/installer/bundle/bundle_skel.sh +++ b/installer/bundle/bundle_skel.sh @@ -33,12 +33,12 
@@ # not install OMI. Why a bundle, then? Primarily so a single package can # install either a .DEB file or a .RPM file, whichever is appropraite. -set -e PATH=/usr/bin:/usr/sbin:/bin:/sbin umask 022 # Note: Because this is Linux-only, 'readlink' should work SCRIPT="`readlink -e $0`" +set +e # These symbols will get replaced during the bundle creation process. # @@ -46,7 +46,7 @@ SCRIPT="`readlink -e $0`" # Linux_REDHAT, Linux_SUSE, Linux_ULINUX # # The CONTAINER_PKG symbol should contain something like: -# docker-cimprov-1.0.0-89.rhel.6.x64. (script adds rpm or deb, as appropriate) +# docker-cimprov-1.0.0-1.universal.x86_64 (script adds rpm or deb, as appropriate) PLATFORM= CONTAINER_PKG= @@ -55,220 +55,387 @@ SCRIPT_LEN_PLUS_ONE= usage() { - echo "usage: $1 [OPTIONS]" - echo "Options:" - echo " --extract Extract contents and exit." - echo " --force Force upgrade (override version checks)." - echo " --install Install the package from the system." - echo " --purge Uninstall the package and remove all related data." - echo " --remove Uninstall the package from the system." - echo " --restart-deps Reconfigure and restart dependent services (no-op)." - echo " --upgrade Upgrade the package in the system." - echo " --debug use shell debug mode." - echo " -? | --help shows this usage text." + echo "usage: $1 [OPTIONS]" + echo "Options:" + echo " --extract Extract contents and exit." + echo " --force Force upgrade (override version checks)." + echo " --install Install the package from the system." + echo " --purge Uninstall the package and remove all related data." + echo " --remove Uninstall the package from the system." + echo " --restart-deps Reconfigure and restart dependent services (no-op)." + echo " --upgrade Upgrade the package in the system." + echo " --version Version of this shell bundle." + echo " --version-check Check versions already installed to see if upgradable." + echo " --debug use shell debug mode." + echo " -? | --help shows this usage text." 
} cleanup_and_exit() { - if [ -n "$1" ]; then - exit $1 - else - exit 0 - fi + if [ -n "$1" ]; then + exit $1 + else + exit 0 + fi +} + +check_version_installable() { + # POSIX Semantic Version <= Test + # Exit code 0 is true (i.e. installable). + # Exit code non-zero means existing version is >= version to install. + # + # Parameter: + # Installed: "x.y.z.b" (like "4.2.2.135"), for major.minor.patch.build versions + # Available: "x.y.z.b" (like "4.2.2.135"), for major.minor.patch.build versions + + if [ $# -ne 2 ]; then + echo "INTERNAL ERROR: Incorrect number of parameters passed to check_version_installable" >&2 + cleanup_and_exit 1 + fi + + # Current version installed + local INS_MAJOR=`echo $1 | cut -d. -f1` + local INS_MINOR=`echo $1 | cut -d. -f2` + local INS_PATCH=`echo $1 | cut -d. -f3` + local INS_BUILD=`echo $1 | cut -d. -f4` + + # Available version number + local AVA_MAJOR=`echo $2 | cut -d. -f1` + local AVA_MINOR=`echo $2 | cut -d. -f2` + local AVA_PATCH=`echo $2 | cut -d. -f3` + local AVA_BUILD=`echo $2 | cut -d. -f4` + + # Check bounds on MAJOR + if [ $INS_MAJOR -lt $AVA_MAJOR ]; then + return 0 + elif [ $INS_MAJOR -gt $AVA_MAJOR ]; then + return 1 + fi + + # MAJOR matched, so check bounds on MINOR + if [ $INS_MINOR -lt $AVA_MINOR ]; then + return 0 + elif [ $INS_MINOR -gt $AVA_MINOR ]; then + return 1 + fi + + # MINOR matched, so check bounds on PATCH + if [ $INS_PATCH -lt $AVA_PATCH ]; then + return 0 + elif [ $INS_PATCH -gt $AVA_PATCH ]; then + return 1 + fi + + # PATCH matched, so check bounds on BUILD + if [ $INS_BUILD -lt $AVA_BUILD ]; then + return 0 + elif [ $INS_BUILD -gt $AVA_BUILD ]; then + return 1 + fi + + # Version available is identical to installed version, so don't install + return 1 +} + +getVersionNumber() +{ + # Parse a version number from a string. 
+ # + # Parameter 1: string to parse version number string from + # (should contain something like mumble-4.2.2.135.universal.x86.tar) + # Parameter 2: prefix to remove ("mumble-" in above example) + + if [ $# -ne 2 ]; then + echo "INTERNAL ERROR: Incorrect number of parameters passed to getVersionNumber" >&2 + cleanup_and_exit 1 + fi + + echo $1 | sed -e "s/$2//" -e 's/\.universal\..*//' -e 's/\.x64.*//' -e 's/\.x86.*//' -e 's/-/./' } verifyNoInstallationOption() { - if [ -n "${installMode}" ]; then - echo "$0: Conflicting qualifiers, exiting" >&2 - cleanup_and_exit 1 - fi + if [ -n "${installMode}" ]; then + echo "$0: Conflicting qualifiers, exiting" >&2 + cleanup_and_exit 1 + fi - return; + return; } ulinux_detect_installer() { - INSTALLER= + INSTALLER= + + # If DPKG lives here, assume we use that. Otherwise we use RPM. + type dpkg > /dev/null 2>&1 + if [ $? -eq 0 ]; then + INSTALLER=DPKG + else + INSTALLER=RPM + fi +} - # If DPKG lives here, assume we use that. Otherwise we use RPM. - type dpkg > /dev/null 2>&1 - if [ $? -eq 0 ]; then - INSTALLER=DPKG - else - INSTALLER=RPM - fi +# $1 - The name of the package to check as to whether it's installed +check_if_pkg_is_installed() { + if [ "$INSTALLER" = "DPKG" ]; then + dpkg -s $1 2> /dev/null | grep Status | grep " installed" 1> /dev/null + else + rpm -q $1 2> /dev/null 1> /dev/null + fi + + return $? 
} # $1 - The filename of the package to be installed +# $2 - The package name of the package to be installed pkg_add() { - pkg_filename=$1 - ulinux_detect_installer + pkg_filename=$1 + pkg_name=$2 - if [ "$INSTALLER" = "DPKG" ]; then - dpkg --install --refuse-downgrade ${pkg_filename}.deb - else - rpm --install ${pkg_filename}.rpm + echo "----- Installing package: $2 ($1) -----" + + if [ -z "${forceFlag}" -a -n "$3" ]; then + if [ $3 -ne 0 ]; then + echo "Skipping package since existing version >= version available" + return 0 fi + fi + + if [ "$INSTALLER" = "DPKG" ]; then + dpkg --install --refuse-downgrade ${pkg_filename}.deb + else + rpm --install ${pkg_filename}.rpm + fi } # $1 - The package name of the package to be uninstalled # $2 - Optional parameter. Only used when forcibly removing omi on SunOS pkg_rm() { - ulinux_detect_installer - if [ "$INSTALLER" = "DPKG" ]; then - if [ "$installMode" = "P" ]; then - dpkg --purge $1 - else - dpkg --remove $1 - fi + echo "----- Removing package: $1 -----" + if [ "$INSTALLER" = "DPKG" ]; then + if [ "$installMode" = "P" ]; then + dpkg --purge $1 else - rpm --erase $1 + dpkg --remove $1 fi + else + rpm --erase $1 + fi } - # $1 - The filename of the package to be installed +# $2 - The package name of the package to be installed +# $3 - Okay to upgrade the package? 
(Optional) pkg_upd() { - pkg_filename=$1 - ulinux_detect_installer - if [ "$INSTALLER" = "DPKG" ]; then - [ -z "${forceFlag}" ] && FORCE="--refuse-downgrade" - dpkg --install $FORCE ${pkg_filename}.deb + pkg_filename=$1 + pkg_name=$2 + pkg_allowed=$3 + + echo "----- Updating package: $pkg_name ($pkg_filename) -----" + + if [ -z "${forceFlag}" -a -n "$pkg_allowed" ]; then + if [ $pkg_allowed -ne 0 ]; then + echo "Skipping package since existing version >= version available" + return 0 + fi + fi - export PATH=/usr/local/sbin:/usr/sbin:/sbin:$PATH + if [ "$INSTALLER" = "DPKG" ]; then + [ -z "${forceFlag}" ] && FORCE="--refuse-downgrade" + dpkg --install $FORCE ${pkg_filename}.deb + + export PATH=/usr/local/sbin:/usr/sbin:/sbin:$PATH + else + [ -n "${forceFlag}" ] && FORCE="--force" + rpm --upgrade $FORCE ${pkg_filename}.rpm + fi +} + +getInstalledVersion() +{ + # Parameter: Package to check if installed + # Returns: Printable string (version installed or "None") + if check_if_pkg_is_installed $1; then + if [ "$INSTALLER" = "DPKG" ]; then + local version=`dpkg -s $1 2> /dev/null | grep "Version: "` + getVersionNumber $version "Version: " else - [ -n "${forceFlag}" ] && FORCE="--force" - rpm --upgrade $FORCE ${pkg_filename}.rpm + local version=`rpm -q $1 2> /dev/null` + getVersionNumber $version ${1}- fi + else + echo "None" + fi +} + +shouldInstall_mysql() +{ + local versionInstalled=`getInstalledVersion mysql-cimprov` + [ "$versionInstalled" = "None" ] && return 0 + local versionAvailable=`getVersionNumber $MYSQL_PKG mysql-cimprov-` + + check_version_installable $versionInstalled $versionAvailable } -force_stop_omi_service() { - # For any installation or upgrade, we should be shutting down omiserver (and it will be started after install/upgrade). 
- if [ -x /usr/sbin/invoke-rc.d ]; then - /usr/sbin/invoke-rc.d omiserverd stop 1> /dev/null 2> /dev/null - elif [ -x /sbin/service ]; then - service omiserverd stop 1> /dev/null 2> /dev/null +getInstalledVersion() +{ + # Parameter: Package to check if installed + # Returns: Printable string (version installed or "None") + if check_if_pkg_is_installed $1; then + if [ "$INSTALLER" = "DPKG" ]; then + local version="`dpkg -s $1 2> /dev/null | grep 'Version: '`" + getVersionNumber "$version" "Version: " + else + local version=`rpm -q $1 2> /dev/null` + getVersionNumber $version ${1}- fi + else + echo "None" + fi +} - # Catchall for stopping omiserver - /etc/init.d/omiserverd stop 1> /dev/null 2> /dev/null - /sbin/init.d/omiserverd stop 1> /dev/null 2> /dev/null +shouldInstall_docker() +{ + local versionInstalled=`getInstalledVersion docker-cimprov` + [ "$versionInstalled" = "None" ] && return 0 + local versionAvailable=`getVersionNumber $CONTAINER_PKG docker-cimprov-` + + check_version_installable $versionInstalled $versionAvailable } # # Executable code follows # +ulinux_detect_installer + while [ $# -ne 0 ]; do - case "$1" in - --extract-script) - # hidden option, not part of usage - # echo " --extract-script FILE extract the script to FILE." - head -${SCRIPT_LEN} "${SCRIPT}" > "$2" - local shouldexit=true - shift 2 - ;; - - --extract-binary) - # hidden option, not part of usage - # echo " --extract-binary FILE extract the binary to FILE." 
- tail +${SCRIPT_LEN_PLUS_ONE} "${SCRIPT}" > "$2" - local shouldexit=true - shift 2 - ;; - - --extract) - verifyNoInstallationOption - installMode=E - shift 1 - ;; - - --force) - forceFlag=true - shift 1 - ;; - - --install) - verifyNoInstallationOption - installMode=I - shift 1 - ;; - - --purge) - verifyNoInstallationOption - installMode=P - shouldexit=true - shift 1 - ;; - - --remove) - verifyNoInstallationOption - installMode=R - shouldexit=true - shift 1 - ;; - - --restart-deps) - # No-op for Docker, as there are no dependent services - shift 1 - ;; - - --upgrade) - verifyNoInstallationOption - installMode=U - shift 1 - ;; - - --debug) - echo "Starting shell debug mode." >&2 - echo "" >&2 - echo "SCRIPT_INDIRECT: $SCRIPT_INDIRECT" >&2 - echo "SCRIPT_DIR: $SCRIPT_DIR" >&2 - echo "SCRIPT: $SCRIPT" >&2 - echo >&2 - set -x - shift 1 - ;; - - -? | --help) - usage `basename $0` >&2 - cleanup_and_exit 0 - ;; - - *) - usage `basename $0` >&2 - cleanup_and_exit 1 - ;; - esac + case "$1" in + --extract-script) + # hidden option, not part of usage + # echo " --extract-script FILE extract the script to FILE." + head -${SCRIPT_LEN} "${SCRIPT}" > "$2" + local shouldexit=true + shift 2 + ;; + + --extract-binary) + # hidden option, not part of usage + # echo " --extract-binary FILE extract the binary to FILE." 
+ tail +${SCRIPT_LEN_PLUS_ONE} "${SCRIPT}" > "$2" + local shouldexit=true + shift 2 + ;; + + --extract) + verifyNoInstallationOption + installMode=E + shift 1 + ;; + + --force) + forceFlag=true + shift 1 + ;; + + --install) + verifyNoInstallationOption + installMode=I + shift 1 + ;; + + --purge) + verifyNoInstallationOption + installMode=P + shouldexit=true + shift 1 + ;; + + --remove) + verifyNoInstallationOption + installMode=R + shouldexit=true + shift 1 + ;; + + --restart-deps) + # No-op for Docker, as there are no dependent services + shift 1 + ;; + + --upgrade) + verifyNoInstallationOption + installMode=U + shift 1 + ;; + + --version) + echo "Version: `getVersionNumber $CONTAINER_PKG docker-cimprov-`" + exit 0 + ;; + + --version-check) + printf '%-18s%-15s%-15s%-15s\n\n' Package Installed Available Install? + + # docker-cimprov itself + versionInstalled=`getInstalledVersion docker-cimprov` + versionAvailable=`getVersionNumber $CONTAINER_PKG docker-cimprov-` + if shouldInstall_docker; then shouldInstall="Yes"; else shouldInstall="No"; fi + printf '%-18s%-15s%-15s%-15s\n' docker-cimprov $versionInstalled $versionAvailable $shouldInstall + + exit 0 + ;; + + --debug) + echo "Starting shell debug mode." >&2 + echo "" >&2 + echo "SCRIPT_INDIRECT: $SCRIPT_INDIRECT" >&2 + echo "SCRIPT_DIR: $SCRIPT_DIR" >&2 + echo "SCRIPT: $SCRIPT" >&2 + echo >&2 + set -x + shift 1 + ;; + + -? 
| --help) + usage `basename $0` >&2 + cleanup_and_exit 0 + ;; + + *) + usage `basename $0` >&2 + cleanup_and_exit 1 + ;; + esac done if [ -n "${forceFlag}" ]; then - if [ "$installMode" != "I" -a "$installMode" != "U" ]; then - echo "Option --force is only valid with --install or --upgrade" >&2 - cleanup_and_exit 1 - fi + if [ "$installMode" != "I" -a "$installMode" != "U" ]; then + echo "Option --force is only valid with --install or --upgrade" >&2 + cleanup_and_exit 1 + fi fi if [ -z "${installMode}" ]; then - echo "$0: No options specified, specify --help for help" >&2 - cleanup_and_exit 3 + echo "$0: No options specified, specify --help for help" >&2 + cleanup_and_exit 3 fi # Do we need to remove the package? set +e if [ "$installMode" = "R" -o "$installMode" = "P" ]; then - pkg_rm docker-cimprov + pkg_rm docker-cimprov - if [ "$installMode" = "P" ]; then - echo "Purging all files in container agent ..." - rm -rf /etc/opt/microsoft/docker-cimprov /opt/microsoft/docker-cimprov /var/opt/microsoft/docker-cimprov - fi + if [ "$installMode" = "P" ]; then + echo "Purging all files in container agent ..." + rm -rf /etc/opt/microsoft/docker-cimprov /opt/microsoft/docker-cimprov /var/opt/microsoft/docker-cimprov + fi fi if [ -n "${shouldexit}" ]; then - # when extracting script/tarball don't also install - cleanup_and_exit 0 + # when extracting script/tarball don't also install + cleanup_and_exit 0 fi # @@ -286,8 +453,8 @@ echo "Extracting..." tail -n +${SCRIPT_LEN_PLUS_ONE} "${SCRIPT}" | tar xzf - STATUS=$? if [ ${STATUS} -ne 0 ]; then - echo "Failed: could not extract the install bundle." - cleanup_and_exit ${STATUS} + echo "Failed: could not extract the install bundle." + cleanup_and_exit ${STATUS} fi # @@ -297,31 +464,29 @@ fi EXIT_STATUS=0 case "$installMode" in - E) - # Files are extracted, so just exit - cleanup_and_exit ${STATUS} - ;; - - I) - echo "Installing container agent ..." 
+ E) + # Files are extracted, so just exit + cleanup_and_exit ${STATUS} + ;; - force_stop_omi_service + I) + echo "Installing container agent ..." - pkg_add $CONTAINER_PKG - EXIT_STATUS=$? - ;; + pkg_add $CONTAINER_PKG docker-cimprov + EXIT_STATUS=$? + ;; - U) - echo "Updating container agent ..." - force_stop_omi_service + U) + echo "Updating container agent ..." - pkg_upd $CONTAINER_PKG - EXIT_STATUS=$? - ;; + shouldInstall_docker + pkg_upd $CONTAINER_PKG docker-cimprov $? + EXIT_STATUS=$? + ;; - *) - echo "$0: Invalid setting of variable \$installMode ($installMode), exiting" >&2 - cleanup_and_exit 2 + *) + echo "$0: Invalid setting of variable \$installMode ($installMode), exiting" >&2 + cleanup_and_exit 2 esac # Remove the package that was extracted as part of the bundle @@ -330,7 +495,7 @@ esac [ -f $CONTAINER_PKG.deb ] && rm $CONTAINER_PKG.deb if [ $? -ne 0 -o "$EXIT_STATUS" -ne "0" ]; then - cleanup_and_exit 1 + cleanup_and_exit 1 fi cleanup_and_exit 0 diff --git a/installer/datafiles/linux.data b/installer/datafiles/linux.data index d441bfd13..61e07d45a 100644 --- a/installer/datafiles/linux.data +++ b/installer/datafiles/linux.data @@ -13,11 +13,14 @@ if ${{PERFORMING_UPGRADE_NOT}}; then fi echo -echo "In order to view container logs in OMS, Docker needs to be configured with the correct log driver using the option:" 1>&2 -echo +echo "In order to view container logs in OMS, Docker needs to be configured" 1>&2 +echo "with the correct log driver using the option:" 1>&2 +echo 1>&2 echo " --log-driver=fluentd --log-opt fluentd-address=localhost:" 1>&2 -echo -echo "where is the port that is exposed in the config file (default: 25225). Specify this option either when starting the Docker daemon or when starting any container that you want to send logs to OMS." 1>&2 +echo 1>&2 +echo "where is the port that is exposed in the config file (default:" 1>&2 +echo "25225). 
Specify this option either when starting the Docker daemon or" 1>&2 +echo "when starting any container that you want to send logs to OMS." 1>&2 %Postuninstall_1000 # Calling sequence for RPM pre/post scripts, during upgrade, is as follows: @@ -31,20 +34,20 @@ echo "where is the port that is exposed in the config file (default: 2522 # # Thus, if we're an upgrade, skip all of this cleanup if ${{PERFORMING_UPGRADE_NOT}}; then - # Remove linkage in case it exists - CONTAINER_BUILD_LIBRARY=${{CONTAINER_BUILD_LIBRARY}} - LIBRARY_DIR='/opt/microsoft/docker-cimprov/lib' - [ -e ${LIBRARY_DIR}/${CONTAINER_BUILD_LIBRARY} ] && rm ${LIBRARY_DIR}/${CONTAINER_BUILD_LIBRARY} - rmdir /opt/microsoft/docker-cimprov/lib 2> /dev/null - rmdir /opt/microsoft/docker-cimprov 2> /dev/null - rmdir /opt/microsoft 2> /dev/null - rmdir /opt 2> /dev/null + # Remove linkage in case it exists + CONTAINER_BUILD_LIBRARY=${{CONTAINER_BUILD_LIBRARY}} + LIBRARY_DIR='/opt/microsoft/docker-cimprov/lib' + [ -e ${LIBRARY_DIR}/${CONTAINER_BUILD_LIBRARY} ] && rm ${LIBRARY_DIR}/${CONTAINER_BUILD_LIBRARY} + rmdir /opt/microsoft/docker-cimprov/lib 2> /dev/null + rmdir /opt/microsoft/docker-cimprov 2> /dev/null + rmdir /opt/microsoft 2> /dev/null + rmdir /opt 2> /dev/null fi %Postuninstall_1100 # If we're called for upgrade, don't do anything if ${{PERFORMING_UPGRADE_NOT}}; then - # Reload the OMI server - ${{OMI_SERVICE}} reload - ${{OMS_SERVICE}} reload + # Reload the OMI server + ${{OMI_SERVICE}} reload + ${{OMS_SERVICE}} reload fi From ff50af2997861a6aa3c5b43c877efa4061f44b5e Mon Sep 17 00:00:00 2001 From: Jeff Coffler Date: Fri, 18 Mar 2016 12:53:43 -0700 Subject: [PATCH 7/7] Eliminate extranious mkdir's from the Makefile (global replace) --- .gitignore | 2 +- build/Makefile | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.gitignore b/.gitignore index 35ad70b8f..92c8c0cf2 100644 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ /intermediate/ /target/ -build/config.mak 
+/build/config.mak # Unit test files diff --git a/build/Makefile b/build/Makefile index acf07a0db..5a9c1e1c1 100644 --- a/build/Makefile +++ b/build/Makefile @@ -18,7 +18,7 @@ endif include ../../docker.version ifndef CONTAINER_BUILDVERSION_STATUS -$(error "Is Makefile.version missing? Please re-run configure") +$(error "Is docker.version missing? Please re-run configure") endif SOURCE_DIR := $(BASE_DIR)/source/code @@ -258,7 +258,6 @@ $(PROVIDER_LIBRARY): INCLUDES += $(PROVIDER_INCLUDE_FLAGS) $(PROVIDER_LIBRARY): CFLAGS += $(PROVIDER_COMPILE_FLAGS) $(PROVIDER_LIBRARY): CXXFLAGS += $(PROVIDER_COMPILE_FLAGS) $(PROVIDER_LIBRARY): $(STATIC_PROVIDERLIB_OBJFILES) $(STATIC_PROVIDERLIB_LOGPOLICY) $(INCLUDE_DEFINES) $(PROVIDER_HEADERS) - $(MKPATH) $(INTERMEDIATE_DIR) $(MKPATH) $(INTERMEDIATE_DIR) g++ $(PROVIDER_COMPILE_FLAGS) $(SHARED_FLAGS) $(PROVIDER_INCLUDE_FLAGS) -o $@ $(STATIC_PROVIDERLIB_OBJFILES) $(LINK_LIBRARIES) @@ -294,7 +293,6 @@ $(INTERMEDIATE_DIR)/testrunner: INCLUDES += $(PROVIDER_TEST_INCLUDE_FLAGS) -I$(P $(INTERMEDIATE_DIR)/testrunner: CFLAGS += $(PROVIDER_COMPILE_FLAGS) $(INTERMEDIATE_DIR)/testrunner: CXXFLAGS += $(PROVIDER_COMPILE_FLAGS) $(INTERMEDIATE_DIR)/testrunner : $(STATIC_PROVIDER_TEST_OBJFILES) $(STATIC_PROVIDERLIB_OBJFILES) $(INCLUDE_DEFINES) $(PROVIDER_HEADERS) - $(MKPATH) $(INTERMEDIATE_DIR) $(MKPATH) $(INTERMEDIATE_DIR) g++ $(PROVIDER_COMPILE_FLAGS) $(PROVIDER_TEST_INCLUDE_FLAGS) -o $@ $(STATIC_PROVIDER_PAL_UNITFILES) $(STATIC_PROVIDER_TEST_OBJFILES) $(STATIC_PROVIDERLIB_OBJFILES) $(LINK_LIBRARIES) $(PROVIDER_TEST_LINK_LIBRARIES) @@ -372,7 +370,7 @@ ifeq ($(ULINUX),1) ../installer/bundle/create_bundle.sh $(PF)_$(PF_DISTRO) $(INTERMEDIATE_DIR) $(OUTPUT_PACKAGE_PREFIX) # Copy the shell bundle to the target directory - mkdir -p $(TARGET_DIR) + $(MKPATH) $(TARGET_DIR) cd $(INTERMEDIATE_DIR); cp `cat $(INTERMEDIATE_DIR)/package_filename`.sh $(TARGET_DIR) else