From 3c5b46d3ca41ee6df5092845de647e1b32cb6fb6 Mon Sep 17 00:00:00 2001 From: Vishwanath Narasimhan Date: Wed, 1 Aug 2018 16:54:33 -0700 Subject: [PATCH 01/25] Updatng release history --- README.md | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 18e50ebe3..a822f6f97 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,35 @@ -# Docker Monitoring Agent for OMI Server +# AKS Container Health monitoring -### Code of Conduct +## Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +## Release History + +### 7/31/2018 - Version microsoft/oms:ciprod07312018 +- Changes for node lost scenario (roll-up pod & container statuses as Unknown) +- Discover unscheduled pods +- KubeNodeInventory - delimit multiple true node conditions for node status +- UTF Encoding support for container logs +- Container environment variable truncated to 200K +- Handle json parsing errors for OMI provider for docker +- Test mode enablement for ACS-engine testing +- Latest OMS agent (1.6.0-163) +- Latest OMI (1.4.2.5) + + +### 6/7/2018 - Version microsoft/oms:ciprod06072018 +- Remove node-0 dependency +- Remove passing WSID & Key as environment variables and pass them as kubernetes secret (for non-AKS; we already pass them as secret for AKS) +- Please note that if you are manually deploying thru yaml you need to - +- Provide workspaceid & key as base64 encoded strings with in double quotes (.yaml has comments to do so as well) +- Provide cluster name twice (for each container – daemonset & replicaset) + +### 5/8/2018 - Version microsoft/oms:ciprod05082018 +- Kubernetes RBAC enablement +- Latest released omsagent (1.6.0-42) +- Bug fix 
so that we do not collect kube-system namespace container logs when kube api calls fail occasionally (Bug #215107) +- .yaml changes (for RBAC) From d31f5889ec2f9ff6981efc72f2166b0430bffae9 Mon Sep 17 00:00:00 2001 From: rashmy Date: Wed, 1 Aug 2018 16:52:40 -0700 Subject: [PATCH 02/25] fixing the plugin logs for emit stream --- source/code/plugin/in_cadvisor_perf.rb | 4 ++-- source/code/plugin/in_kube_nodes.rb | 7 ++++--- source/code/plugin/in_kube_podinventory.rb | 3 ++- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/source/code/plugin/in_cadvisor_perf.rb b/source/code/plugin/in_cadvisor_perf.rb index 01f2fa9f4..2e28650f6 100644 --- a/source/code/plugin/in_cadvisor_perf.rb +++ b/source/code/plugin/in_cadvisor_perf.rb @@ -55,10 +55,10 @@ def enumerate() end router.emit_stream(@tag, eventStream) if eventStream - if (ENV['ISTEST'] == true && eventStream.count > 0) + @@istestvar = ENV['ISTEST'] + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp('true') == 0 && eventStream.count > 0) $log.info("in_cadvisor_perf::emit-stream : Success @ #{Time.now.utc.iso8601}") end - rescue => errorStr $log.warn "Failed to retrieve cadvisor metric data: #{errorStr}" $log.debug_backtrace(errorStr.backtrace) diff --git a/source/code/plugin/in_kube_nodes.rb b/source/code/plugin/in_kube_nodes.rb index 473978cbc..6cbad0897 100644 --- a/source/code/plugin/in_kube_nodes.rb +++ b/source/code/plugin/in_kube_nodes.rb @@ -99,9 +99,10 @@ def enumerate eventStream.add(emitTime, wrapper) if wrapper end router.emit_stream(@tag, eventStream) if eventStream - if (ENV['ISTEST'] == true && eventStream.count > 0) - $log.info("in_kube_nodeinventory::emit-stream : Success @ #{Time.now.utc.iso8601}") - end + @@istestvar = ENV['ISTEST'] + if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp('true') == 0 && eventStream.count > 0) + $log.info("in_kube_nodeinventory::emit-stream : Success @ #{Time.now.utc.iso8601}") + end end rescue => errorStr $log.warn "Failed to retrieve node inventory: #{errorStr}" diff --git a/source/code/plugin/in_kube_podinventory.rb b/source/code/plugin/in_kube_podinventory.rb index a96a0b207..656d1aa48 100644 --- a/source/code/plugin/in_kube_podinventory.rb +++ b/source/code/plugin/in_kube_podinventory.rb @@ -190,7 +190,8 @@ def parse_and_emit_records(podInventory, serviceList) end end #podInventory block end router.emit_stream(@tag, eventStream) if eventStream - if (ENV['ISTEST'] == true && eventStream.count > 0) + @@istestvar = ENV['ISTEST'] + if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp('true') == 0 && eventStream.count > 0) $log.info("in_kube_podinventory::emit-stream : Success @ #{Time.now.utc.iso8601}") end rescue => errorStr From 11fd5f6d4e3dd0b4fe57c8f4a551d1da4e8fa41f Mon Sep 17 00:00:00 2001 From: rashmy Date: Sun, 5 Aug 2018 00:37:52 -0700 Subject: [PATCH 03/25] updating log message --- source/code/plugin/in_cadvisor_perf.rb | 2 +- source/code/plugin/in_kube_nodes.rb | 2 +- source/code/plugin/in_kube_podinventory.rb | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/code/plugin/in_cadvisor_perf.rb b/source/code/plugin/in_cadvisor_perf.rb index 2e28650f6..5b551f74e 100644 --- a/source/code/plugin/in_cadvisor_perf.rb +++ b/source/code/plugin/in_cadvisor_perf.rb @@ -57,7 +57,7 @@ def enumerate() router.emit_stream(@tag, eventStream) if eventStream @@istestvar = ENV['ISTEST'] if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp('true') == 0 && eventStream.count > 0) - $log.info("in_cadvisor_perf::emit-stream : Success @ #{Time.now.utc.iso8601}") + $log.info("cAdvisorPerfEmitStreamSuccess @ #{Time.now.utc.iso8601}") end rescue => errorStr $log.warn "Failed to retrieve cadvisor metric data: #{errorStr}" diff --git a/source/code/plugin/in_kube_nodes.rb b/source/code/plugin/in_kube_nodes.rb index 6cbad0897..edbbdd37f 100644 --- a/source/code/plugin/in_kube_nodes.rb +++ b/source/code/plugin/in_kube_nodes.rb @@ -101,7 +101,7 @@ def enumerate router.emit_stream(@tag, eventStream) if eventStream @@istestvar = ENV['ISTEST'] if (!@@istestvar.nil? && !@@istestvar.empty? && @@istestvar.casecmp('true') == 0 && eventStream.count > 0) - $log.info("in_kube_nodeinventory::emit-stream : Success @ #{Time.now.utc.iso8601}") + $log.info("kubeNodeInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end end rescue => errorStr diff --git a/source/code/plugin/in_kube_podinventory.rb b/source/code/plugin/in_kube_podinventory.rb index 656d1aa48..f478705f6 100644 --- a/source/code/plugin/in_kube_podinventory.rb +++ b/source/code/plugin/in_kube_podinventory.rb @@ -192,7 +192,7 @@ def parse_and_emit_records(podInventory, serviceList) router.emit_stream(@tag, eventStream) if eventStream @@istestvar = ENV['ISTEST'] if (!@@istestvar.nil? && !@@istestvar.empty? 
&& @@istestvar.casecmp('true') == 0 && eventStream.count > 0) - $log.info("in_kube_podinventory::emit-stream : Success @ #{Time.now.utc.iso8601}") + $log.info("kubePodInventoryEmitStreamSuccess @ #{Time.now.utc.iso8601}") end rescue => errorStr $log.warn "Failed in parse_and_emit_record pod inventory: #{errorStr}" From 87a9cf8ddb77f789a805b433ca4ff92556f7d8a0 Mon Sep 17 00:00:00 2001 From: r-dilip Date: Thu, 16 Aug 2018 11:58:10 -0700 Subject: [PATCH 04/25] Remove Log Processing from fluentd configuration --- installer/conf/container.conf | 32 -- .../code/plugin/containerlogtailfilereader.rb | 396 ------------------ source/code/plugin/filter_container_log.rb | 42 -- 3 files changed, 470 deletions(-) delete mode 100644 source/code/plugin/containerlogtailfilereader.rb delete mode 100644 source/code/plugin/filter_container_log.rb diff --git a/installer/conf/container.conf b/installer/conf/container.conf index a20fdbe5a..9eaed9b47 100755 --- a/installer/conf/container.conf +++ b/installer/conf/container.conf @@ -50,18 +50,6 @@ ] -# Container log -# Example line which matches the format: -# {"log"=>"Test 9th January\n", "stream"=>"stdout", "time"=>"2018-01-09T23:14:39.273429353Z", "ContainerID"=>"ee1ec26aa974af81b21fff24cef8ec78bf7ac1558b5de6f1eb1a5b28ecd6d559", "Image"=>"ubuntu", "Name"=>"determined_wilson", "SourceSystem"=>"Containers"} -# NOTE: The LogEntryTimeStamp is just being appended in the begining of the LogEntry field. 
This is the actual time the log was generated and the TimeGenerated field in Kusto is different - - type containerlog_sudo_tail - pos_file /var/opt/microsoft/docker-cimprov/state/ContainerLogFile.pos.log - tag oms.container.log - format /\"log\"=>\"(?.*)", \"stream\"=>\"(?.*)", \"time\"=>\"(?.*)", \"ContainerID\"=>\"(?.*)", \"Image\"=>\"(?.*)", \"Name\"=>\"(?.*)", \"SourceSystem\"=>\"(?.*)"}/ - run_interval 60s - - # Container host inventory type omi @@ -95,11 +83,6 @@ type filter_container -# Seperate filter for container log - - type filter_container_log - - type out_oms_api log_level debug @@ -152,21 +135,6 @@ max_retry_wait 9m - - type out_oms - log_level debug - num_threads 5 - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_log*.buffer - buffer_queue_limit 20 - buffer_queue_full_action drop_oldest_chunk - flush_interval 20s - retry_limit 10 - retry_wait 15s - max_retry_wait 9m - - type out_oms log_level info diff --git a/source/code/plugin/containerlogtailfilereader.rb b/source/code/plugin/containerlogtailfilereader.rb deleted file mode 100644 index 2d55b1d73..000000000 --- a/source/code/plugin/containerlogtailfilereader.rb +++ /dev/null @@ -1,396 +0,0 @@ - -require 'optparse' -require 'json' -require 'logger' -require_relative 'omslog' -require 'fluent/filter' - -module ContainerLogTailscript - - class ContainerLogNewTail - def initialize(paths) - @paths = paths - @tails = {} - @pos_file = $options[:pos_file] - @read_from_head = $options[:read_from_head] - @pf = nil - @pf_file = nil - - @log = Logger.new(STDERR) - @log.formatter = proc do |severity, time, progname, msg| - "#{severity} #{msg}\n" - end - end - - attr_reader :paths - - def start - start_watchers(@paths) unless @paths.empty? 
- end - - def shutdown - @pf_file.close if @pf_file - end - - def setup_watcher(path, pe) - tw = TailWatcher.new(path, pe, @read_from_head, @log, &method(:receive_lines)) - tw.on_notify - tw - end - - def start_watchers(paths) - if @pos_file - @pf_file = File.open(@pos_file, File::RDWR|File::CREAT) - @pf_file.sync = true - @pf = PositionFile.parse(@pf_file) - end - paths.each { |path| - pe = nil - if @pf - pe = @pf[path] #pe is FilePositionEntry instance - if pe.read_inode.zero? - begin - pe.update(File::Stat.new(path).ino, 0) - rescue Errno::ENOENT - @log.warn "#{path} not found. Continuing without tailing it." - end - end - end - - @tails[path] = setup_watcher(path, pe) - } - end - - def receive_lines(lines, tail_watcher) - unless lines.empty? - puts lines - end - return true - end - - class TailWatcher - def initialize(path, pe, read_from_head, log, &receive_lines) - @path = path - @pe = pe || MemoryPositionEntry.new - @read_from_head = read_from_head - @log = log - @receive_lines = receive_lines - @rotate_handler = RotateHandler.new(path, log, &method(:on_rotate)) - @io_handler = nil - @containerIDFilePath = "/var/opt/microsoft/docker-cimprov/state/ContainerInventory/" - end - - attr_reader :path - - def wrap_receive_lines(lines) - newLines = [] - containerID = @path.split('/').last.chomp('-json.log') - containerInspectInformation = @containerIDFilePath + containerID - tempContainerInfo = {} - begin - File.open(containerInspectInformation) { |f| tempContainerInfo = JSON.parse(f.readline)} - lines.each { |line| - unless line.empty? 
- newLine = {} - newLine = JSON.parse(line) - newLine["ContainerID"] = containerID - newLine["Image"] = tempContainerInfo["Image"] - newLine["Name"] = tempContainerInfo["ElementName"] - newLine["SourceSystem"] = "Containers" - newLines.push(newLine) - end - } - rescue Exception => e - #File doesn't exist or error in reading the data - @log.error "Caught exception when opening file -> #{e}" - end - @receive_lines.call(newLines, self) - end - - def on_notify - @rotate_handler.on_notify if @rotate_handler - return unless @io_handler - @io_handler.on_notify - end - - def on_rotate(io) - if io - # first time - stat = io.stat - fsize = stat.size - inode = stat.ino - - last_inode = @pe.read_inode - if @read_from_head - pos = 0 - @pe.update(inode, pos) - elsif inode == last_inode - # rotated file has the same inode number as the pos_file. - # seek to the saved position - pos = @pe.read_pos - elsif last_inode != 0 - # read data from the head of the rotated file. - pos = 0 - @pe.update(inode, pos) - else - # this is the first MemoryPositionEntry for the first time fluentd started. - # seeks to the end of the file to know where to start tailing - pos = fsize - @pe.update(inode, pos) - end - io.seek(pos) - @io_handler = IOHandler.new(io, @pe, @log, &method(:wrap_receive_lines)) - else - @io_handler = NullIOHandler.new - end - end - - class IOHandler - def initialize(io, pe, log, &receive_lines) - @log = log - @io = io - @pe = pe - @log = log - @read_lines_limit = 100 - @receive_lines = receive_lines - @buffer = ''.force_encoding('ASCII-8BIT') - @iobuf = ''.force_encoding('ASCII-8BIT') - @lines = [] - end - - attr_reader :io - - def on_notify - begin - read_more = false - if @lines.empty? - begin - while true - if @buffer.empty? 
- @io.readpartial(512, @buffer) - else - @buffer << @io.readpartial(512, @iobuf) - end - while line = @buffer.slice!(/.*?\n/m) - @lines << line - end - if @lines.size >= @read_lines_limit - # not to use too much memory in case the file is very large - read_more = true - break - end - end - rescue EOFError - end - end - - unless @lines.empty? - if @receive_lines.call(@lines) - @pe.update_pos(@io.pos - @buffer.bytesize) - @lines.clear - else - read_more = false - end - end - end while read_more - - rescue - @log.error "#{$!.to_s}" - close - end - - def close - @io.close unless @io.closed? - end - end - - class NullIOHandler - def initialize - end - - def io - end - - def on_notify - end - - def close - end - end - - class RotateHandler - def initialize(path, log, &on_rotate) - @path = path - @inode = nil - @fsize = -1 # first - @on_rotate = on_rotate - @log = log - end - - def on_notify - begin - stat = File.stat(@path) #returns a File::Stat object for the file named @path - inode = stat.ino - fsize = stat.size - rescue Errno::ENOENT - # moved or deleted - inode = nil - fsize = 0 - end - - begin - if @inode != inode || fsize < @fsize - # rotated or truncated - begin - io = File.open(@path) - rescue Errno::ENOENT - end - @on_rotate.call(io) - end - @inode = inode - @fsize = fsize - end - - rescue - @log.error "#{$!.to_s}" - end - end - end - - - class PositionFile - UNWATCHED_POSITION = 0xffffffffffffffff - - def initialize(file, map, last_pos) - @file = file - @map = map - @last_pos = last_pos - end - - def [](path) - if m = @map[path] - return m - end - - @file.pos = @last_pos - @file.write path - @file.write "\t" - seek = @file.pos - @file.write "0000000000000000\t0000000000000000\n" - @last_pos = @file.pos - - @map[path] = FilePositionEntry.new(@file, seek) - end - - def self.parse(file) - compact(file) - - map = {} - file.pos = 0 - file.each_line {|line| - m = /^([^\t]+)\t([0-9a-fA-F]+)\t([0-9a-fA-F]+)/.match(line) - next unless m - path = m[1] - seek = file.pos 
- line.bytesize + path.bytesize + 1 - map[path] = FilePositionEntry.new(file, seek) - } - new(file, map, file.pos) - end - - # Clean up unwatched file entries - def self.compact(file) - file.pos = 0 - existent_entries = file.each_line.map { |line| - m = /^([^\t]+)\t([0-9a-fA-F]+)\t([0-9a-fA-F]+)/.match(line) - next unless m - path = m[1] - pos = m[2].to_i(16) - ino = m[3].to_i(16) - # 32bit inode converted to 64bit at this phase - pos == UNWATCHED_POSITION ? nil : ("%s\t%016x\t%016x\n" % [path, pos, ino]) - }.compact - - file.pos = 0 - file.truncate(0) - file.write(existent_entries.join) - end - end - - # pos inode - # ffffffffffffffff\tffffffffffffffff\n - class FilePositionEntry - POS_SIZE = 16 - INO_OFFSET = 17 - INO_SIZE = 16 - LN_OFFSET = 33 - SIZE = 34 - - def initialize(file, seek) - @file = file - @seek = seek - end - - def update(ino, pos) - @file.pos = @seek - @file.write "%016x\t%016x" % [pos, ino] - end - - def update_pos(pos) - @file.pos = @seek - @file.write "%016x" % pos - end - - def read_inode - @file.pos = @seek + INO_OFFSET - raw = @file.read(INO_SIZE) - raw ? raw.to_i(16) : 0 - end - - def read_pos - @file.pos = @seek - raw = @file.read(POS_SIZE) - raw ? raw.to_i(16) : 0 - end - end - - class MemoryPositionEntry - def initialize - @pos = 0 - @inode = 0 - end - - def update(ino, pos) - @inode = ino - @pos = pos - end - - def update_pos(pos) - @pos = pos - end - - def read_pos - @pos - end - - def read_inode - @inode - end - end - end -end - -if __FILE__ == $0 - $options = {:read_from_head => false} - OptionParser.new do |opts| - opts.on("-p", "--posfile [POSFILE]") do |p| - $options[:pos_file] = p - end - opts.on("-h", "--[no-]readfromhead") do |h| - $options[:read_from_head] = h - end - end.parse! 
- a = ContainerLogTailscript::ContainerLogNewTail.new(ARGV) - a.start - a.shutdown -end - diff --git a/source/code/plugin/filter_container_log.rb b/source/code/plugin/filter_container_log.rb deleted file mode 100644 index 21e146a35..000000000 --- a/source/code/plugin/filter_container_log.rb +++ /dev/null @@ -1,42 +0,0 @@ -# frozen_string_literal: true - -require 'fluent/filter' - -module Fluent - require 'logger' - class PassThruFilter < Filter - Fluent::Plugin.register_filter('filter_container_log', self) - - def configure(conf) - super - end - - def start - super - @hostname = OMS::Common.get_hostname or "Unknown host" - end - - def shutdown - super - end - - def filter(tag, time, record) - begin - #Try to force utf-8 encoding on the string so that all characters can flow through to - #$log.info "before : #{record['LogEntry']}" - record['LogEntry'].force_encoding('UTF-8') - rescue - $log.error "Failed to convert record['LogEntry'] : '#{record['LogEntry']}' to UTF-8 using force_encoding." 
- $log.error "Current string encoding for record['LogEntry'] is #{record['LogEntry'].encoding}" - end - - record['Computer'] = @hostname - wrapper = { - "DataType"=>"CONTAINER_LOG_BLOB", - "IPName"=>"Containers", - "DataItems"=>[record.each{|k,v| record[k]=v}] - } - wrapper - end - end -end From 308be41fe87202ee6e289cc9c952a24910eed133 Mon Sep 17 00:00:00 2001 From: r-dilip Date: Thu, 16 Aug 2018 12:01:14 -0700 Subject: [PATCH 05/25] Remove plugin references from base_container.data --- installer/datafiles/base_container.data | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/installer/datafiles/base_container.data b/installer/datafiles/base_container.data index c49a8d1d0..ec0728c01 100644 --- a/installer/datafiles/base_container.data +++ b/installer/datafiles/base_container.data @@ -23,14 +23,11 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/omsagent/plugin/filter_docker_log.rb; source/code/plugin/filter_docker_log.rb; 644; root; root /opt/microsoft/omsagent/plugin/filter_container.rb; source/code/plugin/filter_container.rb; 644; root; root -/opt/microsoft/omsagent/plugin/filter_container_log.rb; source/code/plugin/filter_container_log.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_podinventory.rb; source/code/plugin/in_kube_podinventory.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_events.rb; source/code/plugin/in_kube_events.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_logs.rb; source/code/plugin/in_kube_logs.rb; 644; root; root /opt/microsoft/omsagent/plugin/KubernetesApiClient.rb; source/code/plugin/KubernetesApiClient.rb; 644; root; root -/opt/microsoft/omsagent/plugin/in_containerlog_sudo_tail.rb; source/code/plugin/in_containerlog_sudo_tail.rb; 644; root; root -/opt/microsoft/omsagent/plugin/containerlogtailfilereader.rb; source/code/plugin/containerlogtailfilereader.rb; 744; root; root /etc/opt/microsoft/docker-cimprov/container.conf; installer/conf/container.conf; 644; root; root @@ -88,15 +85,6 @@ 
WriteInstallInfo() { } WriteInstallInfo -#Setup sudo permission for containerlogtailfilereader -if [ -z $(cat /etc/sudoers.d/omsagent | grep /containerlogtailfilereader.rb) ] -then - chmod +w /etc/sudoers.d/omsagent - echo "#run containerlogtailfilereader.rb for docker-provider" >> /etc/sudoers.d/omsagent - echo "omsagent ALL=(ALL) NOPASSWD: /opt/microsoft/omsagent/ruby/bin/ruby /opt/microsoft/omsagent/plugin/containerlogtailfilereader.rb *" >> /etc/sudoers.d/omsagent - chmod 440 /etc/sudoers.d/omsagent -fi - # Get the state file in place with proper permissions touch /var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt chmod 644 /var/opt/microsoft/docker-cimprov/state/LastEventQueryTime.txt From bcd1a3ff040eb25218cfffd5028394f7594075c7 Mon Sep 17 00:00:00 2001 From: Dilip Raghunathan Date: Fri, 14 Sep 2018 10:46:55 -0700 Subject: [PATCH 06/25] Dilipr/fluent bit log processing (#126) * Build out_oms.so and include in docker-cimprov package * Adding fluent-bit-config file to base container * PR Feedback * Adding out_oms.conf to base_container.data * PR Feedback * Making the critical section as small as possible * PR Feedback * Fixing the newline bug for Computer, and changing containerId to Id --- build/Makefile | 829 ++++++++++++------------ installer/conf/out_oms.conf | 6 + installer/conf/td-agent-bit.conf | 35 + installer/datafiles/base_container.data | 7 +- source/code/go/src/plugins/Makefile | 20 + source/code/go/src/plugins/glide.lock | 209 ++++++ source/code/go/src/plugins/glide.yaml | 15 + source/code/go/src/plugins/oms.go | 359 ++++++++++ source/code/go/src/plugins/out_oms.go | 57 ++ source/code/go/src/plugins/utils.go | 67 ++ 10 files changed, 1194 insertions(+), 410 deletions(-) create mode 100644 installer/conf/out_oms.conf create mode 100644 installer/conf/td-agent-bit.conf create mode 100644 source/code/go/src/plugins/Makefile create mode 100644 source/code/go/src/plugins/glide.lock create mode 100644 source/code/go/src/plugins/glide.yaml 
create mode 100644 source/code/go/src/plugins/oms.go create mode 100644 source/code/go/src/plugins/out_oms.go create mode 100644 source/code/go/src/plugins/utils.go diff --git a/build/Makefile b/build/Makefile index 9586c3b23..b5312cfe3 100644 --- a/build/Makefile +++ b/build/Makefile @@ -1,409 +1,420 @@ -# -*- mode: Makefile; -*- -# Copyright (c) Microsoft Corporation - -BASE_DIR := $(subst /build,,$(PWD)) -OMI_ROOT := $(shell cd ../../omi/Unix; pwd -P) -SCXPAL_DIR := $(shell cd ../../pal; pwd -P) - -PF_POSIX := 1 -include $(SCXPAL_DIR)/build/config.mak -include $(BASE_DIR)/build/config.mak -include $(SCXPAL_DIR)/build/Makefile.pal - -ifndef ENABLE_DEBUG -$(error "ENABLE_DEBUG is not set. Please re-run configure") -endif - -# Include the version file -include ../../docker.version - -ifndef CONTAINER_BUILDVERSION_STATUS -$(error "Is docker.version missing? Please re-run configure") -endif - -SOURCE_DIR := $(BASE_DIR)/source/code -TEST_DIR := $(BASE_DIR)/test/code - -PROVIDER_DIR := $(SOURCE_DIR)/providers -PROVIDER_TEST_DIR := $(TEST_DIR)/providers -PAL_INCLUDE_DIR := $(SCXPAL_DIR)/source/code/include -PAL_TESTUTILS_DIR := $(SCXPAL_DIR)/test/code/testutils - -INTERMEDIATE_DIR := $(BASE_DIR)/intermediate/$(BUILD_CONFIGURATION) -INTERMEDIATE_TESTFILES := $(INTERMEDIATE_DIR)/testfiles -TARGET_DIR := $(BASE_DIR)/target/$(BUILD_CONFIGURATION) -PROVIDER_LIBRARY := $(INTERMEDIATE_DIR)/libcontainer.so - -INSTALLER_TMPDIR := $(INTERMEDIATE_DIR)/installer_tmp - -# Include files - -INCLUDE_DEFINES := $(INTERMEDIATE_DIR)/defines.h - -# Compiler flags - -OMI_INCLUDE_FLAGS := -I$(OMI_ROOT)/output/include -PROVIDER_INCLUDE_FLAGS := -I$(PAL_INCLUDE_DIR) -I$(INTERMEDIATE_DIR) - -PROVIDER_TEST_INCLUDE_FLAGS := -Wmissing-include-dirs -Wno-non-virtual-dtor -I$(SCXPAL_DIR)/source/code/include -I$(INTERMEDIATE_DIR) -I$(SCXPAL_DIR)/test/ext/include -I$(OMI_ROOT)/output/include -I$(OMI_ROOT) -I$(OMI_ROOT)/common -I$(SCXPAL_DIR)/test/code/include $(PROVIDER_INCLUDE_FLAGS) -I$(PROVIDER_DIR) 
- -ifeq ($(ENABLE_DEBUG),1) -PROV_DEBUG_FLAGS := -g -endif - -COMPILE_FLAGS := $(PROV_DEBUG_FLAGS) -D_REENTRANT -fstack-protector-all -Wall -fno-nonansi-builtins -Woverloaded-virtual -Wformat -Wformat-security -Wcast-align -Wswitch-enum -Wshadow -Wwrite-strings -Wredundant-decls -Wcast-qual -fPIC -PROVIDER_COMPILE_FLAGS := $(COMPILE_FLAGS) - -LINK_LIBRARIES := -Wl,-rpath=/opt/omi/lib -L$(OMI_ROOT)/output/lib -lmicxx -L$(SCXPAL_TARGET_DIR) -lscxcore -lUtil -lscxassertabort -lrt -luuid -PROVIDER_TEST_LINK_LIBRARIES := -lbase -lpal -L$(SCXPAL_TARGET_DIR) -lscxcore $(SCXPAL_DIR)/test/ext/lib/linux/$(ARCH)/cppunit/libcppunit.a -lpthread -lrt -luuid - -SHARED_FLAGS := -shared - -# Support for installbuilder - -STAGING_DIR := $(INTERMEDIATE_DIR)/staging - -ifeq ($(ULINUX),1) - # For consistency, the architecture should be i686 (for x86) and x86_64 (for x64) - DOCKER_ARCH := $(shell echo $(PF_ARCH) | sed -e 's/x86$$/i686/' -e 's/x64$$/x86_64/') - OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).universal.$(DOCKER_ARCH) -else - PF_DISTRO_LC := $(shell echo $(PF_DISTRO) | tr A-Z a-z) - OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).$(PF_DISTRO_LC).$(PF_MAJOR).$(PF_ARCH) -endif - -ifeq ("$(wildcard /usr/bin/dpkg-deb)","") - DPKG_LOCATION="--DPKG_LOCATION=$(SCXPAL_DIR)/installer/InstallBuilder/tools/bin/dpkg-deb-$(PF_ARCH)" -else - DPKG_LOCATION= -endif - -# Support for src_to_obj handling - -INCLUDES = $(OMI_INCLUDE_FLAGS) $(PROVIDER_INCLUDE_FLAGS) -CFLAGS = $(COMPILE_FLAGS) -CXXFLAGS = $(COMPILE_FLAGS) - -#-------------------------------------------------------------------------------- -# Build targets - -ifeq ($(ULINUX),1) -all : $(OMI_ROOT)/output $(SCXPAL_INTERMEDIATE_DIR) PROVIDER_STATUS $(PROVIDER_LIBRARY) KIT_STATUS kit -else -all : 
$(OMI_ROOT)/output $(SCXPAL_INTERMEDIATE_DIR) PROVIDER_STATUS $(PROVIDER_LIBRARY) -endif - -clean : - $(RMDIR) $(BASE_DIR)/build/cppunit_result.* $(BASE_DIR)/build/scxtestrunner.log $(BASE_DIR)/installer/intermediate $(BASE_DIR)/intermediate $(BASE_DIR)/target $(PROVIDER_TEST_DIR)/providertestutils.cpp - -find $(BASE_DIR) -name \*~ -exec rm {} \; - -$(RM) $(TEST_DIR)/providers/TestScriptPath.h - -distclean : clean - $(RM) $(BASE_DIR)/build/config.mak - -make -C $(OMI_ROOT) distclean - -make -C $(SCXPAL_DIR)/build distclean - -$(RMDIR) $(OMI_ROOT)/output* - -$(RM) $(SCXPAL_DIR)/build/config.mak - -$(RM) $(SCXPAL_DIR)/build/Makefile.config_cache - -PROVIDER_STATUS: - @echo "========================= Performing Building provider" - -KIT_STATUS: - @echo "========================= Performing Building provider tests" - -#-------------------------------------------------------------------------------- -# OMI build -# -# Build the OMI distribution -# -# Technically, we should go to build OMI all the time. But I'd rather not spend -# the time doing it here EVERY TIME, when we never normally change OMI. This is -# a good tradeoff (build if not built, otherwise assume all is well). -# -# Doing a 'make clean' in OMI directory will force us to rebuild. - -$(OMI_ROOT)/output : $(OMI_ROOT)/output/lib/libmicxx.so - -$(OMI_ROOT)/output/lib/libmicxx.so : - @echo "========================= Performing Building OMI" - make -C $(OMI_ROOT) -ifeq ($(PERFORM_OMI_MAKEINSTALL),1) - make -C $(OMI_ROOT) install -endif - -#-------------------------------------------------------------------------------- -# PAL build -# -# Build the PAL (Platform Abstraction Layer) -# -# Doing a 'make clean' in PAL directory will force us to rebuild. 
- -$(SCXPAL_INTERMEDIATE_DIR) : - @echo "========================= Performing Building PAL" - make -C $(SCXPAL_DIR)/build - -#================================================================================ -# File depends.h (compiler dependencies) -#================================================================================ - -$(INCLUDE_DEFINES) : $(BASE_DIR)/build/config.mak - -$(MKPATH) $(@D) - @$(ECHO) "Creating $@" - @$(call pf_fwrite,"/*-------------------------------------------------------------------------------", $@) - @$(call pf_fappend," Copyright (C) 2007-2015 Microsoft Corp. ", $@) - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"*/ ", $@) - @$(call pf_fappend,"/** ", $@) - @$(call pf_fappend," \file ", $@) - @$(call pf_fappend," ", $@) - @$(call pf_fappend," \brief Auto generated file containing build definitions ", $@) - @$(call pf_fappend," ", $@) - @$(call pf_fappend," \author Automated Build System ", $@) - @$(call pf_fappend," ", $@) - @$(call pf_fappend," DO NOT EDIT THIS FILE! ", $@) - @$(call pf_fappend," DO NOT CHECK IN THIS FILE! 
", $@) - @$(call pf_fappend,"*/ ", $@) - @$(call pf_fappend,"/*----------------------------------------------------------------------------*/", $@) - @$(call pf_fappend,"#ifndef DEFINES_H ", $@) - @$(call pf_fappend,"#define DEFINES_H ", $@) - @$(call pf_fappend," ", $@) -ifneq ($(PF_DISTRO),) - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"#ifndef PF_DISTRO_$(PF_DISTRO) ", $@) - @$(call pf_fappend,"#define PF_DISTRO_$(PF_DISTRO) ", $@) - @$(call pf_fappend,"#endif ", $@) -endif -ifneq ($(PF_MAJOR),) - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"#ifndef PF_MAJOR ", $@) - @$(call pf_fappend,"#define PF_MAJOR $(PF_MAJOR) ", $@) - @$(call pf_fappend,"#endif ", $@) -endif -ifneq ($(PF_MINOR),) - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"#ifndef PF_MINOR ", $@) - @$(call pf_fappend,"#define PF_MINOR $(PF_MINOR) ", $@) - @$(call pf_fappend,"#endif ", $@) -endif -ifneq ($(ARCH),) - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"#ifndef $(ARCH) ", $@) - @$(call pf_fappend,"#define $(ARCH) ", $@) - @$(call pf_fappend,"#endif ", $@) -endif -ifeq ($(BUILD_TYPE),Debug) - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"#ifndef _DEBUG ", $@) - @$(call pf_fappend,"#define _DEBUG ", $@) - @$(call pf_fappend,"#endif ", $@) -else - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"#ifndef NDEBUG ", $@) - @$(call pf_fappend,"#define NDEBUG ", $@) - @$(call pf_fappend,"#endif ", $@) -endif - @$(call pf_fappend," ", $@) - @$(call pf_fappend,"#endif /* DEFINES_H */ ", $@) - @$(call pf_fappend,"/*----------------------------E-N-D---O-F---F-I-L-E---------------------------*/", $@) - -#================================================================================ -# Internal functions -#================================================================================ - -# Convert a list of src files with absolute paths under BASE_DIR to corresponding -# object files under intermediate directory -# src_to_obj(list_of_cppfiles) -src_to_obj = $(patsubst $(BASE_DIR)%, 
$(INTERMEDIATE_DIR)%, $(patsubst %.c, %.o, $(patsubst %.cpp, %.o, $(1)))) - -# No default rules, please -.SUFFIX: - -# Rule for compiling cpp files in source tree, ouptut in mirrored intermediate dir -$(INTERMEDIATE_DIR)/%.o : $(BASE_DIR)/%.cpp $(INCLUDE_DEFINES) - $(MKPATH) $(@D) - $(CXX) -c $(CXXFLAGS) $(INCLUDES) -I$( $(TEST_DIR)/providers/TestScriptPath.h - -test : TEST_STATUS $(SCXPAL_INTERMEDIATE_DIR) $(INTERMEDIATE_DIR)/testrunner - @echo "========================= Performing container testrun execution" - $(MKPATH) $(INTERMEDIATE_TESTFILES) - $(COPY) $(TEST_DIR)/scripts/createEnv.sh $(TEST_DIR)/scripts/testrun_wrapper $(INTERMEDIATE_TESTFILES) - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(OMI_ROOT)/output/lib; cd $(INTERMEDIATE_TESTFILES); ./createEnv.sh - cd $(INTERMEDIATE_TESTFILES); ./testrun_wrapper $(INTERMEDIATE_DIR) - -#-------------------------------------------------------------------------------- -# Build the distribution kit -# -# Build the packages via installbuilder -# -# While the "formal build" only builds ULINUX, we may build something else for DEV purposes. -# Assume we ALWAYS build DPKG, but only build RPM if --enable-ulinux is speified in configure. 
- -kit : CONTAINERLIB_FILENAME = libcontainer.so -kit : $(OMI_ROOT)/output $(PROVIDER_LIBRARY) - -ifeq ($(ULINUX),1) - - @echo "========================= Performing Building RPM and DPKG packages" - $(MKPATH) $(INSTALLER_TMPDIR) - sudo $(RMDIR) $(STAGING_DIR) - $(MKPATH) $(INTERMEDIATE_DIR) - python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ - --BASE_DIR=$(BASE_DIR) \ - --TARGET_DIR=$(INTERMEDIATE_DIR) \ - --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ - --STAGING_DIR=$(STAGING_DIR) \ - --BUILD_TYPE=$(BUILD_TYPE) \ - --BUILD_CONFIGURATION=$(BUILD_CONFIGURATION) \ - --PFARCH=$(PF_ARCH) \ - --PFDISTRO=$(PF_DISTRO) \ - --PFMAJOR=$(PF_MAJOR) \ - --PFMINOR=$(PF_MINOR) \ - --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ - --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ - --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ - --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ - --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ - base_container.data linux.data linux_rpm.data - - sudo $(RMDIR) $(STAGING_DIR) - $(MKPATH) $(INTERMEDIATE_DIR) - python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ - --BASE_DIR=$(BASE_DIR) \ - --TARGET_DIR=$(INTERMEDIATE_DIR) \ - --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ - --STAGING_DIR=$(STAGING_DIR) \ - --BUILD_TYPE=$(BUILD_TYPE) \ - --BUILD_CONFIGURATION=$(BUILD_CONFIGURATION) \ - --PFARCH=$(PF_ARCH) \ - --PFDISTRO=$(PF_DISTRO) \ - --PFMAJOR=$(PF_MAJOR) \ - --PFMINOR=$(PF_MINOR) \ - --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ - --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ - --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ - $(DPKG_LOCATION) \ - --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ - --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ - base_container.data linux.data linux_dpkg.data - - # Strip the package extension from the package filename - sed -re 's/.rpm$$|.deb$$//' $(INTERMEDIATE_DIR)/package_filename 
> $(INTERMEDIATE_DIR)/package_file.tmp; mv $(INTERMEDIATE_DIR)/package_file.tmp $(INTERMEDIATE_DIR)/package_filename - - # Build the tar file containing both .rpm and .deb packages - cd $(INTERMEDIATE_DIR); tar cvf $(OUTPUT_PACKAGE_PREFIX).tar $(OUTPUT_PACKAGE_PREFIX).rpm $(OUTPUT_PACKAGE_PREFIX).deb - - ../installer/bundle/create_bundle.sh $(PF)_$(PF_DISTRO) $(INTERMEDIATE_DIR) $(OUTPUT_PACKAGE_PREFIX) - # Copy the shell bundle to the target directory - $(MKPATH) $(TARGET_DIR) - cd $(INTERMEDIATE_DIR); $(COPY) `cat $(INTERMEDIATE_DIR)/package_filename`.sh $(TARGET_DIR) - -else - - @echo "========================= Performing Building RPM and DPKG packages" - sudo $(RMDIR) $(STAGING_DIR) - $(MKPATH) $(INTERMEDIATE_DIR) - python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ - --BASE_DIR=$(BASE_DIR) \ - --TARGET_DIR=$(INTERMEDIATE_DIR) \ - --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ - --STAGING_DIR=$(STAGING_DIR) \ - --BUILD_TYPE=$(BUILD_TYPE) \ - --BUILD_CONFIGURATION=$(BUILD_CONFIGURATION) \ - --PFARCH=$(PF_ARCH) \ - --PFDISTRO=$(PF_DISTRO) \ - --PFMAJOR=$(PF_MAJOR) \ - --PFMINOR=$(PF_MINOR) \ - --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ - --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ - --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ - $(DPKG_LOCATION) \ - --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ - --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ - base_container.data linux.data linux_dpkg.data - -endif +# -*- mode: Makefile; -*- +# Copyright (c) Microsoft Corporation + +BASE_DIR := $(subst /build,,$(PWD)) +OMI_ROOT := $(shell cd ../../omi/Unix; pwd -P) +SCXPAL_DIR := $(shell cd ../../pal; pwd -P) + +PF_POSIX := 1 +include $(SCXPAL_DIR)/build/config.mak +include $(BASE_DIR)/build/config.mak +include $(SCXPAL_DIR)/build/Makefile.pal + +ifndef ENABLE_DEBUG +$(error "ENABLE_DEBUG is not set. 
Please re-run configure") +endif + +# Include the version file +include ../../docker.version + +ifndef CONTAINER_BUILDVERSION_STATUS +$(error "Is docker.version missing? Please re-run configure") +endif + +SOURCE_DIR := $(BASE_DIR)/source/code +TEST_DIR := $(BASE_DIR)/test/code + +PROVIDER_DIR := $(SOURCE_DIR)/providers +PROVIDER_TEST_DIR := $(TEST_DIR)/providers +PAL_INCLUDE_DIR := $(SCXPAL_DIR)/source/code/include +PAL_TESTUTILS_DIR := $(SCXPAL_DIR)/test/code/testutils + +INTERMEDIATE_DIR := $(BASE_DIR)/intermediate/$(BUILD_CONFIGURATION) +INTERMEDIATE_TESTFILES := $(INTERMEDIATE_DIR)/testfiles +TARGET_DIR := $(BASE_DIR)/target/$(BUILD_CONFIGURATION) +PROVIDER_LIBRARY := $(INTERMEDIATE_DIR)/libcontainer.so + +INSTALLER_TMPDIR := $(INTERMEDIATE_DIR)/installer_tmp + +# GO Source dir for custom fluent bit plugin +GO_SOURCE_DIR := $(SOURCE_DIR)/go/src/plugins + +# Include files + +INCLUDE_DEFINES := $(INTERMEDIATE_DIR)/defines.h + +# Compiler flags + +OMI_INCLUDE_FLAGS := -I$(OMI_ROOT)/output/include +PROVIDER_INCLUDE_FLAGS := -I$(PAL_INCLUDE_DIR) -I$(INTERMEDIATE_DIR) + +PROVIDER_TEST_INCLUDE_FLAGS := -Wmissing-include-dirs -Wno-non-virtual-dtor -I$(SCXPAL_DIR)/source/code/include -I$(INTERMEDIATE_DIR) -I$(SCXPAL_DIR)/test/ext/include -I$(OMI_ROOT)/output/include -I$(OMI_ROOT) -I$(OMI_ROOT)/common -I$(SCXPAL_DIR)/test/code/include $(PROVIDER_INCLUDE_FLAGS) -I$(PROVIDER_DIR) + +ifeq ($(ENABLE_DEBUG),1) +PROV_DEBUG_FLAGS := -g +endif + +COMPILE_FLAGS := $(PROV_DEBUG_FLAGS) -D_REENTRANT -fstack-protector-all -Wall -fno-nonansi-builtins -Woverloaded-virtual -Wformat -Wformat-security -Wcast-align -Wswitch-enum -Wshadow -Wwrite-strings -Wredundant-decls -Wcast-qual -fPIC +PROVIDER_COMPILE_FLAGS := $(COMPILE_FLAGS) + +LINK_LIBRARIES := -Wl,-rpath=/opt/omi/lib -L$(OMI_ROOT)/output/lib -lmicxx -L$(SCXPAL_TARGET_DIR) -lscxcore -lUtil -lscxassertabort -lrt -luuid +PROVIDER_TEST_LINK_LIBRARIES := -lbase -lpal -L$(SCXPAL_TARGET_DIR) -lscxcore 
$(SCXPAL_DIR)/test/ext/lib/linux/$(ARCH)/cppunit/libcppunit.a -lpthread -lrt -luuid + +SHARED_FLAGS := -shared + +# Support for installbuilder + +STAGING_DIR := $(INTERMEDIATE_DIR)/staging + +ifeq ($(ULINUX),1) + # For consistency, the architecture should be i686 (for x86) and x86_64 (for x64) + DOCKER_ARCH := $(shell echo $(PF_ARCH) | sed -e 's/x86$$/i686/' -e 's/x64$$/x86_64/') + OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).universal.$(DOCKER_ARCH) +else + PF_DISTRO_LC := $(shell echo $(PF_DISTRO) | tr A-Z a-z) + OUTPUT_PACKAGE_PREFIX=docker-cimprov-$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH)-$(CONTAINER_BUILDVERSION_BUILDNR).$(PF_DISTRO_LC).$(PF_MAJOR).$(PF_ARCH) +endif + +ifeq ("$(wildcard /usr/bin/dpkg-deb)","") + DPKG_LOCATION="--DPKG_LOCATION=$(SCXPAL_DIR)/installer/InstallBuilder/tools/bin/dpkg-deb-$(PF_ARCH)" +else + DPKG_LOCATION= +endif + +# Support for src_to_obj handling + +INCLUDES = $(OMI_INCLUDE_FLAGS) $(PROVIDER_INCLUDE_FLAGS) +CFLAGS = $(COMPILE_FLAGS) +CXXFLAGS = $(COMPILE_FLAGS) + +#-------------------------------------------------------------------------------- +# Build targets + +ifeq ($(ULINUX),1) +all : $(OMI_ROOT)/output $(SCXPAL_INTERMEDIATE_DIR) PROVIDER_STATUS $(PROVIDER_LIBRARY) KIT_STATUS kit fluentbitplugin +else +all : $(OMI_ROOT)/output $(SCXPAL_INTERMEDIATE_DIR) PROVIDER_STATUS $(PROVIDER_LIBRARY) fluentbitplugin +endif + +clean : + $(RMDIR) $(BASE_DIR)/build/cppunit_result.* $(BASE_DIR)/build/scxtestrunner.log $(BASE_DIR)/installer/intermediate $(BASE_DIR)/intermediate $(BASE_DIR)/target $(PROVIDER_TEST_DIR)/providertestutils.cpp + -find $(BASE_DIR) -name \*~ -exec rm {} \; + -$(RM) $(TEST_DIR)/providers/TestScriptPath.h + +distclean : clean + $(RM) $(BASE_DIR)/build/config.mak + -make -C $(OMI_ROOT) distclean + -make -C $(SCXPAL_DIR)/build distclean + 
-$(RMDIR) $(OMI_ROOT)/output* + -$(RM) $(SCXPAL_DIR)/build/config.mak + -$(RM) $(SCXPAL_DIR)/build/Makefile.config_cache + +PROVIDER_STATUS: + @echo "========================= Performing Building provider" + +KIT_STATUS: + @echo "========================= Performing Building provider tests" + +#-------------------------------------------------------------------------------- +# OMI build +# +# Build the OMI distribution +# +# Technically, we should go to build OMI all the time. But I'd rather not spend +# the time doing it here EVERY TIME, when we never normally change OMI. This is +# a good tradeoff (build if not built, otherwise assume all is well). +# +# Doing a 'make clean' in OMI directory will force us to rebuild. + +$(OMI_ROOT)/output : $(OMI_ROOT)/output/lib/libmicxx.so + +$(OMI_ROOT)/output/lib/libmicxx.so : + @echo "========================= Performing Building OMI" + make -C $(OMI_ROOT) +ifeq ($(PERFORM_OMI_MAKEINSTALL),1) + make -C $(OMI_ROOT) install +endif + +#--------------------------------------------------------------------------------- +# fluentbit go plugin build. This is required to send container logs to ODS endpoint +# +fluentbitplugin : + @echo "========================= Building fluentbit out_oms go plugin for logs" + make -C $(GO_SOURCE_DIR) fbplugin + $(COPY) $(GO_SOURCE_DIR)/out_oms.so $(INTERMEDIATE_DIR) + +#-------------------------------------------------------------------------------- +# PAL build +# +# Build the PAL (Platform Abstraction Layer) +# +# Doing a 'make clean' in PAL directory will force us to rebuild. 
+ +$(SCXPAL_INTERMEDIATE_DIR) : + @echo "========================= Performing Building PAL" + make -C $(SCXPAL_DIR)/build + +#================================================================================ +# File depends.h (compiler dependencies) +#================================================================================ + +$(INCLUDE_DEFINES) : $(BASE_DIR)/build/config.mak + -$(MKPATH) $(@D) + @$(ECHO) "Creating $@" + @$(call pf_fwrite,"/*-------------------------------------------------------------------------------", $@) + @$(call pf_fappend," Copyright (C) 2007-2015 Microsoft Corp. ", $@) + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"*/ ", $@) + @$(call pf_fappend,"/** ", $@) + @$(call pf_fappend," \file ", $@) + @$(call pf_fappend," ", $@) + @$(call pf_fappend," \brief Auto generated file containing build definitions ", $@) + @$(call pf_fappend," ", $@) + @$(call pf_fappend," \author Automated Build System ", $@) + @$(call pf_fappend," ", $@) + @$(call pf_fappend," DO NOT EDIT THIS FILE! ", $@) + @$(call pf_fappend," DO NOT CHECK IN THIS FILE! 
", $@) + @$(call pf_fappend,"*/ ", $@) + @$(call pf_fappend,"/*----------------------------------------------------------------------------*/", $@) + @$(call pf_fappend,"#ifndef DEFINES_H ", $@) + @$(call pf_fappend,"#define DEFINES_H ", $@) + @$(call pf_fappend," ", $@) +ifneq ($(PF_DISTRO),) + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"#ifndef PF_DISTRO_$(PF_DISTRO) ", $@) + @$(call pf_fappend,"#define PF_DISTRO_$(PF_DISTRO) ", $@) + @$(call pf_fappend,"#endif ", $@) +endif +ifneq ($(PF_MAJOR),) + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"#ifndef PF_MAJOR ", $@) + @$(call pf_fappend,"#define PF_MAJOR $(PF_MAJOR) ", $@) + @$(call pf_fappend,"#endif ", $@) +endif +ifneq ($(PF_MINOR),) + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"#ifndef PF_MINOR ", $@) + @$(call pf_fappend,"#define PF_MINOR $(PF_MINOR) ", $@) + @$(call pf_fappend,"#endif ", $@) +endif +ifneq ($(ARCH),) + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"#ifndef $(ARCH) ", $@) + @$(call pf_fappend,"#define $(ARCH) ", $@) + @$(call pf_fappend,"#endif ", $@) +endif +ifeq ($(BUILD_TYPE),Debug) + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"#ifndef _DEBUG ", $@) + @$(call pf_fappend,"#define _DEBUG ", $@) + @$(call pf_fappend,"#endif ", $@) +else + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"#ifndef NDEBUG ", $@) + @$(call pf_fappend,"#define NDEBUG ", $@) + @$(call pf_fappend,"#endif ", $@) +endif + @$(call pf_fappend," ", $@) + @$(call pf_fappend,"#endif /* DEFINES_H */ ", $@) + @$(call pf_fappend,"/*----------------------------E-N-D---O-F---F-I-L-E---------------------------*/", $@) + +#================================================================================ +# Internal functions +#================================================================================ + +# Convert a list of src files with absolute paths under BASE_DIR to corresponding +# object files under intermediate directory +# src_to_obj(list_of_cppfiles) +src_to_obj = $(patsubst $(BASE_DIR)%, 
$(INTERMEDIATE_DIR)%, $(patsubst %.c, %.o, $(patsubst %.cpp, %.o, $(1)))) + +# No default rules, please +.SUFFIX: + +# Rule for compiling cpp files in source tree, output in mirrored intermediate dir +$(INTERMEDIATE_DIR)/%.o : $(BASE_DIR)/%.cpp $(INCLUDE_DEFINES) + $(MKPATH) $(@D) + $(CXX) -c $(CXXFLAGS) $(INCLUDES) -I$( $(TEST_DIR)/providers/TestScriptPath.h + +test : TEST_STATUS $(SCXPAL_INTERMEDIATE_DIR) $(INTERMEDIATE_DIR)/testrunner + @echo "========================= Performing container testrun execution" + $(MKPATH) $(INTERMEDIATE_TESTFILES) + $(COPY) $(TEST_DIR)/scripts/createEnv.sh $(TEST_DIR)/scripts/testrun_wrapper $(INTERMEDIATE_TESTFILES) + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(OMI_ROOT)/output/lib; cd $(INTERMEDIATE_TESTFILES); ./createEnv.sh + cd $(INTERMEDIATE_TESTFILES); ./testrun_wrapper $(INTERMEDIATE_DIR) + +#-------------------------------------------------------------------------------- +# Build the distribution kit +# +# Build the packages via installbuilder +# +# While the "formal build" only builds ULINUX, we may build something else for DEV purposes. +# Assume we ALWAYS build DPKG, but only build RPM if --enable-ulinux is specified in configure. 
+ +kit : CONTAINERLIB_FILENAME = libcontainer.so +kit : $(OMI_ROOT)/output $(PROVIDER_LIBRARY) fluentbitplugin + +ifeq ($(ULINUX),1) + + @echo "========================= Performing Building RPM and DPKG packages" + $(MKPATH) $(INSTALLER_TMPDIR) + sudo $(RMDIR) $(STAGING_DIR) + $(MKPATH) $(INTERMEDIATE_DIR) + python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ + --BASE_DIR=$(BASE_DIR) \ + --TARGET_DIR=$(INTERMEDIATE_DIR) \ + --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ + --STAGING_DIR=$(STAGING_DIR) \ + --BUILD_TYPE=$(BUILD_TYPE) \ + --BUILD_CONFIGURATION=$(BUILD_CONFIGURATION) \ + --PFARCH=$(PF_ARCH) \ + --PFDISTRO=$(PF_DISTRO) \ + --PFMAJOR=$(PF_MAJOR) \ + --PFMINOR=$(PF_MINOR) \ + --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ + --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ + --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ + --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ + --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ + base_container.data linux.data linux_rpm.data + + sudo $(RMDIR) $(STAGING_DIR) + $(MKPATH) $(INTERMEDIATE_DIR) + python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ + --BASE_DIR=$(BASE_DIR) \ + --TARGET_DIR=$(INTERMEDIATE_DIR) \ + --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ + --STAGING_DIR=$(STAGING_DIR) \ + --BUILD_TYPE=$(BUILD_TYPE) \ + --BUILD_CONFIGURATION=$(BUILD_CONFIGURATION) \ + --PFARCH=$(PF_ARCH) \ + --PFDISTRO=$(PF_DISTRO) \ + --PFMAJOR=$(PF_MAJOR) \ + --PFMINOR=$(PF_MINOR) \ + --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ + --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ + --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ + $(DPKG_LOCATION) \ + --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ + --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ + base_container.data linux.data linux_dpkg.data + + # Strip the package extension from the package filename + sed -re 's/.rpm$$|.deb$$//' 
$(INTERMEDIATE_DIR)/package_filename > $(INTERMEDIATE_DIR)/package_file.tmp; mv $(INTERMEDIATE_DIR)/package_file.tmp $(INTERMEDIATE_DIR)/package_filename + + # Build the tar file containing both .rpm and .deb packages + cd $(INTERMEDIATE_DIR); tar cvf $(OUTPUT_PACKAGE_PREFIX).tar $(OUTPUT_PACKAGE_PREFIX).rpm $(OUTPUT_PACKAGE_PREFIX).deb + + ../installer/bundle/create_bundle.sh $(PF)_$(PF_DISTRO) $(INTERMEDIATE_DIR) $(OUTPUT_PACKAGE_PREFIX) + # Copy the shell bundle to the target directory + $(MKPATH) $(TARGET_DIR) + cd $(INTERMEDIATE_DIR); $(COPY) `cat $(INTERMEDIATE_DIR)/package_filename`.sh $(TARGET_DIR) + +else + + @echo "========================= Performing Building RPM and DPKG packages" + sudo $(RMDIR) $(STAGING_DIR) + $(MKPATH) $(INTERMEDIATE_DIR) + python $(SCXPAL_DIR)/installer/InstallBuilder/installbuilder.py \ + --BASE_DIR=$(BASE_DIR) \ + --TARGET_DIR=$(INTERMEDIATE_DIR) \ + --INTERMEDIATE_DIR=$(INSTALLER_TMPDIR) \ + --STAGING_DIR=$(STAGING_DIR) \ + --BUILD_TYPE=$(BUILD_TYPE) \ + --BUILD_CONFIGURATION=$(BUILD_CONFIGURATION) \ + --PFARCH=$(PF_ARCH) \ + --PFDISTRO=$(PF_DISTRO) \ + --PFMAJOR=$(PF_MAJOR) \ + --PFMINOR=$(PF_MINOR) \ + --VERSION=$(CONTAINER_BUILDVERSION_MAJOR).$(CONTAINER_BUILDVERSION_MINOR).$(CONTAINER_BUILDVERSION_PATCH) \ + --RELEASE=$(CONTAINER_BUILDVERSION_BUILDNR) \ + --CONTAINER_BUILD_LIBRARY=$(CONTAINERLIB_FILENAME) \ + $(DPKG_LOCATION) \ + --OUTPUTFILE=$(OUTPUT_PACKAGE_PREFIX) \ + --DATAFILE_PATH=$(BASE_DIR)/installer/datafiles \ + base_container.data linux.data linux_dpkg.data + +endif diff --git a/installer/conf/out_oms.conf b/installer/conf/out_oms.conf new file mode 100644 index 000000000..d4b797757 --- /dev/null +++ b/installer/conf/out_oms.conf @@ -0,0 +1,6 @@ +omsadmin_conf_path=/etc/opt/microsoft/omsagent/conf/omsadmin.conf +cert_file_path=/etc/opt/microsoft/omsagent/certs/oms.crt +key_file_path=/etc/opt/microsoft/omsagent/certs/oms.key +container_host_file_path=/var/opt/microsoft/docker-cimprov/state/containerhostname 
+container_inventory_refresh_interval=60 +kube_system_containers_refresh_interval=300 diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf new file mode 100644 index 000000000..cf490c077 --- /dev/null +++ b/installer/conf/td-agent-bit.conf @@ -0,0 +1,35 @@ +[SERVICE] + Flush 5 + Log_Level info + Parsers_File /etc/td-agent-bit/parsers.conf + Log_File /var/log/fluent-bit.log + +[INPUT] + Name tail + Tag oms.container.log.* + Path /var/log/containers/*.log + DB /var/log/fblogs.db + Parser docker + Mem_Buf_Limit 30m + Path_Key filepath + +[FILTER] + Name record_modifier + Match oms.container.log.* + Whitelist_key log + Whitelist_key stream + Whitelist_key time + Whitelist_key filepath + +[FILTER] + Name modify + Match oms.container.log.* + Rename log LogEntry + Rename stream LogEntrySource + Rename time LogEntryTimeStamp + Rename filepath Filepath + Add_if_not_present SourceSystem Containers + +[OUTPUT] + Name oms + Match oms.container.log.* \ No newline at end of file diff --git a/installer/datafiles/base_container.data b/installer/datafiles/base_container.data index ec0728c01..85a128b2a 100644 --- a/installer/datafiles/base_container.data +++ b/installer/datafiles/base_container.data @@ -37,7 +37,9 @@ MAINTAINER: 'Microsoft Corporation' /opt/microsoft/omsagent/plugin/in_kube_services.rb; source/code/plugin/in_kube_services.rb; 644; root; root /opt/microsoft/omsagent/plugin/in_kube_nodes.rb; source/code/plugin/in_kube_nodes.rb; 644; root; root - +/opt/td-agent-bit/bin/out_oms.so; intermediate/${{BUILD_CONFIGURATION}}/out_oms.so; 755; root; root +/etc/opt/microsoft/docker-cimprov/td-agent-bit.conf; installer/conf/td-agent-bit.conf; 644; root; root +/etc/opt/microsoft/docker-cimprov/out_oms.conf; installer/conf/out_oms.conf; 644; root; root %Links /opt/omi/lib/libcontainer.${{SHLIB_EXT}}; /opt/microsoft/docker-cimprov/lib/libcontainer.${{SHLIB_EXT}}; 644; root; root @@ -76,6 +78,9 @@ MAINTAINER: 'Microsoft Corporation' 
/var/opt/microsoft/docker-cimprov/state/ImageInventory; 755; root; root /var/opt/microsoft/docker-cimprov/log; 755; root; root +/opt/td-agent-bit; 755; root; root;sysdir +/opt/td-agent-bit/bin; 755; root; root;sysdir + %Dependencies %Postinstall_10 diff --git a/source/code/go/src/plugins/Makefile b/source/code/go/src/plugins/Makefile new file mode 100644 index 000000000..dfdc65d81 --- /dev/null +++ b/source/code/go/src/plugins/Makefile @@ -0,0 +1,20 @@ +GITVERSION := 0.1 +UNAME_S := $(shell uname -s) +ifeq ($(UNAME_S),Linux) + BUILDDATE := $(shell date --rfc-3339=seconds) +endif +ifeq ($(UNAME_S),Darwin) + BUILDDATE := $(shell gdate --rfc-3339=seconds) +endif + +fbplugin: + go build -ldflags "-X 'main.revision=$(GITVERSION)' -X 'main.builddate=$(BUILDDATE)'" -buildmode=c-shared -o out_oms.so . + +test: + go test -cover -race -coverprofile=coverage.txt -covermode=atomic + +glide: + glide install + +clean: + rm -rf *.so *.h *~ diff --git a/source/code/go/src/plugins/glide.lock b/source/code/go/src/plugins/glide.lock new file mode 100644 index 000000000..79745820b --- /dev/null +++ b/source/code/go/src/plugins/glide.lock @@ -0,0 +1,209 @@ +hash: a4b073d827b5cbb4a772dada9ff3bcf55c55afc3cda83ddec1e6edcdca8e219a +updated: 2018-09-06T04:07:01.808678175Z +imports: +- name: github.com/fluent/fluent-bit-go + version: c4a158a6e3a793166c6ecfa2d5c80d71eada8959 + subpackages: + - output +- name: github.com/ghodss/yaml + version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee +- name: github.com/gogo/protobuf + version: c0656edd0d9eab7c66d1eb0c568f9039345796f7 + subpackages: + - proto + - sortkeys +- name: github.com/golang/glog + version: 44145f04b68cf362d9c4df2182967c2275eaefed +- name: github.com/golang/protobuf + version: b4deda0973fb4c70b50d226b1af49f3da59f5265 + subpackages: + - proto + - ptypes + - ptypes/any + - ptypes/duration + - ptypes/timestamp +- name: github.com/google/btree + version: 7d79101e329e5a3adf994758c578dab82b90c017 +- name: github.com/google/gofuzz + version: 
44d81051d367757e1c7c6a5a86423ece9afcf63c +- name: github.com/googleapis/gnostic + version: 0c5108395e2debce0d731cf0287ddf7242066aba + subpackages: + - OpenAPIv2 + - compiler + - extensions +- name: github.com/gregjones/httpcache + version: 787624de3eb7bd915c329cba748687a3b22666a6 + subpackages: + - diskcache +- name: github.com/json-iterator/go + version: f2b4162afba35581b6d4a50d3b8f34e33c144682 +- name: github.com/mitchellh/mapstructure + version: fa473d140ef3c6adf42d6b391fe76707f1f243c8 +- name: github.com/modern-go/concurrent + version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 +- name: github.com/modern-go/reflect2 + version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd +- name: github.com/peterbourgon/diskv + version: 5f041e8faa004a95c88a202771f4cc3e991971e6 +- name: github.com/ugorji/go + version: 00b869d2f4a5e27445c2d916fa106fc72c106d4c + subpackages: + - codec +- name: golang.org/x/crypto + version: 49796115aa4b964c318aad4f3084fdb41e9aa067 + subpackages: + - ssh/terminal +- name: golang.org/x/net + version: 1c05540f6879653db88113bc4a2b70aec4bd491f + subpackages: + - context + - html + - html/atom + - http2 + - http2/hpack + - idna + - lex/httplex + - websocket +- name: golang.org/x/sys + version: 95c6576299259db960f6c5b9b69ea52422860fce + subpackages: + - unix + - windows +- name: golang.org/x/text + version: b19bf474d317b857955b12035d2c5acb57ce8b01 + subpackages: + - secure/bidirule + - transform + - unicode/bidi + - unicode/norm +- name: golang.org/x/time + version: f51c12702a4d776e4c1fa9b0fabab841babae631 + subpackages: + - rate +- name: gopkg.in/inf.v0 + version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/yaml.v2 + version: 670d4cfef0544295bc27a114dbac37980d83185a +- name: k8s.io/api + version: 072894a440bdee3a891dea811fe42902311cd2a3 + subpackages: + - admissionregistration/v1alpha1 + - admissionregistration/v1beta1 + - apps/v1 + - apps/v1beta1 + - apps/v1beta2 + - authentication/v1 + - authentication/v1beta1 + - authorization/v1 + - 
authorization/v1beta1 + - autoscaling/v1 + - autoscaling/v2beta1 + - batch/v1 + - batch/v1beta1 + - batch/v2alpha1 + - certificates/v1beta1 + - core/v1 + - events/v1beta1 + - extensions/v1beta1 + - imagepolicy/v1alpha1 + - networking/v1 + - policy/v1beta1 + - rbac/v1 + - rbac/v1alpha1 + - rbac/v1beta1 + - scheduling/v1alpha1 + - scheduling/v1beta1 + - settings/v1alpha1 + - storage/v1 + - storage/v1alpha1 + - storage/v1beta1 +- name: k8s.io/apimachinery + version: 103fd098999dc9c0c88536f5c9ad2e5da39373ae + subpackages: + - pkg/api/errors + - pkg/api/meta + - pkg/api/resource + - pkg/apis/meta/v1 + - pkg/apis/meta/v1/unstructured + - pkg/apis/meta/v1beta1 + - pkg/conversion + - pkg/conversion/queryparams + - pkg/fields + - pkg/labels + - pkg/runtime + - pkg/runtime/schema + - pkg/runtime/serializer + - pkg/runtime/serializer/json + - pkg/runtime/serializer/protobuf + - pkg/runtime/serializer/recognizer + - pkg/runtime/serializer/streaming + - pkg/runtime/serializer/versioning + - pkg/selection + - pkg/types + - pkg/util/clock + - pkg/util/errors + - pkg/util/framer + - pkg/util/intstr + - pkg/util/json + - pkg/util/net + - pkg/util/runtime + - pkg/util/sets + - pkg/util/validation + - pkg/util/validation/field + - pkg/util/wait + - pkg/util/yaml + - pkg/version + - pkg/watch + - third_party/forked/golang/reflect +- name: k8s.io/client-go + version: 7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65 + subpackages: + - discovery + - kubernetes + - kubernetes/scheme + - kubernetes/typed/admissionregistration/v1alpha1 + - kubernetes/typed/admissionregistration/v1beta1 + - kubernetes/typed/apps/v1 + - kubernetes/typed/apps/v1beta1 + - kubernetes/typed/apps/v1beta2 + - kubernetes/typed/authentication/v1 + - kubernetes/typed/authentication/v1beta1 + - kubernetes/typed/authorization/v1 + - kubernetes/typed/authorization/v1beta1 + - kubernetes/typed/autoscaling/v1 + - kubernetes/typed/autoscaling/v2beta1 + - kubernetes/typed/batch/v1 + - kubernetes/typed/batch/v1beta1 + - 
kubernetes/typed/batch/v2alpha1 + - kubernetes/typed/certificates/v1beta1 + - kubernetes/typed/core/v1 + - kubernetes/typed/events/v1beta1 + - kubernetes/typed/extensions/v1beta1 + - kubernetes/typed/networking/v1 + - kubernetes/typed/policy/v1beta1 + - kubernetes/typed/rbac/v1 + - kubernetes/typed/rbac/v1alpha1 + - kubernetes/typed/rbac/v1beta1 + - kubernetes/typed/scheduling/v1alpha1 + - kubernetes/typed/scheduling/v1beta1 + - kubernetes/typed/settings/v1alpha1 + - kubernetes/typed/storage/v1 + - kubernetes/typed/storage/v1alpha1 + - kubernetes/typed/storage/v1beta1 + - pkg/apis/clientauthentication + - pkg/apis/clientauthentication/v1alpha1 + - pkg/apis/clientauthentication/v1beta1 + - pkg/version + - plugin/pkg/client/auth/exec + - rest + - rest/watch + - tools/clientcmd/api + - tools/metrics + - tools/reference + - transport + - util/cert + - util/connrotation + - util/flowcontrol + - util/integer +testImports: [] diff --git a/source/code/go/src/plugins/glide.yaml b/source/code/go/src/plugins/glide.yaml new file mode 100644 index 000000000..b986ece21 --- /dev/null +++ b/source/code/go/src/plugins/glide.yaml @@ -0,0 +1,15 @@ +package: plugins +import: +- package: github.com/fluent/fluent-bit-go + subpackages: + - output +- package: github.com/mitchellh/mapstructure + version: ^1.0.0 +- package: k8s.io/apimachinery + subpackages: + - pkg/apis/meta/v1 +- package: k8s.io/client-go + version: ^8.0.0 + subpackages: + - kubernetes + - rest diff --git a/source/code/go/src/plugins/oms.go b/source/code/go/src/plugins/oms.go new file mode 100644 index 000000000..49472c74b --- /dev/null +++ b/source/code/go/src/plugins/oms.go @@ -0,0 +1,359 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/fluent/fluent-bit-go/output" + "github.com/mitchellh/mapstructure" + lumberjack "gopkg.in/natefinch/lumberjack.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// DataType for Container Log +const DataType = "CONTAINER_LOG_BLOB" + +// IPName for Container Log +const IPName = "Containers" +const containerInventoryPath = "/var/opt/microsoft/docker-cimprov/state/ContainerInventory" +const defaultContainerInventoryRefreshInterval = 60 +const defaultKubeSystemContainersRefreshInterval = 300 + +var ( + // PluginConfiguration the plugins configuration + PluginConfiguration map[string]string + // HTTPClient for making POST requests to OMSEndpoint + HTTPClient http.Client + // OMSEndpoint ingestion endpoint + OMSEndpoint string + // Computer (Hostname) when ingesting into ContainerLog table + Computer string +) + +var ( + // ImageIDMap caches the container id to image mapping + ImageIDMap map[string]string + // NameIDMap caches the container it to Name mapping + NameIDMap map[string]string + // IgnoreIDSet set of container Ids of kube-system pods + IgnoreIDSet map[string]bool + + // DataUpdateMutex read and write mutex access to the container id set + DataUpdateMutex = &sync.Mutex{} +) + +var ( + // FLBLogger stream + FLBLogger = createLogger() + + // Log wrapper function + Log = FLBLogger.Printf +) + +// ContainerInventory represents the container info +type ContainerInventory struct { + ElementName string `json:"ElementName"` + CreatedTime string `json:"CreatedTime"` + State string `json:"State"` + ExitCode int `json:"ExitCode"` + StartedTime string `json:"StartedTime"` + FinishedTime string `json:"FinishedTime"` + ImageID string `json:"ImageId"` + Image string `json:"Image"` + Repository string `json:"Repository"` + ImageTag string `json:"ImageTag"` + ComposeGroup string `json:"ComposeGroup"` + ContainerHostname string `json:"ContainerHostname"` + Computer string `json:"Computer"` + Command string `json:"Command"` + EnvironmentVar string `json:"EnvironmentVar"` + Ports string `json:"Ports"` + Links string `json:"Links"` +} + +// DataItem represents the object 
corresponding to the json that is sent by fluentbit tail plugin +type DataItem struct { + LogEntry string `json:"LogEntry"` + LogEntrySource string `json:"LogEntrySource"` + LogEntryTimeStamp string `json:"LogEntryTimeStamp"` + ID string `json:"Id"` + Image string `json:"Image"` + Name string `json:"Name"` + SourceSystem string `json:"SourceSystem"` + Computer string `json:"Computer"` + Filepath string `json:"Filepath"` +} + +// ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point +type ContainerLogBlob struct { + DataType string `json:"DataType"` + IPName string `json:"IPName"` + DataItems []DataItem `json:"DataItems"` +} + +func populateMaps() { + + Log("Updating ImageIDMap and NameIDMap") + + _imageIDMap := make(map[string]string) + _nameIDMap := make(map[string]string) + files, err := ioutil.ReadDir(containerInventoryPath) + + if err != nil { + Log("error when reading container inventory %s\n", err.Error()) + } + + for _, file := range files { + fullPath := fmt.Sprintf("%s/%s", containerInventoryPath, file.Name()) + fileContent, err := ioutil.ReadFile(fullPath) + if err != nil { + Log("Error reading file content %s", fullPath) + Log(err.Error()) + } + var containerInventory ContainerInventory + unmarshallErr := json.Unmarshal(fileContent, &containerInventory) + + if unmarshallErr != nil { + Log("Unmarshall error when reading file %s %s \n", fullPath, unmarshallErr.Error()) + } + + _imageIDMap[file.Name()] = containerInventory.Image + _nameIDMap[file.Name()] = containerInventory.ElementName + } + Log("Locking to update image and name maps") + DataUpdateMutex.Lock() + ImageIDMap = _imageIDMap + NameIDMap = _nameIDMap + DataUpdateMutex.Unlock() + Log("Unlocking after updating image and name maps") +} + +func createLogger() *log.Logger { + + var logfile *os.File + path := "/var/opt/microsoft/docker-cimprov/log/fluent-bit-out-oms-runtime.log" + if _, err := os.Stat(path); err == nil { + fmt.Printf("File Exists. 
Opening file in append mode...\n") + logfile, err = os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0600) + if err != nil { + fmt.Printf(err.Error()) + } + } + + if _, err := os.Stat(path); os.IsNotExist(err) { + fmt.Printf("File Doesnt Exist. Creating file...\n") + logfile, err = os.Create(path) + if err != nil { + fmt.Printf(err.Error()) + } + } + + logger := log.New(logfile, "", 0) + + logger.SetOutput(&lumberjack.Logger{ + Filename: path, + MaxSize: 10, //megabytes + MaxBackups: 3, + MaxAge: 28, //days + Compress: true, // false by default + }) + + logger.SetFlags(log.Ltime | log.Lshortfile | log.LstdFlags) + return logger +} + +func updateContainersData() { + + containerInventoryRefreshInterval, err := strconv.Atoi(PluginConfiguration["container_inventory_refresh_interval"]) + if err != nil { + Log("Error Reading Container Inventory Refresh Interval %s", err.Error()) + containerInventoryRefreshInterval = defaultContainerInventoryRefreshInterval + } + Log("containerInventoryRefreshInterval = %d \n", containerInventoryRefreshInterval) + go initMaps(containerInventoryRefreshInterval) + + kubeSystemContainersRefreshInterval, err := strconv.Atoi(PluginConfiguration["kube_system_containers_refresh_interval"]) + if err != nil { + Log("Error Reading Kube System Container Ids Refresh Interval %s", err.Error()) + kubeSystemContainersRefreshInterval = defaultKubeSystemContainersRefreshInterval + } + Log("kubeSystemContainersRefreshInterval = %d \n", kubeSystemContainersRefreshInterval) + + go updateIgnoreContainerIds(kubeSystemContainersRefreshInterval) +} + +func initMaps(refreshInterval int) { + ImageIDMap = make(map[string]string) + NameIDMap = make(map[string]string) + + populateMaps() + + for range time.Tick(time.Second * time.Duration(refreshInterval)) { + populateMaps() + } +} + +func updateIgnoreContainerIds(refreshInterval int) { + IgnoreIDSet = make(map[string]bool) + + updateKubeSystemContainerIDs() + + for range time.Tick(time.Second * 
time.Duration(refreshInterval)) { + updateKubeSystemContainerIDs() + } +} + +func updateKubeSystemContainerIDs() { + + if strings.Compare(os.Getenv("DISABLE_KUBE_SYSTEM_LOG_COLLECTION"), "true") != 0 { + Log("Kube System Log Collection is ENABLED.") + return + } + + Log("Kube System Log Collection is DISABLED. Collecting containerIds to drop their records") + config, err := rest.InClusterConfig() + if err != nil { + Log("Error getting config %s\n", err.Error()) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + Log("Error getting clientset %s", err.Error()) + } + + pods, err := clientset.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) + if err != nil { + Log("Error getting pods %s\n", err.Error()) + } + + _ignoreIDSet := make(map[string]bool) + for _, pod := range pods.Items { + for _, status := range pod.Status.ContainerStatuses { + lastSlashIndex := strings.LastIndex(status.ContainerID, "/") + _ignoreIDSet[status.ContainerID[lastSlashIndex+1:len(status.ContainerID)]] = true + } + } + + Log("Locking to update kube-system container IDs") + DataUpdateMutex.Lock() + IgnoreIDSet = _ignoreIDSet + DataUpdateMutex.Unlock() + Log("Unlocking after updating kube-system container IDs") +} + +// PostDataHelper sends data to the OMS endpoint +func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { + + start := time.Now() + var dataItems []DataItem + DataUpdateMutex.Lock() + + for _, record := range tailPluginRecords { + + containerID := getContainerIDFromFilePath(toString(record["Filepath"])) + + if containsKey(IgnoreIDSet, containerID) { + continue + } + + var dataItem DataItem + stringMap := make(map[string]string) + + // convert map[interface{}]interface{} to map[string]string + for key, value := range record { + strKey := fmt.Sprintf("%v", key) + strValue := toString(value) + stringMap[strKey] = strValue + } + + stringMap["Id"] = containerID + stringMap["Image"] = ImageIDMap[containerID] + stringMap["Name"] = 
NameIDMap[containerID] + stringMap["Computer"] = Computer + mapstructure.Decode(stringMap, &dataItem) + dataItems = append(dataItems, dataItem) + } + DataUpdateMutex.Unlock() + + if len(dataItems) > 0 { + logEntry := ContainerLogBlob{ + DataType: DataType, + IPName: IPName, + DataItems: dataItems} + + marshalled, err := json.Marshal(logEntry) + req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled)) + req.Header.Set("Content-Type", "application/json") + + resp, err := HTTPClient.Do(req) + elapsed := time.Since(start) + + if err != nil { + Log("Error when sending request %s \n", err.Error()) + Log("Failed to flush %d records after %s", len(dataItems), elapsed) + return output.FLB_RETRY + } + + if resp == nil || resp.StatusCode != 200 { + if resp != nil { + Log("Status %s Status Code %d", resp.Status, resp.StatusCode) + } + return output.FLB_RETRY + } + + Log("Successfully flushed %d records in %s", len(dataItems), elapsed) + } + + return output.FLB_OK +} + +func containsKey(currentMap map[string]bool, key string) bool { + _, c := currentMap[key] + return c +} + +func toString(s interface{}) string { + value := s.([]uint8) + return string([]byte(value[:])) +} + +func getContainerIDFromFilePath(filepath string) string { + start := strings.LastIndex(filepath, "-") + end := strings.LastIndex(filepath, ".") + return filepath[start+1 : end] +} + +// ReadConfig reads and populates plugin configuration +func ReadConfig(pluginConfPath string) map[string]string { + + pluginConf, err := ReadConfiguration(pluginConfPath) + omsadminConf, err := ReadConfiguration(pluginConf["omsadmin_conf_path"]) + + if err != nil { + Log(err.Error()) + } + + containerHostName, err := ioutil.ReadFile(pluginConf["container_host_file_path"]) + if err != nil { + Log("Error when reading containerHostName file %s", err.Error()) + } + + Computer = strings.TrimSuffix(toString(containerHostName), "\n") + Log("Computer == %s \n", Computer) + + OMSEndpoint = omsadminConf["OMS_ENDPOINT"] 
+ Log("OMSEndpoint %s", OMSEndpoint) + + return pluginConf +} diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go new file mode 100644 index 000000000..dad0ede81 --- /dev/null +++ b/source/code/go/src/plugins/out_oms.go @@ -0,0 +1,57 @@ +package main + +import ( + "github.com/fluent/fluent-bit-go/output" +) +import ( + "C" + "unsafe" +) + +//export FLBPluginRegister +func FLBPluginRegister(ctx unsafe.Pointer) int { + return output.FLBPluginRegister(ctx, "oms", "Stdout GO!") +} + +//export FLBPluginInit +// (fluentbit will call this) +// ctx (context) pointer to fluentbit context (state/ c code) +func FLBPluginInit(ctx unsafe.Pointer) int { + Log("Initializing out_oms go plugin for fluentbit") + PluginConfiguration = ReadConfig("/etc/opt/microsoft/docker-cimprov/out_oms.conf") + CreateHTTPClient() + updateContainersData() + return output.FLB_OK +} + +//export FLBPluginFlush +func FLBPluginFlush(data unsafe.Pointer, length C.int, tag *C.char) int { + var count int + var ret int + var record map[interface{}]interface{} + var records []map[interface{}]interface{} + + // Create Fluent Bit decoder + dec := output.NewDecoder(data, int(length)) + + // Iterate Records + count = 0 + for { + // Extract Record + ret, _, record = output.GetRecord(dec) + if ret != 0 { + break + } + records = append(records, record) + count++ + } + return PostDataHelper(records) +} + +// FLBPluginExit exits the plugin +func FLBPluginExit() int { + return output.FLB_OK +} + +func main() { +} diff --git a/source/code/go/src/plugins/utils.go b/source/code/go/src/plugins/utils.go new file mode 100644 index 000000000..0e33f43f9 --- /dev/null +++ b/source/code/go/src/plugins/utils.go @@ -0,0 +1,67 @@ +package main + +import ( + "bufio" + "crypto/tls" + "log" + "net/http" + "os" + "strings" +) + +// ReadConfiguration reads a property file +func ReadConfiguration(filename string) (map[string]string, error) { + config := map[string]string{} + + if len(filename) == 0 
{ + return config, nil + } + + file, err := os.Open(filename) + if err != nil { + log.Fatal(err) + return nil, err + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + currentLine := scanner.Text() + if equalIndex := strings.Index(currentLine, "="); equalIndex >= 0 { + if key := strings.TrimSpace(currentLine[:equalIndex]); len(key) > 0 { + value := "" + if len(currentLine) > equalIndex { + value = strings.TrimSpace(currentLine[equalIndex+1:]) + } + config[key] = value + } + } + } + + if err := scanner.Err(); err != nil { + log.Fatal(err) + return nil, err + } + + return config, nil +} + +// CreateHTTPClient used to create the client for sending post requests to OMSEndpoint +func CreateHTTPClient() { + + cert, err := tls.LoadX509KeyPair(PluginConfiguration["cert_file_path"], PluginConfiguration["key_file_path"]) + if err != nil { + Log("Error when loading cert %s", err.Error()) + } + + tlsConfig := &tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + tlsConfig.BuildNameToCertificate() + transport := &http.Transport{TLSClientConfig: tlsConfig} + + HTTPClient = http.Client{Transport: transport} + + Log("Successfully created HTTP Client") +} From b02f2ec57e47c68648596ef7487bf320fa5e9331 Mon Sep 17 00:00:00 2001 From: Dilip Raghunathan Date: Fri, 14 Sep 2018 11:24:12 -0700 Subject: [PATCH 07/25] Dilipr/glide updates (#127) * Updating glide.* files to include lumberjack --- source/code/go/src/plugins/glide.lock | 6 ++++-- source/code/go/src/plugins/glide.yaml | 2 ++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/source/code/go/src/plugins/glide.lock b/source/code/go/src/plugins/glide.lock index 79745820b..4597b594a 100644 --- a/source/code/go/src/plugins/glide.lock +++ b/source/code/go/src/plugins/glide.lock @@ -1,5 +1,5 @@ -hash: a4b073d827b5cbb4a772dada9ff3bcf55c55afc3cda83ddec1e6edcdca8e219a -updated: 2018-09-06T04:07:01.808678175Z +hash: bb32415f402ab29751f29b8e394bc974cbc31861453d817aaeb94ef83dacc488 
+updated: 2018-09-14T18:14:28.748047598Z imports: - name: github.com/fluent/fluent-bit-go version: c4a158a6e3a793166c6ecfa2d5c80d71eada8959 @@ -83,6 +83,8 @@ imports: - rate - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- name: gopkg.in/natefinch/lumberjack.v2 + version: a96e63847dc3c67d17befa69c303767e2f84e54f - name: gopkg.in/yaml.v2 version: 670d4cfef0544295bc27a114dbac37980d83185a - name: k8s.io/api diff --git a/source/code/go/src/plugins/glide.yaml b/source/code/go/src/plugins/glide.yaml index b986ece21..403e1efc4 100644 --- a/source/code/go/src/plugins/glide.yaml +++ b/source/code/go/src/plugins/glide.yaml @@ -5,6 +5,8 @@ import: - output - package: github.com/mitchellh/mapstructure version: ^1.0.0 +- package: gopkg.in/natefinch/lumberjack.v2 + version: ^2.1.0 - package: k8s.io/apimachinery subpackages: - pkg/apis/meta/v1 From e01c67845cd5d99f77b8dafd3e579d933984c3af Mon Sep 17 00:00:00 2001 From: Vishwanath Narasimhan Date: Mon, 17 Sep 2018 15:42:01 -0700 Subject: [PATCH 08/25] containerID="" for pull issues --- source/code/plugin/in_kube_podinventory.rb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/code/plugin/in_kube_podinventory.rb b/source/code/plugin/in_kube_podinventory.rb index f478705f6..2cd1e1bc3 100644 --- a/source/code/plugin/in_kube_podinventory.rb +++ b/source/code/plugin/in_kube_podinventory.rb @@ -143,7 +143,8 @@ def parse_and_emit_records(podInventory, serviceList) if !container['containerID'].nil? record['ContainerID'] = container['containerID'].split("//")[1] else - record['ContainerID'] = "00000000-0000-0000-0000-000000000000" + # for containers that have image issues (like invalid image/tag etc..) this will be empty. 
do not make it all 0 + record['ContainerID'] = "" end #keeping this as which is same as InstanceName in perf table record['ContainerName'] = podUid + "/" +container['name'] From b0ba22deaf43c29058d61f0dd76c2c64c34f5ac4 Mon Sep 17 00:00:00 2001 From: Dilip Raghunathan Date: Tue, 18 Sep 2018 16:59:46 -0700 Subject: [PATCH 09/25] Using KubeAPI for getting image,name. Adding more logs (#129) * Using KubeAPI for getting image,name. Adding more logs * Moving log file and state file to within the omsagent container * Changing log and state paths --- installer/conf/td-agent-bit.conf | 4 +- source/code/go/src/plugins/oms.go | 105 +++++++++++++------------- source/code/go/src/plugins/out_oms.go | 2 +- 3 files changed, 54 insertions(+), 57 deletions(-) diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index cf490c077..84a9fcf94 100644 --- a/installer/conf/td-agent-bit.conf +++ b/installer/conf/td-agent-bit.conf @@ -2,13 +2,13 @@ Flush 5 Log_Level info Parsers_File /etc/td-agent-bit/parsers.conf - Log_File /var/log/fluent-bit.log + Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log [INPUT] Name tail Tag oms.container.log.* Path /var/log/containers/*.log - DB /var/log/fblogs.db + DB /var/opt/microsoft/docker-cimprov/state/fblogs.db Parser docker Mem_Buf_Limit 30m Path_Key filepath diff --git a/source/code/go/src/plugins/oms.go b/source/code/go/src/plugins/oms.go index 49472c74b..c18135dcc 100644 --- a/source/code/go/src/plugins/oms.go +++ b/source/code/go/src/plugins/oms.go @@ -26,7 +26,6 @@ const DataType = "CONTAINER_LOG_BLOB" // IPName for Container Log const IPName = "Containers" -const containerInventoryPath = "/var/opt/microsoft/docker-cimprov/state/ContainerInventory" const defaultContainerInventoryRefreshInterval = 60 const defaultKubeSystemContainersRefreshInterval = 300 @@ -51,6 +50,9 @@ var ( // DataUpdateMutex read and write mutex access to the container id set DataUpdateMutex = &sync.Mutex{} + + // ClientSet for querying 
KubeAPIs + ClientSet *kubernetes.Clientset ) var ( @@ -61,27 +63,6 @@ var ( Log = FLBLogger.Printf ) -// ContainerInventory represents the container info -type ContainerInventory struct { - ElementName string `json:"ElementName"` - CreatedTime string `json:"CreatedTime"` - State string `json:"State"` - ExitCode int `json:"ExitCode"` - StartedTime string `json:"StartedTime"` - FinishedTime string `json:"FinishedTime"` - ImageID string `json:"ImageId"` - Image string `json:"Image"` - Repository string `json:"Repository"` - ImageTag string `json:"ImageTag"` - ComposeGroup string `json:"ComposeGroup"` - ContainerHostname string `json:"ContainerHostname"` - Computer string `json:"Computer"` - Command string `json:"Command"` - EnvironmentVar string `json:"EnvironmentVar"` - Ports string `json:"Ports"` - Links string `json:"Links"` -} - // DataItem represents the object corresponding to the json that is sent by fluentbit tail plugin type DataItem struct { LogEntry string `json:"LogEntry"` @@ -108,29 +89,25 @@ func populateMaps() { _imageIDMap := make(map[string]string) _nameIDMap := make(map[string]string) - files, err := ioutil.ReadDir(containerInventoryPath) + pods, err := ClientSet.CoreV1().Pods("").List(metav1.ListOptions{}) if err != nil { - Log("error when reading container inventory %s\n", err.Error()) + Log("Error getting pods %s\n", err.Error()) } - for _, file := range files { - fullPath := fmt.Sprintf("%s/%s", containerInventoryPath, file.Name()) - fileContent, err := ioutil.ReadFile(fullPath) - if err != nil { - Log("Error reading file content %s", fullPath) - Log(err.Error()) - } - var containerInventory ContainerInventory - unmarshallErr := json.Unmarshal(fileContent, &containerInventory) - - if unmarshallErr != nil { - Log("Unmarshall error when reading file %s %s \n", fullPath, unmarshallErr.Error()) + for _, pod := range pods.Items { + for _, status := range pod.Status.ContainerStatuses { + lastSlashIndex := strings.LastIndex(status.ContainerID, "/") + 
containerID := status.ContainerID[lastSlashIndex+1 : len(status.ContainerID)] + image := status.Image + name := fmt.Sprintf("%s/%s", pod.UID, status.Name) + if containerID != "" { + _imageIDMap[containerID] = image + _nameIDMap[containerID] = name + } } - - _imageIDMap[file.Name()] = containerInventory.Image - _nameIDMap[file.Name()] = containerInventory.ElementName } + Log("Locking to update image and name maps") DataUpdateMutex.Lock() ImageIDMap = _imageIDMap @@ -164,7 +141,7 @@ func createLogger() *log.Logger { logger.SetOutput(&lumberjack.Logger{ Filename: path, MaxSize: 10, //megabytes - MaxBackups: 3, + MaxBackups: 1, MaxAge: 28, //days Compress: true, // false by default }) @@ -222,17 +199,8 @@ func updateKubeSystemContainerIDs() { } Log("Kube System Log Collection is DISABLED. Collecting containerIds to drop their records") - config, err := rest.InClusterConfig() - if err != nil { - Log("Error getting config %s\n", err.Error()) - } - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - Log("Error getting clientset %s", err.Error()) - } - - pods, err := clientset.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) + pods, err := ClientSet.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) if err != nil { Log("Error getting pods %s\n", err.Error()) } @@ -278,8 +246,27 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } stringMap["Id"] = containerID - stringMap["Image"] = ImageIDMap[containerID] - stringMap["Name"] = NameIDMap[containerID] + + if val, ok := ImageIDMap[containerID]; ok { + stringMap["Image"] = val + } else { + Log("ContainerId %s not present in Map ", containerID) + Log("CurrentMap Snapshot \n") + for k, v := range ImageIDMap { + Log("%s ==> %s", k, v) + } + } + + if val, ok := NameIDMap[containerID]; ok { + stringMap["Name"] = val + } else { + Log("ContainerId %s not present in Map ", containerID) + Log("CurrentMap Snapshot \n") + for k, v := range NameIDMap { + Log("%s ==> %s", k, v) + } + 
} + stringMap["Computer"] = Computer mapstructure.Decode(stringMap, &dataItem) dataItems = append(dataItems, dataItem) @@ -334,8 +321,8 @@ func getContainerIDFromFilePath(filepath string) string { return filepath[start+1 : end] } -// ReadConfig reads and populates plugin configuration -func ReadConfig(pluginConfPath string) map[string]string { +// InitializeConfig reads and populates plugin configuration +func InitializeConfig(pluginConfPath string) map[string]string { pluginConf, err := ReadConfiguration(pluginConfPath) omsadminConf, err := ReadConfiguration(pluginConf["omsadmin_conf_path"]) @@ -355,5 +342,15 @@ func ReadConfig(pluginConfPath string) map[string]string { OMSEndpoint = omsadminConf["OMS_ENDPOINT"] Log("OMSEndpoint %s", OMSEndpoint) + config, err := rest.InClusterConfig() + if err != nil { + Log("Error getting config %s\n", err.Error()) + } + + ClientSet, err = kubernetes.NewForConfig(config) + if err != nil { + Log("Error getting clientset %s", err.Error()) + } + return pluginConf } diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go index dad0ede81..8c23f47a8 100644 --- a/source/code/go/src/plugins/out_oms.go +++ b/source/code/go/src/plugins/out_oms.go @@ -18,7 +18,7 @@ func FLBPluginRegister(ctx unsafe.Pointer) int { // ctx (context) pointer to fluentbit context (state/ c code) func FLBPluginInit(ctx unsafe.Pointer) int { Log("Initializing out_oms go plugin for fluentbit") - PluginConfiguration = ReadConfig("/etc/opt/microsoft/docker-cimprov/out_oms.conf") + PluginConfiguration = InitializeConfig("/etc/opt/microsoft/docker-cimprov/out_oms.conf") CreateHTTPClient() updateContainersData() return output.FLB_OK From 97834199721172ba0a67828b19a6f26de1a4b0a0 Mon Sep 17 00:00:00 2001 From: Dilip Raghunathan Date: Thu, 27 Sep 2018 14:35:29 -0700 Subject: [PATCH 10/25] Dilipr/mark comments (#130) * Marks Comments + Error Handling * Drop records from files that are not in k8s format * Remove unnecessary log line' * 
Adding Log to the file that doesn't conform to the expected format --- source/code/go/src/plugins/oms.go | 227 ++++++++++++++------------ source/code/go/src/plugins/out_oms.go | 6 +- source/code/go/src/plugins/utils.go | 1 + 3 files changed, 123 insertions(+), 111 deletions(-) diff --git a/source/code/go/src/plugins/oms.go b/source/code/go/src/plugins/oms.go index c18135dcc..2e9e2f3d0 100644 --- a/source/code/go/src/plugins/oms.go +++ b/source/code/go/src/plugins/oms.go @@ -12,7 +12,8 @@ import ( "strings" "sync" "time" - +) +import ( "github.com/fluent/fluent-bit-go/output" "github.com/mitchellh/mapstructure" lumberjack "gopkg.in/natefinch/lumberjack.v2" @@ -24,6 +25,9 @@ import ( // DataType for Container Log const DataType = "CONTAINER_LOG_BLOB" +// ContainerLogPluginConfFilePath --> config file path for container log plugin +const ContainerLogPluginConfFilePath = "/etc/opt/microsoft/docker-cimprov/out_oms.conf" + // IPName for Container Log const IPName = "Containers" const defaultContainerInventoryRefreshInterval = 60 @@ -47,18 +51,22 @@ var ( NameIDMap map[string]string // IgnoreIDSet set of container Ids of kube-system pods IgnoreIDSet map[string]bool - // DataUpdateMutex read and write mutex access to the container id set DataUpdateMutex = &sync.Mutex{} - // ClientSet for querying KubeAPIs ClientSet *kubernetes.Clientset ) +var ( + // KubeSystemContainersRefreshTicker updates the kube-system containers + KubeSystemContainersRefreshTicker = time.NewTicker(time.Second * 300) + // ContainerImageNameRefreshTicker updates the container image and names periodically + ContainerImageNameRefreshTicker = time.NewTicker(time.Second * 60) +) + var ( // FLBLogger stream FLBLogger = createLogger() - // Log wrapper function Log = FLBLogger.Printf ) @@ -83,41 +91,7 @@ type ContainerLogBlob struct { DataItems []DataItem `json:"DataItems"` } -func populateMaps() { - - Log("Updating ImageIDMap and NameIDMap") - - _imageIDMap := make(map[string]string) - _nameIDMap := 
make(map[string]string) - - pods, err := ClientSet.CoreV1().Pods("").List(metav1.ListOptions{}) - if err != nil { - Log("Error getting pods %s\n", err.Error()) - } - - for _, pod := range pods.Items { - for _, status := range pod.Status.ContainerStatuses { - lastSlashIndex := strings.LastIndex(status.ContainerID, "/") - containerID := status.ContainerID[lastSlashIndex+1 : len(status.ContainerID)] - image := status.Image - name := fmt.Sprintf("%s/%s", pod.UID, status.Name) - if containerID != "" { - _imageIDMap[containerID] = image - _nameIDMap[containerID] = name - } - } - } - - Log("Locking to update image and name maps") - DataUpdateMutex.Lock() - ImageIDMap = _imageIDMap - NameIDMap = _nameIDMap - DataUpdateMutex.Unlock() - Log("Unlocking after updating image and name maps") -} - func createLogger() *log.Logger { - var logfile *os.File path := "/var/opt/microsoft/docker-cimprov/log/fluent-bit-out-oms-runtime.log" if _, err := os.Stat(path); err == nil { @@ -150,88 +124,85 @@ func createLogger() *log.Logger { return logger } -func updateContainersData() { +func updateContainerImageNameMaps() { + for ; true; <-ContainerImageNameRefreshTicker.C { + Log("Updating ImageIDMap and NameIDMap") - containerInventoryRefreshInterval, err := strconv.Atoi(PluginConfiguration["container_inventory_refresh_interval"]) - if err != nil { - Log("Error Reading Container Inventory Refresh Interval %s", err.Error()) - containerInventoryRefreshInterval = defaultContainerInventoryRefreshInterval - } - Log("containerInventoryRefreshInterval = %d \n", containerInventoryRefreshInterval) - go initMaps(containerInventoryRefreshInterval) + _imageIDMap := make(map[string]string) + _nameIDMap := make(map[string]string) - kubeSystemContainersRefreshInterval, err := strconv.Atoi(PluginConfiguration["kube_system_containers_refresh_interval"]) - if err != nil { - Log("Error Reading Kube System Container Ids Refresh Interval %s", err.Error()) - kubeSystemContainersRefreshInterval = 
defaultKubeSystemContainersRefreshInterval - } - Log("kubeSystemContainersRefreshInterval = %d \n", kubeSystemContainersRefreshInterval) - - go updateIgnoreContainerIds(kubeSystemContainersRefreshInterval) -} - -func initMaps(refreshInterval int) { - ImageIDMap = make(map[string]string) - NameIDMap = make(map[string]string) - - populateMaps() - - for range time.Tick(time.Second * time.Duration(refreshInterval)) { - populateMaps() - } -} - -func updateIgnoreContainerIds(refreshInterval int) { - IgnoreIDSet = make(map[string]bool) + pods, err := ClientSet.CoreV1().Pods("").List(metav1.ListOptions{}) + if err != nil { + Log("Error getting pods %s\nIt is ok to log here and continue, because the logs will be missing image and Name, but the logs will still have the containerID", err.Error()) + } - updateKubeSystemContainerIDs() + for _, pod := range pods.Items { + for _, status := range pod.Status.ContainerStatuses { + lastSlashIndex := strings.LastIndex(status.ContainerID, "/") + containerID := status.ContainerID[lastSlashIndex+1 : len(status.ContainerID)] + image := status.Image + name := fmt.Sprintf("%s/%s", pod.UID, status.Name) + if containerID != "" { + _imageIDMap[containerID] = image + _nameIDMap[containerID] = name + } + } + } - for range time.Tick(time.Second * time.Duration(refreshInterval)) { - updateKubeSystemContainerIDs() + Log("Locking to update image and name maps") + DataUpdateMutex.Lock() + ImageIDMap = _imageIDMap + NameIDMap = _nameIDMap + DataUpdateMutex.Unlock() + Log("Unlocking after updating image and name maps") } } func updateKubeSystemContainerIDs() { + for ; true; <-KubeSystemContainersRefreshTicker.C { + if strings.Compare(os.Getenv("DISABLE_KUBE_SYSTEM_LOG_COLLECTION"), "true") != 0 { + Log("Kube System Log Collection is ENABLED.") + return + } - if strings.Compare(os.Getenv("DISABLE_KUBE_SYSTEM_LOG_COLLECTION"), "true") != 0 { - Log("Kube System Log Collection is ENABLED.") - return - } - - Log("Kube System Log Collection is DISABLED. 
Collecting containerIds to drop their records") + Log("Kube System Log Collection is DISABLED. Collecting containerIds to drop their records") - pods, err := ClientSet.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) - if err != nil { - Log("Error getting pods %s\n", err.Error()) - } + pods, err := ClientSet.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) + if err != nil { + Log("Error getting pods %s\nIt is ok to log here and continue. Kube-system logs will be collected", err.Error()) + } - _ignoreIDSet := make(map[string]bool) - for _, pod := range pods.Items { - for _, status := range pod.Status.ContainerStatuses { - lastSlashIndex := strings.LastIndex(status.ContainerID, "/") - _ignoreIDSet[status.ContainerID[lastSlashIndex+1:len(status.ContainerID)]] = true + _ignoreIDSet := make(map[string]bool) + for _, pod := range pods.Items { + for _, status := range pod.Status.ContainerStatuses { + lastSlashIndex := strings.LastIndex(status.ContainerID, "/") + _ignoreIDSet[status.ContainerID[lastSlashIndex+1:len(status.ContainerID)]] = true + } } - } - Log("Locking to update kube-system container IDs") - DataUpdateMutex.Lock() - IgnoreIDSet = _ignoreIDSet - DataUpdateMutex.Unlock() - Log("Unlocking after updating kube-system container IDs") + Log("Locking to update kube-system container IDs") + DataUpdateMutex.Lock() + IgnoreIDSet = _ignoreIDSet + DataUpdateMutex.Unlock() + Log("Unlocking after updating kube-system container IDs") + } } // PostDataHelper sends data to the OMS endpoint func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { + defer DataUpdateMutex.Unlock() + start := time.Now() var dataItems []DataItem DataUpdateMutex.Lock() for _, record := range tailPluginRecords { - containerID := getContainerIDFromFilePath(toString(record["Filepath"])) + filepath := toString(record["Filepath"]) + containerID := getContainerIDFromFilePath(filepath) - if containsKey(IgnoreIDSet, containerID) { + if containerID == "" || 
containsKey(IgnoreIDSet, containerID) { continue } @@ -271,7 +242,6 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { mapstructure.Decode(stringMap, &dataItem) dataItems = append(dataItems, dataItem) } - DataUpdateMutex.Unlock() if len(dataItems) > 0 { logEntry := ContainerLogBlob{ @@ -318,39 +288,80 @@ func toString(s interface{}) string { func getContainerIDFromFilePath(filepath string) string { start := strings.LastIndex(filepath, "-") end := strings.LastIndex(filepath, ".") + if start >= end || start == -1 || end == -1 { + // This means the file is not a managed Kubernetes docker log file. + // Drop all records from the file + Log("File %s is not a Kubernetes managed docker log file. Dropping all records from the file", filepath) + return "" + } return filepath[start+1 : end] } -// InitializeConfig reads and populates plugin configuration -func InitializeConfig(pluginConfPath string) map[string]string { +// InitializePlugin reads and populates plugin configuration +func InitializePlugin(pluginConfPath string) { + + IgnoreIDSet = make(map[string]bool) + ImageIDMap = make(map[string]string) + NameIDMap = make(map[string]string) - pluginConf, err := ReadConfiguration(pluginConfPath) - omsadminConf, err := ReadConfiguration(pluginConf["omsadmin_conf_path"]) + pluginConfig, err := ReadConfiguration(pluginConfPath) + if err != nil { + Log("Error Reading plugin config path : %s \n", err.Error()) + log.Fatalf("Error Reading plugin config path : %s \n", err.Error()) + } + omsadminConf, err := ReadConfiguration(pluginConfig["omsadmin_conf_path"]) if err != nil { Log(err.Error()) + log.Fatalf("Error Reading omsadmin configuration %s\n", err.Error()) } + OMSEndpoint = omsadminConf["OMS_ENDPOINT"] + Log("OMSEndpoint %s", OMSEndpoint) - containerHostName, err := ioutil.ReadFile(pluginConf["container_host_file_path"]) + // Initialize image,name map refresh ticker + containerInventoryRefreshInterval, err := 
strconv.Atoi(pluginConfig["container_inventory_refresh_interval"]) if err != nil { - Log("Error when reading containerHostName file %s", err.Error()) + Log("Error Reading Container Inventory Refresh Interval %s", err.Error()) + Log("Using Default Refresh Interval of %d s\n", defaultContainerInventoryRefreshInterval) + containerInventoryRefreshInterval = defaultContainerInventoryRefreshInterval } + Log("containerInventoryRefreshInterval = %d \n", containerInventoryRefreshInterval) + ContainerImageNameRefreshTicker = time.NewTicker(time.Second * time.Duration(containerInventoryRefreshInterval)) + // Initialize Kube System Refresh Ticker + kubeSystemContainersRefreshInterval, err := strconv.Atoi(pluginConfig["kube_system_containers_refresh_interval"]) + if err != nil { + Log("Error Reading Kube System Container Ids Refresh Interval %s", err.Error()) + Log("Using Default Refresh Interval of %d s\n", defaultKubeSystemContainersRefreshInterval) + kubeSystemContainersRefreshInterval = defaultKubeSystemContainersRefreshInterval + } + Log("kubeSystemContainersRefreshInterval = %d \n", kubeSystemContainersRefreshInterval) + KubeSystemContainersRefreshTicker = time.NewTicker(time.Second * time.Duration(kubeSystemContainersRefreshInterval)) + + // Populate Computer field + containerHostName, err := ioutil.ReadFile(pluginConfig["container_host_file_path"]) + if err != nil { + // It is ok to log here and continue, because only the Computer column will be missing, + // which can be deduced from a combination of containerId, and docker logs on the node + Log("Error when reading containerHostName file %s.\n It is ok to log here and continue, because only the Computer column will be missing, which can be deduced from a combination of containerId, and docker logs on the nodes\n", err.Error()) + } Computer = strings.TrimSuffix(toString(containerHostName), "\n") Log("Computer == %s \n", Computer) - OMSEndpoint = omsadminConf["OMS_ENDPOINT"] - Log("OMSEndpoint %s", OMSEndpoint) - + // 
Initialize KubeAPI Client config, err := rest.InClusterConfig() if err != nil { - Log("Error getting config %s\n", err.Error()) + Log("Error getting config %s.\nIt is ok to log here and continue, because the logs will be missing image and Name, but the logs will still have the containerID", err.Error()) } ClientSet, err = kubernetes.NewForConfig(config) if err != nil { - Log("Error getting clientset %s", err.Error()) + Log("Error getting clientset %s.\nIt is ok to log here and continue, because the logs will be missing image and Name, but the logs will still have the containerID", err.Error()) } - return pluginConf + PluginConfiguration = pluginConfig + + CreateHTTPClient() + go updateKubeSystemContainerIDs() + go updateContainerImageNameMaps() } diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go index 8c23f47a8..ec9a573d1 100644 --- a/source/code/go/src/plugins/out_oms.go +++ b/source/code/go/src/plugins/out_oms.go @@ -18,9 +18,7 @@ func FLBPluginRegister(ctx unsafe.Pointer) int { // ctx (context) pointer to fluentbit context (state/ c code) func FLBPluginInit(ctx unsafe.Pointer) int { Log("Initializing out_oms go plugin for fluentbit") - PluginConfiguration = InitializeConfig("/etc/opt/microsoft/docker-cimprov/out_oms.conf") - CreateHTTPClient() - updateContainersData() + InitializePlugin(ContainerLogPluginConfFilePath) return output.FLB_OK } @@ -50,6 +48,8 @@ func FLBPluginFlush(data unsafe.Pointer, length C.int, tag *C.char) int { // FLBPluginExit exits the plugin func FLBPluginExit() int { + KubeSystemContainersRefreshTicker.Stop() + ContainerImageNameRefreshTicker.Stop() return output.FLB_OK } diff --git a/source/code/go/src/plugins/utils.go b/source/code/go/src/plugins/utils.go index 0e33f43f9..1ac9b05a9 100644 --- a/source/code/go/src/plugins/utils.go +++ b/source/code/go/src/plugins/utils.go @@ -52,6 +52,7 @@ func CreateHTTPClient() { cert, err := tls.LoadX509KeyPair(PluginConfiguration["cert_file_path"], 
PluginConfiguration["key_file_path"]) if err != nil { Log("Error when loading cert %s", err.Error()) + log.Fatalf("Error when loading cert %s", err.Error()) } tlsConfig := &tls.Config{ From 8e35b7365bab9de6d087718887d5021167617a0d Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 27 Sep 2018 15:52:13 -0700 Subject: [PATCH 11/25] Rashmi/segfault latest (#132) * adding null checks in all providers * fixing type * fixing type * adding more null checks * update cjson --- source/code/cjson/cJSON.c | 3478 +++++++++++++---- source/code/cjson/cJSON.h | 398 +- ...iner_ContainerInventory_Class_Provider.cpp | 34 +- ...ner_ContainerStatistics_Class_Provider.cpp | 39 +- .../Container_DaemonEvent_Class_Provider.cpp | 6 +- ...ontainer_ImageInventory_Class_Provider.cpp | 19 +- .../Container_Process_Class_Provider.cpp | 2 +- 7 files changed, 3146 insertions(+), 830 deletions(-) diff --git a/source/code/cjson/cJSON.c b/source/code/cjson/cJSON.c index 77dbfe959..c561c7ceb 100755 --- a/source/code/cjson/cJSON.c +++ b/source/code/cjson/cJSON.c @@ -1,770 +1,2930 @@ /* - Copyright (c) 2009 Dave Gamble - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. */ /* cJSON */ /* JSON parser in C. 
*/ +/* disable warnings about old C89 functions in MSVC */ +#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) +#define _CRT_SECURE_NO_DEPRECATE +#endif + +#ifdef __GNUC__ +#pragma GCC visibility push(default) +#endif +#if defined(_MSC_VER) +#pragma warning (push) +/* disable warning about single line comments in system headers */ +#pragma warning (disable : 4001) +#endif + #include #include #include #include -#include #include #include + +#ifdef ENABLE_LOCALES +#include +#endif + +#if defined(_MSC_VER) +#pragma warning (pop) +#endif +#ifdef __GNUC__ +#pragma GCC visibility pop +#endif + #include "cJSON.h" -static const char *ep; -const char *cJSON_GetErrorPtr(void) {return ep;} +/* define our own boolean type */ +#define true ((cJSON_bool)1) +#define false ((cJSON_bool)0) -static int cJSON_strcasecmp(const char *s1,const char *s2) +typedef struct { + const unsigned char *json; + size_t position; +} error; +static error global_error = { NULL, 0 }; + +CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) { - if (!s1) return (s1==s2)?0:1;if (!s2) return 1; - for(; tolower(*s1) == tolower(*s2); ++s1, ++s2) if(*s1 == 0) return 0; - return tolower(*(const unsigned char *)s1) - tolower(*(const unsigned char *)s2); + return (const char*)(global_error.json + global_error.position); } -static void *(*cJSON_malloc)(size_t sz) = malloc; -static void (*cJSON_free)(void *ptr) = free; +CJSON_PUBLIC(char *) cJSON_GetStringValue(cJSON *item) { + if (!cJSON_IsString(item)) { + return NULL; + } + + return item->valuestring; +} -static char* cJSON_strdup(const char* str) +/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */ +#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 8) +#error cJSON.h and cJSON.c have different versions. Make sure that both have the same. 
+#endif + +CJSON_PUBLIC(const char*) cJSON_Version(void) { - size_t len; - char* copy; + static char version[15]; + sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH); - len = strlen(str) + 1; - if (!(copy = (char*)cJSON_malloc(len))) return 0; - memcpy(copy,str,len); - return copy; + return version; } -void cJSON_InitHooks(cJSON_Hooks* hooks) +/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */ +static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2) { - if (!hooks) { /* Reset hooks */ - cJSON_malloc = malloc; - cJSON_free = free; - return; - } + if ((string1 == NULL) || (string2 == NULL)) + { + return 1; + } + + if (string1 == string2) + { + return 0; + } + + for (; tolower(*string1) == tolower(*string2); (void)string1++, string2++) + { + if (*string1 == '\0') + { + return 0; + } + } + + return tolower(*string1) - tolower(*string2); +} - cJSON_malloc = (hooks->malloc_fn)?hooks->malloc_fn:malloc; - cJSON_free = (hooks->free_fn)?hooks->free_fn:free; +typedef struct internal_hooks +{ + void *(CJSON_CDECL *allocate)(size_t size); + void (CJSON_CDECL *deallocate)(void *pointer); + void *(CJSON_CDECL *reallocate)(void *pointer, size_t size); +} internal_hooks; + +#if defined(_MSC_VER) +/* work around MSVC error C2322: '...' address of dillimport '...' is not static */ +static void * CJSON_CDECL internal_malloc(size_t size) +{ + return malloc(size); +} +static void CJSON_CDECL internal_free(void *pointer) +{ + free(pointer); +} +static void * CJSON_CDECL internal_realloc(void *pointer, size_t size) +{ + return realloc(pointer, size); } +#else +#define internal_malloc malloc +#define internal_free free +#define internal_realloc realloc +#endif -/* Internal constructor. 
*/ -static cJSON *cJSON_New_Item(void) +static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc }; + +static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks) { - cJSON* node = (cJSON*)cJSON_malloc(sizeof(cJSON)); - if (node) memset(node,0,sizeof(cJSON)); - return node; + size_t length = 0; + unsigned char *copy = NULL; + + if (string == NULL) + { + return NULL; + } + + length = strlen((const char*)string) + sizeof(""); + copy = (unsigned char*)hooks->allocate(length); + if (copy == NULL) + { + return NULL; + } + memcpy(copy, string, length); + + return copy; } +CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks) +{ + if (hooks == NULL) + { + /* Reset hooks */ + global_hooks.allocate = malloc; + global_hooks.deallocate = free; + global_hooks.reallocate = realloc; + return; + } + + global_hooks.allocate = malloc; + if (hooks->malloc_fn != NULL) + { + global_hooks.allocate = hooks->malloc_fn; + } + + global_hooks.deallocate = free; + if (hooks->free_fn != NULL) + { + global_hooks.deallocate = hooks->free_fn; + } + + /* use realloc only if both free and malloc are used */ + global_hooks.reallocate = NULL; + if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free)) + { + global_hooks.reallocate = realloc; + } +} + +/* Internal constructor. */ +static cJSON *cJSON_New_Item(const internal_hooks * const hooks) +{ + cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON)); + if (node) + { + memset(node, '\0', sizeof(cJSON)); + } + + return node; +} /* Delete a cJSON structure. 
*/ -void cJSON_Delete(cJSON *c) +CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) { - cJSON *next; - while (c) - { - next=c->next; - if (!(c->type&cJSON_IsReference) && c->child) cJSON_Delete(c->child); - if (!(c->type&cJSON_IsReference) && c->valuestring) cJSON_free(c->valuestring); - if (!(c->type&cJSON_StringIsConst) && c->string) cJSON_free(c->string); - cJSON_free(c); - c=next; - } + cJSON *next = NULL; + while (item != NULL) + { + next = item->next; + if (!(item->type & cJSON_IsReference) && (item->child != NULL)) + { + cJSON_Delete(item->child); + } + if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL)) + { + global_hooks.deallocate(item->valuestring); + } + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + global_hooks.deallocate(item->string); + } + global_hooks.deallocate(item); + item = next; + } } -/* Parse the input text to generate a number, and populate the result into item. */ -static const char *parse_number(cJSON *item,const char *num) +/* get the decimal point character of the current locale */ +static unsigned char get_decimal_point(void) { - double n=0,sign=1,scale=0;int subscale=0,signsubscale=1; +#ifdef ENABLE_LOCALES + struct lconv *lconv = localeconv(); + return (unsigned char)lconv->decimal_point[0]; +#else + return '.'; +#endif +} - if (*num=='-') sign=-1,num++; /* Has sign? */ - if (*num=='0') num++; /* is zero */ - if (*num>='1' && *num<='9') do n=(n*10.0)+(*num++ -'0'); while (*num>='0' && *num<='9'); /* Number? */ - if (*num=='.' && num[1]>='0' && num[1]<='9') {num++; do n=(n*10.0)+(*num++ -'0'),scale--; while (*num>='0' && *num<='9');} /* Fractional part? */ - if (*num=='e' || *num=='E') /* Exponent? */ - { num++;if (*num=='+') num++; else if (*num=='-') signsubscale=-1,num++; /* With sign? */ - while (*num>='0' && *num<='9') subscale=(subscale*10)+(*num++ - '0'); /* Number? 
*/ - } +typedef struct +{ + const unsigned char *content; + size_t length; + size_t offset; + size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */ + internal_hooks hooks; +} parse_buffer; + +/* check if the given size is left to read in a given parse buffer (starting with 1) */ +#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length)) +/* check if the buffer can be accessed at the given index (starting with 0) */ +#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length)) +#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index)) +/* get a pointer to the buffer at the position */ +#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset) - n=sign*n*pow(10.0,(scale+subscale*signsubscale)); /* number = +/- number.fraction * 10^+/- exponent */ - - item->valuedouble=n; - item->valueint=(int)n; - item->type=cJSON_Number; - return num; +/* Parse the input text to generate a number, and populate the result into item. */ +static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer) +{ + double number = 0; + unsigned char *after_end = NULL; + unsigned char number_c_string[64]; + unsigned char decimal_point = get_decimal_point(); + size_t i = 0; + + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; + } + + /* copy the number into a temporary buffer and replace '.' 
with the decimal point + * of the current locale (for strtod) + * This also takes care of '\0' not necessarily being available for marking the end of the input */ + for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++) + { + switch (buffer_at_offset(input_buffer)[i]) + { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '+': + case '-': + case 'e': + case 'E': + number_c_string[i] = buffer_at_offset(input_buffer)[i]; + break; + + case '.': + number_c_string[i] = decimal_point; + break; + + default: + goto loop_end; + } + } +loop_end: + number_c_string[i] = '\0'; + + number = strtod((const char*)number_c_string, (char**)&after_end); + if (number_c_string == after_end) + { + return false; /* parse_error */ + } + + item->valuedouble = number; + + /* use saturation in case of overflow */ + if (number >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (number <= INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)number; + } + + item->type = cJSON_Number; + + input_buffer->offset += (size_t)(after_end - number_c_string); + return true; } -static int pow2gt (int x) { --x; x|=x>>1; x|=x>>2; x|=x>>4; x|=x>>8; x|=x>>16; return x+1; } +/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */ +CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) +{ + if (number >= INT_MAX) + { + object->valueint = INT_MAX; + } + else if (number <= INT_MIN) + { + object->valueint = INT_MIN; + } + else + { + object->valueint = (int)number; + } + + return object->valuedouble = number; +} -typedef struct {char *buffer; int length; int offset; } printbuffer; +typedef struct +{ + unsigned char *buffer; + size_t length; + size_t offset; + size_t depth; /* current nesting depth (for formatted printing) */ + cJSON_bool noalloc; + cJSON_bool format; /* is this print a formatted print */ + internal_hooks 
hooks; +} printbuffer; + +/* realloc printbuffer if necessary to have at least "needed" bytes more */ +static unsigned char* ensure(printbuffer * const p, size_t needed) +{ + unsigned char *newbuffer = NULL; + size_t newsize = 0; + + if ((p == NULL) || (p->buffer == NULL)) + { + return NULL; + } + + if ((p->length > 0) && (p->offset >= p->length)) + { + /* make sure that offset is valid */ + return NULL; + } + + if (needed > INT_MAX) + { + /* sizes bigger than INT_MAX are currently not supported */ + return NULL; + } + + needed += p->offset + 1; + if (needed <= p->length) + { + return p->buffer + p->offset; + } + + if (p->noalloc) { + return NULL; + } + + /* calculate new buffer size */ + if (needed > (INT_MAX / 2)) + { + /* overflow of int, use INT_MAX if possible */ + if (needed <= INT_MAX) + { + newsize = INT_MAX; + } + else + { + return NULL; + } + } + else + { + newsize = needed * 2; + } + + if (p->hooks.reallocate != NULL) + { + /* reallocate with realloc if available */ + newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize); + if (newbuffer == NULL) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + } + else + { + /* otherwise reallocate manually */ + newbuffer = (unsigned char*)p->hooks.allocate(newsize); + if (!newbuffer) + { + p->hooks.deallocate(p->buffer); + p->length = 0; + p->buffer = NULL; + + return NULL; + } + if (newbuffer) + { + memcpy(newbuffer, p->buffer, p->offset + 1); + } + p->hooks.deallocate(p->buffer); + } + p->length = newsize; + p->buffer = newbuffer; + + return newbuffer + p->offset; +} -static char* ensure(printbuffer *p,int needed) +/* calculate the new length of the string in a printbuffer and update the offset */ +static void update_offset(printbuffer * const buffer) { - char *newbuffer;int newsize; - if (!p || !p->buffer) return 0; - needed+=p->offset; - if (needed<=p->length) return p->buffer+p->offset; + const unsigned char *buffer_pointer = NULL; + if ((buffer == 
NULL) || (buffer->buffer == NULL)) + { + return; + } + buffer_pointer = buffer->buffer + buffer->offset; + + buffer->offset += strlen((const char*)buffer_pointer); +} - newsize=pow2gt(needed); - newbuffer=(char*)cJSON_malloc(newsize); - if (!newbuffer) {cJSON_free(p->buffer);p->length=0,p->buffer=0;return 0;} - if (newbuffer) memcpy(newbuffer,p->buffer,p->length); - cJSON_free(p->buffer); - p->length=newsize; - p->buffer=newbuffer; - return newbuffer+p->offset; +/* Render the number nicely from the given item into a string. */ +static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output_pointer = NULL; + double d = item->valuedouble; + int length = 0; + size_t i = 0; + unsigned char number_buffer[26]; /* temporary buffer to print the number into */ + unsigned char decimal_point = get_decimal_point(); + double test; + + if (output_buffer == NULL) + { + return false; + } + + /* This checks for NaN and Infinity */ + if ((d * 0) != 0) + { + length = sprintf((char*)number_buffer, "null"); + } + else + { + /* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */ + length = sprintf((char*)number_buffer, "%1.15g", d); + + /* Check whether the original double can be recovered */ + if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || ((double)test != d)) + { + /* If not, print with 17 decimal places of precision */ + length = sprintf((char*)number_buffer, "%1.17g", d); + } + } + + /* sprintf failed or buffer overrun occured */ + if ((length < 0) || (length >(int)(sizeof(number_buffer) - 1))) + { + return false; + } + + /* reserve appropriate space in the output */ + output_pointer = ensure(output_buffer, (size_t)length + sizeof("")); + if (output_pointer == NULL) + { + return false; + } + + /* copy the printed number to the output and replace locale + * dependent decimal point with '.' 
*/ + for (i = 0; i < ((size_t)length); i++) + { + if (number_buffer[i] == decimal_point) + { + output_pointer[i] = '.'; + continue; + } + + output_pointer[i] = number_buffer[i]; + } + output_pointer[i] = '\0'; + + output_buffer->offset += (size_t)length; + + return true; } -static int update(printbuffer *p) +/* parse 4 digit hexadecimal number */ +static unsigned parse_hex4(const unsigned char * const input) { - char *str; - if (!p || !p->buffer) return 0; - str=p->buffer+p->offset; - return p->offset+strlen(str); + unsigned int h = 0; + size_t i = 0; + + for (i = 0; i < 4; i++) + { + /* parse digit */ + if ((input[i] >= '0') && (input[i] <= '9')) + { + h += (unsigned int)input[i] - '0'; + } + else if ((input[i] >= 'A') && (input[i] <= 'F')) + { + h += (unsigned int)10 + input[i] - 'A'; + } + else if ((input[i] >= 'a') && (input[i] <= 'f')) + { + h += (unsigned int)10 + input[i] - 'a'; + } + else /* invalid */ + { + return 0; + } + + if (i < 3) + { + /* shift left to make place for the next nibble */ + h = h << 4; + } + } + + return h; } -/* Render the number nicely from the given item into a string. */ -static char *print_number(cJSON *item,printbuffer *p) -{ - char *str=0; - double d=item->valuedouble; - if (d==0) - { - if (p) str=ensure(p,2); - else str=(char*)cJSON_malloc(2); /* special case for 0. */ - if (str) strcpy(str,"0"); - } - else if (fabs(((double)item->valueint)-d)<=DBL_EPSILON && d<=INT_MAX && d>=INT_MIN) - { - if (p) str=ensure(p,21); - else str=(char*)cJSON_malloc(21); /* 2^64+1 can be represented in 21 chars. */ - if (str) sprintf(str,"%d",item->valueint); - } - else - { - if (p) str=ensure(p,64); - else str=(char*)cJSON_malloc(64); /* This is a nice tradeoff. 
*/ - if (str) - { - if (fabs(floor(d)-d)<=DBL_EPSILON && fabs(d)<1.0e60)sprintf(str,"%.0f",d); - else if (fabs(d)<1.0e-6 || fabs(d)>1.0e9) sprintf(str,"%e",d); - else sprintf(str,"%f",d); - } - } - return str; -} - -static unsigned parse_hex4(const char *str) -{ - unsigned h=0; - if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; - h=h<<4;str++; - if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; - h=h<<4;str++; - if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; - h=h<<4;str++; - if (*str>='0' && *str<='9') h+=(*str)-'0'; else if (*str>='A' && *str<='F') h+=10+(*str)-'A'; else if (*str>='a' && *str<='f') h+=10+(*str)-'a'; else return 0; - return h; -} - -/* Parse the input text into an unescaped cstring, and populate item. */ -static const unsigned char firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC }; -static const char *parse_string(cJSON *item,const char *str) -{ - const char *ptr=str+1;char *ptr2;char *out;int len=0;unsigned uc,uc2; - if (*str!='\"') {ep=str;return 0;} /* not a string! */ - - while (*ptr!='\"' && *ptr && ++len) if (*ptr++ == '\\') ptr++; /* Skip escaped quotes. */ - - out=(char*)cJSON_malloc(len+1); /* This is how long we need for the string, roughly. */ - if (!out) return 0; - - ptr=str+1;ptr2=out; - while (*ptr!='\"' && *ptr) - { - if (*ptr!='\\') *ptr2++=*ptr++; - else - { - ptr++; - switch (*ptr) - { - case 'b': *ptr2++='\b'; break; - case 'f': *ptr2++='\f'; break; - case 'n': *ptr2++='\n'; break; - case 'r': *ptr2++='\r'; break; - case 't': *ptr2++='\t'; break; - case 'u': /* transcode utf16 to utf8. */ - uc=parse_hex4(ptr+1);ptr+=4; /* get the unicode char. 
*/ - - if ((uc>=0xDC00 && uc<=0xDFFF) || uc==0) break; /* check for invalid. */ - - if (uc>=0xD800 && uc<=0xDBFF) /* UTF16 surrogate pairs. */ - { - if (ptr[1]!='\\' || ptr[2]!='u') break; /* missing second-half of surrogate. */ - uc2=parse_hex4(ptr+3);ptr+=6; - if (uc2<0xDC00 || uc2>0xDFFF) break; /* invalid second-half of surrogate. */ - uc=0x10000 + (((uc&0x3FF)<<10) | (uc2&0x3FF)); - } - - len=4;if (uc<0x80) len=1;else if (uc<0x800) len=2;else if (uc<0x10000) len=3; ptr2+=len; - - switch (len) { - case 4: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6; - case 3: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6; - case 2: *--ptr2 =((uc | 0x80) & 0xBF); uc >>= 6; - case 1: *--ptr2 =(uc | firstByteMark[len]); - } - ptr2+=len; - break; - default: *ptr2++=*ptr; break; - } - ptr++; - } - } - *ptr2=0; - if (*ptr=='\"') ptr++; - item->valuestring=out; - item->type=cJSON_String; - return ptr; +/* converts a UTF-16 literal to UTF-8 +* A literal can be one or two sequences of the form \uXXXX */ +static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer) +{ + long unsigned int codepoint = 0; + unsigned int first_code = 0; + const unsigned char *first_sequence = input_pointer; + unsigned char utf8_length = 0; + unsigned char utf8_position = 0; + unsigned char sequence_length = 0; + unsigned char first_byte_mark = 0; + + if ((input_end - first_sequence) < 6) + { + /* input ends unexpectedly */ + goto fail; + } + + /* get the first utf16 sequence */ + first_code = parse_hex4(first_sequence + 2); + + /* check that the code is valid */ + if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) + { + goto fail; + } + + /* UTF16 surrogate pair */ + if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) + { + const unsigned char *second_sequence = first_sequence + 6; + unsigned int second_code = 0; + sequence_length = 12; /* \uXXXX\uXXXX */ + + if ((input_end - second_sequence) < 6) + { + /* input ends 
unexpectedly */ + goto fail; + } + + if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u')) + { + /* missing second half of the surrogate pair */ + goto fail; + } + + /* get the second utf16 sequence */ + second_code = parse_hex4(second_sequence + 2); + /* check that the code is valid */ + if ((second_code < 0xDC00) || (second_code > 0xDFFF)) + { + /* invalid second half of the surrogate pair */ + goto fail; + } + + + /* calculate the unicode codepoint from the surrogate pair */ + codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF)); + } + else + { + sequence_length = 6; /* \uXXXX */ + codepoint = first_code; + } + + /* encode as UTF-8 + * takes at maximum 4 bytes to encode: + * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ + if (codepoint < 0x80) + { + /* normal ascii, encoding 0xxxxxxx */ + utf8_length = 1; + } + else if (codepoint < 0x800) + { + /* two bytes, encoding 110xxxxx 10xxxxxx */ + utf8_length = 2; + first_byte_mark = 0xC0; /* 11000000 */ + } + else if (codepoint < 0x10000) + { + /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */ + utf8_length = 3; + first_byte_mark = 0xE0; /* 11100000 */ + } + else if (codepoint <= 0x10FFFF) + { + /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */ + utf8_length = 4; + first_byte_mark = 0xF0; /* 11110000 */ + } + else + { + /* invalid unicode codepoint */ + goto fail; + } + + /* encode as utf8 */ + for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--) + { + /* 10xxxxxx */ + (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF); + codepoint >>= 6; + } + /* encode first byte */ + if (utf8_length > 1) + { + (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF); + } + else + { + (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F); + } + + *output_pointer += utf8_length; + + return sequence_length; + +fail: + return 0; +} + +/* Parse the input text into an unescaped cinput, and populate item. 
*/ +static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer) +{ + const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1; + const unsigned char *input_end = buffer_at_offset(input_buffer) + 1; + unsigned char *output_pointer = NULL; + unsigned char *output = NULL; + + /* not a string */ + if (buffer_at_offset(input_buffer)[0] != '\"') + { + goto fail; + } + + { + /* calculate approximate size of the output (overestimate) */ + size_t allocation_length = 0; + size_t skipped_bytes = 0; + while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"')) + { + /* is escape sequence */ + if (input_end[0] == '\\') + { + if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length) + { + /* prevent buffer overflow when last input character is a backslash */ + goto fail; + } + skipped_bytes++; + input_end++; + } + input_end++; + } + if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"')) + { + goto fail; /* string ended unexpectedly */ + } + + /* This is at most how much we need for the output */ + allocation_length = (size_t)(input_end - buffer_at_offset(input_buffer)) - skipped_bytes; + output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof("")); + if (output == NULL) + { + goto fail; /* allocation failure */ + } + } + + output_pointer = output; + /* loop through the string literal */ + while (input_pointer < input_end) + { + if (*input_pointer != '\\') + { + *output_pointer++ = *input_pointer++; + } + /* escape sequence */ + else + { + unsigned char sequence_length = 2; + if ((input_end - input_pointer) < 1) + { + goto fail; + } + + switch (input_pointer[1]) + { + case 'b': + *output_pointer++ = '\b'; + break; + case 'f': + *output_pointer++ = '\f'; + break; + case 'n': + *output_pointer++ = '\n'; + break; + case 'r': + *output_pointer++ = '\r'; + break; + case 't': + *output_pointer++ = '\t'; + break; + case 
'\"': + case '\\': + case '/': + *output_pointer++ = input_pointer[1]; + break; + + /* UTF-16 literal */ + case 'u': + sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer); + if (sequence_length == 0) + { + /* failed to convert UTF16-literal to UTF-8 */ + goto fail; + } + break; + + default: + goto fail; + } + input_pointer += sequence_length; + } + } + + /* zero terminate the output */ + *output_pointer = '\0'; + + item->type = cJSON_String; + item->valuestring = (char*)output; + + input_buffer->offset = (size_t)(input_end - input_buffer->content); + input_buffer->offset++; + + return true; + +fail: + if (output != NULL) + { + input_buffer->hooks.deallocate(output); + } + + if (input_pointer != NULL) + { + input_buffer->offset = (size_t)(input_pointer - input_buffer->content); + } + + return false; } /* Render the cstring provided to an escaped version that can be printed. */ -static char *print_string_ptr(const char *str,printbuffer *p) -{ - const char *ptr;char *ptr2,*out;int len=0,flag=0;unsigned char token; - - for (ptr=str;*ptr;ptr++) flag|=((*ptr>0 && *ptr<32)||(*ptr=='\"')||(*ptr=='\\'))?1:0; - if (!flag) - { - len=ptr-str; - if (p) out=ensure(p,len+3); - else out=(char*)cJSON_malloc(len+3); - if (!out) return 0; - ptr2=out;*ptr2++='\"'; - strcpy(ptr2,str); - ptr2[len]='\"'; - ptr2[len+1]=0; - return out; - } - - if (!str) - { - if (p) out=ensure(p,3); - else out=(char*)cJSON_malloc(3); - if (!out) return 0; - strcpy(out,"\"\""); - return out; - } - ptr=str;while ((token=*ptr) && ++len) {if (strchr("\"\\\b\f\n\r\t",token)) len++; else if (token<32) len+=5;ptr++;} - - if (p) out=ensure(p,len+3); - else out=(char*)cJSON_malloc(len+3); - if (!out) return 0; - - ptr2=out;ptr=str; - *ptr2++='\"'; - while (*ptr) - { - if ((unsigned char)*ptr>31 && *ptr!='\"' && *ptr!='\\') *ptr2++=*ptr++; - else - { - *ptr2++='\\'; - switch (token=*ptr++) - { - case '\\': *ptr2++='\\'; break; - case '\"': *ptr2++='\"'; break; - case '\b': 
*ptr2++='b'; break; - case '\f': *ptr2++='f'; break; - case '\n': *ptr2++='n'; break; - case '\r': *ptr2++='r'; break; - case '\t': *ptr2++='t'; break; - default: sprintf(ptr2,"u%04x",token);ptr2+=5; break; /* escape and print */ - } - } - } - *ptr2++='\"';*ptr2++=0; - return out; -} -/* Invote print_string_ptr (which is useful) on an item. */ -static char *print_string(cJSON *item,printbuffer *p) {return print_string_ptr(item->valuestring,p);} +static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer) +{ + const unsigned char *input_pointer = NULL; + unsigned char *output = NULL; + unsigned char *output_pointer = NULL; + size_t output_length = 0; + /* numbers of additional characters needed for escaping */ + size_t escape_characters = 0; + + if (output_buffer == NULL) + { + return false; + } + + /* empty string */ + if (input == NULL) + { + output = ensure(output_buffer, sizeof("\"\"")); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "\"\""); + + return true; + } + + /* set "flag" to 1 if something needs to be escaped */ + for (input_pointer = input; *input_pointer; input_pointer++) + { + switch (*input_pointer) + { + case '\"': + case '\\': + case '\b': + case '\f': + case '\n': + case '\r': + case '\t': + /* one character escape sequence */ + escape_characters++; + break; + default: + if (*input_pointer < 32) + { + /* UTF-16 escape sequence uXXXX */ + escape_characters += 5; + } + break; + } + } + output_length = (size_t)(input_pointer - input) + escape_characters; + + output = ensure(output_buffer, output_length + sizeof("\"\"")); + if (output == NULL) + { + return false; + } + + /* no characters have to be escaped */ + if (escape_characters == 0) + { + output[0] = '\"'; + memcpy(output + 1, input, output_length); + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; + } + + output[0] = '\"'; + output_pointer = output + 1; + /* copy the string */ + for 
(input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++) + { + if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\')) + { + /* normal character, copy */ + *output_pointer = *input_pointer; + } + else + { + /* character needs to be escaped */ + *output_pointer++ = '\\'; + switch (*input_pointer) + { + case '\\': + *output_pointer = '\\'; + break; + case '\"': + *output_pointer = '\"'; + break; + case '\b': + *output_pointer = 'b'; + break; + case '\f': + *output_pointer = 'f'; + break; + case '\n': + *output_pointer = 'n'; + break; + case '\r': + *output_pointer = 'r'; + break; + case '\t': + *output_pointer = 't'; + break; + default: + /* escape and print as unicode codepoint */ + sprintf((char*)output_pointer, "u%04x", *input_pointer); + output_pointer += 4; + break; + } + } + } + output[output_length + 1] = '\"'; + output[output_length + 2] = '\0'; + + return true; +} + +/* Invoke print_string_ptr (which is useful) on an item. */ +static cJSON_bool print_string(const cJSON * const item, printbuffer * const p) +{ + return print_string_ptr((unsigned char*)item->valuestring, p); +} /* Predeclare these prototypes. 
*/ -static const char *parse_value(cJSON *item,const char *value); -static char *print_value(cJSON *item,int depth,int fmt,printbuffer *p); -static const char *parse_array(cJSON *item,const char *value); -static char *print_array(cJSON *item,int depth,int fmt,printbuffer *p); -static const char *parse_object(cJSON *item,const char *value); -static char *print_object(cJSON *item,int depth,int fmt,printbuffer *p); +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer); +static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer); +static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer); /* Utility to jump whitespace and cr/lf */ -static const char *skip(const char *in) {while (in && *in && (unsigned char)*in<=32) in++; return in;} +static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer) +{ + if ((buffer == NULL) || (buffer->content == NULL)) + { + return NULL; + } + + while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32)) + { + buffer->offset++; + } + + if (buffer->offset == buffer->length) + { + buffer->offset--; + } + + return buffer; +} -/* Parse an object - create a new root, and populate. 
*/ -cJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated) +/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */ +static parse_buffer *skip_utf8_bom(parse_buffer * const buffer) { - const char *end=0; - cJSON *c=cJSON_New_Item(); - ep=0; - if (!c) return 0; /* memory fail */ + if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0)) + { + return NULL; + } - end=parse_value(c,skip(value)); - if (!end) {cJSON_Delete(c);return 0;} /* parse failure. ep is set. */ + if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0)) + { + buffer->offset += 3; + } - /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ - if (require_null_terminated) {end=skip(end);if (*end) {cJSON_Delete(c);ep=end;return 0;}} - if (return_parse_end) *return_parse_end=end; - return c; + return buffer; } -/* Default options for cJSON_Parse */ -cJSON *cJSON_Parse(const char *value) {return cJSON_ParseWithOpts(value,0,0);} -/* Render a cJSON item/entity/structure to text. */ -char *cJSON_Print(cJSON *item) {return print_value(item,0,1,0);} -char *cJSON_PrintUnformatted(cJSON *item) {return print_value(item,0,0,0);} +/* Parse an object - create a new root, and populate. 
*/ +CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated) +{ + parse_buffer buffer = { 0, 0, 0, 0,{ 0, 0, 0 } }; + cJSON *item = NULL; + + /* reset error position */ + global_error.json = NULL; + global_error.position = 0; + + if (value == NULL) + { + goto fail; + } + + buffer.content = (const unsigned char*)value; + buffer.length = strlen((const char*)value) + sizeof(""); + buffer.offset = 0; + buffer.hooks = global_hooks; + + item = cJSON_New_Item(&global_hooks); + if (item == NULL) /* memory fail */ + { + goto fail; + } + + if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer)))) + { + /* parse failure. ep is set. */ + goto fail; + } + + /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */ + if (require_null_terminated) + { + buffer_skip_whitespace(&buffer); + if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0') + { + goto fail; + } + } + if (return_parse_end) + { + *return_parse_end = (const char*)buffer_at_offset(&buffer); + } + + return item; + +fail: + if (item != NULL) + { + cJSON_Delete(item); + } + + if (value != NULL) + { + error local_error; + local_error.json = (const unsigned char*)value; + local_error.position = 0; + + if (buffer.offset < buffer.length) + { + local_error.position = buffer.offset; + } + else if (buffer.length > 0) + { + local_error.position = buffer.length - 1; + } + + if (return_parse_end != NULL) + { + *return_parse_end = (const char*)local_error.json + local_error.position; + } + + global_error = local_error; + } + + return NULL; +} -char *cJSON_PrintBuffered(cJSON *item,int prebuffer,int fmt) +/* Default options for cJSON_Parse */ +CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) { - printbuffer p; - p.buffer=(char*)cJSON_malloc(prebuffer); - p.length=prebuffer; - p.offset=0; - return print_value(item,0,fmt,&p); - return p.buffer; + return 
cJSON_ParseWithOpts(value, 0, 0); } +#define cjson_min(a, b) ((a < b) ? a : b) -/* Parser core - when encountering text, process appropriately. */ -static const char *parse_value(cJSON *item,const char *value) +static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks) { - if (!value) return 0; /* Fail on null. */ - if (!strncmp(value,"null",4)) { item->type=cJSON_NULL; return value+4; } - if (!strncmp(value,"false",5)) { item->type=cJSON_False; return value+5; } - if (!strncmp(value,"true",4)) { item->type=cJSON_True; item->valueint=1; return value+4; } - if (*value=='\"') { return parse_string(item,value); } - if (*value=='-' || (*value>='0' && *value<='9')) { return parse_number(item,value); } - if (*value=='[') { return parse_array(item,value); } - if (*value=='{') { return parse_object(item,value); } + static const size_t default_buffer_size = 256; + printbuffer buffer[1]; + unsigned char *printed = NULL; + + memset(buffer, 0, sizeof(buffer)); + + /* create buffer */ + buffer->buffer = (unsigned char*)hooks->allocate(default_buffer_size); + buffer->length = default_buffer_size; + buffer->format = format; + buffer->hooks = *hooks; + if (buffer->buffer == NULL) + { + goto fail; + } + + /* print the value */ + if (!print_value(item, buffer)) + { + goto fail; + } + update_offset(buffer); + + /* check if reallocate is available */ + if (hooks->reallocate != NULL) + { + printed = (unsigned char*)hooks->reallocate(buffer->buffer, buffer->offset + 1); + if (printed == NULL) { + goto fail; + } + buffer->buffer = NULL; + } + else /* otherwise copy the JSON over to a new buffer */ + { + printed = (unsigned char*)hooks->allocate(buffer->offset + 1); + if (printed == NULL) + { + goto fail; + } + memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1)); + printed[buffer->offset] = '\0'; /* just to be sure */ + + /* free the buffer */ + hooks->deallocate(buffer->buffer); + } + + return printed; + +fail: 
+ if (buffer->buffer != NULL) + { + hooks->deallocate(buffer->buffer); + } + + if (printed != NULL) + { + hooks->deallocate(printed); + } + + return NULL; +} - ep=value;return 0; /* failure. */ +/* Render a cJSON item/entity/structure to text. */ +CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) +{ + return (char*)print(item, true, &global_hooks); } -/* Render a value to text. */ -static char *print_value(cJSON *item,int depth,int fmt,printbuffer *p) -{ - char *out=0; - if (!item) return 0; - if (p) - { - switch ((item->type)&255) - { - case cJSON_NULL: {out=ensure(p,5); if (out) strcpy(out,"null"); break;} - case cJSON_False: {out=ensure(p,6); if (out) strcpy(out,"false"); break;} - case cJSON_True: {out=ensure(p,5); if (out) strcpy(out,"true"); break;} - case cJSON_Number: out=print_number(item,p);break; - case cJSON_String: out=print_string(item,p);break; - case cJSON_Array: out=print_array(item,depth,fmt,p);break; - case cJSON_Object: out=print_object(item,depth,fmt,p);break; - } - } - else - { - switch ((item->type)&255) - { - case cJSON_NULL: out=cJSON_strdup("null"); break; - case cJSON_False: out=cJSON_strdup("false");break; - case cJSON_True: out=cJSON_strdup("true"); break; - case cJSON_Number: out=print_number(item,0);break; - case cJSON_String: out=print_string(item,0);break; - case cJSON_Array: out=print_array(item,depth,fmt,0);break; - case cJSON_Object: out=print_object(item,depth,fmt,0);break; - } - } - return out; +CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) +{ + return (char*)print(item, false, &global_hooks); } -/* Build an array from input text. */ -static const char *parse_array(cJSON *item,const char *value) +CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) { - cJSON *child; - if (*value!='[') {ep=value;return 0;} /* not an array! 
*/ + printbuffer p = { 0, 0, 0, 0, 0, 0,{ 0, 0, 0 } }; + + if (prebuffer < 0) + { + return NULL; + } + + p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer); + if (!p.buffer) + { + return NULL; + } + + p.length = (size_t)prebuffer; + p.offset = 0; + p.noalloc = false; + p.format = fmt; + p.hooks = global_hooks; + + if (!print_value(item, &p)) + { + global_hooks.deallocate(p.buffer); + return NULL; + } + + return (char*)p.buffer; +} - item->type=cJSON_Array; - value=skip(value+1); - if (*value==']') return value+1; /* empty array. */ +CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buf, const int len, const cJSON_bool fmt) +{ + printbuffer p = { 0, 0, 0, 0, 0, 0,{ 0, 0, 0 } }; - item->child=child=cJSON_New_Item(); - if (!item->child) return 0; /* memory fail */ - value=skip(parse_value(child,skip(value))); /* skip any spacing, get the value. */ - if (!value) return 0; + if ((len < 0) || (buf == NULL)) + { + return false; + } - while (*value==',') - { - cJSON *new_item; - if (!(new_item=cJSON_New_Item())) return 0; /* memory fail */ - child->next=new_item;new_item->prev=child;child=new_item; - value=skip(parse_value(child,skip(value+1))); - if (!value) return 0; /* memory fail */ - } + p.buffer = (unsigned char*)buf; + p.length = (size_t)len; + p.offset = 0; + p.noalloc = true; + p.format = fmt; + p.hooks = global_hooks; + + return print_value(item, &p); +} - if (*value==']') return value+1; /* end of array */ - ep=value;return 0; /* malformed. */ +/* Parser core - when encountering text, process appropriately. 
*/ +static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer) +{ + if ((input_buffer == NULL) || (input_buffer->content == NULL)) + { + return false; /* no input */ + } + + /* parse the different types of values */ + /* null */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0)) + { + item->type = cJSON_NULL; + input_buffer->offset += 4; + return true; + } + /* false */ + if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0)) + { + item->type = cJSON_False; + input_buffer->offset += 5; + return true; + } + /* true */ + if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0)) + { + item->type = cJSON_True; + item->valueint = 1; + input_buffer->offset += 4; + return true; + } + /* string */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"')) + { + return parse_string(item, input_buffer); + } + /* number */ + if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9')))) + { + return parse_number(item, input_buffer); + } + /* array */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '[')) + { + return parse_array(item, input_buffer); + } + /* object */ + if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{')) + { + return parse_object(item, input_buffer); + } + + return false; +} + +/* Render a value to text. 
*/ +static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer) +{ + unsigned char *output = NULL; + + if ((item == NULL) || (output_buffer == NULL)) + { + return false; + } + + switch ((item->type) & 0xFF) + { + case cJSON_NULL: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "null"); + return true; + + case cJSON_False: + output = ensure(output_buffer, 6); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "false"); + return true; + + case cJSON_True: + output = ensure(output_buffer, 5); + if (output == NULL) + { + return false; + } + strcpy((char*)output, "true"); + return true; + + case cJSON_Number: + return print_number(item, output_buffer); + + case cJSON_Raw: + { + size_t raw_length = 0; + if (item->valuestring == NULL) + { + return false; + } + + raw_length = strlen(item->valuestring) + sizeof(""); + output = ensure(output_buffer, raw_length); + if (output == NULL) + { + return false; + } + memcpy(output, item->valuestring, raw_length); + return true; + } + + case cJSON_String: + return print_string(item, output_buffer); + + case cJSON_Array: + return print_array(item, output_buffer); + + case cJSON_Object: + return print_object(item, output_buffer); + + default: + return false; + } +} + +/* Build an array from input text. 
*/
+static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer)
+{
+    cJSON *head = NULL; /* head of the linked list */
+    cJSON *current_item = NULL;
+
+    if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+    {
+        return false; /* too deeply nested */
+    }
+    input_buffer->depth++;
+
+    if (buffer_at_offset(input_buffer)[0] != '[')
+    {
+        /* not an array */
+        goto fail;
+    }
+
+    input_buffer->offset++;
+    buffer_skip_whitespace(input_buffer);
+    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']'))
+    {
+        /* empty array */
+        goto success;
+    }
+
+    /* check if we skipped to the end of the buffer */
+    if (cannot_access_at_index(input_buffer, 0))
+    {
+        input_buffer->offset--;
+        goto fail;
+    }
+
+    /* step back to character in front of the first element */
+    input_buffer->offset--;
+    /* loop through the comma separated array elements */
+    do
+    {
+        /* allocate next item */
+        cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+        if (new_item == NULL)
+        {
+            goto fail; /* allocation failure */
+        }
+
+        /* attach next item to list */
+        if (head == NULL)
+        {
+            /* start the linked list */
+            current_item = head = new_item;
+        }
+        else
+        {
+            /* add to the end and advance */
+            current_item->next = new_item;
+            new_item->prev = current_item;
+            current_item = new_item;
+        }
+
+        /* parse next value */
+        input_buffer->offset++;
+        buffer_skip_whitespace(input_buffer);
+        if (!parse_value(current_item, input_buffer))
+        {
+            goto fail; /* failed to parse value */
+        }
+        buffer_skip_whitespace(input_buffer);
+    } while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));
+
+    if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']')
+    {
+        goto fail; /* expected end of array */
+    }
+
+success:
+    input_buffer->depth--;
+
+    item->type = cJSON_Array;
+    item->child = head;
+
+    input_buffer->offset++;
+
+    return true;
+
+fail:
+    if (head != NULL)
+    {
+        cJSON_Delete(head);
+    }
+
+    return 
false; } /* Render an array to text */ -static char *print_array(cJSON *item,int depth,int fmt,printbuffer *p) -{ - char **entries; - char *out=0,*ptr,*ret;int len=5; - cJSON *child=item->child; - int numentries=0,i=0,fail=0; - size_t tmplen=0; - - /* How many entries in the array? */ - while (child) numentries++,child=child->next; - /* Explicitly handle numentries==0 */ - if (!numentries) - { - if (p) out=ensure(p,3); - else out=(char*)cJSON_malloc(3); - if (out) strcpy(out,"[]"); - return out; - } - - if (p) - { - /* Compose the output array. */ - i=p->offset; - ptr=ensure(p,1);if (!ptr) return 0; *ptr='['; p->offset++; - child=item->child; - while (child && !fail) - { - print_value(child,depth+1,fmt,p); - p->offset=update(p); - if (child->next) {len=fmt?2:1;ptr=ensure(p,len+1);if (!ptr) return 0;*ptr++=',';if(fmt)*ptr++=' ';*ptr=0;p->offset+=len;} - child=child->next; - } - ptr=ensure(p,2);if (!ptr) return 0; *ptr++=']';*ptr=0; - out=(p->buffer)+i; - } - else - { - /* Allocate an array to hold the values for each */ - entries=(char**)cJSON_malloc(numentries*sizeof(char*)); - if (!entries) return 0; - memset(entries,0,numentries*sizeof(char*)); - /* Retrieve all the results: */ - child=item->child; - while (child && !fail) - { - ret=print_value(child,depth+1,fmt,0); - entries[i++]=ret; - if (ret) len+=strlen(ret)+2+(fmt?1:0); else fail=1; - child=child->next; - } - - /* If we didn't fail, try to malloc the output string */ - if (!fail) out=(char*)cJSON_malloc(len); - /* If that fails, we fail. */ - if (!out) fail=1; - - /* Handle failure. */ - if (fail) - { - for (i=0;ichild; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output array. 
*/ + /* opening square bracket */ + output_pointer = ensure(output_buffer, 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer = '['; + output_buffer->offset++; + output_buffer->depth++; + + while (current_element != NULL) + { + if (!print_value(current_element, output_buffer)) + { + return false; + } + update_offset(output_buffer); + if (current_element->next) + { + length = (size_t)(output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ','; + if (output_buffer->format) + { + *output_pointer++ = ' '; + } + *output_pointer = '\0'; + output_buffer->offset += length; + } + current_element = current_element->next; + } + + output_pointer = ensure(output_buffer, 2); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ']'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; } /* Build an object from the text. */ -static const char *parse_object(cJSON *item,const char *value) -{ - cJSON *child; - if (*value!='{') {ep=value;return 0;} /* not an object! */ - - item->type=cJSON_Object; - value=skip(value+1); - if (*value=='}') return value+1; /* empty array. */ - - item->child=child=cJSON_New_Item(); - if (!item->child) return 0; - value=skip(parse_string(child,skip(value))); - if (!value) return 0; - child->string=child->valuestring;child->valuestring=0; - if (*value!=':') {ep=value;return 0;} /* fail! */ - value=skip(parse_value(child,skip(value+1))); /* skip any spacing, get the value. */ - if (!value) return 0; - - while (*value==',') - { - cJSON *new_item; - if (!(new_item=cJSON_New_Item())) return 0; /* memory fail */ - child->next=new_item;new_item->prev=child;child=new_item; - value=skip(parse_string(child,skip(value+1))); - if (!value) return 0; - child->string=child->valuestring;child->valuestring=0; - if (*value!=':') {ep=value;return 0;} /* fail! 
*/
-	value=skip(parse_value(child,skip(value+1)));	/* skip any spacing, get the value. */
-	if (!value) return 0;
-	}
-	
-	if (*value=='}') return value+1;	/* end of array */
-	ep=value;return 0;	/* malformed. */
+static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer)
+{
+    cJSON *head = NULL; /* linked list head */
+    cJSON *current_item = NULL;
+
+    if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+    {
+        return false; /* too deeply nested */
+    }
+    input_buffer->depth++;
+
+    if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{'))
+    {
+        goto fail; /* not an object */
+    }
+
+    input_buffer->offset++;
+    buffer_skip_whitespace(input_buffer);
+    if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}'))
+    {
+        goto success; /* empty object */
+    }
+
+    /* check if we skipped to the end of the buffer */
+    if (cannot_access_at_index(input_buffer, 0))
+    {
+        input_buffer->offset--;
+        goto fail;
+    }
+
+    /* step back to character in front of the first element */
+    input_buffer->offset--;
+    /* loop through the comma separated object elements */
+    do
+    {
+        /* allocate next item */
+        cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+        if (new_item == NULL)
+        {
+            goto fail; /* allocation failure */
+        }
+
+        /* attach next item to list */
+        if (head == NULL)
+        {
+            /* start the linked list */
+            current_item = head = new_item;
+        }
+        else
+        {
+            /* add to the end and advance */
+            current_item->next = new_item;
+            new_item->prev = current_item;
+            current_item = new_item;
+        }
+
+        /* parse the name of the child */
+        input_buffer->offset++;
+        buffer_skip_whitespace(input_buffer);
+        if (!parse_string(current_item, input_buffer))
+        {
+            goto fail; /* failed to parse name */
+        }
+        buffer_skip_whitespace(input_buffer);
+
+        /* swap valuestring and string, because we parsed the name */
+        current_item->string = current_item->valuestring;
+        current_item->valuestring = NULL;
+
+        if 
(cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':')) + { + goto fail; /* invalid object */ + } + + /* parse the value */ + input_buffer->offset++; + buffer_skip_whitespace(input_buffer); + if (!parse_value(current_item, input_buffer)) + { + goto fail; /* failed to parse value */ + } + buffer_skip_whitespace(input_buffer); + } while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ',')); + + if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}')) + { + goto fail; /* expected end of object */ + } + +success: + input_buffer->depth--; + + item->type = cJSON_Object; + item->child = head; + + input_buffer->offset++; + return true; + +fail: + if (head != NULL) + { + cJSON_Delete(head); + } + + return false; } /* Render an object to text. */ -static char *print_object(cJSON *item,int depth,int fmt,printbuffer *p) -{ - char **entries=0,**names=0; - char *out=0,*ptr,*ret,*str;int len=7,i=0,j; - cJSON *child=item->child; - int numentries=0,fail=0; - size_t tmplen=0; - /* Count the number of entries. 
*/ - while (child) numentries++,child=child->next; - /* Explicitly handle empty object case */ - if (!numentries) - { - if (p) out=ensure(p,fmt?depth+4:3); - else out=(char*)cJSON_malloc(fmt?depth+4:3); - if (!out) return 0; - ptr=out;*ptr++='{'; - if (fmt) {*ptr++='\n';for (i=0;ioffset; - len=fmt?2:1; ptr=ensure(p,len+1); if (!ptr) return 0; - *ptr++='{'; if (fmt) *ptr++='\n'; *ptr=0; p->offset+=len; - child=item->child;depth++; - while (child) - { - if (fmt) - { - ptr=ensure(p,depth); if (!ptr) return 0; - for (j=0;joffset+=depth; - } - print_string_ptr(child->string,p); - p->offset=update(p); - - len=fmt?2:1; - ptr=ensure(p,len); if (!ptr) return 0; - *ptr++=':';if (fmt) *ptr++='\t'; - p->offset+=len; - - print_value(child,depth,fmt,p); - p->offset=update(p); - - len=(fmt?1:0)+(child->next?1:0); - ptr=ensure(p,len+1); if (!ptr) return 0; - if (child->next) *ptr++=','; - if (fmt) *ptr++='\n';*ptr=0; - p->offset+=len; - child=child->next; - } - ptr=ensure(p,fmt?(depth+1):2); if (!ptr) return 0; - if (fmt) for (i=0;ibuffer)+i; - } - else - { - /* Allocate space for the names and the objects */ - entries=(char**)cJSON_malloc(numentries*sizeof(char*)); - if (!entries) return 0; - names=(char**)cJSON_malloc(numentries*sizeof(char*)); - if (!names) {cJSON_free(entries);return 0;} - memset(entries,0,sizeof(char*)*numentries); - memset(names,0,sizeof(char*)*numentries); - - /* Collect all the results into our arrays: */ - child=item->child;depth++;if (fmt) len+=depth; - while (child) - { - names[i]=str=print_string_ptr(child->string,0); - entries[i++]=ret=print_value(child,depth,fmt,0); - if (str && ret) len+=strlen(ret)+strlen(str)+2+(fmt?2+depth:0); else fail=1; - child=child->next; - } - - /* Try to allocate the output string */ - if (!fail) out=(char*)cJSON_malloc(len); - if (!out) fail=1; - - /* Handle failure */ - if (fail) - { - for (i=0;ichild; + + if (output_buffer == NULL) + { + return false; + } + + /* Compose the output: */ + length = 
(size_t)(output_buffer->format ? 2 : 1); /* fmt: {\n */ + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + + *output_pointer++ = '{'; + output_buffer->depth++; + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + output_buffer->offset += length; + + while (current_item) + { + if (output_buffer->format) + { + size_t i; + output_pointer = ensure(output_buffer, output_buffer->depth); + if (output_pointer == NULL) + { + return false; + } + for (i = 0; i < output_buffer->depth; i++) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += output_buffer->depth; + } + + /* print key */ + if (!print_string_ptr((unsigned char*)current_item->string, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + length = (size_t)(output_buffer->format ? 2 : 1); + output_pointer = ensure(output_buffer, length); + if (output_pointer == NULL) + { + return false; + } + *output_pointer++ = ':'; + if (output_buffer->format) + { + *output_pointer++ = '\t'; + } + output_buffer->offset += length; + + /* print value */ + if (!print_value(current_item, output_buffer)) + { + return false; + } + update_offset(output_buffer); + + /* print comma if not last */ + length = (size_t)((output_buffer->format ? 1 : 0) + (current_item->next ? 1 : 0)); + output_pointer = ensure(output_buffer, length + 1); + if (output_pointer == NULL) + { + return false; + } + if (current_item->next) + { + *output_pointer++ = ','; + } + + if (output_buffer->format) + { + *output_pointer++ = '\n'; + } + *output_pointer = '\0'; + output_buffer->offset += length; + + current_item = current_item->next; + } + + output_pointer = ensure(output_buffer, output_buffer->format ? 
(output_buffer->depth + 1) : 2); + if (output_pointer == NULL) + { + return false; + } + if (output_buffer->format) + { + size_t i; + for (i = 0; i < (output_buffer->depth - 1); i++) + { + *output_pointer++ = '\t'; + } + } + *output_pointer++ = '}'; + *output_pointer = '\0'; + output_buffer->depth--; + + return true; } /* Get Array size/item / object item. */ -int cJSON_GetArraySize(cJSON *array) {cJSON *c=array->child;int i=0;while(c)i++,c=c->next;return i;} -cJSON *cJSON_GetArrayItem(cJSON *array,int item) +CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) +{ + cJSON *child = NULL; + size_t size = 0; + + if (array == NULL) + { + return 0; + } + + child = array->child; + + while (child != NULL) + { + size++; + child = child->next; + } + + /* FIXME: Can overflow here. Cannot be fixed without breaking the API */ + + return (int)size; +} + +static cJSON* get_array_item(const cJSON *array, size_t index) +{ + cJSON *current_child = NULL; + + if (array == NULL) + { + return NULL; + } + + current_child = array->child; + while ((current_child != NULL) && (index > 0)) + { + index--; + current_child = current_child->next; + } + + return current_child; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) { - cJSON *c = (array != NULL) ? array->child : NULL; - while ((c != NULL) && (item > 0)) - { - item--; - c = c->next; - } + if (index < 0) + { + return NULL; + } - return c; + return get_array_item(array, (size_t)index); } -cJSON *cJSON_GetObjectItem(cJSON *object, const char *string) +static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive) { - cJSON *c = (object != NULL) ? 
object->child : NULL; - while ((c != NULL) && (cJSON_strcasecmp(c->string, string))) - { - c = c->next; - } - return c; + cJSON *current_element = NULL; + + if ((object == NULL) || (name == NULL)) + { + return NULL; + } + + current_element = object->child; + if (case_sensitive) + { + while ((current_element != NULL) && (strcmp(name, current_element->string) != 0)) + { + current_element = current_element->next; + } + } + else + { + while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0)) + { + current_element = current_element->next; + } + } + + return current_element; +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, false); +} + +CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string) +{ + return get_object_item(object, string, true); +} + +CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string) +{ + return cJSON_GetObjectItem(object, string) ? 1 : 0; } /* Utility for array list handling. */ -static void suffix_object(cJSON *prev,cJSON *item) {prev->next=item;item->prev=prev;} +static void suffix_object(cJSON *prev, cJSON *item) +{ + prev->next = item; + item->prev = prev; +} + /* Utility for handling references. 
*/ -static cJSON *create_reference(cJSON *item) {cJSON *ref=cJSON_New_Item();if (!ref) return 0;memcpy(ref,item,sizeof(cJSON));ref->string=0;ref->type|=cJSON_IsReference;ref->next=ref->prev=0;return ref;} +static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks) +{ + cJSON *reference = NULL; + if (item == NULL) + { + return NULL; + } + + reference = cJSON_New_Item(hooks); + if (reference == NULL) + { + return NULL; + } + + memcpy(reference, item, sizeof(cJSON)); + reference->string = NULL; + reference->type |= cJSON_IsReference; + reference->next = reference->prev = NULL; + return reference; +} + +static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) +{ + cJSON *child = NULL; + + if ((item == NULL) || (array == NULL)) + { + return false; + } + + child = array->child; + + if (child == NULL) + { + /* list is empty, start new one */ + array->child = item; + } + else + { + /* append to the end */ + while (child->next) + { + child = child->next; + } + suffix_object(child, item); + } + + return true; +} /* Add item to array/object. 
*/ -void cJSON_AddItemToArray(cJSON *array, cJSON *item) {cJSON *c=array->child;if (!item) return; if (!c) {array->child=item;} else {while (c && c->next) c=c->next; suffix_object(c,item);}} -void cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item) {if (!item) return; if (item->string) cJSON_free(item->string);item->string=cJSON_strdup(string);cJSON_AddItemToArray(object,item);} -void cJSON_AddItemToObjectCS(cJSON *object,const char *string,cJSON *item) {if (!item) return; if (!(item->type&cJSON_StringIsConst) && item->string) cJSON_free(item->string);item->string=(char*)string;item->type|=cJSON_StringIsConst;cJSON_AddItemToArray(object,item);} -void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) {cJSON_AddItemToArray(array,create_reference(item));} -void cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item) {cJSON_AddItemToObject(object,string,create_reference(item));} - -cJSON *cJSON_DetachItemFromArray(cJSON *array,int which) {cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return 0; - if (c->prev) c->prev->next=c->next;if (c->next) c->next->prev=c->prev;if (c==array->child) array->child=c->next;c->prev=c->next=0;return c;} -void cJSON_DeleteItemFromArray(cJSON *array,int which) {cJSON_Delete(cJSON_DetachItemFromArray(array,which));} -cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string) {int i=0;cJSON *c=object->child;while (c && cJSON_strcasecmp(c->string,string)) i++,c=c->next;if (c) return cJSON_DetachItemFromArray(object,i);return 0;} -void cJSON_DeleteItemFromObject(cJSON *object,const char *string) {cJSON_Delete(cJSON_DetachItemFromObject(object,string));} +CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item) +{ + add_item_to_array(array, item); +} + +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) +#pragma GCC diagnostic push +#endif +#ifdef __GNUC__ +#pragma GCC diagnostic ignored "-Wcast-qual" 
+#endif +/* helper function to cast away const */ +static void* cast_away_const(const void* string) +{ + return (void*)string; +} +#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5)))) +#pragma GCC diagnostic pop +#endif + + +static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key) +{ + char *new_key = NULL; + int new_type = cJSON_Invalid; + + if ((object == NULL) || (string == NULL) || (item == NULL)) + { + return false; + } + + if (constant_key) + { + new_key = (char*)cast_away_const(string); + new_type = item->type | cJSON_StringIsConst; + } + else + { + new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks); + if (new_key == NULL) + { + return false; + } + + new_type = item->type & ~cJSON_StringIsConst; + } + + if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) + { + hooks->deallocate(item->string); + } + + item->string = new_key; + item->type = new_type; + + return add_item_to_array(object, item); +} + +CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) +{ + add_item_to_object(object, string, item, &global_hooks, false); +} + +/* Add an item to an object with constant string as key */ +CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) +{ + add_item_to_object(object, string, item, &global_hooks, true); +} + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) +{ + if (array == NULL) + { + return; + } + + add_item_to_array(array, create_reference(item, &global_hooks)); +} + +CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) +{ + if ((object == NULL) || (string == NULL)) + { + return; + } + + add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false); +} + +CJSON_PUBLIC(cJSON*) 
cJSON_AddNullToObject(cJSON * const object, const char * const name) +{ + cJSON *null = cJSON_CreateNull(); + if (add_item_to_object(object, name, null, &global_hooks, false)) + { + return null; + } + + cJSON_Delete(null); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name) +{ + cJSON *true_item = cJSON_CreateTrue(); + if (add_item_to_object(object, name, true_item, &global_hooks, false)) + { + return true_item; + } + + cJSON_Delete(true_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name) +{ + cJSON *false_item = cJSON_CreateFalse(); + if (add_item_to_object(object, name, false_item, &global_hooks, false)) + { + return false_item; + } + + cJSON_Delete(false_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean) +{ + cJSON *bool_item = cJSON_CreateBool(boolean); + if (add_item_to_object(object, name, bool_item, &global_hooks, false)) + { + return bool_item; + } + + cJSON_Delete(bool_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number) +{ + cJSON *number_item = cJSON_CreateNumber(number); + if (add_item_to_object(object, name, number_item, &global_hooks, false)) + { + return number_item; + } + + cJSON_Delete(number_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string) +{ + cJSON *string_item = cJSON_CreateString(string); + if (add_item_to_object(object, name, string_item, &global_hooks, false)) + { + return string_item; + } + + cJSON_Delete(string_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw) +{ + cJSON *raw_item = cJSON_CreateRaw(raw); + if (add_item_to_object(object, name, raw_item, 
&global_hooks, false)) + { + return raw_item; + } + + cJSON_Delete(raw_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name) +{ + cJSON *object_item = cJSON_CreateObject(); + if (add_item_to_object(object, name, object_item, &global_hooks, false)) + { + return object_item; + } + + cJSON_Delete(object_item); + return NULL; +} + +CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name) +{ + cJSON *array = cJSON_CreateArray(); + if (add_item_to_object(object, name, array, &global_hooks, false)) + { + return array; + } + + cJSON_Delete(array); + return NULL; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item) +{ + if ((parent == NULL) || (item == NULL)) + { + return NULL; + } + + if (item->prev != NULL) + { + /* not the first element */ + item->prev->next = item->next; + } + if (item->next != NULL) + { + /* not the last element */ + item->next->prev = item->prev; + } + + if (item == parent->child) + { + /* first element */ + parent->child = item->next; + } + /* make sure the detached item doesn't point anywhere anymore */ + item->prev = NULL; + item->next = NULL; + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) +{ + if (which < 0) + { + return NULL; + } + + return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) +{ + cJSON_Delete(cJSON_DetachItemFromArray(array, which)); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItem(object, string); + + return cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string); + + return 
cJSON_DetachItemViaPointer(object, to_detach); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObject(object, string)); +} + +CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) +{ + cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string)); +} /* Replace array/object items with new ones. */ -void cJSON_InsertItemInArray(cJSON *array,int which,cJSON *newitem) {cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) {cJSON_AddItemToArray(array,newitem);return;} - newitem->next=c;newitem->prev=c->prev;c->prev=newitem;if (c==array->child) array->child=newitem; else newitem->prev->next=newitem;} -void cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem) {cJSON *c=array->child;while (c && which>0) c=c->next,which--;if (!c) return; - newitem->next=c->next;newitem->prev=c->prev;if (newitem->next) newitem->next->prev=newitem; - if (c==array->child) array->child=newitem; else newitem->prev->next=newitem;c->next=c->prev=0;cJSON_Delete(c);} -void cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem){int i=0;cJSON *c=object->child;while(c && cJSON_strcasecmp(c->string,string))i++,c=c->next;if(c){newitem->string=cJSON_strdup(string);cJSON_ReplaceItemInArray(object,i,newitem);}} +CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) +{ + cJSON *after_inserted = NULL; + + if (which < 0) + { + return; + } + + after_inserted = get_array_item(array, (size_t)which); + if (after_inserted == NULL) + { + add_item_to_array(array, newitem); + return; + } + + newitem->next = after_inserted; + newitem->prev = after_inserted->prev; + after_inserted->prev = newitem; + if (after_inserted == array->child) + { + array->child = newitem; + } + else + { + newitem->prev->next = newitem; + } +} + +CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * 
replacement) +{ + if ((parent == NULL) || (replacement == NULL) || (item == NULL)) + { + return false; + } + + if (replacement == item) + { + return true; + } + + replacement->next = item->next; + replacement->prev = item->prev; + + if (replacement->next != NULL) + { + replacement->next->prev = replacement; + } + if (replacement->prev != NULL) + { + replacement->prev->next = replacement; + } + if (parent->child == item) + { + parent->child = replacement; + } + + item->next = NULL; + item->prev = NULL; + cJSON_Delete(item); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) +{ + if (which < 0) + { + return; + } + + cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem); +} + +static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive) +{ + if ((replacement == NULL) || (string == NULL)) + { + return false; + } + + /* replace the name in the replacement */ + if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL)) + { + cJSON_free(replacement->string); + } + replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + replacement->type &= ~cJSON_StringIsConst; + + cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement); + + return true; +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, false); +} + +CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem) +{ + replace_item_in_object(object, string, newitem, true); +} /* Create basic types: */ -cJSON *cJSON_CreateNull(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_NULL;return item;} -cJSON *cJSON_CreateTrue(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_True;return item;} -cJSON 
*cJSON_CreateFalse(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_False;return item;} -cJSON *cJSON_CreateBool(int b) {cJSON *item=cJSON_New_Item();if(item)item->type=b?cJSON_True:cJSON_False;return item;} -cJSON *cJSON_CreateNumber(double num) {cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_Number;item->valuedouble=num;item->valueint=(int)num;}return item;} -cJSON *cJSON_CreateString(const char *string) {cJSON *item=cJSON_New_Item();if(item){item->type=cJSON_String;item->valuestring=cJSON_strdup(string);}return item;} -cJSON *cJSON_CreateArray(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Array;return item;} -cJSON *cJSON_CreateObject(void) {cJSON *item=cJSON_New_Item();if(item)item->type=cJSON_Object;return item;} +CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_NULL; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_True; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool b) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = b ? 
cJSON_True : cJSON_False; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Number; + item->valuedouble = num; + + /* use saturation in case of overflow */ + if (num >= INT_MAX) + { + item->valueint = INT_MAX; + } + else if (num <= INT_MIN) + { + item->valueint = INT_MIN; + } + else + { + item->valueint = (int)num; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_String; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks); + if (!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) + { + item->type = cJSON_String | cJSON_IsReference; + item->valuestring = (char*)cast_away_const(string); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Object | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) { + cJSON *item = cJSON_New_Item(&global_hooks); + if (item != NULL) { + item->type = cJSON_Array | cJSON_IsReference; + item->child = (cJSON*)cast_away_const(child); + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Raw; + item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks); + if (!item->valuestring) + { + cJSON_Delete(item); + return NULL; + } + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) +{ + cJSON 
*item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Array; + } + + return item; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) +{ + cJSON *item = cJSON_New_Item(&global_hooks); + if (item) + { + item->type = cJSON_Object; + } + + return item; +} /* Create Arrays: */ -cJSON *cJSON_CreateIntArray(const int *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} -cJSON *cJSON_CreateFloatArray(const float *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} -cJSON *cJSON_CreateDoubleArray(const double *numbers,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} -cJSON *cJSON_CreateStringArray(const char **strings,int count) {int i;cJSON *n=0,*p=0,*a=cJSON_CreateArray();for(i=0;a && ichild=n;else suffix_object(p,n);p=n;}return a;} +CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if (!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber((double)numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if (!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) 
cJSON_CreateDoubleArray(const double *numbers, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (numbers == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateNumber(numbers[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if (!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} + +CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count) +{ + size_t i = 0; + cJSON *n = NULL; + cJSON *p = NULL; + cJSON *a = NULL; + + if ((count < 0) || (strings == NULL)) + { + return NULL; + } + + a = cJSON_CreateArray(); + + for (i = 0; a && (i < (size_t)count); i++) + { + n = cJSON_CreateString(strings[i]); + if (!n) + { + cJSON_Delete(a); + return NULL; + } + if (!i) + { + a->child = n; + } + else + { + suffix_object(p, n); + } + p = n; + } + + return a; +} /* Duplication */ -cJSON *cJSON_Duplicate(cJSON *item,int recurse) -{ - cJSON *newitem,*cptr,*nptr=0,*newchild; - /* Bail on bad ptr */ - if (!item) return 0; - /* Create new item */ - newitem=cJSON_New_Item(); - if (!newitem) return 0; - /* Copy over all vars */ - newitem->type=item->type&(~cJSON_IsReference),newitem->valueint=item->valueint,newitem->valuedouble=item->valuedouble; - if (item->valuestring) {newitem->valuestring=cJSON_strdup(item->valuestring); if (!newitem->valuestring) {cJSON_Delete(newitem);return 0;}} - if (item->string) {newitem->string=cJSON_strdup(item->string); if (!newitem->string) {cJSON_Delete(newitem);return 0;}} - /* If non-recursive, then we're done! */ - if (!recurse) return newitem; - /* Walk the ->next chain for the child. 
*/ - cptr=item->child; - while (cptr) - { - newchild=cJSON_Duplicate(cptr,1); /* Duplicate (with recurse) each item in the ->next chain */ - if (!newchild) {cJSON_Delete(newitem);return 0;} - if (nptr) {nptr->next=newchild,newchild->prev=nptr;nptr=newchild;} /* If newitem->child already set, then crosswire ->prev and ->next and move on */ - else {newitem->child=newchild;nptr=newchild;} /* Set newitem->child and move to it */ - cptr=cptr->next; - } - return newitem; -} - -void cJSON_Minify(char *json) -{ - char *into=json; - while (*json) - { - if (*json==' ') json++; - else if (*json=='\t') json++; /* Whitespace characters. */ - else if (*json=='\r') json++; - else if (*json=='\n') json++; - else if (*json=='/' && json[1]=='/') while (*json && *json!='\n') json++; /* double-slash comments, to end of line. */ - else if (*json=='/' && json[1]=='*') {while (*json && !(*json=='*' && json[1]=='/')) json++;json+=2;} /* multiline comments. */ - else if (*json=='\"'){*into++=*json++;while (*json && *json!='\"'){if (*json=='\\') *into++=*json++;*into++=*json++;}*into++=*json++;} /* string literals, which are \" sensitive. */ - else *into++=*json++; /* All other characters. */ - } - *into=0; /* and null-terminate. */ +CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) +{ + cJSON *newitem = NULL; + cJSON *child = NULL; + cJSON *next = NULL; + cJSON *newchild = NULL; + + /* Bail on bad ptr */ + if (!item) + { + goto fail; + } + /* Create new item */ + newitem = cJSON_New_Item(&global_hooks); + if (!newitem) + { + goto fail; + } + /* Copy over all vars */ + newitem->type = item->type & (~cJSON_IsReference); + newitem->valueint = item->valueint; + newitem->valuedouble = item->valuedouble; + if (item->valuestring) + { + newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks); + if (!newitem->valuestring) + { + goto fail; + } + } + if (item->string) + { + newitem->string = (item->type&cJSON_StringIsConst) ? 
item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks); + if (!newitem->string) + { + goto fail; + } + } + /* If non-recursive, then we're done! */ + if (!recurse) + { + return newitem; + } + /* Walk the ->next chain for the child. */ + child = item->child; + while (child != NULL) + { + newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */ + if (!newchild) + { + goto fail; + } + if (next != NULL) + { + /* If newitem->child already set, then crosswire ->prev and ->next and move on */ + next->next = newchild; + newchild->prev = next; + next = newchild; + } + else + { + /* Set newitem->child and move to it */ + newitem->child = newchild; + next = newchild; + } + child = child->next; + } + + return newitem; + +fail: + if (newitem != NULL) + { + cJSON_Delete(newitem); + } + + return NULL; +} + +CJSON_PUBLIC(void) cJSON_Minify(char *json) +{ + unsigned char *into = (unsigned char*)json; + + if (json == NULL) + { + return; + } + + while (*json) + { + if (*json == ' ') + { + json++; + } + else if (*json == '\t') + { + /* Whitespace characters. */ + json++; + } + else if (*json == '\r') + { + json++; + } + else if (*json == '\n') + { + json++; + } + else if ((*json == '/') && (json[1] == '/')) + { + /* double-slash comments, to end of line. */ + while (*json && (*json != '\n')) + { + json++; + } + } + else if ((*json == '/') && (json[1] == '*')) + { + /* multiline comments. */ + while (*json && !((*json == '*') && (json[1] == '/'))) + { + json++; + } + json += 2; + } + else if (*json == '\"') + { + /* string literals, which are \" sensitive. */ + *into++ = (unsigned char)*json++; + while (*json && (*json != '\"')) + { + if (*json == '\\') + { + *into++ = (unsigned char)*json++; + } + *into++ = (unsigned char)*json++; + } + *into++ = (unsigned char)*json++; + } + else + { + /* All other characters. */ + *into++ = (unsigned char)*json++; + } + } + + /* and null-terminate. 
*/ + *into = '\0'; } +CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Invalid; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_False; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xff) == cJSON_True; +} + + +CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & (cJSON_True | cJSON_False)) != 0; +} +CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_NULL; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Number; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_String; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Array; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Object; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item) +{ + if (item == NULL) + { + return false; + } + + return (item->type & 0xFF) == cJSON_Raw; +} + +CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive) +{ + if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) + { + return false; + } + + /* check if type is valid */ + switch (a->type & 0xFF) + { + case 
cJSON_False: + case cJSON_True: + case cJSON_NULL: + case cJSON_Number: + case cJSON_String: + case cJSON_Raw: + case cJSON_Array: + case cJSON_Object: + break; + + default: + return false; + } + + /* identical objects are equal */ + if (a == b) + { + return true; + } + + switch (a->type & 0xFF) + { + /* in these cases and equal type is enough */ + case cJSON_False: + case cJSON_True: + case cJSON_NULL: + return true; + + case cJSON_Number: + if (a->valuedouble == b->valuedouble) + { + return true; + } + return false; + + case cJSON_String: + case cJSON_Raw: + if ((a->valuestring == NULL) || (b->valuestring == NULL)) + { + return false; + } + if (strcmp(a->valuestring, b->valuestring) == 0) + { + return true; + } + + return false; + + case cJSON_Array: + { + cJSON *a_element = a->child; + cJSON *b_element = b->child; + + for (; (a_element != NULL) && (b_element != NULL);) + { + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + + a_element = a_element->next; + b_element = b_element->next; + } + + /* one of the arrays is longer than the other */ + if (a_element != b_element) { + return false; + } + + return true; + } + + case cJSON_Object: + { + cJSON *a_element = NULL; + cJSON *b_element = NULL; + cJSON_ArrayForEach(a_element, a) + { + /* TODO This has O(n^2) runtime, which is horrible! 
*/ + b_element = get_object_item(b, a_element->string, case_sensitive); + if (b_element == NULL) + { + return false; + } + + if (!cJSON_Compare(a_element, b_element, case_sensitive)) + { + return false; + } + } + + /* doing this twice, once on a and b to prevent true comparison if a subset of b + * TODO: Do this the proper way, this is just a fix for now */ + cJSON_ArrayForEach(b_element, b) + { + a_element = get_object_item(a, b_element->string, case_sensitive); + if (a_element == NULL) + { + return false; + } + + if (!cJSON_Compare(b_element, a_element, case_sensitive)) + { + return false; + } + } + + return true; + } + + default: + return false; + } +} + +CJSON_PUBLIC(void *) cJSON_malloc(size_t size) +{ + return global_hooks.allocate(size); +} + +CJSON_PUBLIC(void) cJSON_free(void *object) +{ + global_hooks.deallocate(object); +} \ No newline at end of file diff --git a/source/code/cjson/cJSON.h b/source/code/cjson/cJSON.h index 662948612..d4a2dfed3 100644 --- a/source/code/cjson/cJSON.h +++ b/source/code/cjson/cJSON.h @@ -1,147 +1,285 @@ /* - Copyright (c) 2009 Dave Gamble - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. +Copyright (c) 2009-2017 Dave Gamble and cJSON contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. */ #ifndef cJSON__h #define cJSON__h + #ifdef __cplusplus extern "C" { #endif -/* cJSON Types: */ -#define cJSON_False 0 -#define cJSON_True 1 -#define cJSON_NULL 2 -#define cJSON_Number 3 -#define cJSON_String 4 -#define cJSON_Array 5 -#define cJSON_Object 6 - +#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32)) +#define __WINDOWS__ +#endif + +#ifdef __WINDOWS__ + + /* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. 
For windows you have 3 define options: + + CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols + CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default) + CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol + + For *nix builds that support visibility attribute, you can define similar behavior by + + setting default visibility to hidden by adding + -fvisibility=hidden (for gcc) + or + -xldscope=hidden (for sun cc) + to CFLAGS + + then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does + + */ + +#define CJSON_CDECL __cdecl +#define CJSON_STDCALL __stdcall + + /* export symbols by default, this is necessary for copy pasting the C and header file */ +#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_EXPORT_SYMBOLS +#endif + +#if defined(CJSON_HIDE_SYMBOLS) +#define CJSON_PUBLIC(type) type CJSON_STDCALL +#elif defined(CJSON_EXPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL +#elif defined(CJSON_IMPORT_SYMBOLS) +#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL +#endif +#else /* !__WINDOWS__ */ +#define CJSON_CDECL +#define CJSON_STDCALL + +#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY) +#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type +#else +#define CJSON_PUBLIC(type) type +#endif +#endif + + /* project version */ +#define CJSON_VERSION_MAJOR 1 +#define CJSON_VERSION_MINOR 7 +#define CJSON_VERSION_PATCH 8 + +#include + + /* cJSON Types: */ +#define cJSON_Invalid (0) +#define cJSON_False (1 << 0) +#define cJSON_True (1 << 1) +#define cJSON_NULL (1 << 2) +#define cJSON_Number (1 << 3) +#define cJSON_String (1 << 4) +#define cJSON_Array (1 << 5) +#define cJSON_Object (1 << 6) +#define cJSON_Raw (1 << 7) /* raw json */ + #define 
cJSON_IsReference 256 #define cJSON_StringIsConst 512 -/* The cJSON structure: */ - typedef struct cJSON { - struct cJSON *next,*prev; /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ - struct cJSON *child; /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ - - int type; /* The type of the item, as above. */ - - char *valuestring; /* The item's string, if type==cJSON_String */ - int valueint; /* The item's number, if type==cJSON_Number */ - double valuedouble; /* The item's number, if type==cJSON_Number */ - - char *string; /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ - } cJSON; - - typedef struct cJSON_Hooks { - void *(*malloc_fn)(size_t sz); - void (*free_fn)(void *ptr); - } cJSON_Hooks; - -/* Supply malloc, realloc and free functions to cJSON */ - extern void cJSON_InitHooks(cJSON_Hooks* hooks); - - -/* Supply a block of JSON, and this returns a cJSON object you can interrogate. Call cJSON_Delete when finished. */ - extern cJSON *cJSON_Parse(const char *value); -/* Render a cJSON entity to text for transfer/storage. Free the char* when finished. */ - extern char *cJSON_Print(cJSON *item); -/* Render a cJSON entity to text for transfer/storage without any formatting. Free the char* when finished. */ - extern char *cJSON_PrintUnformatted(cJSON *item); -/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */ - extern char *cJSON_PrintBuffered(cJSON *item,int prebuffer,int fmt); -/* Delete a cJSON entity and all subentities. */ - extern void cJSON_Delete(cJSON *c); - -/* Returns the number of items in an array (or object). */ - extern int cJSON_GetArraySize(cJSON *array); -/* Retrieve item number "item" from array "array". Returns NULL if unsuccessful. 
*/ - extern cJSON *cJSON_GetArrayItem(cJSON *array,int item); -/* Get item "string" from object. Case insensitive. */ - extern cJSON *cJSON_GetObjectItem(cJSON *object,const char *string); - -/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */ - extern const char *cJSON_GetErrorPtr(void); - -/* These calls create a cJSON item of the appropriate type. */ - extern cJSON *cJSON_CreateNull(void); - extern cJSON *cJSON_CreateTrue(void); - extern cJSON *cJSON_CreateFalse(void); - extern cJSON *cJSON_CreateBool(int b); - extern cJSON *cJSON_CreateNumber(double num); - extern cJSON *cJSON_CreateString(const char *string); - extern cJSON *cJSON_CreateArray(void); - extern cJSON *cJSON_CreateObject(void); - -/* These utilities create an Array of count items. */ - extern cJSON *cJSON_CreateIntArray(const int *numbers,int count); - extern cJSON *cJSON_CreateFloatArray(const float *numbers,int count); - extern cJSON *cJSON_CreateDoubleArray(const double *numbers,int count); - extern cJSON *cJSON_CreateStringArray(const char **strings,int count); - -/* Append item to the specified array/object. */ - extern void cJSON_AddItemToArray(cJSON *array, cJSON *item); - extern void cJSON_AddItemToObject(cJSON *object,const char *string,cJSON *item); - extern void cJSON_AddItemToObjectCS(cJSON *object,const char *string,cJSON *item); /* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object */ -/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. 
*/ - extern void cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); - extern void cJSON_AddItemReferenceToObject(cJSON *object,const char *string,cJSON *item); - -/* Remove/Detatch items from Arrays/Objects. */ - extern cJSON *cJSON_DetachItemFromArray(cJSON *array,int which); - extern void cJSON_DeleteItemFromArray(cJSON *array,int which); - extern cJSON *cJSON_DetachItemFromObject(cJSON *object,const char *string); - extern void cJSON_DeleteItemFromObject(cJSON *object,const char *string); - -/* Update array items. */ - extern void cJSON_InsertItemInArray(cJSON *array,int which,cJSON *newitem); /* Shifts pre-existing items to the right. */ - extern void cJSON_ReplaceItemInArray(cJSON *array,int which,cJSON *newitem); - extern void cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem); - -/* Duplicate a cJSON item */ - extern cJSON *cJSON_Duplicate(cJSON *item,int recurse); -/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will - need to be released. With recurse!=0, it will duplicate any children connected to the item. - The item->next and ->prev pointers are always zero on return from Duplicate. */ - -/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ - extern cJSON *cJSON_ParseWithOpts(const char *value,const char **return_parse_end,int require_null_terminated); - - extern void cJSON_Minify(char *json); - -/* Macros for creating things quickly. 
*/ -#define cJSON_AddNullToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateNull()) -#define cJSON_AddTrueToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateTrue()) -#define cJSON_AddFalseToObject(object,name) cJSON_AddItemToObject(object, name, cJSON_CreateFalse()) -#define cJSON_AddBoolToObject(object,name,b) cJSON_AddItemToObject(object, name, cJSON_CreateBool(b)) -#define cJSON_AddNumberToObject(object,name,n) cJSON_AddItemToObject(object, name, cJSON_CreateNumber(n)) -#define cJSON_AddStringToObject(object,name,s) cJSON_AddItemToObject(object, name, cJSON_CreateString(s)) - -/* When assigning an integer value, it needs to be propagated to valuedouble too. */ -#define cJSON_SetIntValue(object,val) ((object)?(object)->valueint=(object)->valuedouble=(val):(val)) -#define cJSON_SetNumberValue(object,val) ((object)?(object)->valueint=(object)->valuedouble=(val):(val)) + /* The cJSON structure: */ + typedef struct cJSON + { + /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */ + struct cJSON *next; + struct cJSON *prev; + /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */ + struct cJSON *child; + + /* The type of the item, as above. */ + int type; + + /* The item's string, if type==cJSON_String and type == cJSON_Raw */ + char *valuestring; + /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */ + int valueint; + /* The item's number, if type==cJSON_Number */ + double valuedouble; + + /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */ + char *string; + } cJSON; + + typedef struct cJSON_Hooks + { + /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. 
*/ + void *(CJSON_CDECL *malloc_fn)(size_t sz); + void (CJSON_CDECL *free_fn)(void *ptr); + } cJSON_Hooks; + + typedef int cJSON_bool; + + /* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them. + * This is to prevent stack overflows. */ +#ifndef CJSON_NESTING_LIMIT +#define CJSON_NESTING_LIMIT 1000 +#endif + + /* returns the version of cJSON as a string */ + CJSON_PUBLIC(const char*) cJSON_Version(void); + + /* Supply malloc, realloc and free functions to cJSON */ + CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks); + + /* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */ + /* Supply a block of JSON, and this returns a cJSON object you can interrogate. */ + CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value); + /* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */ + /* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */ + CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated); + + /* Render a cJSON entity to text for transfer/storage. */ + CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item); + /* Render a cJSON entity to text for transfer/storage without any formatting. */ + CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item); + /* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. 
fmt=0 gives unformatted, =1 gives formatted */ + CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt); + /* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */ + /* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */ + CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format); + /* Delete a cJSON entity and all subentities. */ + CJSON_PUBLIC(void) cJSON_Delete(cJSON *c); + + /* Returns the number of items in an array (or object). */ + CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array); + /* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */ + CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index); + /* Get item "string" from object. Case insensitive. */ + CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string); + CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string); + CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string); + /* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. 
*/ + CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void); + + /* Check if the item is a string and return its valuestring */ + CJSON_PUBLIC(char *) cJSON_GetStringValue(cJSON *item); + + /* These functions check the type of an item */ + CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item); + CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item); + + /* These calls create a cJSON item of the appropriate type. */ + CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void); + CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void); + CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void); + CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean); + CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num); + CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string); + /* raw json */ + CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw); + CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void); + CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void); + + /* Create a string where valuestring references a string so + * it will not be freed by cJSON_Delete */ + CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string); + /* Create an object/arrray that only references it's elements so + * they will not be freed by cJSON_Delete */ + CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child); + CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child); + + /* These utilities create an Array of count items. 
*/ + CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count); + CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count); + CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count); + CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char **strings, int count); + + /* Append item to the specified array/object. */ + CJSON_PUBLIC(void) cJSON_AddItemToArray(cJSON *array, cJSON *item); + CJSON_PUBLIC(void) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item); + /* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object. + * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before + * writing to `item->string` */ + CJSON_PUBLIC(void) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item); + /* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */ + CJSON_PUBLIC(void) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item); + CJSON_PUBLIC(void) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item); + + /* Remove/Detatch items from Arrays/Objects. */ + CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item); + CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which); + CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which); + CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string); + CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string); + CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string); + CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string); + + /* Update array items. 
*/ + CJSON_PUBLIC(void) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */ + CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement); + CJSON_PUBLIC(void) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem); + CJSON_PUBLIC(void) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem); + CJSON_PUBLIC(void) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem); + + /* Duplicate a cJSON item */ + CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse); + /* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will + need to be released. With recurse!=0, it will duplicate any children connected to the item. + The item->next and ->prev pointers are always zero on return from Duplicate. */ + /* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal. + * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */ + CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive); + + + CJSON_PUBLIC(void) cJSON_Minify(char *json); + + /* Helper functions for creating and adding items to an object at the same time. + * They return the added item or NULL on failure. 
*/ + CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name); + CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name); + CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name); + CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean); + CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number); + CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string); + CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw); + CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name); + CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name); + + /* When assigning an integer value, it needs to be propagated to valuedouble too. */ +#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number)) + /* helper for the cJSON_SetNumberValue macro */ + CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number); +#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number)) + + /* Macro for iterating over an array or object */ +#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? 
(array)->child : NULL; element != NULL; element = element->next) + + /* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */ + CJSON_PUBLIC(void *) cJSON_malloc(size_t size); + CJSON_PUBLIC(void) cJSON_free(void *object); #ifdef __cplusplus } #endif -#endif + +#endif \ No newline at end of file diff --git a/source/code/providers/Container_ContainerInventory_Class_Provider.cpp b/source/code/providers/Container_ContainerInventory_Class_Provider.cpp index 7fdd746a1..68c13053a 100644 --- a/source/code/providers/Container_ContainerInventory_Class_Provider.cpp +++ b/source/code/providers/Container_ContainerInventory_Class_Provider.cpp @@ -103,11 +103,11 @@ class ContainerQuery { cJSON* entry = cJSON_GetArrayItem(response[0], i); - if (entry) + if (entry != NULL) { cJSON* tags = cJSON_GetObjectItem(entry, "RepoTags"); - if (tags && cJSON_GetArraySize(tags)) + if ((tags != NULL) && cJSON_GetArraySize(tags)) { string value = ""; cJSON* arrItem = cJSON_GetArrayItem(tags, 0); @@ -168,7 +168,7 @@ class ContainerQuery try { cJSON* config = cJSON_GetObjectItem(entry, "Config"); - if (config) + if (config != NULL) { // Hostname of container string hostnamevalue = ""; @@ -232,11 +232,11 @@ class ContainerQuery // Compose group instance.ComposeGroup_value(""); - if (labels) + if (labels != NULL) { cJSON* groupName = cJSON_GetObjectItem(labels, "com.docker.compose.project"); - if (groupName) + if (groupName != NULL) { instance.ComposeGroup_value(groupName->valuestring); } @@ -244,7 +244,10 @@ class ContainerQuery } else { - syslog(LOG_WARNING, "Attempt in ObtainContainerConfig to get container %s config information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + if (cJSON_GetObjectItem(entry, "Id") != NULL) + { + syslog(LOG_WARNING, "Attempt in ObtainContainerConfig to get container %s config information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + } } } catch (std::exception &e) @@ -268,7 +271,7 @@ 
class ContainerQuery try { cJSON* state = cJSON_GetObjectItem(entry, "State"); - if (state) + if (state != NULL) { cJSON* objItem = cJSON_GetObjectItem(state, "ExitCode"); if (objItem != NULL) @@ -278,7 +281,10 @@ class ContainerQuery if (exitCode < 0) { exitCode = 128; - syslog(LOG_NOTICE, "Container %s returned negative exit code", cJSON_GetObjectItem(entry, "Id")->valuestring); + if (cJSON_GetObjectItem(entry, "Id") != NULL) + { + syslog(LOG_NOTICE, "Container %s returned negative exit code", cJSON_GetObjectItem(entry, "Id")->valuestring); + } } instance.ExitCode_value(exitCode); @@ -328,7 +334,10 @@ class ContainerQuery } else { - syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + if (cJSON_GetObjectItem(entry, "Id")) + { + syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + } } } catch (std::exception &e) @@ -352,7 +361,7 @@ class ContainerQuery try { cJSON* hostConfig = cJSON_GetObjectItem(entry, "HostConfig"); - if (hostConfig) + if (hostConfig != NULL) { // Links cJSON* objItem = cJSON_GetObjectItem(hostConfig, "Links"); @@ -372,7 +381,10 @@ class ContainerQuery } else { - syslog(LOG_WARNING, "Attempt in ObtainContainerHostConfig to get container %s host config information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + if (cJSON_GetObjectItem(entry, "Id")) + { + syslog(LOG_WARNING, "Attempt in ObtainContainerHostConfig to get container %s host config information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + } } } catch (std::exception &e) diff --git a/source/code/providers/Container_ContainerStatistics_Class_Provider.cpp b/source/code/providers/Container_ContainerStatistics_Class_Provider.cpp index c43057ec7..08b68b1d8 100644 --- a/source/code/providers/Container_ContainerStatistics_Class_Provider.cpp +++ 
b/source/code/providers/Container_ContainerStatistics_Class_Provider.cpp @@ -34,17 +34,17 @@ class StatsQuery int totalRx = 0; int totalTx = 0; - if (stats) + if (stats != NULL) { cJSON* network = cJSON_GetObjectItem(stats, "networks"); - if (network) + if (network != NULL) { // Docker 1.9+ network = network->child; // Sum the number of bytes from each NIC if there is more than one - while (network) + while (network != NULL) { cJSON* objItem = cJSON_GetObjectItem(network, "rx_bytes"); if (objItem != NULL) { @@ -66,7 +66,7 @@ class StatsQuery { // Docker 1.8.x network = cJSON_GetObjectItem(stats, "network"); - if (network) + if (network != NULL) { cJSON* objItem = cJSON_GetObjectItem(network, "rx_bytes"); if (objItem != NULL) { @@ -110,7 +110,7 @@ class StatsQuery static void TrySetContainerMemoryData(Container_ContainerStatistics_Class& instance, cJSON* stats) { try { - if (stats) + if (stats != NULL) { cJSON* memory_stats = cJSON_GetObjectItem(stats, "memory_stats"); if (memory_stats != NULL) { @@ -150,27 +150,27 @@ class StatsQuery instance.DiskBytesRead_value(0); instance.DiskBytesWritten_value(0); - if (stats) + if (stats != NULL) { cJSON* blkio_stats = cJSON_GetObjectItem(stats, "blkio_stats"); - if (blkio_stats) + if (blkio_stats != NULL) { cJSON* values = cJSON_GetObjectItem(blkio_stats, "io_service_bytes_recursive"); bool readFlag = false; bool writeFlag = false; - for (int i = 0; values && !(readFlag && writeFlag) && i < cJSON_GetArraySize(values); i++) + for (int i = 0; values != NULL && !(readFlag && writeFlag) && i < cJSON_GetArraySize(values); i++) { cJSON* entry = cJSON_GetArrayItem(values, i); - if (entry) + if (entry != NULL) { cJSON* op = cJSON_GetObjectItem(entry, "op"); cJSON* rawValue = cJSON_GetObjectItem(entry, "value"); - if (op && rawValue) + if ((op != NULL) && (rawValue != NULL)) { if (!strcmp(op->valuestring, "Read")) { @@ -215,15 +215,15 @@ class StatsQuery result["system"] = 0; try { - if (stats) + if (stats != NULL) { cJSON* cpu_stats 
= cJSON_GetObjectItem(stats, "cpu_stats"); - if (cpu_stats) + if (cpu_stats != NULL) { cJSON* cpu_usage = cJSON_GetObjectItem(cpu_stats, "cpu_usage"); - if (cpu_usage) + if (cpu_usage != NULL) { cJSON* objItem = cJSON_GetObjectItem(cpu_usage, "total_usage"); if (objItem != NULL) { @@ -269,15 +269,15 @@ class StatsQuery instance.CPUTotal_value(0); instance.CPUTotalPct_value(0); - if (stats) + if (stats != NULL) { cJSON* cpu_stats = cJSON_GetObjectItem(stats, "cpu_stats"); - if (cpu_stats) + if (cpu_stats != NULL) { cJSON* cpu_usage = cJSON_GetObjectItem(cpu_stats, "cpu_usage"); - if (cpu_usage) + if (cpu_usage != NULL) { cJSON* totalUsageItem = cJSON_GetObjectItem(cpu_usage, "total_usage"); cJSON* systemCpuUsageItem = cJSON_GetObjectItem(cpu_stats, "system_cpu_usage"); @@ -333,7 +333,7 @@ class StatsQuery { cJSON* entry = cJSON_GetArrayItem(response[0], i); - if (entry) + if (entry != NULL) { // New perf entry Container_ContainerStatistics_Class instance; @@ -396,7 +396,10 @@ class StatsQuery // See http://docs.docker.com/engine/reference/api/docker_remote_api_v1.21/#get-container-stats-based-on-resource-usage for example output if (!subResponse.empty() && subResponse[0]) { - TrySetContainerCpuData(result[i], subResponse[0], previousStatsList[i]); + if (i < previousStatsList.size()) + { + TrySetContainerCpuData(result[i], subResponse[0], previousStatsList[i]); + } // Set container name in 'InstanceName' field of Perf data. 
result[i].InstanceID_value(result[i].ElementName_value()); diff --git a/source/code/providers/Container_DaemonEvent_Class_Provider.cpp b/source/code/providers/Container_DaemonEvent_Class_Provider.cpp index d5d2ce6f2..bf2ab3b53 100644 --- a/source/code/providers/Container_DaemonEvent_Class_Provider.cpp +++ b/source/code/providers/Container_DaemonEvent_Class_Provider.cpp @@ -137,11 +137,11 @@ class EventQuery { cJSON* entry = cJSON_GetArrayItem(response[0], i); - if (entry) + if (entry != NULL) { cJSON* nameField = cJSON_GetObjectItem(entry, "Names"); - if (nameField && cJSON_GetArraySize(nameField)) + if ((nameField != NULL) && cJSON_GetArraySize(nameField)) { // Docker API documentation says that this field contains the short ID but that is not the case; use full ID instead cJSON* objItem = cJSON_GetObjectItem(entry, "Id"); @@ -239,7 +239,7 @@ class EventQuery cJSON* entry = cJSON_GetArrayItem(response[0], i); // the newer versions of the API may return objects that do not have status or id - if (entry && cJSON_GetObjectItem(entry, "status") != NULL && cJSON_GetObjectItem(entry, "id") != NULL) + if ((entry != NULL) && cJSON_GetObjectItem(entry, "status") != NULL && cJSON_GetObjectItem(entry, "id") != NULL) { // New inventory entry Container_DaemonEvent_Class instance; diff --git a/source/code/providers/Container_ImageInventory_Class_Provider.cpp b/source/code/providers/Container_ImageInventory_Class_Provider.cpp index 3cc088683..01d1c639c 100644 --- a/source/code/providers/Container_ImageInventory_Class_Provider.cpp +++ b/source/code/providers/Container_ImageInventory_Class_Provider.cpp @@ -35,7 +35,7 @@ class InventoryQuery string result = ""; try { - if (tags && cJSON_GetArraySize(tags)) + if ((tags != NULL) && cJSON_GetArraySize(tags)) { bool flag = false; @@ -164,7 +164,7 @@ class InventoryQuery try { cJSON* state = cJSON_GetObjectItem(entry, "State"); - if (state) + if (state != NULL) { cJSON* objItem = cJSON_GetObjectItem(entry, "Image"); if (objItem != NULL) 
@@ -173,10 +173,10 @@ class InventoryQuery { string id = string(objItem->valuestring); - if (cJSON_GetObjectItem(state, "Running")->valueint) + if (cJSON_GetObjectItem(state, "Running") != NULL && cJSON_GetObjectItem(state, "Running")->valueint) { // Running container - if (cJSON_GetObjectItem(state, "Paused")->valueint) + if (cJSON_GetObjectItem(state, "Paused") != NULL && cJSON_GetObjectItem(state, "Paused")->valueint) { // Paused container instances[idTable[id]].Paused_value(instances[idTable[id]].Paused_value() + 1); @@ -188,7 +188,7 @@ class InventoryQuery } else { - if (cJSON_GetObjectItem(state, "ExitCode")->valueint) + if (cJSON_GetObjectItem(state, "ExitCode") != NULL && cJSON_GetObjectItem(state, "ExitCode")->valueint) { // Container exited nonzero instances[idTable[id]].Failed_value(instances[idTable[id]].Failed_value() + 1); @@ -206,7 +206,10 @@ class InventoryQuery } else { - syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + if (cJSON_GetObjectItem(entry, "Id") != NULL) + { + syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + } } } catch (std::exception &e) @@ -239,7 +242,7 @@ class InventoryQuery { cJSON* entry = cJSON_GetArrayItem(response[0], i); - if (entry) + if (entry != NULL) { cJSON* objItem = cJSON_GetObjectItem(entry, "Id"); if (objItem != NULL) @@ -321,7 +324,7 @@ class InventoryQuery { cJSON* entry = cJSON_GetArrayItem(response[0], i); - if (entry) + if (entry != NULL) { // New inventory entry Container_ImageInventory_Class instance; diff --git a/source/code/providers/Container_Process_Class_Provider.cpp b/source/code/providers/Container_Process_Class_Provider.cpp index 76b15bdfc..9adc4edcd 100644 --- a/source/code/providers/Container_Process_Class_Provider.cpp +++ b/source/code/providers/Container_Process_Class_Provider.cpp @@ 
-55,7 +55,7 @@ class ContainerProcessQuery for (int i = 0; i < cJSON_GetArraySize(dockerPsResponse[0]); i++) { cJSON* containerEntry = cJSON_GetArrayItem(dockerPsResponse[0], i); - if (containerEntry) + if (containerEntry != NULL) { cJSON* objItem = cJSON_GetObjectItem(containerEntry, "Id"); if (objItem != NULL) From 4b630215824d85d568fd384b1bbee071996bec1a Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 27 Sep 2018 16:10:59 -0700 Subject: [PATCH 12/25] Adding a missed null check (#135) --- .../code/providers/Container_DaemonEvent_Class_Provider.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/code/providers/Container_DaemonEvent_Class_Provider.cpp b/source/code/providers/Container_DaemonEvent_Class_Provider.cpp index bf2ab3b53..51e253d73 100644 --- a/source/code/providers/Container_DaemonEvent_Class_Provider.cpp +++ b/source/code/providers/Container_DaemonEvent_Class_Provider.cpp @@ -289,7 +289,10 @@ class EventQuery else { // Image event - instance.ElementName_value(cJSON_GetObjectItem(entry, "id")->valuestring); + if (cJSON_GetObjectItem(entry, "id") != NULL) + { + instance.ElementName_value(cJSON_GetObjectItem(entry, "id")->valuestring); + } instance.Id_value(""); instance.ContainerName_value(""); } From 8b964fd7ee54948b7374ed44f3253d0d89ceb443 Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Thu, 27 Sep 2018 17:01:04 -0700 Subject: [PATCH 13/25] reusing some variables (#136) --- ...iner_ContainerInventory_Class_Provider.cpp | 26 ++++++++++++------- .../Container_DaemonEvent_Class_Provider.cpp | 5 ++-- ...ontainer_ImageInventory_Class_Provider.cpp | 16 +++++++----- 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/source/code/providers/Container_ContainerInventory_Class_Provider.cpp b/source/code/providers/Container_ContainerInventory_Class_Provider.cpp index 68c13053a..ded8fb869 100644 --- a/source/code/providers/Container_ContainerInventory_Class_Provider.cpp +++ 
b/source/code/providers/Container_ContainerInventory_Class_Provider.cpp @@ -210,7 +210,11 @@ class ContainerQuery correctedstring = stringToTruncate + "\"]"; } instance.EnvironmentVar_value(correctedstring.c_str()); - syslog(LOG_WARNING, "Environment variable truncated for container %s", cJSON_GetObjectItem(entry, "Id")->valuestring); + cJSON* idItem = cJSON_GetObjectItem(entry, "Id"); + if (idItem != NULL) + { + syslog(LOG_WARNING, "Environment variable truncated for container %s", idItem->valuestring); + } } else { instance.EnvironmentVar_value(strcmp(env, "null") ? env : ""); @@ -244,9 +248,10 @@ class ContainerQuery } else { - if (cJSON_GetObjectItem(entry, "Id") != NULL) + cJSON* idItem = cJSON_GetObjectItem(entry, "Id"); + if (idItem != NULL) { - syslog(LOG_WARNING, "Attempt in ObtainContainerConfig to get container %s config information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + syslog(LOG_WARNING, "Attempt in ObtainContainerConfig to get container %s config information returned null", idItem->valuestring); } } } @@ -281,9 +286,10 @@ class ContainerQuery if (exitCode < 0) { exitCode = 128; - if (cJSON_GetObjectItem(entry, "Id") != NULL) + cJSON* idItem = cJSON_GetObjectItem(entry, "Id"); + if (idItem != NULL) { - syslog(LOG_NOTICE, "Container %s returned negative exit code", cJSON_GetObjectItem(entry, "Id")->valuestring); + syslog(LOG_NOTICE, "Container %s returned negative exit code", idItem->valuestring); } } @@ -334,9 +340,10 @@ class ContainerQuery } else { - if (cJSON_GetObjectItem(entry, "Id")) + cJSON* idItem = cJSON_GetObjectItem(entry, "Id"); + if (idItem) { - syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", idItem->valuestring); } } } @@ -381,9 +388,10 @@ class ContainerQuery } else { - if 
(cJSON_GetObjectItem(entry, "Id")) + cJSON* idItem = cJSON_GetObjectItem(entry, "Id"); + if (idItem != NULL) { - syslog(LOG_WARNING, "Attempt in ObtainContainerHostConfig to get container %s host config information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + syslog(LOG_WARNING, "Attempt in ObtainContainerHostConfig to get container %s host config information returned null", idItem->valuestring); } } } diff --git a/source/code/providers/Container_DaemonEvent_Class_Provider.cpp b/source/code/providers/Container_DaemonEvent_Class_Provider.cpp index 51e253d73..0c28e4769 100644 --- a/source/code/providers/Container_DaemonEvent_Class_Provider.cpp +++ b/source/code/providers/Container_DaemonEvent_Class_Provider.cpp @@ -288,10 +288,11 @@ class EventQuery } else { + cJSON* idItem = cJSON_GetObjectItem(entry, "id"); // Image event - if (cJSON_GetObjectItem(entry, "id") != NULL) + if (idItem != NULL) { - instance.ElementName_value(cJSON_GetObjectItem(entry, "id")->valuestring); + instance.ElementName_value(idItem->valuestring); } instance.Id_value(""); instance.ContainerName_value(""); diff --git a/source/code/providers/Container_ImageInventory_Class_Provider.cpp b/source/code/providers/Container_ImageInventory_Class_Provider.cpp index 01d1c639c..f5742ef5f 100644 --- a/source/code/providers/Container_ImageInventory_Class_Provider.cpp +++ b/source/code/providers/Container_ImageInventory_Class_Provider.cpp @@ -173,10 +173,12 @@ class InventoryQuery { string id = string(objItem->valuestring); - if (cJSON_GetObjectItem(state, "Running") != NULL && cJSON_GetObjectItem(state, "Running")->valueint) + cJSON* runningItem = cJSON_GetObjectItem(state, "Running"); + if (runningItem != NULL && runningItem->valueint) { // Running container - if (cJSON_GetObjectItem(state, "Paused") != NULL && cJSON_GetObjectItem(state, "Paused")->valueint) + cJSON* pausedItem = cJSON_GetObjectItem(state, "Paused"); + if (pausedItem != NULL && pausedItem->valueint) { // Paused 
container instances[idTable[id]].Paused_value(instances[idTable[id]].Paused_value() + 1); @@ -188,7 +190,8 @@ class InventoryQuery } else { - if (cJSON_GetObjectItem(state, "ExitCode") != NULL && cJSON_GetObjectItem(state, "ExitCode")->valueint) + cJSON* exitCodeItem = cJSON_GetObjectItem(state, "ExitCode"); + if (exitCodeItem != NULL && exitCodeItem->valueint) { // Container exited nonzero instances[idTable[id]].Failed_value(instances[idTable[id]].Failed_value() + 1); @@ -206,9 +209,10 @@ class InventoryQuery } else { - if (cJSON_GetObjectItem(entry, "Id") != NULL) + cJSON* idItem = cJSON_GetObjectItem(entry, "Id"); + if (idItem != NULL) { - syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + syslog(LOG_WARNING, "Attempt in ObtainContainerState to get container %s state information returned null", idItem->valuestring); } } } @@ -263,7 +267,7 @@ class InventoryQuery } else { - syslog(LOG_WARNING, "API call in AggregateContainerStatus to inspect container %s returned null", cJSON_GetObjectItem(entry, "Id")->valuestring); + syslog(LOG_WARNING, "API call in AggregateContainerStatus to inspect container %s returned null", objItem->valuestring); } } } From 938c2edc0d84917c123c2947c791fa3806fce25c Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Fri, 28 Sep 2018 16:00:29 -0700 Subject: [PATCH 14/25] Rashmi/cjson delete null check (#138) * adding null check for cjson-delete * null chk * removing null check --- source/code/providers/Container_Process_Class_Provider.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/source/code/providers/Container_Process_Class_Provider.cpp b/source/code/providers/Container_Process_Class_Provider.cpp index 9adc4edcd..e27df1788 100644 --- a/source/code/providers/Container_Process_Class_Provider.cpp +++ b/source/code/providers/Container_Process_Class_Provider.cpp @@ -163,7 +163,10 @@ class 
ContainerProcessQuery } } } - cJSON_Delete(dockerPsResponse[0]); + if (!dockerPsResponse.empty() && dockerPsResponse[0]) + { + cJSON_Delete(dockerPsResponse[0]); + } } catch (std::exception &e) { From fbfdf11e98cebbbc623bd845bf3010b46dd3918b Mon Sep 17 00:00:00 2001 From: rashmichandrashekar Date: Tue, 2 Oct 2018 17:33:22 -0700 Subject: [PATCH 15/25] updating log level to debug for some provider workflows (#139) --- installer/conf/container.conf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/installer/conf/container.conf b/installer/conf/container.conf index 9eaed9b47..a41b963a9 100755 --- a/installer/conf/container.conf +++ b/installer/conf/container.conf @@ -111,7 +111,7 @@ type out_oms - log_level info + log_level debug buffer_chunk_limit 20m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_containerinventory*.buffer @@ -124,7 +124,7 @@ type out_oms - log_level info + log_level debug buffer_chunk_limit 20m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_imageinventory*.buffer @@ -137,7 +137,7 @@ type out_oms - log_level info + log_level debug buffer_chunk_limit 20m buffer_type file buffer_path %STATE_DIR_WS%/out_oms_servicelog*.buffer From d4260663ccaeae093911052ab47bb2f644f3e56c Mon Sep 17 00:00:00 2001 From: Dilip Raghunathan Date: Thu, 4 Oct 2018 14:01:11 -0700 Subject: [PATCH 16/25] Fixing CPU Utilization and removing Fluent-bit filters (#140) Removing fluent-bit filters, CPU optimizations --- installer/conf/td-agent-bit.conf | 20 ++---------- source/code/go/src/plugins/oms.go | 47 ++++++++++++++++----------- source/code/go/src/plugins/out_oms.go | 2 +- 3 files changed, 32 insertions(+), 37 deletions(-) diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index 84a9fcf94..27916eafd 100644 --- a/installer/conf/td-agent-bit.conf +++ b/installer/conf/td-agent-bit.conf @@ -12,23 +12,9 @@ Parser docker Mem_Buf_Limit 30m Path_Key filepath - -[FILTER] - Name record_modifier - Match oms.container.log.* - 
Whitelist_key log - Whitelist_key stream - Whitelist_key time - Whitelist_key filepath - -[FILTER] - Name modify - Match oms.container.log.* - Rename log LogEntry - Rename stream LogEntrySource - Rename time LogEntryTimeStamp - Rename filepath Filepath - Add_if_not_present SourceSystem Containers + Buffer_Chunk_Size 1m + Buffer_Max_Size 1m + Skip_Long_Lines On [OUTPUT] Name oms diff --git a/source/code/go/src/plugins/oms.go b/source/code/go/src/plugins/oms.go index 2e9e2f3d0..c7fe8eb42 100644 --- a/source/code/go/src/plugins/oms.go +++ b/source/code/go/src/plugins/oms.go @@ -12,11 +12,11 @@ import ( "strings" "sync" "time" -) -import ( + "github.com/fluent/fluent-bit-go/output" - "github.com/mitchellh/mapstructure" + lumberjack "gopkg.in/natefinch/lumberjack.v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -81,7 +81,6 @@ type DataItem struct { Name string `json:"Name"` SourceSystem string `json:"SourceSystem"` Computer string `json:"Computer"` - Filepath string `json:"Filepath"` } // ContainerLogBlob represents the object corresponding to the payload that is sent to the ODS end point @@ -199,23 +198,18 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { for _, record := range tailPluginRecords { - filepath := toString(record["Filepath"]) - containerID := getContainerIDFromFilePath(filepath) + containerID := GetContainerIDFromFilePath(toString(record["filepath"])) if containerID == "" || containsKey(IgnoreIDSet, containerID) { continue } - var dataItem DataItem stringMap := make(map[string]string) - // convert map[interface{}]interface{} to map[string]string - for key, value := range record { - strKey := fmt.Sprintf("%v", key) - strValue := toString(value) - stringMap[strKey] = strValue - } - + stringMap["LogEntry"] = toString(record["log"]) + stringMap["LogEntrySource"] = toString(record["stream"]) + stringMap["LogEntryTimeStamp"] = toString(record["time"]) + 
stringMap["SourceSystem"] = "Containers" stringMap["Id"] = containerID if val, ok := ImageIDMap[containerID]; ok { @@ -238,8 +232,17 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { } } - stringMap["Computer"] = Computer - mapstructure.Decode(stringMap, &dataItem) + dataItem := DataItem{ + ID: stringMap["Id"], + LogEntry: stringMap["LogEntry"], + LogEntrySource: stringMap["LogEntrySource"], + LogEntryTimeStamp: stringMap["LogEntryTimeStamp"], + SourceSystem: stringMap["SourceSystem"], + Computer: Computer, + Image: stringMap["Image"], + Name: stringMap["Name"], + } + dataItems = append(dataItems, dataItem) } @@ -281,11 +284,17 @@ func containsKey(currentMap map[string]bool, key string) bool { } func toString(s interface{}) string { - value := s.([]uint8) - return string([]byte(value[:])) + switch t := s.(type) { + case []byte: + // prevent encoding to base64 + return string(t) + default: + return "" + } } -func getContainerIDFromFilePath(filepath string) string { +// GetContainerIDFromFilePath Gets the container ID From the file Path +func GetContainerIDFromFilePath(filepath string) string { start := strings.LastIndex(filepath, "-") end := strings.LastIndex(filepath, ".") if start >= end || start == -1 || end == -1 { diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go index ec9a573d1..0efc1242d 100644 --- a/source/code/go/src/plugins/out_oms.go +++ b/source/code/go/src/plugins/out_oms.go @@ -10,7 +10,7 @@ import ( //export FLBPluginRegister func FLBPluginRegister(ctx unsafe.Pointer) int { - return output.FLBPluginRegister(ctx, "oms", "Stdout GO!") + return output.FLBPluginRegister(ctx, "oms", "OMS GO!") } //export FLBPluginInit From c2cabab7199870af23bb90de10bca4d8eb50e847 Mon Sep 17 00:00:00 2001 From: Dilip Raghunathan Date: Tue, 9 Oct 2018 14:50:10 -0700 Subject: [PATCH 17/25] Minor tweaks 1. Remove some logging 2. Added more Error Handling 3. 
Continue when there is an error with k8s api (#141) * Removing some logs, added more error checking, continue on kube-api error * Return FLB OK for json Marshall error, instead of RETRY --- source/code/go/src/plugins/oms.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/source/code/go/src/plugins/oms.go b/source/code/go/src/plugins/oms.go index c7fe8eb42..d20f11d57 100644 --- a/source/code/go/src/plugins/oms.go +++ b/source/code/go/src/plugins/oms.go @@ -133,6 +133,7 @@ func updateContainerImageNameMaps() { pods, err := ClientSet.CoreV1().Pods("").List(metav1.ListOptions{}) if err != nil { Log("Error getting pods %s\nIt is ok to log here and continue, because the logs will be missing image and Name, but the logs will still have the containerID", err.Error()) + continue } for _, pod := range pods.Items { @@ -216,20 +217,12 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { stringMap["Image"] = val } else { Log("ContainerId %s not present in Map ", containerID) - Log("CurrentMap Snapshot \n") - for k, v := range ImageIDMap { - Log("%s ==> %s", k, v) - } } if val, ok := NameIDMap[containerID]; ok { stringMap["Name"] = val } else { Log("ContainerId %s not present in Map ", containerID) - Log("CurrentMap Snapshot \n") - for k, v := range NameIDMap { - Log("%s ==> %s", k, v) - } } dataItem := DataItem{ @@ -253,6 +246,10 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { DataItems: dataItems} marshalled, err := json.Marshal(logEntry) + if err != nil { + Log("Error while Marshalling log Entry: %s", err.Error()) + return output.FLB_OK + } req, _ := http.NewRequest("POST", OMSEndpoint, bytes.NewBuffer(marshalled)) req.Header.Set("Content-Type", "application/json") From 32567db6965f65154663c0204c1a3e2a599530d0 Mon Sep 17 00:00:00 2001 From: Vishwanath Narasimhan Date: Wed, 10 Oct 2018 14:09:04 -0700 Subject: [PATCH 18/25] * Change FluentBit flush interval to 30 secs (from 5 secs) * Remove 
ContainerPerf, ContainerServiceLog,ContainerProcess (OMI workflows) for Daemonset --- installer/conf/container.conf | 33 -------------------------------- installer/conf/td-agent-bit.conf | 2 +- 2 files changed, 1 insertion(+), 34 deletions(-) diff --git a/installer/conf/container.conf b/installer/conf/container.conf index a41b963a9..1916300cb 100755 --- a/installer/conf/container.conf +++ b/installer/conf/container.conf @@ -7,19 +7,6 @@ bind 127.0.0.1 -# Filter container logs - - type filter_docker_log - log_path "/var/opt/microsoft/omsagent/log/filter_docker_log.txt" - - -# Container perf - - type oms_omi - object_name "Container" - interval 30s - - # Container inventory type omi @@ -40,16 +27,6 @@ ] -# Container service log - - type omi - run_interval 60s - tag oms.container.servicelog - items [ - ["root/cimv2","Container_DaemonEvent"] - ] - - # Container host inventory type omi @@ -60,16 +37,6 @@ ] -# Container processes - - type omi - run_interval 60s - tag oms.api.ContainerProcess - items [ - ["root/cimv2","Container_Process"] - ] - - #cadvisor perf type cadvisorperf diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index 27916eafd..b5d2309e1 100644 --- a/installer/conf/td-agent-bit.conf +++ b/installer/conf/td-agent-bit.conf @@ -1,5 +1,5 @@ [SERVICE] - Flush 5 + Flush 30 Log_Level info Parsers_File /etc/td-agent-bit/parsers.conf Log_File /var/opt/microsoft/docker-cimprov/log/fluent-bit.log From afc981d504c3f44fd3232892e4823d5d09503d14 Mon Sep 17 00:00:00 2001 From: r-dilip Date: Thu, 11 Oct 2018 21:37:09 -0700 Subject: [PATCH 19/25] Container Log Telemetry --- .gitignore | 3 + installer/conf/td-agent-bit.conf | 7 +- source/code/go/src/plugins/glide.lock | 10 +- source/code/go/src/plugins/glide.yaml | 8 +- source/code/go/src/plugins/oms.go | 9 +- source/code/go/src/plugins/out_oms.go | 10 ++ source/code/go/src/plugins/telemetry.go | 151 ++++++++++++++++++++++++ 7 files changed, 188 insertions(+), 10 deletions(-) create mode 
100644 source/code/go/src/plugins/telemetry.go diff --git a/.gitignore b/.gitignore index 92c8c0cf2..e58d69f7b 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,6 @@ /test/code/providers/TestScriptPath.h /test/code/providers/providertestutils.cpp +source/code/go/src/plugins/profiling +.vscode/launch.json +source/code/go/src/plugins/vendor/ \ No newline at end of file diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index b5d2309e1..5a1c105bf 100644 --- a/installer/conf/td-agent-bit.conf +++ b/installer/conf/td-agent-bit.conf @@ -17,5 +17,8 @@ Skip_Long_Lines On [OUTPUT] - Name oms - Match oms.container.log.* \ No newline at end of file + Name oms + EnableTelemetry true + TelemetryPushInterval 300 + Match oms.container.log.* + AgentVersion internaltest1004-2 \ No newline at end of file diff --git a/source/code/go/src/plugins/glide.lock b/source/code/go/src/plugins/glide.lock index 4597b594a..fc147fe74 100644 --- a/source/code/go/src/plugins/glide.lock +++ b/source/code/go/src/plugins/glide.lock @@ -1,5 +1,5 @@ -hash: bb32415f402ab29751f29b8e394bc974cbc31861453d817aaeb94ef83dacc488 -updated: 2018-09-14T18:14:28.748047598Z +hash: a6a873d09ed9c3d890a70122e61efba992ead9850fe48f6fcb020d86800d4ade +updated: 2018-10-10T13:37:51.9703908-07:00 imports: - name: github.com/fluent/fluent-bit-go version: c4a158a6e3a793166c6ecfa2d5c80d71eada8959 @@ -38,8 +38,10 @@ imports: - diskcache - name: github.com/json-iterator/go version: f2b4162afba35581b6d4a50d3b8f34e33c144682 -- name: github.com/mitchellh/mapstructure - version: fa473d140ef3c6adf42d6b391fe76707f1f243c8 +- name: github.com/Microsoft/ApplicationInsights-Go + version: d2df5d440eda5372f24fcac03839a64d6cb5f7e5 + subpackages: + - appinsights - name: github.com/modern-go/concurrent version: bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 - name: github.com/modern-go/reflect2 diff --git a/source/code/go/src/plugins/glide.yaml b/source/code/go/src/plugins/glide.yaml index 403e1efc4..b2829391b 100644 
--- a/source/code/go/src/plugins/glide.yaml +++ b/source/code/go/src/plugins/glide.yaml @@ -1,10 +1,8 @@ -package: plugins +package: . import: - package: github.com/fluent/fluent-bit-go subpackages: - output -- package: github.com/mitchellh/mapstructure - version: ^1.0.0 - package: gopkg.in/natefinch/lumberjack.v2 version: ^2.1.0 - package: k8s.io/apimachinery @@ -15,3 +13,7 @@ import: subpackages: - kubernetes - rest +- package: github.com/Microsoft/ApplicationInsights-Go + version: ^0.4.2 + subpackages: + - appinsights diff --git a/source/code/go/src/plugins/oms.go b/source/code/go/src/plugins/oms.go index d20f11d57..807e00937 100644 --- a/source/code/go/src/plugins/oms.go +++ b/source/code/go/src/plugins/oms.go @@ -42,6 +42,8 @@ var ( OMSEndpoint string // Computer (Hostname) when ingesting into ContainerLog table Computer string + // WorkspaceID log analytics workspace id + WorkspaceID string ) var ( @@ -170,6 +172,7 @@ func updateKubeSystemContainerIDs() { pods, err := ClientSet.CoreV1().Pods("kube-system").List(metav1.ListOptions{}) if err != nil { Log("Error getting pods %s\nIt is ok to log here and continue. 
Kube-system logs will be collected", err.Error()) + continue } _ignoreIDSet := make(map[string]bool) @@ -269,7 +272,10 @@ func PostDataHelper(tailPluginRecords []map[interface{}]interface{}) int { return output.FLB_RETRY } - Log("Successfully flushed %d records in %s", len(dataItems), elapsed) + numRecords := len(dataItems) + Log("Successfully flushed %d records in %s", numRecords, elapsed) + FlushedRecordsCount += float64(numRecords) + FlushedRecordsTimeTaken += float64(elapsed / time.Millisecond) } return output.FLB_OK @@ -322,6 +328,7 @@ func InitializePlugin(pluginConfPath string) { log.Fatalf("Error Reading omsadmin configuration %s\n", err.Error()) } OMSEndpoint = omsadminConf["OMS_ENDPOINT"] + WorkspaceID = omsadminConf["WORKSPACE_ID"] Log("OMSEndpoint %s", OMSEndpoint) // Initialize image,name map refresh ticker diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go index 0efc1242d..37c9eb12b 100644 --- a/source/code/go/src/plugins/out_oms.go +++ b/source/code/go/src/plugins/out_oms.go @@ -5,6 +5,7 @@ import ( ) import ( "C" + "strings" "unsafe" ) @@ -19,6 +20,14 @@ func FLBPluginRegister(ctx unsafe.Pointer) int { func FLBPluginInit(ctx unsafe.Pointer) int { Log("Initializing out_oms go plugin for fluentbit") InitializePlugin(ContainerLogPluginConfFilePath) + enablePlugin := output.FLBPluginConfigKey(ctx, "EnableTelemetry") + telemetryPushInterval := output.FLBPluginConfigKey(ctx, "TelemetryPushInterval") + agentVersion := output.FLBPluginConfigKey(ctx, "AgentVersion") + + if strings.Compare(strings.ToLower(enablePlugin), "true") == 0 { + go SendContainerLogFlushRateMetric(telemetryPushInterval, agentVersion) + SendEvent(EventNameContainerLogInit, make(map[string]string)) + } return output.FLB_OK } @@ -48,6 +57,7 @@ func FLBPluginFlush(data unsafe.Pointer, length C.int, tag *C.char) int { // FLBPluginExit exits the plugin func FLBPluginExit() int { + defer TelemetryShutdown() KubeSystemContainersRefreshTicker.Stop() 
ContainerImageNameRefreshTicker.Stop() return output.FLB_OK diff --git a/source/code/go/src/plugins/telemetry.go b/source/code/go/src/plugins/telemetry.go new file mode 100644 index 000000000..4d4ab2371 --- /dev/null +++ b/source/code/go/src/plugins/telemetry.go @@ -0,0 +1,151 @@ +package main + +import ( + "encoding/base64" + "errors" + "os" + "strconv" + "strings" + "time" + + "github.com/Microsoft/ApplicationInsights-Go/appinsights" +) + +var ( + // FlushedRecordsCount indicates the number of flushed records in the current period + FlushedRecordsCount float64 + // FlushedRecordsTimeTaken indicates the cumulative time taken to flush the records for the current period + FlushedRecordsTimeTaken float64 + // CommonProperties indicates the dimensions that are sent with every event/metric + CommonProperties map[string]string + // TelemetryClient is the client used to send the telemetry + TelemetryClient appinsights.TelemetryClient + // ContainerLogTelemetryTicker sends telemetry periodically + ContainerLogTelemetryTicker *time.Ticker +) + +const ( + clusterTypeACS = "ACS" + clusterTypeAKS = "AKS" + controllerTypeDaemonSet = "DaemonSet" + controllerTypeReplicaSet = "ReplicaSet" + envAKSResourceID = "AKS_RESOURCE_ID" + envACSResourceName = "ACS_RESOURCE_NAME" + envAppInsightsInstrumentationKey = "APPLICATIONINSIGHTS_INSTRUMENTATIONKEY" + metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" + defaultTelemetryPushInterval = 300 + + // EventNameContainerLogInit name of the event + EventNameContainerLogInit = "ContainerLogPluginInitialized" +) + +// Initialize initializes the telemetry artifacts +func initialize(telemetryIntervalProperty string, agentVersion string) (int, error) { + + telemetryInterval, err := strconv.Atoi(telemetryIntervalProperty) + if err != nil { + telemetryInterval = defaultTelemetryPushInterval + } + + ContainerLogTelemetryTicker = time.NewTicker(time.Second * time.Duration(telemetryInterval)) + + encodedIkey := 
os.Getenv(envAppInsightsInstrumentationKey) + if encodedIkey == "" { + Log("App Insights IKey missing in Environment Variables \n") + return -1, errors.New("Missing App Insights Instrumentation Key Environment Variable") + } + + decIkey, err := base64.StdEncoding.DecodeString(encodedIkey) + if err != nil { + Log("Error Decoding encoded Instrumentation key %s", err.Error()) + return -1, err + } + + TelemetryClient = appinsights.NewTelemetryClient(string(decIkey)) + + CommonProperties = make(map[string]string) + CommonProperties["Computer"] = Computer + CommonProperties["WorkspaceID"] = WorkspaceID + CommonProperties["ControllerType"] = controllerTypeDaemonSet + CommonProperties["AgentVersion"] = agentVersion + + aksResourceID := os.Getenv(envAKSResourceID) + // if the aks resource id is not defined, it is most likely an ACS Cluster + if aksResourceID == "" { + CommonProperties["ACSResourceName"] = os.Getenv(envACSResourceName) + CommonProperties["ClusterType"] = clusterTypeACS + + CommonProperties["SubscriptionID"] = "" + CommonProperties["ResourceGroupName"] = "" + CommonProperties["ClusterName"] = "" + CommonProperties["Region"] = "" + + } else { + CommonProperties["ACSResourceName"] = "" + splitStrings := strings.Split(aksResourceID, "/") + CommonProperties["SubscriptionID"] = splitStrings[2] + CommonProperties["ResourceGroupName"] = splitStrings[4] + CommonProperties["ClusterName"] = splitStrings[8] + CommonProperties["ClusterType"] = clusterTypeAKS + + region := os.Getenv("AKS_REGION") + if region != "" { + CommonProperties["Region"] = region + } + } + + TelemetryClient.Context().CommonProperties = CommonProperties + return 0, nil +} + +// SendContainerLogFlushRateMetric is a go-routine that flushes the data periodically (every 5 mins to App Insights) +func SendContainerLogFlushRateMetric(telemetryIntervalProperty string, agentVersion string) { + + ret, err := initialize(telemetryIntervalProperty, agentVersion) + if ret != 0 || err != nil { + Log("Error During 
Telemetry Initialization :%s", err.Error()) + return + } + + for ; true; <-ContainerLogTelemetryTicker.C { + flushRate := FlushedRecordsCount / FlushedRecordsTimeTaken * 1000 + metric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) + Log("Flushed Records : %f Time Taken : %f flush Rate : %f", FlushedRecordsCount, FlushedRecordsTimeTaken, flushRate) + TelemetryClient.Track(metric) + FlushedRecordsCount = 0.0 + FlushedRecordsTimeTaken = 0.0 + } +} + +// TelemetryShutdown stops the ticker that sends data to App Insights periodically +func TelemetryShutdown() { + Log("Shutting down ContainerLog Telemetry\n") + ContainerLogTelemetryTicker.Stop() +} + +// SendEvent sends an event to App Insights +func SendEvent(eventName string, dimensions map[string]string) { + // this is because the TelemetryClient is initialized in a different goroutine. A simple wait loop here is just waiting for it to be initialized. This will happen only for the init event. Any subsequent Event should work just fine + for TelemetryClient == nil { + Log("Waiting for Telemetry Client to be initialized") + time.Sleep(1 * time.Second) + } + + // take a copy so the CommonProperties can be restored later + _commonProps := make(map[string]string) + for k, v := range TelemetryClient.Context().CommonProperties { + _commonProps[k] = v + } + + // add any extra dimensions + for k, v := range dimensions { + TelemetryClient.Context().CommonProperties[k] = v + } + + Log("Sending Event : %s\n", eventName) + event := appinsights.NewEventTelemetry(eventName) + TelemetryClient.Track(event) + + // restore original CommonProperties + TelemetryClient.Context().CommonProperties = _commonProps +} From 4b958dde94450e96d6d46351756c83500df7935f Mon Sep 17 00:00:00 2001 From: r-dilip Date: Fri, 12 Oct 2018 09:18:10 -0700 Subject: [PATCH 20/25] Fixing an issue with Send Init Event if Telemetry is not initialized properly, tab to whitespace in conf file --- installer/conf/td-agent-bit.conf | 2 +- 
source/code/go/src/plugins/out_oms.go | 7 ++-- source/code/go/src/plugins/telemetry.go | 44 ++++++++++++++----------- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index 5a1c105bf..6849a3744 100644 --- a/installer/conf/td-agent-bit.conf +++ b/installer/conf/td-agent-bit.conf @@ -19,6 +19,6 @@ [OUTPUT] Name oms EnableTelemetry true - TelemetryPushInterval 300 + TelemetryPushInterval 300 Match oms.container.log.* AgentVersion internaltest1004-2 \ No newline at end of file diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go index 37c9eb12b..2603368ab 100644 --- a/source/code/go/src/plugins/out_oms.go +++ b/source/code/go/src/plugins/out_oms.go @@ -20,13 +20,14 @@ func FLBPluginRegister(ctx unsafe.Pointer) int { func FLBPluginInit(ctx unsafe.Pointer) int { Log("Initializing out_oms go plugin for fluentbit") InitializePlugin(ContainerLogPluginConfFilePath) - enablePlugin := output.FLBPluginConfigKey(ctx, "EnableTelemetry") + enableTelemetry := output.FLBPluginConfigKey(ctx, "EnableTelemetry") telemetryPushInterval := output.FLBPluginConfigKey(ctx, "TelemetryPushInterval") agentVersion := output.FLBPluginConfigKey(ctx, "AgentVersion") - if strings.Compare(strings.ToLower(enablePlugin), "true") == 0 { + if strings.Compare(strings.ToLower(enableTelemetry), "true") == 0 { go SendContainerLogFlushRateMetric(telemetryPushInterval, agentVersion) - SendEvent(EventNameContainerLogInit, make(map[string]string)) + } else { + Log("Telemetry is not enabled for the plugin %s \n", output.FLBPluginConfigKey(ctx, "Name")) } return output.FLB_OK } diff --git a/source/code/go/src/plugins/telemetry.go b/source/code/go/src/plugins/telemetry.go index 4d4ab2371..c2f565a45 100644 --- a/source/code/go/src/plugins/telemetry.go +++ b/source/code/go/src/plugins/telemetry.go @@ -4,6 +4,7 @@ import ( "encoding/base64" "errors" "os" + "runtime" "strconv" "strings" "time" @@ 
-25,39 +26,40 @@ var ( ) const ( - clusterTypeACS = "ACS" - clusterTypeAKS = "AKS" - controllerTypeDaemonSet = "DaemonSet" - controllerTypeReplicaSet = "ReplicaSet" - envAKSResourceID = "AKS_RESOURCE_ID" - envACSResourceName = "ACS_RESOURCE_NAME" - envAppInsightsInstrumentationKey = "APPLICATIONINSIGHTS_INSTRUMENTATIONKEY" - metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" - defaultTelemetryPushInterval = 300 + clusterTypeACS = "ACS" + clusterTypeAKS = "AKS" + controllerTypeDaemonSet = "DaemonSet" + controllerTypeReplicaSet = "ReplicaSet" + envAKSResourceID = "AKS_RESOURCE_ID" + envACSResourceName = "ACS_RESOURCE_NAME" + envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" + metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" + defaultTelemetryPushInterval = 300 // EventNameContainerLogInit name of the event EventNameContainerLogInit = "ContainerLogPluginInitialized" ) // Initialize initializes the telemetry artifacts -func initialize(telemetryIntervalProperty string, agentVersion string) (int, error) { +func initialize(telemetryPushIntervalProperty string, agentVersion string) (int, error) { - telemetryInterval, err := strconv.Atoi(telemetryIntervalProperty) + telemetryPushInterval, err := strconv.Atoi(telemetryPushIntervalProperty) if err != nil { - telemetryInterval = defaultTelemetryPushInterval + Log("Error Converting telemetryPushIntervalProperty %s. Using Default Interval... 
%d \n", telemetryPushIntervalProperty, defaultTelemetryPushInterval) + telemetryPushInterval = defaultTelemetryPushInterval } - ContainerLogTelemetryTicker = time.NewTicker(time.Second * time.Duration(telemetryInterval)) + ContainerLogTelemetryTicker = time.NewTicker(time.Second * time.Duration(telemetryPushInterval)) - encodedIkey := os.Getenv(envAppInsightsInstrumentationKey) + encodedIkey := os.Getenv(envAppInsightsAuth) if encodedIkey == "" { - Log("App Insights IKey missing in Environment Variables \n") - return -1, errors.New("Missing App Insights Instrumentation Key Environment Variable") + Log("Environment Variable Missing \n") + return -1, errors.New("Missing Environment Variable") } decIkey, err := base64.StdEncoding.DecodeString(encodedIkey) if err != nil { - Log("Error Decoding encoded Instrumentation key %s", err.Error()) + Log("Decoding Error %s", err.Error()) return -1, err } @@ -99,14 +101,16 @@ func initialize(telemetryIntervalProperty string, agentVersion string) (int, err } // SendContainerLogFlushRateMetric is a go-routine that flushes the data periodically (every 5 mins to App Insights) -func SendContainerLogFlushRateMetric(telemetryIntervalProperty string, agentVersion string) { +func SendContainerLogFlushRateMetric(telemetryPushIntervalProperty string, agentVersion string) { - ret, err := initialize(telemetryIntervalProperty, agentVersion) + ret, err := initialize(telemetryPushIntervalProperty, agentVersion) if ret != 0 || err != nil { Log("Error During Telemetry Initialization :%s", err.Error()) - return + runtime.Goexit() } + SendEvent(EventNameContainerLogInit, make(map[string]string)) + for ; true; <-ContainerLogTelemetryTicker.C { flushRate := FlushedRecordsCount / FlushedRecordsTimeTaken * 1000 metric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) From 510ef9f95b8e5de04e7b5952e24458374d6cbf6b Mon Sep 17 00:00:00 2001 From: r-dilip Date: Fri, 12 Oct 2018 10:45:14 -0700 Subject: [PATCH 21/25] PR feedback --- 
installer/conf/td-agent-bit.conf | 10 ++++----- source/code/go/src/plugins/out_oms.go | 8 +++---- source/code/go/src/plugins/telemetry.go | 30 +++++++++++-------------- 3 files changed, 22 insertions(+), 26 deletions(-) diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index 6849a3744..b01b3a352 100644 --- a/installer/conf/td-agent-bit.conf +++ b/installer/conf/td-agent-bit.conf @@ -17,8 +17,8 @@ Skip_Long_Lines On [OUTPUT] - Name oms - EnableTelemetry true - TelemetryPushInterval 300 - Match oms.container.log.* - AgentVersion internaltest1004-2 \ No newline at end of file + Name oms + EnableTelemetry true + TelemetryPushIntervalSeconds 300 + Match oms.container.log.* + AgentVersion internaltest1004-2 \ No newline at end of file diff --git a/source/code/go/src/plugins/out_oms.go b/source/code/go/src/plugins/out_oms.go index 2603368ab..732ae5216 100644 --- a/source/code/go/src/plugins/out_oms.go +++ b/source/code/go/src/plugins/out_oms.go @@ -21,13 +21,13 @@ func FLBPluginInit(ctx unsafe.Pointer) int { Log("Initializing out_oms go plugin for fluentbit") InitializePlugin(ContainerLogPluginConfFilePath) enableTelemetry := output.FLBPluginConfigKey(ctx, "EnableTelemetry") - telemetryPushInterval := output.FLBPluginConfigKey(ctx, "TelemetryPushInterval") - agentVersion := output.FLBPluginConfigKey(ctx, "AgentVersion") - if strings.Compare(strings.ToLower(enableTelemetry), "true") == 0 { + telemetryPushInterval := output.FLBPluginConfigKey(ctx, "TelemetryPushIntervalSeconds") + agentVersion := output.FLBPluginConfigKey(ctx, "AgentVersion") go SendContainerLogFlushRateMetric(telemetryPushInterval, agentVersion) } else { Log("Telemetry is not enabled for the plugin %s \n", output.FLBPluginConfigKey(ctx, "Name")) + return output.FLB_OK } return output.FLB_OK } @@ -58,7 +58,7 @@ func FLBPluginFlush(data unsafe.Pointer, length C.int, tag *C.char) int { // FLBPluginExit exits the plugin func FLBPluginExit() int { - defer TelemetryShutdown() + 
ContainerLogTelemetryTicker.Stop() KubeSystemContainersRefreshTicker.Stop() ContainerImageNameRefreshTicker.Stop() return output.FLB_OK diff --git a/source/code/go/src/plugins/telemetry.go b/source/code/go/src/plugins/telemetry.go index c2f565a45..4396ea655 100644 --- a/source/code/go/src/plugins/telemetry.go +++ b/source/code/go/src/plugins/telemetry.go @@ -26,15 +26,15 @@ var ( ) const ( - clusterTypeACS = "ACS" - clusterTypeAKS = "AKS" - controllerTypeDaemonSet = "DaemonSet" - controllerTypeReplicaSet = "ReplicaSet" - envAKSResourceID = "AKS_RESOURCE_ID" - envACSResourceName = "ACS_RESOURCE_NAME" - envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" - metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" - defaultTelemetryPushInterval = 300 + clusterTypeACS = "ACS" + clusterTypeAKS = "AKS" + controllerTypeDaemonSet = "DaemonSet" + controllerTypeReplicaSet = "ReplicaSet" + envAKSResourceID = "AKS_RESOURCE_ID" + envACSResourceName = "ACS_RESOURCE_NAME" + envAppInsightsAuth = "APPLICATIONINSIGHTS_AUTH" + metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" + defaultTelemetryPushIntervalSeconds = 300 // EventNameContainerLogInit name of the event EventNameContainerLogInit = "ContainerLogPluginInitialized" @@ -45,8 +45,8 @@ func initialize(telemetryPushIntervalProperty string, agentVersion string) (int, telemetryPushInterval, err := strconv.Atoi(telemetryPushIntervalProperty) if err != nil { - Log("Error Converting telemetryPushIntervalProperty %s. Using Default Interval... %d \n", telemetryPushIntervalProperty, defaultTelemetryPushInterval) - telemetryPushInterval = defaultTelemetryPushInterval + Log("Error Converting telemetryPushIntervalProperty %s. Using Default Interval... 
%d \n", telemetryPushIntervalProperty, defaultTelemetryPushIntervalSeconds) + telemetryPushInterval = defaultTelemetryPushIntervalSeconds } ContainerLogTelemetryTicker = time.NewTicker(time.Second * time.Duration(telemetryPushInterval)) @@ -116,17 +116,13 @@ func SendContainerLogFlushRateMetric(telemetryPushIntervalProperty string, agent metric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) Log("Flushed Records : %f Time Taken : %f flush Rate : %f", FlushedRecordsCount, FlushedRecordsTimeTaken, flushRate) TelemetryClient.Track(metric) + DataUpdateMutex.Lock() FlushedRecordsCount = 0.0 FlushedRecordsTimeTaken = 0.0 + DataUpdateMutex.Unlock() } } -// TelemetryShutdown stops the ticker that sends data to App Insights periodically -func TelemetryShutdown() { - Log("Shutting down ContainerLog Telemetry\n") - ContainerLogTelemetryTicker.Stop() -} - // SendEvent sends an event to App Insights func SendEvent(eventName string, dimensions map[string]string) { // this is because the TelemetryClient is initialized in a different goroutine. A simple wait loop here is just waiting for it to be initialized. This will happen only for the init event. 
Any subsequent Event should work just fine From 684c39b63581fab69595885ec2c98942098be4f6 Mon Sep 17 00:00:00 2001 From: r-dilip Date: Fri, 12 Oct 2018 15:44:25 -0700 Subject: [PATCH 22/25] PR feedback --- source/code/go/src/plugins/telemetry.go | 42 +++++++++---------------- 1 file changed, 15 insertions(+), 27 deletions(-) diff --git a/source/code/go/src/plugins/telemetry.go b/source/code/go/src/plugins/telemetry.go index 4396ea655..621d88eec 100644 --- a/source/code/go/src/plugins/telemetry.go +++ b/source/code/go/src/plugins/telemetry.go @@ -81,19 +81,21 @@ func initialize(telemetryPushIntervalProperty string, agentVersion string) (int, CommonProperties["ResourceGroupName"] = "" CommonProperties["ClusterName"] = "" CommonProperties["Region"] = "" + CommonProperties["AKS_RESOURCE_ID"] = "" } else { CommonProperties["ACSResourceName"] = "" + CommonProperties["AKS_RESOURCE_ID"] = aksResourceID splitStrings := strings.Split(aksResourceID, "/") - CommonProperties["SubscriptionID"] = splitStrings[2] - CommonProperties["ResourceGroupName"] = splitStrings[4] - CommonProperties["ClusterName"] = splitStrings[8] + if len(aksResourceID) > 0 && len(aksResourceID) < 10 { + CommonProperties["SubscriptionID"] = splitStrings[2] + CommonProperties["ResourceGroupName"] = splitStrings[4] + CommonProperties["ClusterName"] = splitStrings[8] + } CommonProperties["ClusterType"] = clusterTypeAKS region := os.Getenv("AKS_REGION") - if region != "" { - CommonProperties["Region"] = region - } + CommonProperties["Region"] = region } TelemetryClient.Context().CommonProperties = CommonProperties @@ -112,40 +114,26 @@ func SendContainerLogFlushRateMetric(telemetryPushIntervalProperty string, agent SendEvent(EventNameContainerLogInit, make(map[string]string)) for ; true; <-ContainerLogTelemetryTicker.C { + DataUpdateMutex.Lock() flushRate := FlushedRecordsCount / FlushedRecordsTimeTaken * 1000 - metric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) Log("Flushed Records : 
%f Time Taken : %f flush Rate : %f", FlushedRecordsCount, FlushedRecordsTimeTaken, flushRate) - TelemetryClient.Track(metric) - DataUpdateMutex.Lock() FlushedRecordsCount = 0.0 FlushedRecordsTimeTaken = 0.0 DataUpdateMutex.Unlock() + metric := appinsights.NewMetricTelemetry(metricNameAvgFlushRate, flushRate) + TelemetryClient.Track(metric) } } // SendEvent sends an event to App Insights func SendEvent(eventName string, dimensions map[string]string) { - // this is because the TelemetryClient is initialized in a different goroutine. A simple wait loop here is just waiting for it to be initialized. This will happen only for the init event. Any subsequent Event should work just fine - for TelemetryClient == nil { - Log("Waiting for Telemetry Client to be initialized") - time.Sleep(1 * time.Second) - } - - // take a copy so the CommonProperties can be restored later - _commonProps := make(map[string]string) - for k, v := range TelemetryClient.Context().CommonProperties { - _commonProps[k] = v - } + Log("Sending Event : %s\n", eventName) + event := appinsights.NewEventTelemetry(eventName) - // add any extra dimensions + // add any extra Properties for k, v := range dimensions { - TelemetryClient.Context().CommonProperties[k] = v + event.Properties[k] = v } - Log("Sending Event : %s\n", eventName) - event := appinsights.NewEventTelemetry(eventName) TelemetryClient.Track(event) - - // restore original CommonProperties - TelemetryClient.Context().CommonProperties = _commonProps } From e165275bb8c346051cf851fb36dbb91ad7cf8afc Mon Sep 17 00:00:00 2001 From: Dilip Raghunathan Date: Mon, 15 Oct 2018 15:14:41 -0700 Subject: [PATCH 23/25] Sending an event every 5 mins(Heartbeat) (#146) --- installer/conf/td-agent-bit.conf | 2 -- source/code/go/src/plugins/telemetry.go | 7 ++++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index b01b3a352..2553f405f 100644 --- a/installer/conf/td-agent-bit.conf 
+++ b/installer/conf/td-agent-bit.conf @@ -12,8 +12,6 @@ Parser docker Mem_Buf_Limit 30m Path_Key filepath - Buffer_Chunk_Size 1m - Buffer_Max_Size 1m Skip_Long_Lines On [OUTPUT] diff --git a/source/code/go/src/plugins/telemetry.go b/source/code/go/src/plugins/telemetry.go index 621d88eec..b1bc4439b 100644 --- a/source/code/go/src/plugins/telemetry.go +++ b/source/code/go/src/plugins/telemetry.go @@ -36,8 +36,8 @@ const ( metricNameAvgFlushRate = "ContainerLogAvgRecordsFlushedPerSec" defaultTelemetryPushIntervalSeconds = 300 - // EventNameContainerLogInit name of the event - EventNameContainerLogInit = "ContainerLogPluginInitialized" + eventNameContainerLogInit = "ContainerLogPluginInitialized" + eventNameDaemonSetHeartbeat = "ContainerLogDaemonSetHeartbeatEvent" ) // Initialize initializes the telemetry artifacts @@ -111,9 +111,10 @@ func SendContainerLogFlushRateMetric(telemetryPushIntervalProperty string, agent runtime.Goexit() } - SendEvent(EventNameContainerLogInit, make(map[string]string)) + SendEvent(eventNameContainerLogInit, make(map[string]string)) for ; true; <-ContainerLogTelemetryTicker.C { + SendEvent(eventNameDaemonSetHeartbeat, make(map[string]string)) DataUpdateMutex.Lock() flushRate := FlushedRecordsCount / FlushedRecordsTimeTaken * 1000 Log("Flushed Records : %f Time Taken : %f flush Rate : %f", FlushedRecordsCount, FlushedRecordsTimeTaken, flushRate) From cfe1ca94c259c533a938834a54f1279e703d7e4b Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 16 Oct 2018 13:03:30 -0700 Subject: [PATCH 24/25] PR feedback to cleanup removed workflows --- installer/conf/container.conf | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/installer/conf/container.conf b/installer/conf/container.conf index 1916300cb..17317871c 100755 --- a/installer/conf/container.conf +++ b/installer/conf/container.conf @@ -46,7 +46,7 @@ # Filter for correct format to endpoint - + type filter_container @@ -63,19 +63,6 @@ max_retry_wait 
9m - - type out_oms_api - log_level debug - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_containerprocess*.buffer - buffer_queue_limit 20 - flush_interval 20s - retry_limit 10 - retry_wait 15s - max_retry_wait 9m - - type out_oms log_level debug @@ -102,19 +89,6 @@ max_retry_wait 9m - - type out_oms - log_level debug - buffer_chunk_limit 20m - buffer_type file - buffer_path %STATE_DIR_WS%/out_oms_servicelog*.buffer - buffer_queue_limit 20 - flush_interval 20s - retry_limit 10 - retry_wait 15s - max_retry_wait 9m - - type out_oms log_level debug From 892b51c6b166cf10424bf5b6768633f44aa4cfa7 Mon Sep 17 00:00:00 2001 From: Vishwanath Date: Tue, 16 Oct 2018 13:04:55 -0700 Subject: [PATCH 25/25] updating agent version for telemetry --- installer/conf/td-agent-bit.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/installer/conf/td-agent-bit.conf b/installer/conf/td-agent-bit.conf index 2553f405f..667f2edc2 100644 --- a/installer/conf/td-agent-bit.conf +++ b/installer/conf/td-agent-bit.conf @@ -19,4 +19,4 @@ EnableTelemetry true TelemetryPushIntervalSeconds 300 Match oms.container.log.* - AgentVersion internaltest1004-2 \ No newline at end of file + AgentVersion ciprod10162018