Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 38 additions & 0 deletions changelog/fragments/ansible-helm-cobra.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
entries:
- description: >
The Ansible and Helm operators now use controller-runtime's zap package
to define logging flags.
kind: change
breaking: true
migration:
header: Use new logging flags when running the Ansible and Helm operators
body: |
The Ansible and Helm operators now use controller-runtime's zap package
to define logging flags.

The `--zap-sample` and `--zap-time-encoding` flags have been removed since
they are not present in controller-runtime's flagset. These flags are no
longer supported.

The `--zap-level` flag is called `--zap-log-level` now. Rename any usage of
`--zap-level` to `--zap-log-level`.

- description: >
The Ansible and Helm operators have a `version` subcommand that prints the
version information for the `ansible-operator` and `helm-operator` binaries.
kind: addition

- description: >
    The Ansible and Helm operators now use a `run` subcommand to run the operator.
kind: change
breaking: true
migration:
header: Core Ansible and Helm operator logic moved to `run` subcommand
body: |
If you are using the `ansible-operator` and `helm-operator` binaries
directly, update your usage to call `ansible-operator run` and
`helm-operator run` (e.g. in your Makefile's `make run` target).

If you are using the base image and you are not overriding the operator
entrypoint, no change is necessary because the base image has been updated
to call the `run` subcommand by default.
233 changes: 10 additions & 223 deletions cmd/ansible-operator/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,236 +15,23 @@
package main

import (
"fmt"
"os"
"runtime"
"strconv"
"strings"
"log"

"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/healthz"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"github.com/spf13/cobra"

"github.com/operator-framework/operator-sdk/internal/ansible/controller"
"github.com/operator-framework/operator-sdk/internal/ansible/flags"
"github.com/operator-framework/operator-sdk/internal/ansible/proxy"
"github.com/operator-framework/operator-sdk/internal/ansible/proxy/controllermap"
"github.com/operator-framework/operator-sdk/internal/ansible/runner"
"github.com/operator-framework/operator-sdk/internal/ansible/watches"
"github.com/operator-framework/operator-sdk/internal/log/zap"
"github.com/operator-framework/operator-sdk/internal/util/k8sutil"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/operator-framework/operator-sdk/internal/cmd/ansible-operator/run"
"github.com/operator-framework/operator-sdk/internal/cmd/ansible-operator/version"
)

var (
metricsHost = "0.0.0.0"
log = logf.Log.WithName("cmd")
healthProbePort int32 = 6789
)

func printVersion() {
log.Info("Version",
"Go Version", runtime.Version(),
"GOOS", runtime.GOOS,
"GOARCH", runtime.GOARCH,
"ansible-operator", sdkVersion.Version)
}

func main() {
f := &flags.Flags{}
f.AddTo(pflag.CommandLine)
pflag.Parse()
logf.SetLogger(zap.Logger())

printVersion()

cfg, err := config.GetConfig()
if err != nil {
log.Error(err, "Failed to get config.")
os.Exit(1)
}

// Deprecated: OPERATOR_NAME environment variable is an artifact of the
// legacy operator-sdk project scaffolding. Flag `--leader-election-id`
// should be used instead.
if operatorName, found := os.LookupEnv("OPERATOR_NAME"); found {
log.Info("Environment variable OPERATOR_NAME has been deprecated, use --leader-election-id instead.")
if pflag.CommandLine.Lookup("leader-election-id").Changed {
log.Info("Ignoring OPERATOR_NAME environment variable since --leader-election-id is set")
} else {
f.LeaderElectionID = operatorName
}
}

// Set default manager options
// TODO: probably should expose the host & port as an environment variables
options := manager.Options{
HealthProbeBindAddress: fmt.Sprintf("%s:%d", metricsHost, healthProbePort),
MetricsBindAddress: f.MetricsAddress,
LeaderElection: f.EnableLeaderElection,
LeaderElectionID: f.LeaderElectionID,
LeaderElectionNamespace: f.LeaderElectionNamespace,
NewClient: func(cache cache.Cache, config *rest.Config, options client.Options) (client.Client, error) {
c, err := client.New(config, options)
if err != nil {
return nil, err
}
return &client.DelegatingClient{
Reader: cache,
Writer: c,
StatusClient: c,
}, nil
},
}

namespace, found := os.LookupEnv(k8sutil.WatchNamespaceEnvVar)
log = log.WithValues("Namespace", namespace)
if found {
if namespace == metav1.NamespaceAll {
log.Info("Watching all namespaces.")
options.Namespace = metav1.NamespaceAll
} else {
if strings.Contains(namespace, ",") {
log.Info("Watching multiple namespaces.")
options.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(namespace, ","))
} else {
log.Info("Watching single namespace.")
options.Namespace = namespace
}
}
} else {
log.Info(fmt.Sprintf("%v environment variable not set. Watching all namespaces.",
k8sutil.WatchNamespaceEnvVar))
options.Namespace = metav1.NamespaceAll
}

err = setAnsibleEnvVars(f)
if err != nil {
log.Error(err, "Failed to set environment variable.")
os.Exit(1)
}

// Create a new manager to provide shared dependencies and start components
mgr, err := manager.New(cfg, options)
if err != nil {
log.Error(err, "Failed to create a new manager.")
os.Exit(1)
}

cMap := controllermap.NewControllerMap()
watches, err := watches.Load(f.WatchesFile, f.MaxConcurrentReconciles, f.AnsibleVerbosity)
if err != nil {
log.Error(err, "Failed to load watches.")
os.Exit(1)
}
for _, w := range watches {
runner, err := runner.New(w)
if err != nil {
log.Error(err, "Failed to create runner")
os.Exit(1)
}

ctr := controller.Add(mgr, controller.Options{
GVK: w.GroupVersionKind,
Runner: runner,
ManageStatus: w.ManageStatus,
AnsibleDebugLogs: getAnsibleDebugLog(),
MaxConcurrentReconciles: w.MaxConcurrentReconciles,
ReconcilePeriod: w.ReconcilePeriod,
Selector: w.Selector,
})
if ctr == nil {
log.Error(fmt.Errorf("failed to add controller for GVK %v", w.GroupVersionKind.String()), "")
os.Exit(1)
}

cMap.Store(w.GroupVersionKind, &controllermap.Contents{Controller: *ctr,
WatchDependentResources: w.WatchDependentResources,
WatchClusterScopedResources: w.WatchClusterScopedResources,
OwnerWatchMap: controllermap.NewWatchMap(),
AnnotationWatchMap: controllermap.NewWatchMap(),
}, w.Blacklist)
}

err = mgr.AddHealthzCheck("ping", healthz.Ping)
if err != nil {
log.Error(err, "Failed to add Healthz check.")
}

done := make(chan error)

// start the proxy
err = proxy.Run(done, proxy.Options{
Address: "localhost",
Port: 8888,
KubeConfig: mgr.GetConfig(),
Cache: mgr.GetCache(),
RESTMapper: mgr.GetRESTMapper(),
ControllerMap: cMap,
OwnerInjection: f.InjectOwnerRef,
WatchedNamespaces: []string{namespace},
})
if err != nil {
log.Error(err, "Error starting proxy.")
os.Exit(1)
root := cobra.Command{
Use: "ansible-operator",
}

// start the operator
go func() {
done <- mgr.Start(signals.SetupSignalHandler())
}()

// wait for either to finish
err = <-done
if err != nil {
log.Error(err, "Proxy or operator exited with error.")
os.Exit(1)
}
log.Info("Exiting.")
}

// getAnsibleDebugLog returns the value of the ANSIBLE_DEBUG_LOGS environment
// variable, which controls whether the full Ansible logs are printed. It
// defaults to false when the variable is unset or cannot be parsed as a
// boolean (per strconv.ParseBool).
func getAnsibleDebugLog() bool {
	const envVar = "ANSIBLE_DEBUG_LOGS"
	val := false
	if envVal, ok := os.LookupEnv(envVar); ok {
		if parsed, err := strconv.ParseBool(envVal); err != nil {
			// Malformed value: keep the default rather than failing startup.
			log.Info("Could not parse environment variable as a boolean; using default value",
				"envVar", envVar, "default", val)
		} else {
			val = parsed
		}
	} else {
		// Fix: the original passed (envVar, val) here, using the variable's
		// name as the structured-log key; use the "default" key to match the
		// parse-failure branch above. The redundant `else if !ok` is now a
		// plain `else`.
		log.Info("Environment variable not set; using default value", "envVar", envVar,
			"default", val)
	}
	return val
}

// setAnsibleEnvVars will set environment variables based on CLI flags
func setAnsibleEnvVars(f *flags.Flags) error {
if len(f.AnsibleRolesPath) > 0 {
if err := os.Setenv(flags.AnsibleRolesPathEnvVar, f.AnsibleRolesPath); err != nil {
return fmt.Errorf("failed to set environment variable %s: %v", flags.AnsibleRolesPathEnvVar, err)
}
log.Info("Set the environment variable", "envVar", flags.AnsibleRolesPathEnvVar,
"value", f.AnsibleRolesPath)
}
root.AddCommand(run.NewCmd())
root.AddCommand(version.NewCmd())

if len(f.AnsibleCollectionsPath) > 0 {
if err := os.Setenv(flags.AnsibleCollectionsPathEnvVar, f.AnsibleCollectionsPath); err != nil {
return fmt.Errorf("failed to set environment variable %s: %v", flags.AnsibleCollectionsPathEnvVar, err)
}
log.Info("Set the environment variable", "envVar", flags.AnsibleCollectionsPathEnvVar,
"value", f.AnsibleCollectionsPath)
if err := root.Execute(); err != nil {
log.Fatal(err)
}
return nil
}
Loading