Merged
68 changes: 36 additions & 32 deletions api/v1alpha1/cluster/cluster_config.go
@@ -2,22 +2,10 @@ package cluster

// ClusterConfig represents the cluster configuration
type ClusterConfig struct {
Enabled *bool `yaml:"enabled"`
Driver *string `yaml:"driver"`
ControlPlanes struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
} `yaml:"controlplanes,omitempty"`
Workers struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
} `yaml:"workers,omitempty"`
Enabled *bool `yaml:"enabled"`
Driver *string `yaml:"driver"`
ControlPlanes NodeGroupConfig `yaml:"controlplanes,omitempty"`
Workers NodeGroupConfig `yaml:"workers,omitempty"`
}

// NodeConfig represents the node configuration
@@ -28,6 +16,16 @@ type NodeConfig struct {
HostPorts []string `yaml:"hostports,omitempty"`
}

// NodeGroupConfig represents the configuration for a group of nodes
type NodeGroupConfig struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}

// Merge performs a deep merge of the current ClusterConfig with another ClusterConfig.
func (base *ClusterConfig) Merge(overlay *ClusterConfig) {
if overlay.Enabled != nil {
@@ -51,6 +49,14 @@ func (base *ClusterConfig) Merge(overlay *ClusterConfig) {
base.ControlPlanes.Nodes[key] = node
}
}
if overlay.ControlPlanes.HostPorts != nil {
base.ControlPlanes.HostPorts = make([]string, len(overlay.ControlPlanes.HostPorts))
copy(base.ControlPlanes.HostPorts, overlay.ControlPlanes.HostPorts)
}
if overlay.ControlPlanes.Volumes != nil {
base.ControlPlanes.Volumes = make([]string, len(overlay.ControlPlanes.Volumes))
copy(base.ControlPlanes.Volumes, overlay.ControlPlanes.Volumes)
}
if overlay.Workers.Count != nil {
base.Workers.Count = overlay.Workers.Count
}
@@ -70,6 +76,10 @@ func (base *ClusterConfig) Merge(overlay *ClusterConfig) {
base.Workers.HostPorts = make([]string, len(overlay.Workers.HostPorts))
copy(base.Workers.HostPorts, overlay.Workers.HostPorts)
}
if overlay.Workers.Volumes != nil {
base.Workers.Volumes = make([]string, len(overlay.Workers.Volumes))
copy(base.Workers.Volumes, overlay.Workers.Volumes)
}
}

// Copy creates a deep copy of the ClusterConfig object
@@ -84,52 +94,46 @@ func (c *ClusterConfig) Copy() *ClusterConfig {
Hostname: node.Hostname,
Node: node.Node,
Endpoint: node.Endpoint,
HostPorts: append([]string{}, node.HostPorts...), // Copy HostPorts for each node
HostPorts: append([]string{}, node.HostPorts...),
}
}
controlPlanesHostPortsCopy := make([]string, len(c.ControlPlanes.HostPorts))
copy(controlPlanesHostPortsCopy, c.ControlPlanes.HostPorts)
controlPlanesVolumesCopy := make([]string, len(c.ControlPlanes.Volumes))
copy(controlPlanesVolumesCopy, c.ControlPlanes.Volumes)

workersNodesCopy := make(map[string]NodeConfig, len(c.Workers.Nodes))
for key, node := range c.Workers.Nodes {
workersNodesCopy[key] = NodeConfig{
Hostname: node.Hostname,
Node: node.Node,
Endpoint: node.Endpoint,
HostPorts: append([]string{}, node.HostPorts...), // Copy HostPorts for each node
HostPorts: append([]string{}, node.HostPorts...),
}
}
workersHostPortsCopy := make([]string, len(c.Workers.HostPorts))
copy(workersHostPortsCopy, c.Workers.HostPorts)
workersVolumesCopy := make([]string, len(c.Workers.Volumes))
copy(workersVolumesCopy, c.Workers.Volumes)

return &ClusterConfig{
Enabled: c.Enabled,
Driver: c.Driver,
ControlPlanes: struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
}{
ControlPlanes: NodeGroupConfig{
Count: c.ControlPlanes.Count,
CPU: c.ControlPlanes.CPU,
Memory: c.ControlPlanes.Memory,
Nodes: controlPlanesNodesCopy,
HostPorts: controlPlanesHostPortsCopy,
Volumes: controlPlanesVolumesCopy,
},
Workers: struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
}{
Workers: NodeGroupConfig{
Count: c.Workers.Count,
CPU: c.Workers.CPU,
Memory: c.Workers.Memory,
Nodes: workersNodesCopy,
HostPorts: workersHostPortsCopy,
Volumes: workersVolumesCopy,
},
}
}
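
The core change in this file: the two identical anonymous structs behind `ControlPlanes` and `Workers` are replaced by the shared `NodeGroupConfig` type, which also gains a `Volumes` field, and `Merge`/`Copy` treat `Volumes` the same way they already treat `HostPorts`: a non-nil overlay slice replaces the base slice outright rather than appending, and `Copy` allocates fresh slices so the copy is independent. A minimal sketch of the merge semantics, written as an illustrative example test inside the `cluster` package (not part of this PR):

```go
package cluster

import "fmt"

// Illustrative only, not part of this PR: a non-nil overlay slice replaces
// the base slice, while a nil overlay slice leaves the base value untouched.
func ExampleClusterConfig_Merge() {
	base := &ClusterConfig{
		Workers: NodeGroupConfig{
			HostPorts: []string{"8080"},
			Volumes:   []string{"${WINDSOR_PROJECT_ROOT}/.volumes/base:/var/local/base"},
		},
	}
	overlay := &ClusterConfig{
		Workers: NodeGroupConfig{
			Volumes: []string{"${WINDSOR_PROJECT_ROOT}/.volumes/overlay:/var/local/overlay"},
		},
	}

	base.Merge(overlay)

	fmt.Println(base.Workers.HostPorts) // overlay.HostPorts is nil, so the base slice is kept
	fmt.Println(base.Workers.Volumes)   // overlay.Volumes is non-nil, so it replaces the base slice
	// Output:
	// [8080]
	// [${WINDSOR_PROJECT_ROOT}/.volumes/overlay:/var/local/overlay]
}
```

Because replacement is wholesale, an overlay that wants to keep the base volumes and add one more has to repeat the base entries.
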
62 changes: 54 additions & 8 deletions api/v1alpha1/cluster/cluster_config_test.go
@@ -28,28 +28,37 @@ func TestClusterConfig_Merge(t *testing.T) {
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: ptrInt(3),
CPU: ptrInt(4),
Memory: ptrInt(8192),
Nodes: map[string]NodeConfig{
"node1": {Hostname: ptrString("base-node1")},
"node1": {
Hostname: ptrString("base-node1"),
},
},
HostPorts: []string{"1000:1000/tcp", "2000:2000/tcp"},
Volumes: []string{"${WINDSOR_PROJECT_ROOT}/base/volume1:/var/local/base1"},
},
Workers: struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: ptrInt(5),
CPU: ptrInt(2),
Memory: ptrInt(4096),
Nodes: map[string]NodeConfig{
"worker1": {Hostname: ptrString("base-worker1")},
"worker1": {
Hostname: ptrString("base-worker1"),
},
},
HostPorts: []string{"8080", "9090"},
Volumes: []string{"${WINDSOR_PROJECT_ROOT}/base/worker/volume1:/var/local/worker1"},
},
}

@@ -62,28 +71,37 @@ func TestClusterConfig_Merge(t *testing.T) {
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: ptrInt(1),
CPU: ptrInt(2),
Memory: ptrInt(4096),
Nodes: map[string]NodeConfig{
"node2": {Hostname: ptrString("overlay-node2")},
"node2": {
Hostname: ptrString("overlay-node2"),
},
},
HostPorts: []string{"3000:3000/tcp", "4000:4000/tcp"},
Volumes: []string{"${WINDSOR_PROJECT_ROOT}/overlay/volume2:/var/local/overlay2"},
},
Workers: struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: ptrInt(3),
CPU: ptrInt(1),
Memory: ptrInt(2048),
Nodes: map[string]NodeConfig{
"worker2": {Hostname: ptrString("overlay-worker2")},
"worker2": {
Hostname: ptrString("overlay-worker2"),
},
},
HostPorts: []string{"8082", "9092"},
Volumes: []string{"${WINDSOR_PROJECT_ROOT}/overlay/worker/volume2:/var/local/worker2"},
},
}

@@ -95,8 +113,11 @@ func TestClusterConfig_Merge(t *testing.T) {
if base.Driver == nil || *base.Driver != "overlay-driver" {
t.Errorf("Driver mismatch: expected 'overlay-driver', got '%s'", *base.Driver)
}
if len(base.ControlPlanes.HostPorts) != 2 || base.ControlPlanes.HostPorts[0] != "3000:3000/tcp" || base.ControlPlanes.HostPorts[1] != "4000:4000/tcp" {
t.Errorf("ControlPlanes HostPorts mismatch: expected ['3000:3000/tcp', '4000:4000/tcp'], got %v", base.ControlPlanes.HostPorts)
}
if len(base.Workers.HostPorts) != 2 || base.Workers.HostPorts[0] != "8082" || base.Workers.HostPorts[1] != "9092" {
t.Errorf("HostPorts mismatch: expected ['8082', '9092'], got %v", base.Workers.HostPorts)
t.Errorf("Workers HostPorts mismatch: expected ['8082', '9092'], got %v", base.Workers.HostPorts)
}
if base.ControlPlanes.Count == nil || *base.ControlPlanes.Count != 1 {
t.Errorf("ControlPlanes Count mismatch: expected 1, got %v", *base.ControlPlanes.Count)
@@ -122,6 +143,9 @@ func TestClusterConfig_Merge(t *testing.T) {
if len(base.Workers.Nodes) != 1 || base.Workers.Nodes["worker2"].Hostname == nil || *base.Workers.Nodes["worker2"].Hostname != "overlay-worker2" {
t.Errorf("Workers Nodes mismatch: expected 'overlay-worker2', got %v", base.Workers.Nodes)
}
if len(base.Workers.Volumes) != 1 || base.Workers.Volumes[0] != "${WINDSOR_PROJECT_ROOT}/overlay/worker/volume2:/var/local/worker2" {
t.Errorf("Workers Volumes mismatch: expected ['${WINDSOR_PROJECT_ROOT}/overlay/worker/volume2:/var/local/worker2'], got %v", base.Workers.Volumes)
}
})

t.Run("MergeWithAllNils", func(t *testing.T) {
@@ -134,25 +158,29 @@ func TestClusterConfig_Merge(t *testing.T) {
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: nil,
CPU: nil,
Memory: nil,
Nodes: nil,
HostPorts: nil,
Volumes: nil,
},
Workers: struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: nil,
CPU: nil,
Memory: nil,
Nodes: nil,
HostPorts: nil,
Volumes: nil,
},
}

@@ -165,25 +193,29 @@ func TestClusterConfig_Merge(t *testing.T) {
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: nil,
CPU: nil,
Memory: nil,
Nodes: nil,
HostPorts: nil,
Volumes: nil,
},
Workers: struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: nil,
CPU: nil,
Memory: nil,
Nodes: nil,
HostPorts: nil,
Volumes: nil,
},
}

@@ -196,7 +228,10 @@ func TestClusterConfig_Merge(t *testing.T) {
t.Errorf("Driver mismatch: expected nil, got '%s'", *base.Driver)
}
if base.Workers.HostPorts != nil {
t.Errorf("HostPorts mismatch: expected nil, got %v", base.Workers.HostPorts)
t.Errorf("Workers HostPorts mismatch: expected nil, got %v", base.Workers.HostPorts)
}
if base.ControlPlanes.HostPorts != nil {
t.Errorf("ControlPlanes HostPorts mismatch: expected nil, got %v", base.ControlPlanes.HostPorts)
}
if base.ControlPlanes.Count != nil {
t.Errorf("ControlPlanes Count mismatch: expected nil, got %v", *base.ControlPlanes.Count)
@@ -222,6 +257,9 @@ func TestClusterConfig_Merge(t *testing.T) {
if base.Workers.Nodes != nil {
t.Errorf("Workers Nodes mismatch: expected nil, got %v", base.Workers.Nodes)
}
if base.Workers.Volumes != nil {
t.Errorf("Workers Volumes mismatch: expected nil, got %v", base.Workers.Volumes)
}
})
}

@@ -236,29 +274,37 @@ func TestClusterConfig_Copy(t *testing.T) {
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: ptrInt(3),
CPU: ptrInt(4),
Memory: ptrInt(8192),
Nodes: map[string]NodeConfig{
"node1": {Hostname: ptrString("original-node1")},
"node1": {
Hostname: ptrString("original-node1"),
},
},
HostPorts: []string{"1000:1000/tcp", "2000:2000/tcp"},
Volumes: []string{"${WINDSOR_PROJECT_ROOT}/original/volume1:/var/local/original1"},
},
Workers: struct {
Count *int `yaml:"count,omitempty"`
CPU *int `yaml:"cpu,omitempty"`
Memory *int `yaml:"memory,omitempty"`
Nodes map[string]NodeConfig `yaml:"nodes,omitempty"`
HostPorts []string `yaml:"hostports,omitempty"`
Volumes []string `yaml:"volumes,omitempty"`
}{
Count: ptrInt(5),
CPU: ptrInt(2),
Memory: ptrInt(4096),
Nodes: map[string]NodeConfig{
"worker1": {Hostname: ptrString("original-worker1")},
"worker1": {
Hostname: ptrString("original-worker1"),
},
},
HostPorts: []string{"3000:3000/tcp", "4000:4000/tcp"},
Volumes: []string{"${WINDSOR_PROJECT_ROOT}/original/worker/volume1:/var/local/worker1"},
},
}

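
The assertion half of TestClusterConfig_Copy is collapsed in this view. An independence check for the new Volumes slice would presumably look like the sketch below (illustrative only, not the PR's actual assertions; the config literal is trimmed to the field under test, and the usual `testing` import of the test file is assumed):

```go
// Illustrative sketch, not part of this PR's test file.
func TestClusterConfig_Copy_VolumesIndependence(t *testing.T) {
	original := &ClusterConfig{
		Workers: NodeGroupConfig{
			Volumes: []string{"${WINDSOR_PROJECT_ROOT}/original/worker/volume1:/var/local/worker1"},
		},
	}

	copied := original.Copy()

	// Copy allocates a fresh slice, so mutating the copy must not leak
	// back into the original.
	copied.Workers.Volumes[0] = "mutated"
	if original.Workers.Volumes[0] != "${WINDSOR_PROJECT_ROOT}/original/worker/volume1:/var/local/worker1" {
		t.Errorf("Copy is shallow: mutating the copy changed the original Volumes slice")
	}
}
```
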
16 changes: 15 additions & 1 deletion cmd/down.go
@@ -3,6 +3,7 @@ package cmd
import (
"fmt"
"os"
"path/filepath"

"github.com/spf13/cobra"
ctrl "github.com/windsorcli/cli/pkg/controller"
@@ -36,6 +37,9 @@ var downCmd = &cobra.Command{
return fmt.Errorf("No config handler found")
}

// Resolve the shell
shell := controller.ResolveShell()

// Determine if the container runtime is enabled
containerRuntimeEnabled := configHandler.GetBool("docker.enabled")

@@ -55,9 +59,19 @@ var downCmd = &cobra.Command{

// Clean up context specific artifacts if --clean flag is set
if cleanFlag {
if err := controller.ResolveConfigHandler().Clean(); err != nil {
if err := configHandler.Clean(); err != nil {
return fmt.Errorf("Error cleaning up context specific artifacts: %w", err)
}

// Delete everything in the .volumes folder
projectRoot, err := shell.GetProjectRoot()
if err != nil {
return fmt.Errorf("Error retrieving project root: %w", err)
}
volumesPath := filepath.Join(projectRoot, ".volumes")
if err := osRemoveAll(volumesPath); err != nil {
return fmt.Errorf("Error deleting .volumes folder: %w", err)
}
}

// Print success message
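
Two notes on this hunk. First, with the --clean flag, windsor down now also deletes the .volumes directory under the project root, which presumably backs the new per-node-group volumes configuration above. Second, osRemoveAll is not defined in this diff; it is presumably a package-level indirection over os.RemoveAll so that tests can stub out filesystem deletion. A hedged sketch of what that shim would amount to:

```go
// Assumed shim, defined elsewhere in package cmd (not shown in this diff):
// an indirection over os.RemoveAll so tests can swap in a fake.
var osRemoveAll = os.RemoveAll
```

A test for the --clean path could then point osRemoveAll at a fake that records the requested path, assert it ends in .volumes, and restore the real function afterwards.
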