diff --git a/api/v1alpha1/cluster/cluster_config.go b/api/v1alpha1/cluster/cluster_config.go index af9a12df7..d595d2ec4 100644 --- a/api/v1alpha1/cluster/cluster_config.go +++ b/api/v1alpha1/cluster/cluster_config.go @@ -6,6 +6,7 @@ type ClusterConfig struct { Platform *string `yaml:"platform,omitempty"` Driver *string `yaml:"driver,omitempty"` Endpoint *string `yaml:"endpoint,omitempty"` + Image *string `yaml:"image,omitempty"` ControlPlanes NodeGroupConfig `yaml:"controlplanes,omitempty"` Workers NodeGroupConfig `yaml:"workers,omitempty"` } @@ -15,6 +16,7 @@ type NodeConfig struct { Hostname *string `yaml:"hostname,omitempty"` Node *string `yaml:"node,omitempty"` Endpoint *string `yaml:"endpoint,omitempty"` + Image *string `yaml:"image,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` } @@ -23,6 +25,7 @@ type NodeGroupConfig struct { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -42,6 +45,9 @@ func (base *ClusterConfig) Merge(overlay *ClusterConfig) { if overlay.Endpoint != nil { base.Endpoint = overlay.Endpoint } + if overlay.Image != nil { + base.Image = overlay.Image + } if overlay.ControlPlanes.Count != nil { base.ControlPlanes.Count = overlay.ControlPlanes.Count } @@ -51,6 +57,9 @@ func (base *ClusterConfig) Merge(overlay *ClusterConfig) { if overlay.ControlPlanes.Memory != nil { base.ControlPlanes.Memory = overlay.ControlPlanes.Memory } + if overlay.ControlPlanes.Image != nil { + base.ControlPlanes.Image = overlay.ControlPlanes.Image + } if overlay.ControlPlanes.Nodes != nil { base.ControlPlanes.Nodes = make(map[string]NodeConfig, len(overlay.ControlPlanes.Nodes)) for key, node := range overlay.ControlPlanes.Nodes { @@ -74,6 +83,9 @@ func (base *ClusterConfig) Merge(overlay *ClusterConfig) { if 
overlay.Workers.Memory != nil { base.Workers.Memory = overlay.Workers.Memory } + if overlay.Workers.Image != nil { + base.Workers.Image = overlay.Workers.Image + } if overlay.Workers.Nodes != nil { base.Workers.Nodes = make(map[string]NodeConfig, len(overlay.Workers.Nodes)) for key, node := range overlay.Workers.Nodes { @@ -102,6 +114,7 @@ func (c *ClusterConfig) Copy() *ClusterConfig { Hostname: node.Hostname, Node: node.Node, Endpoint: node.Endpoint, + Image: node.Image, HostPorts: append([]string{}, node.HostPorts...), } } @@ -116,6 +129,7 @@ func (c *ClusterConfig) Copy() *ClusterConfig { Hostname: node.Hostname, Node: node.Node, Endpoint: node.Endpoint, + Image: node.Image, HostPorts: append([]string{}, node.HostPorts...), } } @@ -129,10 +143,12 @@ func (c *ClusterConfig) Copy() *ClusterConfig { Platform: c.Platform, Driver: c.Driver, Endpoint: c.Endpoint, + Image: c.Image, ControlPlanes: NodeGroupConfig{ Count: c.ControlPlanes.Count, CPU: c.ControlPlanes.CPU, Memory: c.ControlPlanes.Memory, + Image: c.ControlPlanes.Image, Nodes: controlPlanesNodesCopy, HostPorts: controlPlanesHostPortsCopy, Volumes: controlPlanesVolumesCopy, @@ -141,6 +157,7 @@ func (c *ClusterConfig) Copy() *ClusterConfig { Count: c.Workers.Count, CPU: c.Workers.CPU, Memory: c.Workers.Memory, + Image: c.Workers.Image, Nodes: workersNodesCopy, HostPorts: workersHostPortsCopy, Volumes: workersVolumesCopy, diff --git a/api/v1alpha1/cluster/cluster_config_test.go b/api/v1alpha1/cluster/cluster_config_test.go index 9a5c843b2..c08bc0929 100644 --- a/api/v1alpha1/cluster/cluster_config_test.go +++ b/api/v1alpha1/cluster/cluster_config_test.go @@ -24,10 +24,12 @@ func TestClusterConfig_Merge(t *testing.T) { Driver: ptrString("base-driver"), Platform: ptrString("base-platform"), Endpoint: ptrString("base-endpoint"), + Image: ptrString("base-image"), ControlPlanes: struct { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string 
`yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -35,9 +37,11 @@ func TestClusterConfig_Merge(t *testing.T) { Count: ptrInt(3), CPU: ptrInt(4), Memory: ptrInt(8192), + Image: ptrString("base-controlplane-image"), Nodes: map[string]NodeConfig{ "node1": { Hostname: ptrString("base-node1"), + Image: ptrString("base-node1-image"), }, }, HostPorts: []string{"1000:1000/tcp", "2000:2000/tcp"}, @@ -47,6 +51,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -54,9 +59,11 @@ func TestClusterConfig_Merge(t *testing.T) { Count: ptrInt(5), CPU: ptrInt(2), Memory: ptrInt(4096), + Image: ptrString("base-worker-image"), Nodes: map[string]NodeConfig{ "worker1": { Hostname: ptrString("base-worker1"), + Image: ptrString("base-worker1-image"), }, }, HostPorts: []string{"8080", "9090"}, @@ -69,10 +76,12 @@ func TestClusterConfig_Merge(t *testing.T) { Driver: ptrString("overlay-driver"), Platform: ptrString("overlay-platform"), Endpoint: ptrString("overlay-endpoint"), + Image: ptrString("overlay-image"), ControlPlanes: struct { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -80,9 +89,11 @@ func TestClusterConfig_Merge(t *testing.T) { Count: ptrInt(1), CPU: ptrInt(2), Memory: ptrInt(4096), + Image: ptrString("overlay-controlplane-image"), Nodes: map[string]NodeConfig{ "node2": { Hostname: ptrString("overlay-node2"), + Image: 
ptrString("overlay-node2-image"), }, }, HostPorts: []string{"3000:3000/tcp", "4000:4000/tcp"}, @@ -92,6 +103,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -99,9 +111,11 @@ func TestClusterConfig_Merge(t *testing.T) { Count: ptrInt(3), CPU: ptrInt(1), Memory: ptrInt(2048), + Image: ptrString("overlay-worker-image"), Nodes: map[string]NodeConfig{ "worker2": { Hostname: ptrString("overlay-worker2"), + Image: ptrString("overlay-worker2-image"), }, }, HostPorts: []string{"8082", "9092"}, @@ -123,6 +137,9 @@ func TestClusterConfig_Merge(t *testing.T) { if base.Endpoint == nil || *base.Endpoint != "overlay-endpoint" { t.Errorf("Endpoint mismatch: expected 'overlay-endpoint', got '%s'", *base.Endpoint) } + if base.Image == nil || *base.Image != "overlay-image" { + t.Errorf("Image mismatch: expected 'overlay-image', got '%s'", *base.Image) + } if len(base.ControlPlanes.HostPorts) != 2 || base.ControlPlanes.HostPorts[0] != "3000:3000/tcp" || base.ControlPlanes.HostPorts[1] != "4000:4000/tcp" { t.Errorf("ControlPlanes HostPorts mismatch: expected ['3000:3000/tcp', '4000:4000/tcp'], got %v", base.ControlPlanes.HostPorts) } @@ -138,9 +155,15 @@ func TestClusterConfig_Merge(t *testing.T) { if base.ControlPlanes.Memory == nil || *base.ControlPlanes.Memory != 4096 { t.Errorf("ControlPlanes Memory mismatch: expected 4096, got %v", *base.ControlPlanes.Memory) } + if base.ControlPlanes.Image == nil || *base.ControlPlanes.Image != "overlay-controlplane-image" { + t.Errorf("ControlPlanes Image mismatch: expected 'overlay-controlplane-image', got '%s'", *base.ControlPlanes.Image) + } if len(base.ControlPlanes.Nodes) != 1 || base.ControlPlanes.Nodes["node2"].Hostname == nil || 
*base.ControlPlanes.Nodes["node2"].Hostname != "overlay-node2" { t.Errorf("ControlPlanes Nodes mismatch: expected 'overlay-node2', got %v", base.ControlPlanes.Nodes) } + if base.ControlPlanes.Nodes["node2"].Image == nil || *base.ControlPlanes.Nodes["node2"].Image != "overlay-node2-image" { + t.Errorf("ControlPlanes Nodes Image mismatch: expected 'overlay-node2-image', got '%s'", *base.ControlPlanes.Nodes["node2"].Image) + } if base.Workers.Count == nil || *base.Workers.Count != 3 { t.Errorf("Workers Count mismatch: expected 3, got %v", *base.Workers.Count) } @@ -150,9 +173,15 @@ func TestClusterConfig_Merge(t *testing.T) { if base.Workers.Memory == nil || *base.Workers.Memory != 2048 { t.Errorf("Workers Memory mismatch: expected 2048, got %v", *base.Workers.Memory) } + if base.Workers.Image == nil || *base.Workers.Image != "overlay-worker-image" { + t.Errorf("Workers Image mismatch: expected 'overlay-worker-image', got '%s'", *base.Workers.Image) + } if len(base.Workers.Nodes) != 1 || base.Workers.Nodes["worker2"].Hostname == nil || *base.Workers.Nodes["worker2"].Hostname != "overlay-worker2" { t.Errorf("Workers Nodes mismatch: expected 'overlay-worker2', got %v", base.Workers.Nodes) } + if base.Workers.Nodes["worker2"].Image == nil || *base.Workers.Nodes["worker2"].Image != "overlay-worker2-image" { + t.Errorf("Workers Nodes Image mismatch: expected 'overlay-worker2-image', got '%s'", *base.Workers.Nodes["worker2"].Image) + } if len(base.Workers.Volumes) != 1 || base.Workers.Volumes[0] != "${WINDSOR_PROJECT_ROOT}/overlay/worker/volume2:/var/local/worker2" { t.Errorf("Workers Volumes mismatch: expected ['${WINDSOR_PROJECT_ROOT}/overlay/worker/volume2:/var/local/worker2'], got %v", base.Workers.Volumes) } @@ -164,10 +193,12 @@ func TestClusterConfig_Merge(t *testing.T) { Driver: nil, Platform: nil, Endpoint: nil, + Image: nil, ControlPlanes: struct { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image 
*string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -175,6 +206,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count: nil, CPU: nil, Memory: nil, + Image: nil, Nodes: nil, HostPorts: nil, Volumes: nil, @@ -183,6 +215,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -190,6 +223,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count: nil, CPU: nil, Memory: nil, + Image: nil, Nodes: nil, HostPorts: nil, Volumes: nil, @@ -201,10 +235,12 @@ func TestClusterConfig_Merge(t *testing.T) { Driver: nil, Platform: nil, Endpoint: nil, + Image: nil, ControlPlanes: struct { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -212,6 +248,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count: nil, CPU: nil, Memory: nil, + Image: nil, Nodes: nil, HostPorts: nil, Volumes: nil, @@ -220,6 +257,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -227,6 +265,7 @@ func TestClusterConfig_Merge(t *testing.T) { Count: nil, CPU: nil, Memory: nil, + Image: nil, Nodes: nil, HostPorts: nil, Volumes: nil, @@ -247,6 +286,9 @@ func 
TestClusterConfig_Merge(t *testing.T) { if base.Endpoint != nil { t.Errorf("Endpoint mismatch: expected nil, got '%s'", *base.Endpoint) } + if base.Image != nil { + t.Errorf("Image mismatch: expected nil, got '%s'", *base.Image) + } if base.Workers.HostPorts != nil { t.Errorf("Workers HostPorts mismatch: expected nil, got %v", base.Workers.HostPorts) } @@ -262,6 +304,9 @@ func TestClusterConfig_Merge(t *testing.T) { if base.ControlPlanes.Memory != nil { t.Errorf("ControlPlanes Memory mismatch: expected nil, got %v", *base.ControlPlanes.Memory) } + if base.ControlPlanes.Image != nil { + t.Errorf("ControlPlanes Image mismatch: expected nil, got '%s'", *base.ControlPlanes.Image) + } if base.ControlPlanes.Nodes != nil { t.Errorf("ControlPlanes Nodes mismatch: expected nil, got %v", base.ControlPlanes.Nodes) } @@ -274,6 +319,9 @@ func TestClusterConfig_Merge(t *testing.T) { if base.Workers.Memory != nil { t.Errorf("Workers Memory mismatch: expected nil, got %v", *base.Workers.Memory) } + if base.Workers.Image != nil { + t.Errorf("Workers Image mismatch: expected nil, got '%s'", *base.Workers.Image) + } if base.Workers.Nodes != nil { t.Errorf("Workers Nodes mismatch: expected nil, got %v", base.Workers.Nodes) } @@ -290,10 +338,12 @@ func TestClusterConfig_Copy(t *testing.T) { Driver: ptrString("original-driver"), Platform: ptrString("original-platform"), Endpoint: ptrString("original-endpoint"), + Image: ptrString("original-image"), ControlPlanes: struct { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -301,9 +351,11 @@ func TestClusterConfig_Copy(t *testing.T) { Count: ptrInt(3), CPU: ptrInt(4), Memory: ptrInt(8192), + Image: ptrString("original-controlplane-image"), Nodes: map[string]NodeConfig{ "node1": { Hostname: 
ptrString("original-node1"), + Image: ptrString("original-node1-image"), }, }, HostPorts: []string{"1000:1000/tcp", "2000:2000/tcp"}, @@ -313,6 +365,7 @@ func TestClusterConfig_Copy(t *testing.T) { Count *int `yaml:"count,omitempty"` CPU *int `yaml:"cpu,omitempty"` Memory *int `yaml:"memory,omitempty"` + Image *string `yaml:"image,omitempty"` Nodes map[string]NodeConfig `yaml:"nodes,omitempty"` HostPorts []string `yaml:"hostports,omitempty"` Volumes []string `yaml:"volumes,omitempty"` @@ -320,9 +373,11 @@ func TestClusterConfig_Copy(t *testing.T) { Count: ptrInt(5), CPU: ptrInt(2), Memory: ptrInt(4096), + Image: ptrString("original-worker-image"), Nodes: map[string]NodeConfig{ "worker1": { Hostname: ptrString("original-worker1"), + Image: ptrString("original-worker1-image"), }, }, HostPorts: []string{"3000:3000/tcp", "4000:4000/tcp"}, @@ -344,6 +399,9 @@ func TestClusterConfig_Copy(t *testing.T) { if original.Endpoint == nil || copy.Endpoint == nil || *original.Endpoint != *copy.Endpoint { t.Errorf("Endpoint mismatch: expected %v, got %v", *original.Endpoint, *copy.Endpoint) } + if original.Image == nil || copy.Image == nil || *original.Image != *copy.Image { + t.Errorf("Image mismatch: expected %v, got %v", *original.Image, *copy.Image) + } if len(original.Workers.HostPorts) != len(copy.Workers.HostPorts) { t.Errorf("Workers HostPorts length mismatch: expected %d, got %d", len(original.Workers.HostPorts), len(copy.Workers.HostPorts)) } @@ -361,6 +419,9 @@ func TestClusterConfig_Copy(t *testing.T) { if original.Workers.Memory == nil || copy.Workers.Memory == nil || *original.Workers.Memory != *copy.Workers.Memory { t.Errorf("Workers Memory mismatch: expected %v, got %v", *original.Workers.Memory, *copy.Workers.Memory) } + if original.Workers.Image == nil || copy.Workers.Image == nil || *original.Workers.Image != *copy.Workers.Image { + t.Errorf("Workers Image mismatch: expected %v, got %v", *original.Workers.Image, *copy.Workers.Image) + } if 
len(original.Workers.Nodes) != len(copy.Workers.Nodes) { t.Errorf("Workers Nodes length mismatch: expected %d, got %d", len(original.Workers.Nodes), len(copy.Workers.Nodes)) } @@ -382,6 +443,9 @@ func TestClusterConfig_Copy(t *testing.T) { if original.ControlPlanes.Memory == nil || copy.ControlPlanes.Memory == nil || *original.ControlPlanes.Memory != *copy.ControlPlanes.Memory { t.Errorf("ControlPlanes Memory mismatch: expected %v, got %v", *original.ControlPlanes.Memory, *copy.ControlPlanes.Memory) } + if original.ControlPlanes.Image == nil || copy.ControlPlanes.Image == nil || *original.ControlPlanes.Image != *copy.ControlPlanes.Image { + t.Errorf("ControlPlanes Image mismatch: expected %v, got %v", *original.ControlPlanes.Image, *copy.ControlPlanes.Image) + } if len(original.ControlPlanes.Nodes) != len(copy.ControlPlanes.Nodes) { t.Errorf("ControlPlanes Nodes length mismatch: expected %d, got %d", len(original.ControlPlanes.Nodes), len(copy.ControlPlanes.Nodes)) } @@ -390,6 +454,9 @@ func TestClusterConfig_Copy(t *testing.T) { t.Errorf("ControlPlanes Nodes mismatch for key %s: expected %v, got %v", key, *node.Hostname, *copy.ControlPlanes.Nodes[key].Hostname) } } + if original.ControlPlanes.Nodes["node1"].Image == nil || copy.ControlPlanes.Nodes["node1"].Image == nil || *original.ControlPlanes.Nodes["node1"].Image != *copy.ControlPlanes.Nodes["node1"].Image { + t.Errorf("ControlPlanes Nodes Image mismatch: expected %v, got %v", *original.ControlPlanes.Nodes["node1"].Image, *copy.ControlPlanes.Nodes["node1"].Image) + } if original.Workers.Count == nil || copy.Workers.Count == nil || *original.Workers.Count != *copy.Workers.Count { t.Errorf("Workers Count mismatch: expected %v, got %v", *original.Workers.Count, *copy.Workers.Count) } @@ -399,6 +466,9 @@ func TestClusterConfig_Copy(t *testing.T) { if original.Workers.Memory == nil || copy.Workers.Memory == nil || *original.Workers.Memory != *copy.Workers.Memory { t.Errorf("Workers Memory mismatch: expected %v, got 
%v", *original.Workers.Memory, *copy.Workers.Memory) } + if original.Workers.Image == nil || copy.Workers.Image == nil || *original.Workers.Image != *copy.Workers.Image { + t.Errorf("Workers Image mismatch: expected %v, got %v", *original.Workers.Image, *copy.Workers.Image) + } if len(original.Workers.Nodes) != len(copy.Workers.Nodes) { t.Errorf("Workers Nodes length mismatch: expected %d, got %d", len(original.Workers.Nodes), len(copy.Workers.Nodes)) } @@ -407,6 +477,9 @@ func TestClusterConfig_Copy(t *testing.T) { t.Errorf("Workers Nodes mismatch for key %s: expected %v, got %v", key, *node.Hostname, *copy.Workers.Nodes[key].Hostname) } } + if original.Workers.Nodes["worker1"].Image == nil || copy.Workers.Nodes["worker1"].Image == nil || *original.Workers.Nodes["worker1"].Image != *copy.Workers.Nodes["worker1"].Image { + t.Errorf("Workers Nodes Image mismatch: expected %v, got %v", *original.Workers.Nodes["worker1"].Image, *copy.Workers.Nodes["worker1"].Image) + } // Modify the copy and ensure original is unchanged copy.Enabled = ptrBool(false) diff --git a/pkg/constants/constants.go b/pkg/constants/constants.go index e8a435ee7..dc296a035 100644 --- a/pkg/constants/constants.go +++ b/pkg/constants/constants.go @@ -17,8 +17,8 @@ const ( // Default Talos settings const ( - // renovate: datasource=docker depName=ghcr.io/siderolabs/talos - DEFAULT_TALOS_IMAGE = "ghcr.io/siderolabs/talos:v1.9.1" + // renovate: datasource=github-releases depName=siderolabs/talos + DEFAULT_TALOS_IMAGE = "ghcr.io/siderolabs/talos:v1.9.5" DEFAULT_TALOS_WORKER_CPU = 4 DEFAULT_TALOS_WORKER_RAM = 4 DEFAULT_TALOS_CONTROL_PLANE_CPU = 2 
diff --git a/pkg/services/talos_service.go b/pkg/services/talos_service.go index 227b71dce..333931043 100644 --- a/pkg/services/talos_service.go +++ b/pkg/services/talos_service.go @@ -16,8 +16,8 @@ import ( // Initialize the global port settings var ( - nextAPIPort = 50001 - defaultAPIPort = 50000 + nextAPIPort = constants.DEFAULT_TALOS_API_PORT + 1 + defaultAPIPort = constants.DEFAULT_TALOS_API_PORT portLock sync.Mutex extraPortIndex = 0 controlPlaneLeader *TalosService @@ -150,11 +150,11 @@ func (s *TalosService) SetAddress(address string) error { // GetComposeConfig creates a Docker Compose configuration for Talos services. // It dynamically retrieves CPU and RAM settings based on whether the node is a worker // or part of the control plane. The function identifies endpoint ports for service communication and ensures -// that all necessary volume directories are defined. It configures the container with the latest image, +// that all necessary volume directories are defined. It configures the container with the appropriate image +// (prioritizing node-specific, then group-specific, then cluster-wide, and finally default image settings), // environment variables, security options, and volume mounts. The service name is constructed using the node // name, and port mappings are set up, including both default and node-specific ports. The resulting configuration -// provides comprehensive service and volume specifications for deployment, ensuring compatibility with the -// docker-compose.yml file. +// provides comprehensive service and volume specifications for deployment. 
func (s *TalosService) GetComposeConfig() (*types.Config, error) { config := s.configHandler.GetConfig() if config.Cluster == nil { @@ -186,8 +186,27 @@ func (s *TalosService) GetComposeConfig() (*types.Config, error) { publishedPort = parts[1] } + var image string + + nodeImage := s.configHandler.GetString(fmt.Sprintf("cluster.%s.nodes.%s.image", nodeType, nodeName), "") + if nodeImage != "" { + image = nodeImage + } else { + groupImage := s.configHandler.GetString(fmt.Sprintf("cluster.%s.image", nodeType), "") + if groupImage != "" { + image = groupImage + } else { + clusterImage := s.configHandler.GetString("cluster.image", "") + if clusterImage != "" { + image = clusterImage + } else { + image = constants.DEFAULT_TALOS_IMAGE + } + } + } + commonConfig := types.ServiceConfig{ - Image: constants.DEFAULT_TALOS_IMAGE, + Image: image, Environment: map[string]*string{"PLATFORM": ptrString("container")}, Restart: "always", ReadOnly: true, diff --git a/pkg/services/talos_service_test.go b/pkg/services/talos_service_test.go index 11305d325..a1b81c68a 100644 --- a/pkg/services/talos_service_test.go +++ b/pkg/services/talos_service_test.go @@ -4,6 +4,7 @@ import ( "fmt" "math" "os" + "strconv" "strings" "testing" @@ -50,7 +51,7 @@ func setupTalosServiceMocks(optionalInjector ...di.Injector) *MockComponents { mockConfigHandler.GetStringFunc = func(key string, defaultValue ...string) string { switch key { case "cluster.workers.nodes.worker1.endpoint": - return "192.168.1.1:50000" + return "192.168.1.1:" + strconv.Itoa(constants.DEFAULT_TALOS_API_PORT) case "cluster.workers.nodes.worker2.endpoint": return "192.168.1.2:50001" case "dns.domain": @@ -483,7 +484,7 @@ func TestTalosService_SetAddress(t *testing.T) { } // Simulate used ports to trigger the loop - usedHostPorts[50000] = true // Ensure the defaultAPIPort is also marked as used + usedHostPorts[constants.DEFAULT_TALOS_API_PORT] = true // Ensure the defaultAPIPort is also marked as used usedHostPorts[50001] = true 
usedHostPorts[50002] = true @@ -956,7 +957,7 @@ func TestTalosService_GetComposeConfig(t *testing.T) { foundAPIPort := false foundKubePort := false for _, port := range serviceConfig.Ports { - if port.Target == 50000 && port.Protocol == "tcp" { + if port.Target == uint32(constants.DEFAULT_TALOS_API_PORT) && port.Protocol == "tcp" { foundAPIPort = true } if port.Target == 6443 && port.Published == "6443" && port.Protocol == "tcp" { @@ -1014,7 +1015,7 @@ func TestTalosService_GetComposeConfig(t *testing.T) { // Verify only API port is present port := serviceConfig.Ports[0] - if port.Target != 50000 || port.Protocol != "tcp" { + if port.Target != uint32(constants.DEFAULT_TALOS_API_PORT) || port.Protocol != "tcp" { t.Errorf("expected API port configuration, got target=%d protocol=%s", port.Target, port.Protocol) } }) @@ -1108,8 +1109,8 @@ func TestTalosService_GetComposeConfig(t *testing.T) { if len(ports1) != 1 { t.Fatalf("expected 1 port in service1, got %d", len(ports1)) } - if ports1[0].Target != 50000 || ports1[0].Published != "50001" { - t.Errorf("expected port 50000:50001 in service1, got %d:%s", ports1[0].Target, ports1[0].Published) + if ports1[0].Target != uint32(constants.DEFAULT_TALOS_API_PORT) || ports1[0].Published != "50001" { + t.Errorf("expected port %d:50001 in service1, got %d:%s", constants.DEFAULT_TALOS_API_PORT, ports1[0].Target, ports1[0].Published) } // Check ports for second service @@ -1117,8 +1118,8 @@ func TestTalosService_GetComposeConfig(t *testing.T) { if len(ports2) != 1 { t.Fatalf("expected 1 port in service2, got %d", len(ports2)) } - if ports2[0].Target != 50000 || ports2[0].Published != "50002" { - t.Errorf("expected port 50000:50002 in service2, got %d:%s", ports2[0].Target, ports2[0].Published) + if ports2[0].Target != uint32(constants.DEFAULT_TALOS_API_PORT) || ports2[0].Published != "50002" { + t.Errorf("expected port %d:50002 in service2, got %d:%s", constants.DEFAULT_TALOS_API_PORT, ports2[0].Target, ports2[0].Published) } })