From 01140fa71ee8e2e2538709c26a5d74de70049515 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Mon, 23 Jan 2017 08:10:05 +0000 Subject: [PATCH 001/109] Hacks to work for m3 use case --- .../lbaasv2/drivers/bigip/l2_service.py | 1 + .../lbaasv2/drivers/bigip/network_helper.py | 119 ++++++++++++++++-- .../lbaasv2/drivers/bigip/network_service.py | 78 ++++++++++-- .../lbaasv2/drivers/bigip/selfips.py | 14 ++- .../lbaasv2/drivers/bigip/tenants.py | 36 +++--- 5 files changed, 209 insertions(+), 39 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py index 9c35da5d4..ce46768d5 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py @@ -130,6 +130,7 @@ def set_context(self, context): def is_common_network(self, network): # Does this network belong in the /Common folder? + return True return network['shared'] or \ (network['id'] in self.conf.common_network_ids) or \ ('router:external' in network and diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index 4c8693f41..9205b3163 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -59,6 +59,11 @@ class NetworkHelper(object): 'strict': 'disabled', } + route_defaults = { + 'name': None, + 'partition': '/' + const.DEFAULT_PARTITION, + } + @log_helpers.log_method_call def create_l2gre_multipoint_profile(self, bigip, name, partition=const.DEFAULT_PARTITION): @@ -174,24 +179,31 @@ def get_selfip_addr(self, bigip, name, partition=const.DEFAULT_PARTITION): err.message)) return None - def route_domain_exists(self, bigip, partition=const.DEFAULT_PARTITION, + def route_domain_exists(self, bigip, partition=const.DEFAULT_PARTITION, name=None, domain_id=None): - if partition == 'Common': - return True + # if 
partition == 'Common': + # return True + + if name: + name = self._get_route_domain_name(name) + r = bigip.tm.net.route_domains.route_domain - name = partition + if domain_id: name += '_aux_' + str(domain_id) + return r.exists(name=name, partition=partition) @log_helpers.log_method_call - def get_route_domain(self, bigip, partition=const.DEFAULT_PARTITION): + def get_route_domain(self, bigip, partition=const.DEFAULT_PARTITION, name=None): # this only works when the domain was created with is_aux=False, # same as the original code. - if partition == 'Common': - name = '0' + + if name: + name = self._get_route_domain_name(name) else: name = partition + r = bigip.tm.net.route_domains.route_domain return r.load(name=name, partition=partition) @@ -231,11 +243,16 @@ def _get_next_domain_id(self, bigip): return lowest_available_index @log_helpers.log_method_call - def create_route_domain(self, bigip, partition=const.DEFAULT_PARTITION, + def create_route_domain(self, bigip, partition=const.DEFAULT_PARTITION, name=None, strictness=False, is_aux=False): + + name = self._get_route_domain_name(name) + rd = bigip.tm.net.route_domains.route_domain - name = partition + if not name: + name = partition id = self._get_next_domain_id(bigip) + if is_aux: name += '_aux_' + str(id) payload = NetworkHelper.route_domain_defaults @@ -252,8 +269,11 @@ def create_route_domain(self, bigip, partition=const.DEFAULT_PARTITION, def delete_route_domain(self, bigip, partition=const.DEFAULT_PARTITION, name=None): r = bigip.tm.net.route_domains.route_domain - if not name: + if name: + name = self._get_route_domain_name(name) + else: name = partition + obj = r.load(name=name, partition=partition) obj.delete() @@ -285,6 +305,60 @@ def get_route_domain_names(self, bigip, partition=const.DEFAULT_PARTITION): rd_names_list.append(rd.name) return rd_names_list + + @log_helpers.log_method_call + def route_exists(self, bigip, partition=const.DEFAULT_PARTITION, name=None): + rc = bigip.tm.net.routes.route + + 
if name: + name = self._get_route_name(name) + + return rc.exists(name=name, partition=partition) + + + + @log_helpers.log_method_call + def get_route(self, bigip, partition=const.DEFAULT_PARTITION, name=None): + rc = bigip.tm.net.routes.route + + if name: + name = self._get_route_name(name) + + return rc.load(name=name, partition=partition) + + @log_helpers.log_method_call + def create_route(self, bigip, partition=const.DEFAULT_PARTITION, name=None, gateway_ip='0.0.0.0', rd_id=0, destination_ip='0.0.0.0',netmask=0): + if self.route_exists(bigip, name=name, partition=partition): + LOG.info("Skipping create of route %s route already exists" % name) + return + + rc = bigip.tm.net.routes.route + + if name: + name = self._get_route_name(name) + + destination_ip+= '%' + str(rd_id)+ '/'+str(netmask) + gateway_ip+= '%' + str(rd_id) + + payload = NetworkHelper.route_defaults + + + + payload['name'] = name + payload['partition'] = partition + payload['gw'] = gateway_ip + payload['network'] = destination_ip + + + rc.create(**payload) + + @log_helpers.log_method_call + def delete_route(self ,bigip, partition=const.DEFAULT_PARTITION, name=None): + + if self.route_exists(bigip, partition=partition, name=name): + obj = self.get_route(bigip, partition, name) + obj.delete() + @log_helpers.log_method_call def get_vlans_in_route_domain(self, bigip, @@ -293,6 +367,21 @@ def get_vlans_in_route_domain(self, rd = self.get_route_domain(bigip, partition) return getattr(rd, 'vlans', []) + @log_helpers.log_method_call + def _get_route_domain_name(self, name): + if not name or name.startswith('rd-'): + return name + + return "rd-%s" % (name) + + @log_helpers.log_method_call + def _get_route_name(self, name): + if not name or name.startswith('rt-'): + return name + + return "rt-%s" % name + + @log_helpers.log_method_call def create_vlan(self, bigip, model): name = model.get('name', None) @@ -354,9 +443,15 @@ def add_vlan_to_domain( self, bigip, name, - partition=const.DEFAULT_PARTITION): + 
partition=const.DEFAULT_PARTITION, rd_name=None): + + if rd_name: + rd_name = self._get_route_domain_name(rd_name) + else: + rd_name = partition + """Add VLANs to Domain.""" - rd = self.get_route_domain(bigip, partition) + rd = self.get_route_domain(bigip, partition, rd_name) existing_vlans = getattr(rd, 'vlans', []) if name in existing_vlans: return False diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index b80d2050b..6b8956552 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -16,6 +16,7 @@ import itertools import netaddr +import constants_v2 as const from neutron.common.exceptions import NeutronException from neutron.plugins.common import constants as plugin_const from oslo_log import log as logging @@ -172,6 +173,20 @@ def prep_service_networking(self, service, traffic_group): self.bigip_selfip_manager.assure_gateway_on_subnet( assure_bigip, subnetinfo, traffic_group) + self._assure_subnet_gateway(service) + + def _assure_subnet_gateway(self,service): + network_id = service['loadbalancer']['network_id'] + + for bigip in self.driver.get_all_bigips(): + rd = self.network_helper.get_route_domain(bigip, partition=const.DEFAULT_PARTITION, name=network_id) + + for subnet_id, subnet in service['subnets'].iteritems(): + + if not self.network_helper.route_exists(bigip, const.DEFAULT_PARTITION,subnet_id): + self.network_helper.create_route(bigip, const.DEFAULT_PARTITION,subnet_id, subnet['gateway_ip'], rd.id) + + def _annotate_service_route_domains(self, service): # Add route domain notation to pool member and vip addresses. 
LOG.debug("Service before route domains: %s" % service) @@ -220,9 +235,11 @@ def _annotate_service_route_domains(self, service): def assign_route_domain(self, tenant_id, network, subnet): # Assign route domain for a network - if self.l2_service.is_common_network(network): - network['route_domain_id'] = 0 - return + + + # if self.l2_service.is_common_network(network): + # network['route_domain_id'] = 0 + # return LOG.debug("assign route domain get from cache %s" % network) route_domain_id = self.get_route_domain_from_cache(network) @@ -237,10 +254,13 @@ def assign_route_domain(self, tenant_id, network, subnet): if self.conf.max_namespaces_per_tenant == 1: bigip = self.driver.get_bigip() LOG.debug("bigip before get_domain: %s" % bigip) - partition_id = self.service_adapter.get_folder_name( - tenant_id) + # partition_id = self.service_adapter.get_folder_name( + # tenant_id) + + partition_id='Common' + tenant_rd = self.network_helper.get_route_domain( - bigip, partition=partition_id) + bigip, partition=partition_id, name=network['id']) network['route_domain_id'] = tenant_rd.id return @@ -576,6 +596,8 @@ def post_service_networking(self, service, all_subnet_hints): # Non Shared Config - Local Per BIG-IP self.update_bigip_l2(service) + + # Delete shared config objects deleted_names = set() for bigip in self.driver.get_config_bigips(): @@ -653,9 +675,15 @@ def update_bigip_member_l2(self, bigip, loadbalancer, member): net_folder = self.service_adapter.get_folder_name( loadbalancer['tenant_id'] ) + + mac = None + + if member.get('port'): + mac = member['port']['mac_address'] + fdb_info = {'network': network, 'ip_address': member['address'], - 'mac_address': member['port']['mac_address']} + 'mac_address': mac} self.l2_service.add_bigip_fdbs( bigip, net_folder, fdb_info, member) @@ -727,13 +755,20 @@ def _assure_delete_nets_shared(self, bigip, service, subnet_hints): if not self.conf.f5_snat_mode: gw_name = delete_gateway(bigip, subnetinfo) deleted_names.add(gw_name) + else: 
+ if self._is_last_on_network(service): + self.network_helper.delete_route(bigip, const.DEFAULT_PARTITION,subnetinfo['subnet_id']) + + my_deleted_names, my_in_use_subnets = \ self.bigip_snat_manager.delete_bigip_snats( bigip, subnetinfo, tenant_id) deleted_names = deleted_names.union(my_deleted_names) + for in_use_subnetid in my_in_use_subnets: subnet_hints['check_for_delete_subnets'].pop( in_use_subnetid, None) + except NeutronException as exc: LOG.error("assure_delete_nets_shared: exception: %s" % str(exc.msg)) @@ -745,7 +780,13 @@ def _assure_delete_nets_shared(self, bigip, service, subnet_hints): def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): # Delete non shared base objects for networks + deleted_names = set() + + if not self._is_last_on_network(service): + return deleted_names + + for subnetinfo in self._get_subnets_to_delete(bigip, service, subnet_hints): @@ -766,6 +807,11 @@ def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): partition=network_folder ) + + if self._is_last_on_network(service): + self.network_helper.delete_route(bigip, const.DEFAULT_PARTITION,subnetinfo['subnet_id']) + + local_selfip_name = "local-" + bigip.device_name + \ "-" + subnet['id'] @@ -812,6 +858,24 @@ def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): return deleted_names + def _is_last_on_network(self, service): + # REALLY not effecient - need to add get lb by network method to RPC interface + + network_id= service['loadbalancer']['network_id'] + + lb_id = service['loadbalancer']['id'] + + loadbalancers = self.driver.plugin_rpc.get_all_loadbalancers() + + for lb in loadbalancers: + lb_service = self.driver.plugin_rpc.get_service_by_loadbalancer_id(lb['lb_id']) + + if lb_service['loadbalancer']['id'] != lb_id and lb_service['loadbalancer']['network_id']==network_id : + return False + + return True + + def _get_subnets_to_delete(self, bigip, service, subnet_hints): # Clean up any Self IP, SNATs, networks, and folder for 
# services items that we deleted. diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py b/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py index cfb72ee73..9cdbb0300 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py @@ -40,7 +40,7 @@ def __init__(self, driver, l2_service, l3_binding): self.selfip_manager = BigIPResourceHelper(ResourceType.selfip) self.network_helper = NetworkHelper() - def _create_bigip_selfip(self, bigip, model): + def _create_bigip_selfip(self, bigip, model, network_id): created = False if self.selfip_manager.exists(bigip, name=model['name'], partition=model['partition']): @@ -59,7 +59,8 @@ def _create_bigip_selfip(self, bigip, model): self.network_helper.add_vlan_to_domain( bigip, name=model['vlan'], - partition=model['partition']) + partition=model['partition'], + rd_name=network_id) self.selfip_manager.create(bigip, model) created = True except HTTPError as err: @@ -132,7 +133,10 @@ def assure_bigip_selfip(self, bigip, service, subnetinfo): "floating": "disabled", "partition": network_folder } - self._create_bigip_selfip(bigip, model) + + network_id = network['id'] + + self._create_bigip_selfip(bigip, model, network_id) if self.l3_binding: self.l3_binding.bind_address(subnet_id=subnet['id'], @@ -203,7 +207,9 @@ def assure_gateway_on_subnet(self, bigip, subnetinfo, traffic_group): 'partition': network_folder } - if not self._create_bigip_selfip(bigip, model): + network_id = network['id'] + + if not self._create_bigip_selfip(bigip, model, network_id): LOG.error("failed to create gateway selfip") if self.l3_binding: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py index 82929b67b..66e7443e0 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py @@ -14,6 +14,7 @@ # limitations under the License. 
# +import constants_v2 as const from oslo_log import log as logging from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex @@ -47,6 +48,7 @@ def assure_tenant_created(self, service): adaptations shouldn't we be using ServiceModelAdapter... though I suppose this is the other way. """ + network_id = service['loadbalancer']['network_id'] tenant_id = service['loadbalancer']['tenant_id'] traffic_group = self.driver.service_to_traffic_group(service) traffic_group = '/Common/' + traffic_group @@ -86,17 +88,19 @@ def assure_tenant_created(self, service): if self.conf.use_namespaces: for bigip in self.driver.get_all_bigips(): if not self.network_helper.route_domain_exists(bigip, - folder_name): + const.DEFAULT_PARTITION,network_id): try: self.network_helper.create_route_domain( bigip, - folder_name, + "Common",network_id, self.conf.f5_route_domain_strictness) except Exception as err: LOG.exception(err.message) raise f5ex.RouteDomainCreationException( "Failed to create route domain for " - "tenant in %s" % (folder_name)) + "tenant in %s" % (const.DEFAULT_PARTITION)) + + def assure_tenant_cleanup(self, service, all_subnet_hints): """Delete tenant partition.""" @@ -110,23 +114,23 @@ def assure_tenant_cleanup(self, service, all_subnet_hints): # otherwise called once def _assure_bigip_tenant_cleanup(self, bigip, service, subnet_hints): tenant_id = service['loadbalancer']['tenant_id'] + network_id = service['loadbalancer']['network_id'] - self._remove_tenant_replication_mode(bigip, tenant_id) + self._remove_tenant_replication_mode(bigip, tenant_id, network_id) - def _remove_tenant_replication_mode(self, bigip, tenant_id): + def _remove_tenant_replication_mode(self, bigip, tenant_id, network_id): # Remove tenant in replication sync-mode partition = self.service_adapter.get_folder_name(tenant_id) - domain_names = self.network_helper.get_route_domain_names(bigip, - partition) - for domain_name in domain_names: - try: - self.network_helper.delete_route_domain(bigip, 
- partition, - domain_name) - except Exception as err: - LOG.error("Failed to delete route domain %s. " - "%s. Manual intervention might be required." - % (domain_name, err.message)) + + + try: + self.network_helper.delete_route_domain(bigip, + "Common", + network_id) + except Exception as err: + LOG.error("Failed to delete route domain %s. " + "%s. Manual intervention might be required." + % (network_id, err.message)) if self.driver.disconnected_service.network_exists( bigip, partition): From 6d4495964e4a7123711d25cf1eac21e158f54efd Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Thu, 9 Feb 2017 11:28:47 +0000 Subject: [PATCH 002/109] Apple existing patches - common networks and disable certificate verification for barbican calls --- .../lbaasv2/drivers/bigip/barbican_cert.py | 2 +- f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py b/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py index af4b4fb00..b6ddb9242 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py @@ -103,7 +103,7 @@ def _init_barbican_client(self): # NOTE: Session is deprecated in keystoneclient 2.1.0 # and will be removed in a future keystoneclient release. - sess = Session(auth=auth) + sess = Session(auth=auth, verify=False) self.barbican = Client(session=sess) # test barbican service diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py index ce46768d5..9f6dc2c1a 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py @@ -131,11 +131,11 @@ def set_context(self, context): def is_common_network(self, network): # Does this network belong in the /Common folder? 
return True - return network['shared'] or \ - (network['id'] in self.conf.common_network_ids) or \ - ('router:external' in network and - network['router:external'] and - self.conf.f5_common_external_networks) + # return network['shared'] or \ + # (network['id'] in self.conf.common_network_ids) or \ + # ('router:external' in network and + # network['router:external'] and + # self.conf.f5_common_external_networks) def get_vlan_name(self, network, hostname): # Construct a consistent vlan name From 1d0c2b058ec334dfbf147fc6f78dbbcf3f64ec57 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Thu, 9 Feb 2017 13:03:44 +0000 Subject: [PATCH 003/109] Catch and log route creation issues. Its causing issues during deletion, need to eventually set LB into error state, but currently limited to external IP use case. --- .../lbaasv2/drivers/bigip/network_service.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 8f296cd0e..614b3301d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -184,8 +184,11 @@ def _assure_subnet_gateway(self,service): for subnet_id, subnet in service['subnets'].iteritems(): if not self.network_helper.route_exists(bigip, const.DEFAULT_PARTITION,subnet_id): - self.network_helper.create_route(bigip, const.DEFAULT_PARTITION,subnet_id, subnet['gateway_ip'], rd.id) - + try: + self.network_helper.create_route(bigip, const.DEFAULT_PARTITION,subnet_id, subnet['gateway_ip'], rd.id) + except Exception as err: + LOG.error("Failed to create default gateway route for network %s subnet %s" % (network_id, subnet_id)) + LOG.exception(err) def _annotate_service_route_domains(self, service): # Add route domain notation to pool member and vip addresses. 
From 2a20ec200efd3b1df034f773e8c02ed976c0c25d Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 28 Feb 2017 16:05:23 +0000 Subject: [PATCH 004/109] Get loadbalancers per network via RPC --- .../lbaasv2/drivers/bigip/network_service.py | 8 ++---- .../lbaasv2/drivers/bigip/plugin_rpc.py | 25 +++++++++++++++++++ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 614b3301d..7befadf92 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -866,18 +866,14 @@ def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): return deleted_names def _is_last_on_network(self, service): - # REALLY not effecient - need to add get lb by network method to RPC interface - network_id= service['loadbalancer']['network_id'] lb_id = service['loadbalancer']['id'] - loadbalancers = self.driver.plugin_rpc.get_all_loadbalancers() + loadbalancers = self.driver.plugin_rpc.get_loadbalancers_by_network(network_id) for lb in loadbalancers: - lb_service = self.driver.plugin_rpc.get_service_by_loadbalancer_id(lb['lb_id']) - - if lb_service['loadbalancer']['id'] != lb_id and lb_service['loadbalancer']['network_id']==network_id : + if lb['lb_id'] != lb_id: return False return True diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py index fc1aadc31..bc83ec1d5 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py @@ -504,3 +504,28 @@ def get_pending_loadbalancers(self, env=None, group=None, host=None): "get_all_loadbalancers") return loadbalancers + + @log_helpers.log_method_call + def get_loadbalancers_by_network(self, network_id, env=None,group=None,host=None): + """Retrieve a list of loadbalancers for a 
network.""" + loadbalancers = [] + + if not env: + env = self.env + + try: + loadbalancers = self._call( + self.context, + self._make_msg('get_loadbalancers_by_network', + env=env, + network_id=network_id, + group=group, + host=host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "get_loadbalancers_by_network") + + return loadbalancers + From 9882f3df3e30aed3af3899a81f3ad9981b1bcfe7 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 5 Apr 2017 16:31:18 +0100 Subject: [PATCH 005/109] Patch for default pool switching support --- .../lbaasv2/drivers/bigip/lbaas_builder.py | 9 ++++++++- .../lbaasv2/drivers/bigip/service_adapter.py | 12 ++++++++++-- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index f524a0ea1..bd500bac6 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -119,6 +119,13 @@ def _assure_listeners_created(self, service): svc = {"loadbalancer": loadbalancer, "listener": listener, "networks": networks} + + default_pool_id = listener.get('default_pool_id', '') + if default_pool_id: + pool = self.get_pool_by_id(service, default_pool_id) + if pool: + svc['pool'] = pool + if listener['provisioning_status'] == plugin_const.PENDING_UPDATE: if 'old_listener' in service: # Delete existing VS and proceed with creating a new VS @@ -352,7 +359,7 @@ def _check_monitor_delete(service): @staticmethod def get_pool_by_id(service, pool_id): - if "pools" in service: + if pool_id and "pools" in service: pools = service["pools"] for pool in pools: if pool["id"] == pool_id: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index 8f89d4640..7f1f19420 100644 --- 
a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -97,7 +97,9 @@ def get_virtual(self, service): listener["session_persistence"] = \ service["pool"]["session_persistence"] - vip = self._map_virtual(loadbalancer, listener) + vip = self._map_virtual( + loadbalancer, listener, service.get('pool', None)) + self._add_bigip_items(listener, vip) return vip @@ -313,9 +315,15 @@ def _get_lb_method(self, method): else: return 'round-robin' - def _map_virtual(self, loadbalancer, listener): + def _map_virtual(self, loadbalancer, listener, pool=None): vip = self._init_virtual_name(loadbalancer, listener) + if pool: + p = self.init_pool_name(loadbalancer, pool) + vip["pool"] = p["name"] + + + vip["description"] = self.get_resource_description(listener) if "protocol" in listener: From 30d70f67c1755bad2e79d695137628e10b3c1d03 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Fri, 21 Apr 2017 15:33:58 +0100 Subject: [PATCH 006/109] All members that are not neutron ports - for external IP support --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 5fcd1d8be..caba1b151 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -244,10 +244,10 @@ def _assure_members(self, service, all_subnet_hints): "member": member, "pool": pool} - if 'port' not in member and \ - member['provisioning_status'] != plugin_const.PENDING_DELETE: - LOG.error("Member definition does not include Neutron port") - continue + # if 'port' not in member and \ + # member['provisioning_status'] != plugin_const.PENDING_DELETE: + # LOG.error("Member definition does not include Neutron port") + # continue # delete member if pool is being deleted if 
member['provisioning_status'] == plugin_const.PENDING_DELETE or\ From 363c9d8e60ceab8065c2c1d623cd97b62aa2b660 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 26 Apr 2017 16:21:28 +0100 Subject: [PATCH 007/109] Fix to use ESD with L4 VS 0 switch to standard TCP on apply and back to performance L4 when removed --- .../lbaasv2/drivers/bigip/listener_service.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index f04011e5b..9075d5746 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -601,14 +601,17 @@ def apply_esd(self, svc, esd, bigips): if 'lbaas_fallback_persist' in esd: update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] - if profiles: - # always use http and oneconnect + # always use http and oneconnect for non TCP listener + listener = svc["listener"] + if profiles and not listener['protocol'] == 'TCP': + profiles.append({'name': 'http', 'partition': 'Common', 'context': 'all'}) profiles.append({'name': 'oneconnect', 'partition': 'Common', 'context': 'all'}) + if profiles: update_attrs['profiles'] = profiles # iRules @@ -639,6 +642,14 @@ def remove_esd(self, svc, esd, bigips): tls['name'] = vs['name'] tls['partition'] = vs['partition'] + listener = svc["listener"] + + if listener['protocol'] == 'TCP': + # Revert VS back to fastL4. Must do an update to replace + # profiles instead of using add/remove profile. Leave http + # profiles in place for non-TCP listeners. 
+ vs['profiles'] = ['/Common/fastL4'] + # remove iRules if 'lbaas_irule' in esd: vs['rules'] = [] From eb0d6690009b80b82b7fe23eb9d4ae98e8ca8ec1 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 2 May 2017 13:53:33 +0100 Subject: [PATCH 008/109] Prevent update failure in case address js missing in member --- f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index 92765e29f..b6e1bb50e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -182,8 +182,13 @@ def update_member(self, service, bigips): if m.exists(name=urllib.quote(member["name"]), partition=part): m = m.load(name=urllib.quote(member["name"]), partition=part) - member.pop("address") - m.modify(**member) + if member.get("address"): + member.pop("address") + m.modify(**member) + else: + LOG.error("Unable to update member, no address") + LOG.error(member) + def _get_monitor_helper(self, service): monitor_type = self.service_adapter.get_monitor_type(service) From d5c0bb22a5a871eab27b827a7169cc6675015d70 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 30 May 2017 18:42:53 +0100 Subject: [PATCH 009/109] Try to make sure pools are created if missing --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index caba1b151..9f335369b 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -168,7 +168,11 @@ def _assure_pools_created(self, service): plugin_const.PENDING_CREATE: self.pool_builder.create_pool(svc, bigips) else: - 
self.pool_builder.update_pool(svc, bigips) + try: + self.pool_builder.update_pool(svc, bigips) + except HTTPError as err: + if err.response.status_code == 404: + self.pool_builder.create_pool(svc, bigips) # assign pool name to virtual pool_name = self.service_adapter.init_pool_name( From 4e51950212ff3d7c528657e054a0a987fe417686 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 30 May 2017 18:54:52 +0100 Subject: [PATCH 010/109] create pools before listeners --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 9f335369b..c41500999 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -58,13 +58,14 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_loadbalancer_created(service, all_subnet_hints) + self._assure_pools_created(service) + self._assure_listeners_created(service) self._assure_l7policies_created(service) self._assure_l7rules_created(service) - self._assure_pools_created(service) self._assure_monitors(service) From c96e190ff1cebc7bb611179f9a2662fe5340fa18 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 30 May 2017 19:06:10 +0100 Subject: [PATCH 011/109] A nice chicken and egg sandwich to unpick tomorrow --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index c41500999..9f335369b 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -58,14 +58,13 @@ def assure_service(self, service, traffic_group, all_subnet_hints): 
self._assure_loadbalancer_created(service, all_subnet_hints) - self._assure_pools_created(service) - self._assure_listeners_created(service) self._assure_l7policies_created(service) self._assure_l7rules_created(service) + self._assure_pools_created(service) self._assure_monitors(service) From b5a63140c20b17ece762d86b8cdd7c137fae1952 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 31 May 2017 08:57:44 +0100 Subject: [PATCH 012/109] Assure pool created before configuring any listeners it may be attached to --- .../lbaasv2/drivers/bigip/lbaas_builder.py | 33 ++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 727d55abb..672071a6e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -58,13 +58,15 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_loadbalancer_created(service, all_subnet_hints) + self._assure_pools_created(service) + self._assure_listeners_created(service) self._assure_l7policies_created(service) self._assure_l7rules_created(service) - self._assure_pools_created(service) + self._assure_pools_cconfigured(service) self._assure_monitors(service) @@ -175,6 +177,34 @@ def _assure_pools_created(self, service): if err.response.status_code == 404: self.pool_builder.create_pool(svc, bigips) + except HTTPError as err: + if err.response.status_code != 409: + pool['provisioning_status'] = plugin_const.ERROR + loadbalancer['provisioning_status'] = ( + plugin_const.ERROR) + raise f5_ex.PoolCreationException(err.message) + + except Exception as err: + pool['provisioning_status'] = plugin_const.ERROR + loadbalancer['provisioning_status'] = plugin_const.ERROR + raise f5_ex.PoolCreationException(err.message) + + def _assure_pools_configured(self, service): + if "pools" not in service: + return 
+ + pools = service["pools"] + loadbalancer = service["loadbalancer"] + + bigips = self.driver.get_config_bigips() + + for pool in pools: + if pool['provisioning_status'] != plugin_const.PENDING_DELETE: + svc = {"loadbalancer": loadbalancer, + "pool": pool} + svc['members'] = self._get_pool_members(service, pool['id']) + + try: # assign pool name to virtual pool_name = self.service_adapter.init_pool_name( loadbalancer, pool) @@ -201,6 +231,7 @@ def _assure_pools_created(self, service): loadbalancer['provisioning_status'] = plugin_const.ERROR raise f5_ex.PoolCreationException(err.message) + def _get_pool_members(self, service, pool_id): '''Return a list of members associated with given pool.''' From d5c744ec4203a5da0823ff885aa9378a5eb2184b Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 31 May 2017 09:16:15 +0100 Subject: [PATCH 013/109] Fix typo --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 672071a6e..49f2ae397 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -66,7 +66,7 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_l7rules_created(service) - self._assure_pools_cconfigured(service) + self._assure_pools_configured(service) self._assure_monitors(service) From e5f166702e136cbd93073f71f548cca5348e15e0 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 6 Jun 2017 11:35:40 +0100 Subject: [PATCH 014/109] Handle case where virtual address is out of sync with route domain --- .../lbaasv2/drivers/bigip/virtual_address.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py b/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py 
index b7e1a9574..6388d3540 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py @@ -94,12 +94,20 @@ def load(self, bigip): def update(self, bigip): - # Get the model object, pop immutables and update - model = self.model() - model.pop("address") - va = self.virtual_address.update(bigip, model) - return va + model = self.model() + remote = self.load(bigip) + if remote.address != model["address"]: + # could be route domain or IP has changed + try: + self.delete(bigip) + except: + LOG.error("Failed to deleted redundant virtual address %s", remote) + return self.create(bigip) + else: + # pop immutables and update + model.pop("address") + return self.virtual_address.update(bigip, model) def assure(self, bigip, delete=False): From f42ee0c4b53f496cd8421bdacb22cb4f5ff69452 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 6 Jun 2017 11:57:29 +0100 Subject: [PATCH 015/109] This seems like a blatant typo, causes any LB with L7 rules on to fail on sync --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 49f2ae397..bf4503f97 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -548,7 +548,7 @@ def _assure_l7rules_created(self, service): l7policy = self.get_l7policy_for_rule( service['l7policies'], l7rule) name = l7policy.get('name', None) - if name and self.driver.is_esd(name): + if name and self.is_esd(name): LOG.error("L7 policy {0} is an ESD. 
Cannot add " "an L7 rule to and ESD.".format(name)) continue @@ -571,7 +571,7 @@ def _assure_l7rules_deleted(self, service): l7policy = self.get_l7policy_for_rule( service['l7policies'], l7rule) name = l7policy.get('name', None) - if name and self.driver.is_esd(name): + if name and self.is_esd(name): continue self.l7service.bigips = self.driver.get_config_bigips() self.l7service.delete_l7rule(l7rule, service, bigips) From ab71ed766492a55fa2a822111f2039f742fa619b Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 6 Jun 2017 11:58:10 +0100 Subject: [PATCH 016/109] Include LBs in error status, not just active ones --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 8159b852c..698e447df 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -505,7 +505,7 @@ def sync_state(self): # Validate each service we own, i.e. loadbalancers to which this # agent is bound, that does not exist in our service cache. 
- for lb_id in active_loadbalancer_ids: + for lb_id in all_loadbalancer_ids: if not self.cache.get_by_loadbalancer_id(lb_id): self.validate_service(lb_id) From c6cc30b35c7cd3dec096e42d5b48ba3dcdd72fc7 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Thu, 8 Jun 2017 16:52:09 +0100 Subject: [PATCH 017/109] Missed previous patch in merge --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index e8bf9f6b9..beea7bca9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -427,7 +427,7 @@ def _check_monitor_delete(service): @staticmethod def get_pool_by_id(service, pool_id): - if "pools" in service: + if pool_id and "pools" in service: pools = service["pools"] for pool in pools: if pool["id"] == pool_id: From 03328ecc14dd651822b5c8f045958d1d439e5666 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Fri, 9 Jun 2017 13:24:50 +0100 Subject: [PATCH 018/109] Sync and check objects in provisioning status of ERROR --- .../lbaasv2/drivers/bigip/agent_manager.py | 39 +++++++++++++++++-- .../lbaasv2/drivers/bigip/icontrol_driver.py | 14 +++---- .../lbaasv2/drivers/bigip/lbaas_builder.py | 16 ++++++++ 3 files changed, 59 insertions(+), 10 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 698e447df..c9da46c1d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -565,8 +565,8 @@ def validate_service(self, lb_id): lb_id ) self.cache.put(service, self.agent_host) - if not self.lbdriver.service_exists(service): - LOG.error('active loadbalancer %s is not on BIG-IP...syncing' + if not self.lbdriver.service_exists(service) or 
self._service_has_provisioning_error(service): + LOG.info('active loadbalancer %s is not on BIG-IP or has Error state...syncing' % lb_id) if self.lbdriver.service_rename_required(service): @@ -582,12 +582,45 @@ def validate_service(self, lb_id): self.lbdriver.sync(service) else: - LOG.debug("Found service definition for %s" % (lb_id)) + LOG.debug("Found service definition for %s, state is ACTIVE, move on" % (lb_id)) except q_exception.NeutronException as exc: LOG.error("NeutronException: %s" % exc.msg) except Exception as exc: LOG.exception("Service validation error: %s" % exc.message) + def _service_has_provisioning_error(self, service): + + loadbalancer = service['loadbalancer'] + + if loadbalancer["provisioning_status"] == plugin_const.ERROR: + return True + + for listener in service['listeners']: + + if listener["provisioning_status"] == plugin_const.ERROR: + return True + + for pool in service['pools']: + if pool["provisioning_status"] == plugin_const.ERROR: + return True + + for healthmonitor in service['healthmonitors']: + if healthmonitor["provisioning_status"] == plugin_const.ERROR: + return True + + for l7policies in service['l7policies']: + if l7policies["provisioning_status"] == plugin_const.ERROR: + return True + + for l7_rules in service['l7policy_rules']: + if l7_rules["provisioning_status"] == plugin_const.ERROR: + return True + + + return False + + + @log_helpers.log_method_call def refresh_service(self, lb_id): try: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 4ab1a9ead..9e662a4cf 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -1329,7 +1329,7 @@ def _update_member_status(self, members, timed_out): provisioning_status = member['provisioning_status'] if (provisioning_status == plugin_const.PENDING_CREATE or - provisioning_status == plugin_const.PENDING_UPDATE): 
+ provisioning_status == plugin_const.PENDING_UPDATE or provisioning_status == plugin_const.ACTIVE): if timed_out: member['provisioning_status'] = plugin_const.ERROR @@ -1358,7 +1358,7 @@ def _update_health_monitor_status(self, health_monitors): if 'provisioning_status' in health_monitor: provisioning_status = health_monitor['provisioning_status'] if (provisioning_status == plugin_const.PENDING_CREATE or - provisioning_status == plugin_const.PENDING_UPDATE): + provisioning_status == plugin_const.PENDING_UPDATE or provisioning_status == plugin_const.ACTIVE): self.plugin_rpc.update_health_monitor_status( health_monitor['id'], plugin_const.ACTIVE, @@ -1380,7 +1380,7 @@ def _update_pool_status(self, pools): if 'provisioning_status' in pool: provisioning_status = pool['provisioning_status'] if (provisioning_status == plugin_const.PENDING_CREATE or - provisioning_status == plugin_const.PENDING_UPDATE): + provisioning_status == plugin_const.PENDING_UPDATE or provisioning_status == plugin_const.ACTIVE): self.plugin_rpc.update_pool_status( pool['id'], plugin_const.ACTIVE, @@ -1401,7 +1401,7 @@ def _update_listener_status(self, service): if 'provisioning_status' in listener: provisioning_status = listener['provisioning_status'] if (provisioning_status == plugin_const.PENDING_CREATE or - provisioning_status == plugin_const.PENDING_UPDATE): + provisioning_status == plugin_const.PENDING_UPDATE or provisioning_status == plugin_const.ACTIVE): self.plugin_rpc.update_listener_status( listener['id'], plugin_const.ACTIVE, @@ -1425,7 +1425,7 @@ def _update_l7rule_status(self, l7rules): if 'provisioning_status' in l7rule: provisioning_status = l7rule['provisioning_status'] if (provisioning_status == plugin_const.PENDING_CREATE or - provisioning_status == plugin_const.PENDING_UPDATE): + provisioning_status == plugin_const.PENDING_UPDATE or provisioning_status == plugin_const.ACTIVE): self.plugin_rpc.update_l7rule_status( l7rule['id'], l7rule['policy_id'], @@ -1447,7 +1447,7 @@ def 
_update_l7policy_status(self, l7policies): if 'provisioning_status' in l7policy: provisioning_status = l7policy['provisioning_status'] if (provisioning_status == plugin_const.PENDING_CREATE or - provisioning_status == plugin_const.PENDING_UPDATE): + provisioning_status == plugin_const.PENDING_UPDATE or provisioning_status == plugin_const.ACTIVE): self.plugin_rpc.update_l7policy_status( l7policy['id'], plugin_const.ACTIVE, @@ -1468,7 +1468,7 @@ def _update_loadbalancer_status(self, service, timed_out=False): plugin_const.ERROR) if (provisioning_status == plugin_const.PENDING_CREATE or - provisioning_status == plugin_const.PENDING_UPDATE): + provisioning_status == plugin_const.PENDING_UPDATE or provisioning_status == plugin_const.ACTIVE): if timed_out: operating_status = (lb_const.OFFLINE) if provisioning_status == plugin_const.PENDING_CREATE: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index beea7bca9..6f9a45def 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -110,6 +110,8 @@ def _assure_loadbalancer_created(self, service, all_subnet_hints): all_subnet_hints, False) + loadbalancer['provisioning_status'] = plugin_const.ACTIVE + def _assure_listeners_created(self, service): if 'listeners' not in service: return @@ -150,6 +152,8 @@ def _assure_listeners_created(self, service): listener['provisioning_status'] = plugin_const.ERROR raise f5_ex.VirtualServerCreationException(err.message) + listener['provisioning_status'] = plugin_const.ACTIVE + def _assure_pools_created(self, service): if "pools" not in service: return @@ -189,6 +193,8 @@ def _assure_pools_created(self, service): loadbalancer['provisioning_status'] = plugin_const.ERROR raise f5_ex.PoolCreationException(err.message) + pool['provisioning_status'] = plugin_const.ACTIVE + def _assure_pools_configured(self, service): if "pools" not in 
service: return @@ -231,6 +237,8 @@ def _assure_pools_configured(self, service): loadbalancer['provisioning_status'] = plugin_const.ERROR raise f5_ex.PoolCreationException(err.message) + pool['provisioning_status'] = plugin_const.ACTIVE + def _get_pool_members(self, service, pool_id): '''Return a list of members associated with given pool.''' @@ -287,6 +295,8 @@ def _assure_monitors(self, service): monitor['provisioning_status'] = plugin_const.ERROR raise f5_ex.MonitorCreationException(err.message) + monitor['provisioning_status'] = plugin_const.ACTIVE + def _assure_members(self, service, all_subnet_hints): if not (("pools" in service) and ("members" in service)): return @@ -339,6 +349,8 @@ def _assure_members(self, service, all_subnet_hints): member["network_id"], all_subnet_hints, True) + member['provisioning_status'] = plugin_const.ACTIVE + def _assure_loadbalancer_deleted(self, service): if (service['loadbalancer']['provisioning_status'] != @@ -502,6 +514,8 @@ def _assure_l7policies_created(self, service): plugin_const.ERROR raise f5_ex.L7PolicyCreationException(err.message) + l7policy['provisioning_status'] = plugin_const.ACTIVE + def _assure_l7policies_deleted(self, service): if 'l7policies' not in service: return @@ -562,6 +576,8 @@ def _assure_l7rules_created(self, service): plugin_const.ERROR raise f5_ex.L7PolicyCreationException(err.message) + l7rule['provisioning_status'] = plugin_const.ACTIVE + def _assure_l7rules_deleted(self, service): if 'l7policy_rules' not in service: return From 8aba8269b792d6250fa7c4cdc27f2b6a28a21b4f Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 13 Jun 2017 13:40:16 +0100 Subject: [PATCH 019/109] =?UTF-8?q?Don=E2=80=99t=20set=20active=20if=20pen?= =?UTF-8?q?ding=20delete?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git 
a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 6f9a45def..f91b3134d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -152,7 +152,8 @@ def _assure_listeners_created(self, service): listener['provisioning_status'] = plugin_const.ERROR raise f5_ex.VirtualServerCreationException(err.message) - listener['provisioning_status'] = plugin_const.ACTIVE + if listener['provisioning_status'] != plugin_const.PENDING_DELETE: + listener['provisioning_status'] = plugin_const.ACTIVE def _assure_pools_created(self, service): if "pools" not in service: @@ -326,6 +327,7 @@ def _assure_members(self, service, all_subnet_hints): else: try: self.pool_builder.create_member(svc, bigips) + member['provisioning_status'] = plugin_const.ACTIVE except HTTPError as err: if err.response.status_code != 409: # FIXME(RB) @@ -340,6 +342,8 @@ def _assure_members(self, service, all_subnet_hints): except Exception as err: member['provisioning_status'] = plugin_const.ERROR raise f5_ex.MemberUpdateException(err.message) + + except Exception as err: member['provisioning_status'] = plugin_const.ERROR raise f5_ex.MemberCreationException(err.message) @@ -349,7 +353,7 @@ def _assure_members(self, service, all_subnet_hints): member["network_id"], all_subnet_hints, True) - member['provisioning_status'] = plugin_const.ACTIVE + def _assure_loadbalancer_deleted(self, service): From 45e6488088a01bdb0509a9fb96838c19003f0996 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 13 Jun 2017 16:27:13 +0100 Subject: [PATCH 020/109] =?UTF-8?q?Don=E2=80=99t=20interfere=20with=20LBs?= =?UTF-8?q?=20in=20pending=20delete?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index f91b3134d..8fa5e8e88 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -109,8 +109,8 @@ def _assure_loadbalancer_created(self, service, all_subnet_hints): loadbalancer["network_id"], all_subnet_hints, False) - - loadbalancer['provisioning_status'] = plugin_const.ACTIVE + if loadbalancer['provisioning_status'] != plugin_const.PENDING_DELETE: + loadbalancer['provisioning_status'] = plugin_const.ACTIVE def _assure_listeners_created(self, service): if 'listeners' not in service: From 7bcce32af993ac967f5b7f1e11eda6e046648dd8 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 14 Jun 2017 10:35:05 +0100 Subject: [PATCH 021/109] Add guard for missing loadbalancer in service --- .../lbaasv2/drivers/bigip/icontrol_driver.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 9e662a4cf..30bbd3dad 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -975,16 +975,20 @@ def tunnel_sync(self): @is_connected def sync(self, service): """Sync service defintion to device""" + + load_balancer = service.get('loadbalancer',None) + # plugin_rpc may not be set when unit testing - if self.plugin_rpc: + if self.plugin_rpc and load_balancer: # Get the latest service. It may have changed. 
service = self.plugin_rpc.get_service_by_loadbalancer_id( - service['loadbalancer']['id'] + load_balancer.get('id') ) - if service['loadbalancer']: + + if service.get('loadbalancer',None): return self._common_service_handler(service) else: - LOG.debug("Attempted sync of deleted pool") + LOG.debug("Attempted sync of deleted load balancer") @serialized('backup_configuration') @is_connected From 7ed39b12f8da90252bc2c1a95cfc33f63c931720 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 30 Aug 2017 13:09:07 +0100 Subject: [PATCH 022/109] Initial fix to support VS VIP as snat pool IP --- .../lbaasv2/drivers/bigip/network_service.py | 37 +++++- .../lbaasv2/drivers/bigip/service_adapter.py | 4 + .../lbaasv2/drivers/bigip/snats.py | 123 ++++++++++++------ 3 files changed, 124 insertions(+), 40 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 222261eca..49a4dd2fc 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -15,6 +15,7 @@ import itertools import netaddr +import re import constants_v2 as const from neutron.common.exceptions import NeutronException @@ -196,6 +197,8 @@ def prep_service_networking(self, service, traffic_group): for subnetinfo in subnetsinfo: if self.conf.f5_snat_addresses_per_subnet > 0: self._assure_subnet_snats(assure_bigips, service, subnetinfo) + elif self.conf.f5_snat_addresses_per_subnet == -1: + self._assure_lb_snats(assure_bigips, service, subnetinfo) if subnetinfo['is_for_member'] and not self.conf.f5_snat_mode: try: @@ -592,6 +595,37 @@ def _assure_subnet_snats(self, assure_bigips, service, subnetinfo): self.bigip_snat_manager.assure_bigip_snats( assure_bigip, subnetinfo, snat_addrs, tenant_id) + def _assure_lb_snats(self, assure_bigips, service, subnetinfo): + # Ensure snat for loadbalancer exists on bigips + tenant_id = 
service['loadbalancer']['tenant_id'] + + lb_id = service['loadbalancer']['id'] + + + assure_bigips = \ + [bigip for bigip in assure_bigips + if tenant_id not in bigip.assured_tenant_snat_subnets or + lb_id not in + bigip.assured_tenant_snat_subnets[tenant_id]] + + LOG.debug("_assure_subnet_snats: getting snat addrs for: %s" % + lb_id) + if len(assure_bigips): + + ip_address = service['loadbalancer']["vip_address"] + + match = re.search("%[0-9]+$", str(ip_address)) + + if match is not None: + ip_address = ip_address[:-len(match.group(0))] + + snat_addrs = [ip_address] + for assure_bigip in assure_bigips: + self.bigip_snat_manager.assure_bigip_snats( + assure_bigip, subnetinfo, snat_addrs, tenant_id, lb_id) + + pass + def _allocate_gw_addr(self, subnetinfo): # Create a name for the port and for the IP Forwarding # Virtual Server as well as the floating Self IP which @@ -789,6 +823,7 @@ def _assure_delete_nets_shared(self, bigip, service, subnet_hints): # Assure shared configuration (which syncs) is deleted deleted_names = set() tenant_id = service['loadbalancer']['tenant_id'] + lb_id = service['loadbalancer']['id'] delete_gateway = self.bigip_selfip_manager.delete_gateway_on_subnet for subnetinfo in self._get_subnets_to_delete(bigip, @@ -805,7 +840,7 @@ def _assure_delete_nets_shared(self, bigip, service, subnet_hints): my_deleted_names, my_in_use_subnets = \ self.bigip_snat_manager.delete_bigip_snats( - bigip, subnetinfo, tenant_id) + bigip, subnetinfo, tenant_id,lb_id) deleted_names = deleted_names.union(my_deleted_names) for in_use_subnetid in my_in_use_subnets: subnet_hints['check_for_delete_subnets'].pop( diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index 0c725f000..b63eff3e7 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -92,6 +92,10 @@ def get_virtual(self, service): if 
listener["use_snat"] and self.snat_count() > 0: listener["snat_pool_name"] = self.get_folder_name( loadbalancer["tenant_id"]) + elif listener["use_snat"] and self.snat_count() == -1: + listener["snat_pool_name"] = "lb_" + loadbalancer["id"] + + # transfer session_persistence from pool to listener if "pool" in service and "session_persistence" in service["pool"]: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index 70d09eb0f..d88f76eec 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -39,16 +39,19 @@ def __init__(self, driver, l2_service, l3_binding): ResourceType.snat_translation) self.network_helper = NetworkHelper() - def _get_snat_name(self, subnet, tenant_id): + def _get_snat_name(self, subnet, tenant_id, lb_id=None): + + key = self._get_pool_uuid(subnet,lb_id) + # Get the snat name based on HA type if self.driver.conf.f5_ha_type == 'standalone': - return 'snat-traffic-group-local-only-' + subnet['id'] + return 'snat-traffic-group-local-only-' + key elif self.driver.conf.f5_ha_type == 'pair': - return 'snat-traffic-group-1-' + subnet['id'] + return 'snat-traffic-group-1-' + key elif self.driver.conf.f5_ha_type == 'scalen': traffic_group = self.driver.tenant_to_traffic_group(tenant_id) base_traffic_group = os.path.basename(traffic_group) - return 'snat-' + base_traffic_group + '-' + subnet['id'] + return 'snat-' + base_traffic_group + '-' + key LOG.error('Invalid f5_ha_type:%s' % self.driver.conf.f5_ha_type) return '' @@ -98,41 +101,48 @@ def get_snat_addrs(self, subnetinfo, tenant_id, snat_count): return snat_addrs - def assure_bigip_snats(self, bigip, subnetinfo, snat_addrs, tenant_id): + def assure_bigip_snats(self, bigip, subnetinfo, snat_addrs, tenant_id,lb_id=None): # Ensure Snat Addresses are configured on a bigip. # Called for every bigip only in replication mode. # otherwise called once and synced. 
network = subnetinfo['network'] snat_info = {} - if self.l2_service.is_common_network(network): + + + # if we config to -1 for VIP pool IP then we use tenant partition + + if self.driver.conf.f5_snat_addresses_per_subnet > 0: snat_info['network_folder'] = 'Common' - else: + elif self.driver.conf.f5_snat_addresses_per_subnet == -1: snat_info['network_folder'] = ( self.driver.service_adapter.get_folder_name(tenant_id) ) - snat_info['pool_name'] = self.driver.service_adapter.get_folder_name( - tenant_id - ) - snat_info['pool_folder'] = self.driver.service_adapter.get_folder_name( - tenant_id - ) + + snat_info['pool_name'] = self._get_snat_pool_name(tenant_id,lb_id) + + + snat_info['pool_folder'] = self.driver.service_adapter.get_folder_name(tenant_id) + snat_info['addrs'] = snat_addrs - self._assure_bigip_snats(bigip, subnetinfo, snat_info, tenant_id) + self._assure_bigip_snats(bigip, subnetinfo, snat_info, tenant_id,lb_id) - def _assure_bigip_snats(self, bigip, subnetinfo, snat_info, tenant_id): + def _assure_bigip_snats(self, bigip, subnetinfo, snat_info, tenant_id,lb_id=None): # Configure the ip addresses for snat network = subnetinfo['network'] subnet = subnetinfo['subnet'] + key = self._get_pool_uuid(subnet,lb_id) + + if tenant_id not in bigip.assured_tenant_snat_subnets: bigip.assured_tenant_snat_subnets[tenant_id] = [] - if subnet['id'] in bigip.assured_tenant_snat_subnets[tenant_id]: + if key in bigip.assured_tenant_snat_subnets[tenant_id]: return - snat_name = self._get_snat_name(subnet, tenant_id) + snat_name = self._get_snat_name(subnet, tenant_id, lb_id) for i, snat_address in enumerate(snat_info['addrs']): ip_address = snat_address + \ '%' + str(network['route_domain_id']) @@ -198,9 +208,10 @@ def _assure_bigip_snats(self, bigip, subnetinfo, snat_info, tenant_id): self.l3_binding.bind_address(subnet_id=subnet['id'], ip_address=ip_address) - bigip.assured_tenant_snat_subnets[tenant_id].append(subnet['id']) - def delete_bigip_snats(self, bigip, subnetinfo, 
tenant_id): + bigip.assured_tenant_snat_subnets[tenant_id].append(key) + + def delete_bigip_snats(self, bigip, subnetinfo, tenant_id, lb_id=None): # Assure shared snat configuration (which syncs) is deleted. # if not subnetinfo['network']: @@ -208,46 +219,66 @@ def delete_bigip_snats(self, bigip, subnetinfo, tenant_id): 'for missing network ... skipping.') return set() - return self._delete_bigip_snats(bigip, subnetinfo, tenant_id) + return self._delete_bigip_snats(bigip, subnetinfo, tenant_id, lb_id) - def _remove_assured_tenant_snat_subnet(self, bigip, tenant_id, subnet): + def _remove_assured_tenant_snat_subnet(self, bigip, tenant_id, subnet,lb_id=None): # Remove ref for the subnet for this tenant""" + + key = self._get_pool_uuid(subnet,lb_id) + if tenant_id in bigip.assured_tenant_snat_subnets: tenant_snat_subnets = \ bigip.assured_tenant_snat_subnets[tenant_id] - if tenant_snat_subnets and subnet['id'] in tenant_snat_subnets: + if tenant_snat_subnets and key in tenant_snat_subnets: LOG.debug( 'Remove subnet id %s from ' 'bigip.assured_tenant_snat_subnets for tenant %s' % (subnet['id'], tenant_id)) - tenant_snat_subnets.remove(subnet['id']) + tenant_snat_subnets.remove(key) else: LOG.debug( 'Subnet id %s does not exist in ' 'bigip.assured_tenant_snat_subnets for tenant %s' % - (subnet['id'], tenant_id)) + (key, tenant_id)) else: LOG.debug( 'Tenant id %s does not exist in ' 'bigip.assured_tenant_snat_subnets' % tenant_id) - def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id): + def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id, lb_id=None): # Assure snats deleted in standalone mode """ subnet = subnetinfo['subnet'] network = subnetinfo['network'] - if self.l2_service.is_common_network(network): + + key = self._get_pool_uuid(subnet,lb_id) + + partition = self.driver.service_adapter.get_folder_name( + tenant_id + ) + + if self.driver.conf.f5_snat_addresses_per_subnet > 0: partition = 'Common' - else: - partition = 
self.driver.service_adapter.get_folder_name(tenant_id) + elif self.driver.conf.f5_snat_addresses_per_subnet == -1: + partition = ( + self.driver.service_adapter.get_folder_name(tenant_id) + ) + - snat_pool_name = self.driver.service_adapter.get_folder_name(tenant_id) - snat_pool_folder = snat_pool_name + snat_pool_name = self._get_snat_pool_name(tenant_id,lb_id) + snat_pool_folder = self.driver.service_adapter.get_folder_name(tenant_id) deleted_names = set() in_use_subnets = set() # Delete SNATs on traffic-group-local-only - snat_name = self._get_snat_name(subnet, tenant_id) - for i in range(self.driver.conf.f5_snat_addresses_per_subnet): + snat_name = self._get_snat_name(subnet, tenant_id,key) + + count = 0 + if self.driver.conf.f5_snat_addresses_per_subnet > 0: + count = self.driver.conf.f5_snat_addresses_per_subnet + elif self.driver.conf.f5_snat_addresses_per_subnet == -1: + count = 1 + + for i in range(count): index_snat_name = snat_name + "_" + str(i) tmos_snat_name = index_snat_name @@ -307,18 +338,18 @@ def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id): # Check if subnet in use by any tenants/snatpools. If in use, # add subnet to hints list of subnets in use. 
- self._remove_assured_tenant_snat_subnet(bigip, tenant_id, subnet) + self._remove_assured_tenant_snat_subnet(bigip, tenant_id, subnet,lb_id) LOG.debug( - 'Check cache for subnet %s in use by other tenant' % - subnet['id']) + 'Check cache for pool %s in use by other tenant' % + key) in_use_count = 0 for loop_tenant_id in bigip.assured_tenant_snat_subnets: tenant_snat_subnets = \ bigip.assured_tenant_snat_subnets[loop_tenant_id] - if subnet['id'] in tenant_snat_subnets: + if key in tenant_snat_subnets: LOG.debug( - 'Subnet %s in use (tenant %s)' % - (subnet['id'], loop_tenant_id)) + 'Pool %s in use (tenant %s)' % + (key, loop_tenant_id)) in_use_count += 1 if in_use_count: @@ -327,7 +358,7 @@ def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id): LOG.debug('Check subnet in use by any tenant') member_use_count = \ self.get_snatpool_member_use_count( - bigip, subnet['id']) + bigip, key) if member_use_count: LOG.debug('Subnet in use - do not delete') in_use_subnets.add(subnet['id']) @@ -356,3 +387,17 @@ def get_snatpool_member_use_count(self, bigip, member_name): if member_name == os.path.basename(member): snat_count += 1 return snat_count + + def _get_snat_pool_name(self, tenant_id, lb_id): + if lb_id is not None and self.driver.conf.f5_snat_addresses_per_subnet == -1: + return "lb_"+lb_id + else: + return self.driver.service_adapter.get_folder_name( + tenant_id + ) + + def _get_pool_uuid(self,subnet, lb_id): + if lb_id is not None and self.driver.conf.f5_snat_addresses_per_subnet == -1: + return lb_id + else: + return subnet['id'] \ No newline at end of file From ce2f8565d1b3b55811b031094b72ad33c29e0de6 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 5 Sep 2017 13:32:15 +0100 Subject: [PATCH 023/109] Add commands for individual sync and delete, --- .../lbaasv2/drivers/bigip/cli/__init__.py | 0 .../drivers/bigip/cli/actions/__init__.py | 0 .../drivers/bigip/cli/actions/base_action.py | 85 +++++++++++++++++++ .../drivers/bigip/cli/actions/delete.py | 31 
+++++++ .../lbaasv2/drivers/bigip/cli/actions/sync.py | 32 +++++++ .../lbaasv2/drivers/bigip/cli/f5_cli_utils.py | 46 ++++++++++ .../lbaasv2/drivers/bigip/esd_filehandler.py | 2 +- .../lbaasv2/drivers/bigip/icontrol_driver.py | 26 +++++- .../lbaasv2/drivers/bigip/listener_service.py | 39 ++++++--- .../lbaasv2/drivers/bigip/network_service.py | 3 +- .../lbaasv2/drivers/bigip/service_adapter.py | 8 +- .../lbaasv2/drivers/bigip/snats.py | 33 ++++--- .../lbaasv2/drivers/bigip/utils.py | 19 +++++ setup.py | 3 +- 14 files changed, 297 insertions(+), 30 deletions(-) create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/__init__.py create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/__init__.py create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/__init__.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/__init__.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py new file mode 100644 index 000000000..a27131f4b --- /dev/null +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py @@ -0,0 +1,85 @@ +from oslo_config import cfg +from oslo_utils import importutils +from oslo_log import log as logging +import socket +import collections +import sys + +import errno +import inspect +import sys + +import f5_openstack_agent.lbaasv2.drivers.bigip.exceptions as exceptions + 
+from oslo_config import cfg +from oslo_log import log as oslo_logging +from oslo_service import service + +from neutron.agent.linux import interface +from neutron.agent.common import config +from neutron.common import config as common_config +from neutron.common import rpc as n_rpc + +import f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver as driver +import f5_openstack_agent.lbaasv2.drivers.bigip.agent_manager as manager +import f5_openstack_agent.lbaasv2.drivers.bigip.agent as agent +import f5_openstack_agent.lbaasv2.drivers.bigip.constants_v2 as f5constants + + + + +LOG = logging.getLogger(__name__) + + +class BaseAction(object): + + + def __init__(self,namespace): + + self.lb_id = namespace.lb_id + self.config_files = namespace.config + self.conf = cfg.CONF + + config_files = [] + + for s in self.config_files: + config_files.append("--config-file") + config_files.append(s) + + common_config.init(config_files) + + cfg.CONF.register_opts(manager.OPTS) + cfg.CONF.register_opts(interface.OPTS) + cfg.CONF.register_opts(agent.OPTS) + cfg.CONF.register_opts(driver.OPTS) + config.register_agent_state_opts_helper(cfg.CONF) + config.register_root_helper(cfg.CONF) + + self.host = socket.gethostname() + + if namespace.log: + common_config.setup_logging() + + + self.manager = manager.LbaasAgentManager(cfg.CONF) + self.driver = self.manager.lbdriver + + + def replace_dict_value(self,obj,key,new_value): + if isinstance(obj,dict): + for k, v in obj.iteritems(): + if k == key or isinstance(v,dict) or isinstance(v,list): + obj[k] = self.replace_dict_value(v,key,new_value) + result = obj + elif isinstance(obj,list): + result = [] + for v in obj: + result.append(self.replace_dict_value(v, key, new_value)) + + else: + result= new_value + + return result + + + diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py new file mode 100644 index 000000000..159bf857c --- /dev/null +++ 
b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py @@ -0,0 +1,31 @@ + +import base_action + + +from oslo_log import log as logging +from neutron.plugins.common import constants as plugin_const + +LOG = logging.getLogger(__name__) + + +class Delete(base_action.BaseAction): + + def __init__(self, namespace): + super(Delete, self).__init__(namespace) + + def execute(self): + service = self.manager.plugin_rpc.get_service_by_loadbalancer_id( + self.lb_id + ) + + if not bool(service): + print("Loadbalancer {} not found".format(self.lb_id)) + exit(1) + + service = self.replace_dict_value(service, 'provisioning_status', plugin_const.PENDING_DELETE) + + + self.driver._common_service_handler(service,cli_sync=True) + + print("All device configuration forloadbalancer {} has been removed".format(self.lb_id)) + diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py new file mode 100644 index 000000000..c650d5c45 --- /dev/null +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py @@ -0,0 +1,32 @@ + +import base_action + +from neutron.plugins.common import constants as plugin_const + +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +class Sync(base_action.BaseAction): + + def __init__(self, namespace): + super(Sync, self).__init__(namespace) + + def execute(self): + + service = self.manager.plugin_rpc.get_service_by_loadbalancer_id( + self.lb_id + ) + + if not bool(service): + print("Loadbalancer {} not found".format(self.lb_id)) + exit(1) + + service = self.replace_dict_value(service, 'provisioning_status', plugin_const.PENDING_CREATE) + + self.driver._common_service_handler(service) + + print("The device state of loadbalancer {} has been synced with Neutron".format(self.lb_id)) + + diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py new file mode 100644 index 
000000000..ee03f5951 --- /dev/null +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py @@ -0,0 +1,46 @@ +import argparse +import urllib3 +import requests +from requests.packages.urllib3.exceptions import InsecureRequestWarning +import warnings +warnings.filterwarnings("ignore") + +requests.packages.urllib3.disable_warnings(InsecureRequestWarning) +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +from oslo_utils import importutils + + +ACTION_MODULE = 'f5_openstack_agent.lbaasv2.drivers.bigip.cli.actions.' + +class Execute(argparse.Action): + def __init__(self, option_strings, dest, nargs=None, **kwargs): + super(Execute, self).__init__(option_strings, dest, **kwargs) + self.actions = {"sync":"sync.Sync","delete":"delete.Delete"} + + def __call__(self, parser, namespace, values, option_string=None): + action = self.actions.get(values) + if action: + + instance = importutils.import_object(ACTION_MODULE+action,namespace) + + + instance.execute() + + +def main(): + parser = argparse.ArgumentParser(prog='f5_utils', description='Operations utilities for F5 LBAAS driver.') + + parser.add_argument('command', + help='command to execute',action=Execute,choices=["sync", "delete"]) + + parser.add_argument('--lb-id',dest='lb_id', + help='router id',action='store') + + parser.add_argument('--config-file', dest='config', action='append', + default=["/etc/neutron/f5-oslbaasv2-agent.ini", "/etc/neutron/neutron.conf"], + help='Configuration files') + parser.add_argument('--log',dest='log', action='store_true', + help='Enable openstack log output') + + parser.parse_args() diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py b/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py index bfdb81cb8..73252d473 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py @@ -157,7 +157,7 @@ def verify_esd(self, bigip, name, esd): valid_esd[tag] = 
esd[tag] LOG.debug("Tag {0} is valid for ESD {1}.".format(tag, name)) except f5_ex.esdJSONFileInvalidException as err: - LOG.error('Tag {0} failed validation for ESD {1} and was not ' + LOG.info('Tag {0} failed validation for ESD {1} and was not ' 'added to ESD. Error: {2}'. format(tag, name, err.message)) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 30bbd3dad..f09890c16 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -280,6 +280,24 @@ help='Parent profile used when creating client SSL profiles ' 'for listeners with TERMINATED_HTTPS protocols.' ), + cfg.ListOpt( + 'f5_default_http_profiles', + default='/Common/http,/Common/oneconnect', + help='Default profiles to use for HTTP Protocol VS' + ), + + cfg.ListOpt( + 'f5_default_https_profiles', + default='/Common/http,/Common/oneconnect', + help='Default profiles to use for HTTPS Protocol VS' + ), + + cfg.ListOpt( + 'f5_default_terminated_https_profiles', + default='/Common/http,/Common/oneconnect', + help='Default profiles to use for TERMINATED_HTTPS Protocol VS' + ), + cfg.StrOpt( 'os_tenant_name', default=None, @@ -1176,7 +1194,7 @@ def get_loadbalancers_in_tenant(self, tenant_id): def _common_service_handler(self, service, delete_partition=False, - delete_event=False): + delete_event=False,cli_sync=False): # Assure that the service is configured on bigip(s) start_time = time() @@ -1247,12 +1265,12 @@ def _common_service_handler(self, service, {'check_for_delete_subnets': {}, 'do_not_delete_subnets': []} - LOG.debug("XXXXXXXXX: Pre assure service") + # pdb.set_trace() self.lbaas_builder.assure_service(service, traffic_group, all_subnet_hints) - LOG.debug("XXXXXXXXX: Post assure service") + if self.network_builder: start_time = time() @@ -1282,7 +1300,7 @@ def _common_service_handler(self, service, 
self.tenant_manager.assure_tenant_cleanup(service, all_subnet_hints) - if do_service_update: + if do_service_update and not cli_sync: self.update_service_status(service) lb_provisioning_status = loadbalancer.get("provisioning_status", diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 67cd7b1c1..9b4ff8589 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -21,9 +21,13 @@ from neutron_lbaas.services.loadbalancer import constants as lb_const from requests import HTTPError +from f5_openstack_agent.lbaasv2.drivers.bigip import utils + LOG = logging.getLogger(__name__) + + class ListenerServiceBuilder(object): u"""Create LBaaS v2 Listener on BIG-IP®s. @@ -66,7 +70,14 @@ def create_listener(self, service, bigips): self.vs_helper.create(bigip, vip) except HTTPError as err: if err.response.status_code == 409: - LOG.debug("Virtual server already exists") + LOG.debug("Virtual server already exists updating") + try: + self.vs_helper.update(bigip, vip) + except Exception as e: + LOG.warn("Update triggered in create failed, this could be due to timing issues in assure_service") + LOG.warn('VS info %s',service['listener']) + LOG.warn('Exception %s',e) + else: LOG.exception("Virtual server creation error: %s" % err.message) @@ -208,9 +219,15 @@ def update_session_persistence(self, service, bigips): if listener['protocol'] == 'TCP': self._remove_profile(vip, 'fastL4', bigip) - # HTTP listeners should have http and oneconnect profiles - self._add_profile(vip, 'http', bigip) - self._add_profile(vip, 'oneconnect', bigip) + # Add default profiles + + profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) + + if(len(profiles) >0 ): + for profile in profiles: + self._add_profile(vip, profile.get('profile'), bigip) + + if persistence_type == 'APP_COOKIE' and \ 'cookie_name' 
in persistence: @@ -601,16 +618,16 @@ def apply_esd(self, svc, esd, bigips): if 'lbaas_fallback_persist' in esd: update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] - # always use http and oneconnect for non TCP listener + # always use defauklts for non TCP listener listener = svc["listener"] if profiles and not listener['protocol'] == 'TCP': - profiles.append({'name': 'http', - 'partition': 'Common', - 'context': 'all'}) - profiles.append({'name': 'oneconnect', - 'partition': 'Common', - 'context': 'all'}) + default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) + + if(len(default_profiles) >0 ): + for profile in default_profiles: + profiles.append({'name':profile.get('profile'),'partition':profile.get('partition'),'context':'all'}) + if profiles: update_attrs['profiles'] = profiles diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 49a4dd2fc..8bba945c9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -570,6 +570,7 @@ def get_neutron_net_short_name(network): def _assure_subnet_snats(self, assure_bigips, service, subnetinfo): # Ensure snat for subnet exists on bigips + lb_id = service['loadbalancer']['id'] tenant_id = service['loadbalancer']['tenant_id'] subnet = subnetinfo['subnet'] snats_per_subnet = self.conf.f5_snat_addresses_per_subnet @@ -593,7 +594,7 @@ def _assure_subnet_snats(self, assure_bigips, service, subnetinfo): (snats_per_subnet, len(snat_addrs))) for assure_bigip in assure_bigips: self.bigip_snat_manager.assure_bigip_snats( - assure_bigip, subnetinfo, snat_addrs, tenant_id) + assure_bigip, subnetinfo, snat_addrs, tenant_id, lb_id) def _assure_lb_snats(self, assure_bigips, service, subnetinfo): # Ensure snat for loadbalancer exists on bigips diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py 
b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index b63eff3e7..84697d8bf 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -436,7 +436,13 @@ def _add_bigip_items(self, listener, vip): vip['profiles'] = ['/Common/fastL4'] else: # add profiles for HTTP, HTTPS, TERMINATED_HTTPS protocols - vip['profiles'] = ['/Common/http', '/Common/oneconnect'] + + default_profiles = utils.get_default_profiles(self.conf,listener['protocol']) + profiles=[] + for profile in default_profiles: + profiles.append('/{}/{}'.format(profile.get('partition'), profile.get('profile'))) + + vip['profiles'] = profiles # mask if "ip_address" in vip: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index d88f76eec..a33d642a9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -129,6 +129,25 @@ def assure_bigip_snats(self, bigip, subnetinfo, snat_addrs, tenant_id,lb_id=None self._assure_bigip_snats(bigip, subnetinfo, snat_info, tenant_id,lb_id) + # try to delete any incorrectly named SNAT pools + if self.driver.conf.f5_snat_addresses_per_subnet == -1: + pool_name = self._get_snat_pool_name(tenant_id) + elif self.driver.conf.f5_snat_addresses_per_subnet > 0: + pool_name = "lb_"+lb_id + + try: + if self.snatpool_manager.exists(bigip,name=pool_name, partition=snat_info['pool_folder']): + snatpool = self.snatpool_manager.load(bigip, pool_name, snat_info['pool_folder']) + if snatpool is not None: + snatpool.delete() + + except Exception as exc: + pass + + + + + def _assure_bigip_snats(self, bigip, subnetinfo, snat_info, tenant_id,lb_id=None): # Configure the ip addresses for snat network = subnetinfo['network'] @@ -248,17 +267,9 @@ def _remove_assured_tenant_snat_subnet(self, bigip, tenant_id, subnet,lb_id=None def _delete_bigip_snats(self, bigip, subnetinfo, 
tenant_id, lb_id=None): # Assure snats deleted in standalone mode """ subnet = subnetinfo['subnet'] - network = subnetinfo['network'] - key = self._get_pool_uuid(subnet,lb_id) - partition = self.driver.service_adapter.get_folder_name( - tenant_id - ) - - if self.driver.conf.f5_snat_addresses_per_subnet > 0: - partition = 'Common' - elif self.driver.conf.f5_snat_addresses_per_subnet == -1: + if self.driver.conf.f5_snat_addresses_per_subnet == -1 and lb_id is not None: partition = ( self.driver.service_adapter.get_folder_name(tenant_id) ) @@ -388,7 +399,7 @@ def get_snatpool_member_use_count(self, bigip, member_name): snat_count += 1 return snat_count - def _get_snat_pool_name(self, tenant_id, lb_id): + def _get_snat_pool_name(self, tenant_id, lb_id=None): if lb_id is not None and self.driver.conf.f5_snat_addresses_per_subnet == -1: return "lb_"+lb_id else: @@ -396,7 +407,7 @@ def _get_snat_pool_name(self, tenant_id, lb_id): tenant_id ) - def _get_pool_uuid(self,subnet, lb_id): + def _get_pool_uuid(self,subnet, lb_id=None): if lb_id is not None and self.driver.conf.f5_snat_addresses_per_subnet == -1: return lb_id else: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py index 238ef3db6..d678c4719 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py @@ -19,6 +19,7 @@ from distutils.version import LooseVersion from eventlet import greenthread from oslo_log import log as logging +from neutron_lbaas.services.loadbalancer import constants as lb_const LOG = logging.getLogger(__name__) OBJ_PREFIX = 'uuid_' @@ -28,6 +29,24 @@ class IpNotInCidrNotation(Exception): pass +def get_default_profiles(conf, listener_protocol): + defaults = {lb_const.PROTOCOL_HTTP:conf.f5_default_http_profiles,lb_const.PROTOCOL_HTTPS:conf.f5_default_https_profiles,lb_const.PROTOCOL_TERMINATED_HTTPS:conf.f5_default_terminated_https_profiles} + + profiles = 
defaults.get(listener_protocol,['/Common/http','/Common/oneconnect']) + + result = [] + + if(profiles is not None): + for profile in profiles: + l = profile[1:].split("/") + if len(l)==2: + result.append({'partition': l[0], 'profile': l[1]}) + return result + + + + + def strip_domain_address(ip_address): """Return the address or address/netmask from a route domain address. diff --git a/setup.py b/setup.py index 3ae72b4b5..71d0955da 100644 --- a/setup.py +++ b/setup.py @@ -40,7 +40,8 @@ ], entry_points={ 'console_scripts': [ - 'f5-oslbaasv2-agent = f5_openstack_agent.lbaasv2.drivers.bigip.agent:main' + 'f5-oslbaasv2-agent = f5_openstack_agent.lbaasv2.drivers.bigip.agent:main', + 'f5-utils = f5_openstack_agent.lbaasv2.drivers.bigip.cli.f5_cli_utils:main' ] }, install_requires=['f5-sdk==2.3.3'] From e8df675be285d7f7c7bc5570f2610588f9703d97 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 5 Sep 2017 14:38:53 +0100 Subject: [PATCH 024/109] Add sync all cli action and check for input parameters --- .../drivers/bigip/cli/actions/base_action.py | 1 + .../drivers/bigip/cli/actions/delete.py | 6 +++ .../lbaasv2/drivers/bigip/cli/actions/sync.py | 5 +++ .../drivers/bigip/cli/actions/sync_all.py | 39 +++++++++++++++++++ .../lbaasv2/drivers/bigip/cli/f5_cli_utils.py | 7 +++- 5 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py index a27131f4b..58ef8618d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py @@ -37,6 +37,7 @@ class BaseAction(object): def __init__(self,namespace): self.lb_id = namespace.lb_id + self.project_id = namespace.project_id self.config_files = namespace.config self.conf = cfg.CONF diff --git 
a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py index 159bf857c..7b8cffccd 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py @@ -14,6 +14,12 @@ def __init__(self, namespace): super(Delete, self).__init__(namespace) def execute(self): + if self.lb_id is None : + print("Please specify an LB id with --lb_id") + exit(1) + + print("Starting delete attempt for load balancer {}".format(self.lb_id)) + service = self.manager.plugin_rpc.get_service_by_loadbalancer_id( self.lb_id ) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py index c650d5c45..2183723ae 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py @@ -14,6 +14,11 @@ def __init__(self, namespace): super(Sync, self).__init__(namespace) def execute(self): + if self.lb_id is None: + print("Please specify an LB id with --lb_id") + exit(1) + + print("Starting sync attempt for load balancer {}".format(self.lb_id)) service = self.manager.plugin_rpc.get_service_by_loadbalancer_id( self.lb_id diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py new file mode 100644 index 000000000..10354804f --- /dev/null +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py @@ -0,0 +1,39 @@ + +import base_action + +from neutron.plugins.common import constants as plugin_const + +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +class SyncAll(base_action.BaseAction): + + def __init__(self, namespace): + super(SyncAll, self).__init__(namespace) + + def execute(self): + + services = self.manager.plugin_rpc. 
get_all_loadbalancers(host=self.manager.agent_host) + + if self.project_id is not None: + print("Syncing all LBs in project {}".format(self.project_id)) + else: + print("Syncing all LBs hosted on agent {}".format(self.host)) + + for service in services: + + if self.project_id is None or service['tenant_id']==self.project_id : + + detailed_service = self.manager.plugin_rpc.get_service_by_loadbalancer_id(service['lb_id']) + + print("Starting sync attempt for load balancer {}".format(service['lb_id'])) + + + detailed_service = self.replace_dict_value(detailed_service, 'provisioning_status', plugin_const.PENDING_CREATE) + self.driver._common_service_handler(detailed_service) + + print("The device state of loadbalancer {} has been synced with Neutron".format(service['lb_id'])) + + diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py index ee03f5951..48cbcc0ba 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py @@ -16,7 +16,7 @@ class Execute(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): super(Execute, self).__init__(option_strings, dest, **kwargs) - self.actions = {"sync":"sync.Sync","delete":"delete.Delete"} + self.actions = {"sync":"sync.Sync","sync-all":"sync_all.SyncAll","delete":"delete.Delete"} def __call__(self, parser, namespace, values, option_string=None): action = self.actions.get(values) @@ -32,11 +32,14 @@ def main(): parser = argparse.ArgumentParser(prog='f5_utils', description='Operations utilities for F5 LBAAS driver.') parser.add_argument('command', - help='command to execute',action=Execute,choices=["sync", "delete"]) + help='command to execute',action=Execute,choices=["sync", "sync-all", "delete"]) parser.add_argument('--lb-id',dest='lb_id', help='router id',action='store') + parser.add_argument('--project-id',dest='project_id', + help='project 
id',action='store') + parser.add_argument('--config-file', dest='config', action='append', default=["/etc/neutron/f5-oslbaasv2-agent.ini", "/etc/neutron/neutron.conf"], help='Configuration files') From 4cb67c7fec6eb59542528b7d188dff244033c141 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 13 Sep 2017 10:03:00 +0100 Subject: [PATCH 025/109] Remove some noise from the logs --- f5_openstack_agent/lbaasv2/drivers/bigip/agent.py | 8 ++++++++ .../lbaasv2/drivers/bigip/agent_manager.py | 2 +- .../lbaasv2/drivers/bigip/icontrol_driver.py | 10 +++++----- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py index b3859f7ad..61ffc9437 100755 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py @@ -17,8 +17,16 @@ import errno import inspect import sys +import urllib3 +import requests import f5_openstack_agent.lbaasv2.drivers.bigip.exceptions as exceptions +from requests.packages.urllib3.exceptions import InsecureRequestWarning + + +requests.packages.urllib3.disable_warnings(InsecureRequestWarning) +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + try: from oslo_config import cfg diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index c9da46c1d..3f1db5439 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -621,7 +621,7 @@ def _service_has_provisioning_error(self, service): - @log_helpers.log_method_call + def refresh_service(self, lb_id): try: service = self.plugin_rpc.get_service_by_loadbalancer_id( diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index f09890c16..540737c55 100644 --- 
a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -1395,7 +1395,7 @@ def _update_health_monitor_status(self, health_monitors): self.plugin_rpc.update_health_monitor_status( health_monitor['id']) - @log_helpers.log_method_call + def _update_pool_status(self, pools): """Update pool status in OpenStack """ for pool in pools: @@ -1415,7 +1415,7 @@ def _update_pool_status(self, pools): elif provisioning_status == plugin_const.ERROR: self.plugin_rpc.update_pool_status(pool['id']) - @log_helpers.log_method_call + def _update_listener_status(self, service): """Update listener status in OpenStack """ listeners = service['listeners'] @@ -1440,7 +1440,7 @@ def _update_listener_status(self, service): provisioning_status, lb_const.OFFLINE) - @log_helpers.log_method_call + def _update_l7rule_status(self, l7rules): """Update l7rule status in OpenStack """ for l7rule in l7rules: @@ -1461,7 +1461,7 @@ def _update_l7rule_status(self, l7rules): self.plugin_rpc.update_l7rule_status( l7rule['id'], l7rule['policy_id']) - @log_helpers.log_method_call + def _update_l7policy_status(self, l7policies): LOG.debug("_update_l7policy_status") """Update l7policy status in OpenStack """ @@ -1482,7 +1482,7 @@ def _update_l7policy_status(self, l7policies): elif provisioning_status == plugin_const.ERROR: self.plugin_rpc.update_l7policy_status(l7policy['id']) - @log_helpers.log_method_call + def _update_loadbalancer_status(self, service, timed_out=False): """Update loadbalancer status in OpenStack """ loadbalancer = service.get('loadbalancer', {}) From a9c40318807a7ba5c735f54bff850f44a91b3e46 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 13 Sep 2017 10:04:24 +0100 Subject: [PATCH 026/109] If VS has an ESD ensure its a standard TCP VS not fastL4 to avoid 400 on update --- .../lbaasv2/drivers/bigip/lbaas_builder.py | 10 +++++++++- .../lbaasv2/drivers/bigip/listener_service.py | 7 +++++-- 2 files changed, 14 
insertions(+), 3 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 8fa5e8e88..a5d036c24 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -126,6 +126,14 @@ def _assure_listeners_created(self, service): "listener": listener, "networks": networks} + + has_esd=False + l7_profiles = listener.get('l7_policies', []) + for policy in l7_profiles: + if self.is_esd(policy.get('name', None)): + has_esd = True + + default_pool_id = listener.get('default_pool_id', '') if default_pool_id: pool = self.get_pool_by_id(service, default_pool_id) @@ -134,7 +142,7 @@ def _assure_listeners_created(self, service): if listener['provisioning_status'] == plugin_const.PENDING_UPDATE: try: - self.listener_builder.update_listener(svc, bigips) + self.listener_builder.update_listener(svc, bigips, has_esd=has_esd) except Exception as err: loadbalancer['provisioning_status'] = plugin_const.ERROR listener['provisioning_status'] = plugin_const.ERROR diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 9b4ff8589..de01de74d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -158,7 +158,7 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False): # add ssl profile to virtual server self._add_profile(vip, name, bigip, context='clientside') - def update_listener(self, service, bigips): + def update_listener(self, service, bigips,has_esd=False): u"""Update Listener from a single BIG-IP® system. Updates virtual servers that represents a Listener object. 
@@ -169,6 +169,9 @@ def update_listener(self, service, bigips): """ vip = self.service_adapter.get_virtual(service) + if has_esd and service['listener']['protocol'] == lb_const.PROTOCOL_TCP: + vip['profiles'] = ["/Common/tcp"] + for bigip in bigips: self.vs_helper.update(bigip, vip) @@ -618,7 +621,7 @@ def apply_esd(self, svc, esd, bigips): if 'lbaas_fallback_persist' in esd: update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] - # always use defauklts for non TCP listener + # always use defaults for non TCP listener listener = svc["listener"] if profiles and not listener['protocol'] == 'TCP': From 94f3769c5c0cbe8fd9d68eb56d11c1e73c1a6dfa Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 13 Sep 2017 12:16:44 +0100 Subject: [PATCH 027/109] Ensure VLAN is included during update --- f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index de01de74d..8b02f6f01 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -172,7 +172,10 @@ def update_listener(self, service, bigips,has_esd=False): if has_esd and service['listener']['protocol'] == lb_const.PROTOCOL_TCP: vip['profiles'] = ["/Common/tcp"] + + network_id = service['loadbalancer']['network_id'] for bigip in bigips: + self.service_adapter.get_vlan(vip, bigip, network_id) self.vs_helper.update(bigip, vip) def update_listener_pool(self, service, name, bigips): From 2db8a30dc3e3ba25ae5ecd84ee2c783898f1cc30 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 19 Sep 2017 10:26:34 +0100 Subject: [PATCH 028/109] Add some instrumentation and improve queue handling for backlogged requests --- .../lbaasv2/drivers/bigip/agent_manager.py | 6 +- .../lbaasv2/drivers/bigip/lbaas_builder.py | 31 ++++--- 
.../lbaasv2/drivers/bigip/lbaas_driver.py | 1 + .../lbaasv2/drivers/bigip/network_service.py | 9 ++- .../lbaasv2/drivers/bigip/plugin_rpc.py | 6 +- .../lbaasv2/drivers/bigip/utils.py | 80 ++++++++++++++++++- 6 files changed, 117 insertions(+), 16 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 3f1db5439..36064ea13 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -36,7 +36,7 @@ from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 from f5_openstack_agent.lbaasv2.drivers.bigip import plugin_rpc - +from f5_openstack_agent.lbaasv2.drivers.bigip import utils LOG = logging.getLogger(__name__) @@ -465,6 +465,7 @@ def tunnel_sync(self): return self.lbdriver.tunnel_sync() @log_helpers.log_method_call + @utils.instrument_execution_time def sync_state(self): """Sync state of BIG-IP with that of the neutron database.""" resync = False @@ -558,6 +559,7 @@ def sync_state(self): return resync @log_helpers.log_method_call + @utils.instrument_execution_time def validate_service(self, lb_id): try: @@ -621,7 +623,7 @@ def _service_has_provisioning_error(self, service): - + @utils.instrument_execution_time def refresh_service(self, lb_id): try: service = self.plugin_rpc.get_service_by_loadbalancer_id( diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index a5d036c24..ddde6992e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -27,6 +27,8 @@ from f5_openstack_agent.lbaasv2.drivers.bigip import pool_service from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address +from f5_openstack_agent.lbaasv2.drivers.bigip import utils + from requests import HTTPError LOG = logging.getLogger(__name__) @@ -51,6 +53,7 @@ def 
__init__(self, conf, driver, l2_service=None): self.l7service = l7policy_service.L7PolicyService(conf) self.esd = None + @utils.instrument_execution_time def assure_service(self, service, traffic_group, all_subnet_hints): """Assure that a service is configured on the BIGIP.""" start_time = time() @@ -85,7 +88,7 @@ def assure_service(self, service, traffic_group, all_subnet_hints): LOG.debug(" _assure_service took %.5f secs" % (time() - start_time)) return all_subnet_hints - + @utils.instrument_execution_time def _assure_loadbalancer_created(self, service, all_subnet_hints): if 'loadbalancer' not in service: return @@ -112,6 +115,7 @@ def _assure_loadbalancer_created(self, service, all_subnet_hints): if loadbalancer['provisioning_status'] != plugin_const.PENDING_DELETE: loadbalancer['provisioning_status'] = plugin_const.ACTIVE + @utils.instrument_execution_time def _assure_listeners_created(self, service): if 'listeners' not in service: return @@ -163,6 +167,7 @@ def _assure_listeners_created(self, service): if listener['provisioning_status'] != plugin_const.PENDING_DELETE: listener['provisioning_status'] = plugin_const.ACTIVE + @utils.instrument_execution_time def _assure_pools_created(self, service): if "pools" not in service: return @@ -204,6 +209,7 @@ def _assure_pools_created(self, service): pool['provisioning_status'] = plugin_const.ACTIVE + @utils.instrument_execution_time def _assure_pools_configured(self, service): if "pools" not in service: return @@ -247,7 +253,7 @@ def _assure_pools_configured(self, service): raise f5_ex.PoolCreationException(err.message) pool['provisioning_status'] = plugin_const.ACTIVE - + @utils.instrument_execution_time def _get_pool_members(self, service, pool_id): '''Return a list of members associated with given pool.''' @@ -257,6 +263,7 @@ def _get_pool_members(self, service, pool_id): members.append(member) return members + @utils.instrument_execution_time def _update_listener_pool(self, service, listener_id, pool_name, bigips): 
listener = self.get_listener_by_id(service, listener_id) if listener is not None: @@ -270,6 +277,7 @@ def _update_listener_pool(self, service, listener_id, pool_name, bigips): listener['provisioning_status'] = plugin_const.ERROR raise f5_ex.VirtualServerUpdateException(err.message) + @utils.instrument_execution_time def _assure_monitors(self, service): if not (("pools" in service) and ("healthmonitors" in service)): return @@ -305,7 +313,7 @@ def _assure_monitors(self, service): raise f5_ex.MonitorCreationException(err.message) monitor['provisioning_status'] = plugin_const.ACTIVE - + @utils.instrument_execution_time def _assure_members(self, service, all_subnet_hints): if not (("pools" in service) and ("members" in service)): return @@ -363,7 +371,7 @@ def _assure_members(self, service, all_subnet_hints): True) - + @utils.instrument_execution_time def _assure_loadbalancer_deleted(self, service): if (service['loadbalancer']['provisioning_status'] != plugin_const.PENDING_DELETE): @@ -384,6 +392,7 @@ def _assure_loadbalancer_deleted(self, service): for bigip in bigips: vip_address.assure(bigip, delete=True) + @utils.instrument_execution_time def _assure_pools_deleted(self, service): if 'pools' not in service: return @@ -418,7 +427,7 @@ def _assure_pools_deleted(self, service): except Exception as err: pool['provisioning_status'] = plugin_const.ERROR raise f5_ex.PoolDeleteException(err.message) - + @utils.instrument_execution_time def _assure_listeners_deleted(self, service): if 'listeners' not in service: return @@ -487,6 +496,7 @@ def _update_subnet_hints(self, status, subnet_id, 'subnet_id': subnet_id, 'is_for_member': is_member} + @utils.instrument_execution_time def listener_exists(self, bigip, service): """Test the existence of the listener defined by service.""" try: @@ -499,6 +509,7 @@ def listener_exists(self, bigip, service): return True + @utils.instrument_execution_time def _assure_l7policies_created(self, service): if 'l7policies' not in service: return @@ 
-527,7 +538,7 @@ def _assure_l7policies_created(self, service): raise f5_ex.L7PolicyCreationException(err.message) l7policy['provisioning_status'] = plugin_const.ACTIVE - + @utils.instrument_execution_time def _assure_l7policies_deleted(self, service): if 'l7policies' not in service: return @@ -562,7 +573,7 @@ def _assure_l7policies_deleted(self, service): service['loadbalancer']['provisioning_status'] = \ plugin_const.ERROR raise f5_ex.L7PolicyDeleteException(err.message) - + @utils.instrument_execution_time def _assure_l7rules_created(self, service): if 'l7policy_rules' not in service: return @@ -589,7 +600,7 @@ def _assure_l7rules_created(self, service): raise f5_ex.L7PolicyCreationException(err.message) l7rule['provisioning_status'] = plugin_const.ACTIVE - + @utils.instrument_execution_time def _assure_l7rules_deleted(self, service): if 'l7policy_rules' not in service: return @@ -612,7 +623,7 @@ def _assure_l7rules_deleted(self, service): service['loadbalancer']['provisioning_status'] = \ plugin_const.ERROR raise f5_ex.L7PolicyDeleteException(err.message) - + @utils.instrument_execution_time def get_listener_stats(self, service, stats): """Get statistics for a loadbalancer service. 
@@ -646,7 +657,7 @@ def get_listener_stats(self, service, stats): collected_stats[stat] += vs_stats[stat] return collected_stats - + @utils.instrument_execution_time def update_operating_status(self, service): bigip = self.driver.get_active_bigip() loadbalancer = service["loadbalancer"] diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py index a5738cc0e..413922d76 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py @@ -26,6 +26,7 @@ def __init__(self, conf): # XXX 'conf' appears to be unused self.plugin_rpc = None # XXX overridden in the only known subclass self.connected = False # XXX overridden in the only known subclass self.service_queue = [] + self.queues = {} self.agent_configurations = {} # XXX overridden in subclass def set_context(self, context): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 8bba945c9..98f937c55 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -31,7 +31,7 @@ from f5_openstack_agent.lbaasv2.drivers.bigip.selfips import BigipSelfIpManager from f5_openstack_agent.lbaasv2.drivers.bigip.snats import BigipSnatManager from f5_openstack_agent.lbaasv2.drivers.bigip.utils import strip_domain_address - +from f5_openstack_agent.lbaasv2.drivers.bigip import utils LOG = logging.getLogger(__name__) @@ -154,6 +154,7 @@ def is_service_connected(self, service): return True + @utils.instrument_execution_time def prep_service_networking(self, service, traffic_group): """Assure network connectivity is established on all bigips.""" if self.conf.f5_global_routed_mode: @@ -667,6 +668,7 @@ def _allocate_gw_addr(self, subnetinfo): LOG.exception(ermsg) return True + @utils.instrument_execution_time def 
post_service_networking(self, service, all_subnet_hints): # Assure networks are deleted from big-ips if self.conf.f5_global_routed_mode: @@ -707,6 +709,7 @@ def post_service_networking(self, service, all_subnet_hints): self.driver.plugin_rpc.delete_port_by_name( port_name=port_name) + @utils.instrument_execution_time def update_bigip_l2(self, service): # Update fdb entries on bigip loadbalancer = service['loadbalancer'] @@ -855,6 +858,7 @@ def _assure_delete_nets_shared(self, bigip, service, subnet_hints): return deleted_names + @utils.instrument_execution_time def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): # Delete non shared base objects for networks deleted_names = set() @@ -947,7 +951,7 @@ def _is_last_on_network(self, service): return True - + @utils.instrument_execution_time def _get_subnets_to_delete(self, bigip, service, subnet_hints): # Clean up any Self IP, SNATs, networks, and folder for # services items that we deleted. @@ -971,6 +975,7 @@ def _get_subnets_to_delete(self, bigip, service, subnet_hints): return subnets_to_delete + @utils.instrument_execution_time def _ips_exist_on_subnet(self, bigip, service, subnet, route_domain): # Does the big-ip have any IP addresses on this subnet? 
LOG.debug("_ips_exist_on_subnet entry %s rd %s" diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py index 51573e70f..889463802 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py @@ -23,7 +23,7 @@ from neutron_lbaas.services.loadbalancer import constants as lb_const from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 as constants - +from f5_openstack_agent.lbaasv2.drivers.bigip import utils LOG = logging.getLogger @@ -418,6 +418,7 @@ def delete_port(self, port_id=None, mac_address=None): ) @log_helpers.log_method_call + @utils.instrument_execution_time def get_service_by_loadbalancer_id(self, loadbalancer_id=None): """Retrieve the service definition for this loadbalancer.""" @@ -437,6 +438,7 @@ def get_service_by_loadbalancer_id(self, return service @log_helpers.log_method_call + @utils.instrument_execution_time def get_all_loadbalancers(self, env=None, group=None, host=None): """Retrieve a list of loadbalancers in Neutron.""" loadbalancers = [] @@ -460,6 +462,7 @@ def get_all_loadbalancers(self, env=None, group=None, host=None): return loadbalancers @log_helpers.log_method_call + @utils.instrument_execution_time def get_active_loadbalancers(self, env=None, group=None, host=None): """Retrieve a list of active loadbalancers for this agent.""" loadbalancers = [] @@ -483,6 +486,7 @@ def get_active_loadbalancers(self, env=None, group=None, host=None): return loadbalancers @log_helpers.log_method_call + @utils.instrument_execution_time def get_pending_loadbalancers(self, env=None, group=None, host=None): """Retrieve a list of pending loadbalancers for this agent.""" loadbalancers = [] diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py index d678c4719..755a46ad8 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py +++ 
b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py @@ -15,12 +15,16 @@ # from time import time import uuid +import eventlet from distutils.version import LooseVersion from eventlet import greenthread + from oslo_log import log as logging from neutron_lbaas.services.loadbalancer import constants as lb_const +from threading import Thread, current_thread + LOG = logging.getLogger(__name__) OBJ_PREFIX = 'uuid_' @@ -44,6 +48,19 @@ def get_default_profiles(conf, listener_protocol): return result +def instrument_execution_time(method): + + def timed(*args, **kw): + ts = time() + result = method(*args, **kw) + te = time() + + LOG.debug('******* PROFILE %r : %r %2.2f sec' % \ + (current_thread().name, method.__name__, te-ts)) + return result + + return timed + @@ -64,8 +81,69 @@ def strip_domain_address(ip_address): else: return ip_address.split('%')[0] +def serialized (method_name): + """Outer wrapper in order to specify method name.""" + + def real_serialized(method): + """Decorator to serialize calls to configure via iControl.""" + def wrapper(*args, **kwargs): + # args[0] must be an instance of iControlDriver + my_request_id = uuid.uuid4() + + service = None + if len(args) > 0: + last_arg = args[-1] + if isinstance(last_arg, dict) and ('loadbalancer' in last_arg): + service = last_arg + if 'service' in kwargs: + service = kwargs['service'] + + lb_id = 'generic' + + if service is not None: + lb = service.get('loadbalancer') + if lb is not None: + lb_id = lb.get('id') + + queue = args[0].queues.get(lb_id) + if queue is None: + queue = eventlet.queue.Queue(1) + args[0].queues[lb_id] = queue + + wait_start = time() + queue.put(my_request_id) + LOG.debug('Waited %.2f secs to put request %s on to queue %s %s' + % (time() - wait_start, my_request_id, lb_id, queue)) + + + try: + + start_time = time() + + result = method(*args, **kwargs) + LOG.debug('%s request %s took %.5f secs' + % (str(method_name), my_request_id, + time() - start_time)) + + except Exception: + LOG.error('%s 
request %s FAILED' + % (str(method_name), my_request_id)) + raise + finally: + wait_start = time() + wait_request = queue.get() + LOG.debug('Waited %.2f secs to get request %s' + % (time() - wait_start,wait_request)) + + + return result + + return wrapper + + return real_serialized + -def serialized(method_name): +def serialized_old(method_name): """Outer wrapper in order to specify method name.""" def real_serialized(method): """Decorator to serialize calls to configure via iControl.""" From 93d147fff6c0c22e345cf7fc675706e8310acfde Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Tue, 19 Sep 2017 16:38:31 +0100 Subject: [PATCH 029/109] Fix l7 policy wrappers to exist per VS so we can have more than one VS per partition with policies. --- .../lbaasv2/drivers/bigip/l7policy_adapter.py | 15 +++++++++++---- .../lbaasv2/drivers/bigip/listener_adapter.py | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_adapter.py index bc078db76..8c1227c26 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_adapter.py @@ -159,15 +159,22 @@ def _check_if_adapted_rules_empty(self): 'wrapper_policy.' raise PolicyHasNoRules(msg) - def _adapt_policy(self): + def _adapt_policy(self,service): '''Setup the wrapper policy, which will contain rules.''' if not self.service['l7rules']: msg = 'No Rules given to implement. A Policy cannot be attached ' \ 'to a Virtual until it has one or more Rules.' 
raise PolicyHasNoRules(msg) + + listener_id = '' + for l7_policy in service['l7policies']: + listener_id = l7_policy.get('listener_id','') + break + + self.policy_dict = {} - self.policy_dict['name'] = 'wrapper_policy' + self.policy_dict['name'] = 'wrapper_policy_'+listener_id self.policy_dict['partition'] = self.folder self.policy_dict['strategy'] = 'first-match' self.policy_dict['rules'] = [] @@ -181,9 +188,9 @@ def translate(self, service): self.service = service self.folder = self.get_folder_name( self.service['l7policies'][0]['tenant_id']) - self._adapt_policy() + self._adapt_policy(service) return self.policy_dict def translate_name(self, l7policy): - return {'name': 'wrapper_policy', + return {'name': 'wrapper_policy_'+l7policy.get('listener_id',''), 'partition': self.get_folder_name(l7policy['tenant_id'])} diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_adapter.py index 596862c04..a97bded18 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_adapter.py @@ -25,6 +25,6 @@ def translate(self, service, listener, l7policy=None): listener.get('tenant_id', ''))} if l7policy: - f5_vs['l7policy_name'] = "wrapper_policy" + f5_vs['l7policy_name'] = "wrapper_policy_"+listener.get('id','') return f5_vs From d4aba74d1f8b03e75c71f7a3ca8c6c43472e4c31 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 28 Sep 2017 17:20:32 +0200 Subject: [PATCH 030/109] Added ESD's for x-forwarded-for, http-compression and one-connect. Add and removal of ESD to listeners has to be reworked. 
--- .../lbaasv2/drivers/bigip/esd_filehandler.py | 12 ++ .../lbaasv2/drivers/bigip/lbaas_builder.py | 60 ++++++--- .../lbaasv2/drivers/bigip/listener_service.py | 122 ++++++++++++++++++ .../lbaasv2/drivers/bigip/resource_helper.py | 21 ++- 4 files changed, 194 insertions(+), 21 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py b/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py index 73252d473..2692340ea 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py @@ -210,6 +210,18 @@ def verify_tag(self, tag): 'resource_type': ResourceType.tcp_profile, 'value_type': types.StringTypes}, + 'lbaas_http': { + 'resource_type': ResourceType.http_profile, + 'value_type': types.StringTypes}, + + 'lbaas_one_connect': { + 'resource_type': ResourceType.one_connect_profile, + 'value_type': types.StringTypes}, + + 'lbaas_http_compression': { + 'resource_type': ResourceType.http_compression_profile, + 'value_type': types.StringTypes}, + 'lbaas_cssl_profile': { 'resource_type': ResourceType.client_ssl_profile, 'value_type': types.StringTypes}, diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index ddde6992e..20762e910 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -81,6 +81,8 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_l7policies_deleted(service) + self._assure_esds_applied(service) + self._assure_listeners_deleted(service) self._assure_loadbalancer_deleted(service) @@ -509,6 +511,41 @@ def listener_exists(self, bigip, service): return True + def _assure_esds_applied(self, service): + + LOG.debug('1*****************************') + if 'l7policies' not in service: + return + + bigips = self.driver.get_config_bigips() + l7policies = 
service['l7policies'] + svcs = {'loadbalancer': service['loadbalancer'],'listeners': {}} + LOG.debug('2*****************************') + for l7policy in l7policies: + if l7policy['provisioning_status'] != plugin_const.PENDING_DELETE: + try: + name = l7policy.get('name', None) + if name and self.is_esd(name): + esd = self.get_esd(name) + if esd is not None: + listeners = svcs.get('listeners') + listener = self.get_listener_by_id(service, l7policy.get('listener_id', '')) + if listener.get('id') in listeners.keys(): + svcs.get('listeners').get(listener.get('id')).get('esds').append(esd) + else: + # pool is needed to reset session persistence + pool = None + if listener['default_pool_id']: + pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) + svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[esd]} + except Exception as err: + LOG.debug('Error processing ESD :%s', err) + + LOG.debug('3***************************** %s', svcs) + + self.listener_builder.apply_esds(svcs, bigips) + + @utils.instrument_execution_time def _assure_l7policies_created(self, service): if 'l7policies' not in service: @@ -521,13 +558,8 @@ def _assure_l7policies_created(self, service): try: name = l7policy.get('name', None) if name and self.is_esd(name): - listener = self.get_listener_by_id( - service, l7policy.get('listener_id', '')) + continue - svc = {"loadbalancer": service["loadbalancer"], - "listener": listener} - esd = self.get_esd(name) - self.listener_builder.apply_esd(svc, esd, bigips) else: self.l7service.create_l7policy( l7policy, service, bigips) @@ -538,6 +570,7 @@ def _assure_l7policies_created(self, service): raise f5_ex.L7PolicyCreationException(err.message) l7policy['provisioning_status'] = plugin_const.ACTIVE + @utils.instrument_execution_time def _assure_l7policies_deleted(self, service): if 'l7policies' not in service: @@ -550,19 +583,7 @@ def _assure_l7policies_deleted(self, service): try: name = l7policy.get('name', None) 
if name and self.is_esd(name): - listener = self.get_listener_by_id( - service, l7policy.get('listener_id', '')) - svc = {"loadbalancer": service["loadbalancer"], - "listener": listener} - - # pool is needed to reset session persistence - if listener['default_pool_id']: - pool = self.get_pool_by_id( - service, listener.get('default_pool_id', '')) - if pool: - svc['pool'] = pool - esd = self.get_esd(name) - self.listener_builder.remove_esd(svc, esd, bigips) + continue else: # Note: use update_l7policy because a listener can have # multiple policies @@ -573,6 +594,7 @@ def _assure_l7policies_deleted(self, service): service['loadbalancer']['provisioning_status'] = \ plugin_const.ERROR raise f5_ex.L7PolicyDeleteException(err.message) + @utils.instrument_execution_time def _assure_l7rules_created(self, service): if 'l7policy_rules' not in service: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 8b02f6f01..249ff6c2b 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -22,6 +22,8 @@ from requests import HTTPError from f5_openstack_agent.lbaasv2.drivers.bigip import utils +#import pydevd + LOG = logging.getLogger(__name__) @@ -37,6 +39,7 @@ class ListenerServiceBuilder(object): """ def __init__(self, service_adapter, cert_manager, parent_ssl_profile=None): + #pydevd.settrace('10.29.12.100', port=22100, stdoutToServer=True, stderrToServer=True) self.cert_manager = cert_manager self.parent_ssl_profile = parent_ssl_profile self.vs_helper = resource_helper.BigIPResourceHelper( @@ -45,6 +48,7 @@ def __init__(self, service_adapter, cert_manager, parent_ssl_profile=None): LOG.debug("ListenerServiceBuilder: using parent_ssl_profile %s ", parent_ssl_profile) + def create_listener(self, service, bigips): u"""Create listener on set of BIG-IP®s. 
@@ -581,6 +585,107 @@ def _remove_irule(self, vs, irule_name, bigip, rule_partition='Common'): LOG.debug("Removed iRule {0} for virtual sever {1}". format(irule_name, vs_name)) + def apply_esds(self, svcs, bigips): + loadbalancer = svcs.get('loadbalancer') + listeners = svcs.get('listeners', {}).values() + for svc in listeners: + svc['loadbalancer'] = loadbalancer + esds = svc.get('esds', []) + profiles = [] + policies = [] + irules = [] + # get virtual server name + update_attrs = self.service_adapter.get_virtual_name(svc) + + for esd in esds: + # start with server tcp profile + if 'lbaas_stcp' in esd: + # set serverside tcp profile + profiles.append({'name': esd['lbaas_stcp'], + 'partition': 'Common', + 'context': 'serverside'}) + # restrict client profile + ctcp_context = 'clientside' + else: + # no serverside profile; use client profile for both + ctcp_context = 'all' + + # must define client profile; default to tcp if not in ESD + if 'lbaas_ctcp' in esd: + ctcp_profile = esd['lbaas_ctcp'] + else: + ctcp_profile = 'tcp' + profiles.append({'name': ctcp_profile, + 'partition': 'Common', + 'context': ctcp_context}) + # http profiles + if 'lbaas_http' in esd: + profiles.append({'name': esd['lbaas_http'], + 'partition': 'Common', + 'context': 'all'}) + + # one connect profiles + if 'lbaas_one_connect' in esd: + profiles.append({'name': esd['lbaas_one_connect'], + 'partition': 'Common', + 'context': 'all'}) + + # http compression profiles + if 'lbaas_http_compression' in esd: + profiles.append({'name': esd['lbaas_http_compression'], + 'partition': 'Common', + 'context': 'all'}) + + # SSL profiles + if 'lbaas_cssl_profile' in esd: + profiles.append({'name': esd['lbaas_cssl_profile'], + 'partition': 'Common', + 'context': 'clientside'}) + if 'lbaas_sssl_profile' in esd: + profiles.append({'name': esd['lbaas_sssl_profile'], + 'partition': 'Common', + 'context': 'serverside'}) + + # persistence + if 'lbaas_persist' in esd: + update_attrs['persist'] = [{'name': 
esd['lbaas_persist']}] + if 'lbaas_fallback_persist' in esd: + update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] + + # # always use defaults for non TCP listener + # listener = svc["listener"] + # if profiles and not listener['protocol'] == 'TCP': + # + # default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) + # + # if(len(default_profiles) >0 ): + # for profile in default_profiles: + # profiles.append({'name':profile.get('profile'),'partition':profile.get('partition'),'context':'all'}) + # + + # iRules + if 'lbaas_irule' in esd: + for irule in esd['lbaas_irule']: + irules.append('/Common/' + irule) + + # L7 policies + if 'lbaas_policy' in esd: + for policy in esd['lbaas_policy']: + policies.append({'name': policy, 'partition': 'Common'}) + + if profiles: + update_attrs['profiles'] = profiles + + update_attrs['rules'] = irules + + update_attrs['policies'] = policies + + # udpate BIG-IPs + for bigip in bigips: + self.vs_helper.update(bigip, update_attrs) + + + def apply_esd(self, svc, esd, bigips): profiles = [] @@ -607,6 +712,23 @@ def apply_esd(self, svc, esd, bigips): profiles.append({'name': ctcp_profile, 'partition': 'Common', 'context': ctcp_context}) + # http profiles + if 'lbaas_http' in esd: + profiles.append({'name': esd['lbaas_http'], + 'partition': 'Common', + 'context': 'all'}) + + # one connect profiles + if 'lbaas_one_connect' in esd: + profiles.append({'name': esd['lbaas_one_connect'], + 'partition': 'Common', + 'context': 'all'}) + + # http compression profiles + if 'lbaas_http_compression' in esd: + profiles.append({'name': esd['lbaas_http_compression'], + 'partition': 'Common', + 'context': 'all'}) # SSL profiles if 'lbaas_cssl_profile' in esd: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py index f3406f695..b98ab4ac3 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py +++ 
b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py @@ -59,6 +59,9 @@ class ResourceType(Enum): ssl_persistence = 33 universal_persistence = 34 ssl_cert_file = 35 + http_profile = 36 + one_connect_profile = 37 + http_compression_profile = 38 class BigIPResourceHelper(object): @@ -246,7 +249,14 @@ def _resource(self, bigip): ResourceType.universal_persistence: lambda bigip: bigip.tm.ltm.persistence.universal, ResourceType.ssl_cert_file: - lambda bigip: bigip.tm.sys.file.ssl_certs.ssl_cert + lambda bigip: bigip.tm.sys.file.ssl_certs.ssl_cert, + ResourceType.http_profile: + lambda bigip: bigip.tm.ltm.profile.https.http, + ResourceType.one_connect_profile: + lambda bigip: bigip.tm.ltm.profile.one_connects.one_connect, + ResourceType.http_compression_profile: + lambda bigip: bigip.tm.ltm.profile.http_compressions.http_compression + }[self.resource_type](bigip) def _collection(self, bigip): @@ -312,7 +322,14 @@ def _collection(self, bigip): ResourceType.universal_persistence: lambda bigip: bigip.tm.ltm.persistence.universals, ResourceType.ssl_cert_file: - lambda bigip: bigip.tm.sys.file.ssl_certs + lambda bigip: bigip.tm.sys.file.ssl_certs, + ResourceType.http_profile: + lambda bigip: bigip.tm.ltm.profile.https, + ResourceType.one_connect_profile: + lambda bigip: bigip.tm.ltm.profile.one_connects, + ResourceType.http_compression_profile: + lambda bigip: bigip.tm.ltm.profile.http_compressions + } if self.resource_type in collection_map: From d600830462e21c7a830399bc652274ce06512412 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Thu, 28 Sep 2017 18:23:57 +0100 Subject: [PATCH 031/109] Begin to make ESD actually usable for more than the most basic use cases --- .../lbaasv2/drivers/bigip/esd_filehandler.py | 6 + .../lbaasv2/drivers/bigip/icontrol_driver.py | 25 +- .../lbaasv2/drivers/bigip/listener_service.py | 384 ++++++++++-------- .../lbaasv2/drivers/bigip/resource_helper.py | 9 +- .../lbaasv2/drivers/bigip/service_adapter.py | 4 +- 
.../lbaasv2/drivers/bigip/utils.py | 23 +- 6 files changed, 255 insertions(+), 196 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py b/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py index 2692340ea..1a7988b46 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/esd_filehandler.py @@ -184,6 +184,9 @@ def verify_value(self, bigip, tag, value): # verify value exists on BIG-IP if isinstance(value, list): is_valid = self.is_valid_value_list(bigip, value, resource_type) + elif value=='': + # ESD Processing we will handle this as a special case and use this to toggle things like fastl4 + is_valid = True else: is_valid = self.is_valid_value(bigip, value, resource_type) @@ -202,6 +205,9 @@ def verify_tag(self, tag): # we are implementing the tags that can be applied only to listeners valid_esd_tags = { + 'lbaas_fastl4': { + 'resource_type': ResourceType.fastl4_profile, + 'value_type': types.StringTypes}, 'lbaas_ctcp': { 'resource_type': ResourceType.tcp_profile, 'value_type': types.StringTypes}, diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 540737c55..d59fe1d21 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -280,24 +280,31 @@ help='Parent profile used when creating client SSL profiles ' 'for listeners with TERMINATED_HTTPS protocols.' 
), - cfg.ListOpt( - 'f5_default_http_profiles', - default='/Common/http,/Common/oneconnect', + cfg.StrOpt( + 'f5_default_http_profile', + default='/Common/http', help='Default profiles to use for HTTP Protocol VS' ), - cfg.ListOpt( - 'f5_default_https_profiles', - default='/Common/http,/Common/oneconnect', + cfg.StrOpt( + 'f5_default_https_profile', + default='/Common/http', help='Default profiles to use for HTTPS Protocol VS' ), - cfg.ListOpt( - 'f5_default_terminated_https_profiles', - default='/Common/http,/Common/oneconnect', + cfg.StrOpt( + 'f5_default_terminated_https_profile', + default='/Common/http', help='Default profiles to use for TERMINATED_HTTPS Protocol VS' ), + cfg.StrOpt( + 'f5_default_oneconnect_profile', + default='/Common/oneconnect', + help='Default oneconnect profile for HTTP virtual servers' + ) + , + cfg.StrOpt( 'os_tenant_name', default=None, diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 249ff6c2b..30785fdb8 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -233,9 +233,9 @@ def update_session_persistence(self, service, bigips): profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - if(len(profiles) >0 ): - for profile in profiles: - self._add_profile(vip, profile.get('profile'), bigip) + + for profile in profiles.values(): + self._add_profile(vip, profile.get('name'), bigip) @@ -591,58 +591,77 @@ def apply_esds(self, svcs, bigips): for svc in listeners: svc['loadbalancer'] = loadbalancer esds = svc.get('esds', []) - profiles = [] + listener = svc.get('listener', []) + + fastl4 = {'partition':'Common','name':'fastL4','context':'all'} + stcp_profiles = [] + ctcp_profiles = [] + cssl_profiles = [] + sssl_profiles = [] + http_profile = {} + oneconnect_profile = {} + compression_profile = {} + persistence_profiles = [] + 
policies = [] irules = [] # get virtual server name update_attrs = self.service_adapter.get_virtual_name(svc) for esd in esds: - # start with server tcp profile - if 'lbaas_stcp' in esd: - # set serverside tcp profile - profiles.append({'name': esd['lbaas_stcp'], - 'partition': 'Common', - 'context': 'serverside'}) - # restrict client profile - ctcp_context = 'clientside' - else: - # no serverside profile; use client profile for both - ctcp_context = 'all' + # start with server tcp profile, only add if not already got some + ctcp_context = 'all' - # must define client profile; default to tcp if not in ESD - if 'lbaas_ctcp' in esd: - ctcp_profile = esd['lbaas_ctcp'] - else: - ctcp_profile = 'tcp' - profiles.append({'name': ctcp_profile, - 'partition': 'Common', - 'context': ctcp_context}) + if 'lbaas_fastl4' in esd: + if esd['lbaas_fastl4']=='': + fastl4= {} + + + + if len(stcp_profiles)==0: + if 'lbaas_stcp' in esd: + # set serverside tcp profile + stcp_profiles.append({'name': esd['lbaas_stcp'], + 'partition': 'Common', + 'context': 'serverside'}) + # restrict client profile + ctcp_context = 'clientside' + + + if len(ctcp_profiles)==0: + # must define client profile; default to tcp if not in ESD + if 'lbaas_ctcp' in esd: + ctcp_profile = esd['lbaas_ctcp'] + else: + ctcp_profile = 'tcp' + ctcp_profiles.append({'name': ctcp_profile, + 'partition': 'Common', + 'context': ctcp_context}) # http profiles - if 'lbaas_http' in esd: - profiles.append({'name': esd['lbaas_http'], + if 'lbaas_http' in esd and not bool(http_profile): + http_profile = {'name': esd['lbaas_http'], 'partition': 'Common', - 'context': 'all'}) + 'context': 'all'} # one connect profiles - if 'lbaas_one_connect' in esd: - profiles.append({'name': esd['lbaas_one_connect'], + if 'lbaas_one_connect' in esd and not bool(oneconnect_profile) : + oneconnect_profile = {'name': esd['lbaas_one_connect'], 'partition': 'Common', - 'context': 'all'}) + 'context': 'all'} # http compression profiles - if 
'lbaas_http_compression' in esd: - profiles.append({'name': esd['lbaas_http_compression'], + if 'lbaas_http_compression' in esd and not bool(compression_profile): + compression_profile = {'name': esd['lbaas_http_compression'], 'partition': 'Common', - 'context': 'all'}) + 'context': 'all'} # SSL profiles if 'lbaas_cssl_profile' in esd: - profiles.append({'name': esd['lbaas_cssl_profile'], + cssl_profiles.append({'name': esd['lbaas_cssl_profile'], 'partition': 'Common', 'context': 'clientside'}) if 'lbaas_sssl_profile' in esd: - profiles.append({'name': esd['lbaas_sssl_profile'], + sssl_profiles.append({'name': esd['lbaas_sssl_profile'], 'partition': 'Common', 'context': 'serverside'}) @@ -652,17 +671,6 @@ def apply_esds(self, svcs, bigips): if 'lbaas_fallback_persist' in esd: update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] - # # always use defaults for non TCP listener - # listener = svc["listener"] - # if profiles and not listener['protocol'] == 'TCP': - # - # default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - # - # if(len(default_profiles) >0 ): - # for profile in default_profiles: - # profiles.append({'name':profile.get('profile'),'partition':profile.get('partition'),'context':'all'}) - # - # iRules if 'lbaas_irule' in esd: for irule in esd['lbaas_irule']: @@ -673,149 +681,175 @@ def apply_esds(self, svcs, bigips): for policy in esd['lbaas_policy']: policies.append({'name': policy, 'partition': 'Common'}) - if profiles: - update_attrs['profiles'] = profiles - update_attrs['rules'] = irules - update_attrs['policies'] = policies - # udpate BIG-IPs - for bigip in bigips: - self.vs_helper.update(bigip, update_attrs) + profiles=[] + if listener['protocol'] == lb_const.PROTOCOL_TCP: + if bool(fastl4): + profiles.append(fastl4) + else: + profiles = ctcp_profiles+ctcp_profiles + else: + default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) + if bool(http_profile): 
+ profiles.append(http_profile) + else: + profiles.append( default_profiles['http']) - def apply_esd(self, svc, esd, bigips): - profiles = [] + if bool(oneconnect_profile): + profiles.append(oneconnect_profile) + else: + profiles.append(default_profiles['oneconnect']) - # get virtual server name - update_attrs = self.service_adapter.get_virtual_name(svc) + if bool(compression_profile): + profiles.append(compression_profile) - # start with server tcp profile - if 'lbaas_stcp' in esd: - # set serverside tcp profile - profiles.append({'name': esd['lbaas_stcp'], - 'partition': 'Common', - 'context': 'serverside'}) - # restrict client profile - ctcp_context = 'clientside' - else: - # no serverside profile; use client profile for both - ctcp_context = 'all' + if profiles: + update_attrs['profiles'] = profiles - # must define client profile; default to tcp if not in ESD - if 'lbaas_ctcp' in esd: - ctcp_profile = esd['lbaas_ctcp'] - else: - ctcp_profile = 'tcp' - profiles.append({'name': ctcp_profile, - 'partition': 'Common', - 'context': ctcp_context}) - # http profiles - if 'lbaas_http' in esd: - profiles.append({'name': esd['lbaas_http'], - 'partition': 'Common', - 'context': 'all'}) - - # one connect profiles - if 'lbaas_one_connect' in esd: - profiles.append({'name': esd['lbaas_one_connect'], - 'partition': 'Common', - 'context': 'all'}) - - # http compression profiles - if 'lbaas_http_compression' in esd: - profiles.append({'name': esd['lbaas_http_compression'], - 'partition': 'Common', - 'context': 'all'}) - - # SSL profiles - if 'lbaas_cssl_profile' in esd: - profiles.append({'name': esd['lbaas_cssl_profile'], - 'partition': 'Common', - 'context': 'clientside'}) - if 'lbaas_sssl_profile' in esd: - profiles.append({'name': esd['lbaas_sssl_profile'], - 'partition': 'Common', - 'context': 'serverside'}) - - # persistence - if 'lbaas_persist' in esd: - update_attrs['persist'] = [{'name': esd['lbaas_persist']}] - if 'lbaas_fallback_persist' in esd: - 
update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] - - # always use defaults for non TCP listener - listener = svc["listener"] - if profiles and not listener['protocol'] == 'TCP': - - default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - - if(len(default_profiles) >0 ): - for profile in default_profiles: - profiles.append({'name':profile.get('profile'),'partition':profile.get('partition'),'context':'all'}) - - if profiles: - update_attrs['profiles'] = profiles - - # iRules - if 'lbaas_irule' in esd: - irules = [] - for irule in esd['lbaas_irule']: - irules.append('/Common/' + irule) update_attrs['rules'] = irules - # L7 policies - if 'lbaas_policy' in esd: - policies = [] - for policy in esd['lbaas_policy']: - policies.append({'name': policy, 'partition': 'Common'}) update_attrs['policies'] = policies - # udpate BIG-IPs - for bigip in bigips: - self.vs_helper.update(bigip, update_attrs) - - def remove_esd(self, svc, esd, bigips): - # original service object definition of listener - vs = self.service_adapter.get_virtual(svc) - - # add back SSL profile for TLS? - tls = self.service_adapter.get_tls(svc) - if tls: - tls['name'] = vs['name'] - tls['partition'] = vs['partition'] - - listener = svc["listener"] - - if listener['protocol'] == 'TCP': - # Revert VS back to fastL4. Must do an update to replace - # profiles instead of using add/remove profile. Leave http - # profiles in place for non-TCP listeners. 
- vs['profiles'] = ['/Common/fastL4'] - - # remove iRules - if 'lbaas_irule' in esd: - vs['rules'] = [] + # udpate BIG-IPs + for bigip in bigips: + self.vs_helper.update(bigip, update_attrs) - # remove policies - if 'lbaas_policy' in esd: - vs['policies'] = [] - # reset persistence to original definition - if 'pool' in svc: - vip_persist = self.service_adapter.get_session_persistence(svc) - vs.update(vip_persist) - for bigip in bigips: - try: - # update VS back to original listener definition - self.vs_helper.update(bigip, vs) - - # add back SSL profile for TLS - if tls: - self.add_ssl_profile(tls, bigip) - except Exception as err: - LOG.exception("Virtual server update error: %s" % err.message) - raise + # def apply_esd(self, svc, esd, bigips): + # profiles = [] + # + # # get virtual server name + # update_attrs = self.service_adapter.get_virtual_name(svc) + # + # # start with server tcp profile + # if 'lbaas_stcp' in esd: + # # set serverside tcp profile + # profiles.append({'name': esd['lbaas_stcp'], + # 'partition': 'Common', + # 'context': 'serverside'}) + # # restrict client profile + # ctcp_context = 'clientside' + # else: + # # no serverside profile; use client profile for both + # ctcp_context = 'all' + # + # # must define client profile; default to tcp if not in ESD + # if 'lbaas_ctcp' in esd: + # ctcp_profile = esd['lbaas_ctcp'] + # else: + # ctcp_profile = 'tcp' + # profiles.append({'name': ctcp_profile, + # 'partition': 'Common', + # 'context': ctcp_context}) + # # http profiles + # if 'lbaas_http' in esd: + # profiles.append({'name': esd['lbaas_http'], + # 'partition': 'Common', + # 'context': 'all'}) + # + # # one connect profiles + # if 'lbaas_one_connect' in esd: + # profiles.append({'name': esd['lbaas_one_connect'], + # 'partition': 'Common', + # 'context': 'all'}) + # + # # http compression profiles + # if 'lbaas_http_compression' in esd: + # profiles.append({'name': esd['lbaas_http_compression'], + # 'partition': 'Common', + # 'context': 'all'}) 
+ # + # # SSL profiles + # if 'lbaas_cssl_profile' in esd: + # profiles.append({'name': esd['lbaas_cssl_profile'], + # 'partition': 'Common', + # 'context': 'clientside'}) + # if 'lbaas_sssl_profile' in esd: + # profiles.append({'name': esd['lbaas_sssl_profile'], + # 'partition': 'Common', + # 'context': 'serverside'}) + # + # # persistence + # if 'lbaas_persist' in esd: + # update_attrs['persist'] = [{'name': esd['lbaas_persist']}] + # if 'lbaas_fallback_persist' in esd: + # update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] + # + # # always use defaults for non TCP listener + # listener = svc["listener"] + # if profiles and not listener['protocol'] == 'TCP': + # + # default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) + # + # if(len(default_profiles) >0 ): + # for profile in default_profiles: + # profiles.append({'name':profile.get('profile'),'partition':profile.get('partition'),'context':'all'}) + # + # if profiles: + # update_attrs['profiles'] = profiles + # + # # iRules + # if 'lbaas_irule' in esd: + # irules = [] + # for irule in esd['lbaas_irule']: + # irules.append('/Common/' + irule) + # update_attrs['rules'] = irules + # + # # L7 policies + # if 'lbaas_policy' in esd: + # policies = [] + # for policy in esd['lbaas_policy']: + # policies.append({'name': policy, 'partition': 'Common'}) + # update_attrs['policies'] = policies + # + # # udpate BIG-IPs + # for bigip in bigips: + # self.vs_helper.update(bigip, update_attrs) + # + # def remove_esd(self, svc, esd, bigips): + # # original service object definition of listener + # vs = self.service_adapter.get_virtual(svc) + # + # # add back SSL profile for TLS? + # tls = self.service_adapter.get_tls(svc) + # if tls: + # tls['name'] = vs['name'] + # tls['partition'] = vs['partition'] + # + # listener = svc["listener"] + # + # if listener['protocol'] == 'TCP': + # # Revert VS back to fastL4. 
Must do an update to replace + # # profiles instead of using add/remove profile. Leave http + # # profiles in place for non-TCP listeners. + # vs['profiles'] = ['/Common/fastL4'] + # + # # remove iRules + # if 'lbaas_irule' in esd: + # vs['rules'] = [] + # + # # remove policies + # if 'lbaas_policy' in esd: + # vs['policies'] = [] + # + # # reset persistence to original definition + # if 'pool' in svc: + # vip_persist = self.service_adapter.get_session_persistence(svc) + # vs.update(vip_persist) + # + # for bigip in bigips: + # try: + # # update VS back to original listener definition + # self.vs_helper.update(bigip, vs) + # + # # add back SSL profile for TLS + # if tls: + # self.add_ssl_profile(tls, bigip) + # except Exception as err: + # LOG.exception("Virtual server update error: %s" % err.message) + # raise diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py index b98ab4ac3..9cbca8d0e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py @@ -62,6 +62,7 @@ class ResourceType(Enum): http_profile = 36 one_connect_profile = 37 http_compression_profile = 38 + fastl4_profile = 39 class BigIPResourceHelper(object): @@ -255,7 +256,9 @@ def _resource(self, bigip): ResourceType.one_connect_profile: lambda bigip: bigip.tm.ltm.profile.one_connects.one_connect, ResourceType.http_compression_profile: - lambda bigip: bigip.tm.ltm.profile.http_compressions.http_compression + lambda bigip: bigip.tm.ltm.profile.http_compressions.http_compression, + ResourceType.fastl4_profile: + lambda bigip: bigip.tm.ltm.profile.fastl4s.fastl4, }[self.resource_type](bigip) @@ -328,7 +331,9 @@ def _collection(self, bigip): ResourceType.one_connect_profile: lambda bigip: bigip.tm.ltm.profile.one_connects, ResourceType.http_compression_profile: - lambda bigip: bigip.tm.ltm.profile.http_compressions + lambda bigip: 
bigip.tm.ltm.profile.http_compressions, + ResourceType.fastl4_profile: + lambda bigip: bigip.tm.ltm.profile.fastl4s, } diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index 84697d8bf..96fca7683 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -439,8 +439,8 @@ def _add_bigip_items(self, listener, vip): default_profiles = utils.get_default_profiles(self.conf,listener['protocol']) profiles=[] - for profile in default_profiles: - profiles.append('/{}/{}'.format(profile.get('partition'), profile.get('profile'))) + for profile in default_profiles.values(): + profiles.append('/{}/{}'.format(profile.get('partition'), profile.get('name'))) vip['profiles'] = profiles diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py index 755a46ad8..42a5eaa9a 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py @@ -34,17 +34,24 @@ class IpNotInCidrNotation(Exception): def get_default_profiles(conf, listener_protocol): - defaults = {lb_const.PROTOCOL_HTTP:conf.f5_default_http_profiles,lb_const.PROTOCOL_HTTPS:conf.f5_default_https_profiles,lb_const.PROTOCOL_TERMINATED_HTTPS:conf.f5_default_terminated_https_profiles} + http_defaults = {lb_const.PROTOCOL_HTTP:conf.f5_default_http_profile,lb_const.PROTOCOL_HTTPS:conf.f5_default_https_profile,lb_const.PROTOCOL_TERMINATED_HTTPS:conf.f5_default_terminated_https_profile} - profiles = defaults.get(listener_protocol,['/Common/http','/Common/oneconnect']) + http_profile = http_defaults.get(listener_protocol,'/Common/http') + oneconnect_profile= conf.f5_default_oneconnect_profile + + result = {} + + + + l = http_profile[1:].split("/") + if len(l)==2: + result['http']={'partition': l[0], 'name': l[1],'context':'all'} + + l = 
oneconnect_profile[1:].split("/") + if len(l)==2: + result['oneconnect']={'partition': l[0], 'name': l[1],'context':'all'} - result = [] - if(profiles is not None): - for profile in profiles: - l = profile[1:].split("/") - if len(l)==2: - result.append({'partition': l[0], 'profile': l[1]}) return result From eb360d25ca0c391031814a5f6ec71f864da3532f Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 29 Sep 2017 12:17:53 +0200 Subject: [PATCH 032/109] Changes in esd apply/remove logic --- etc/neutron/services/f5/esd/esd.json | 23 +++++++++++++ .../lbaasv2/drivers/bigip/icontrol_driver.py | 33 +++++++++++++----- .../lbaasv2/drivers/bigip/lbaas_builder.py | 19 +++++------ .../lbaasv2/drivers/bigip/listener_service.py | 34 ++++++++++++------- .../lbaasv2/drivers/bigip/service_adapter.py | 2 +- 5 files changed, 79 insertions(+), 32 deletions(-) create mode 100644 etc/neutron/services/f5/esd/esd.json diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json new file mode 100644 index 000000000..10809cf26 --- /dev/null +++ b/etc/neutron/services/f5/esd/esd.json @@ -0,0 +1,23 @@ +{ + "proxy_protocol_2edF_v1_0": { + "lbaas_irule": ["proxy_protocol_2edF_v1_0"] + }, + "standard_tcp_a3de_v1_0": { + "lbaas_ctcp": "tcp" + }, + "x_forward_5b6e_v1_0": { + "lbaas_http": "http_xforward" + }, + "dev_x": { + "lbaas_http": "http_xforward" + }, + "dev_one": { + "lbaas_oneconnect": "cc_oneconnect" + }, + "dev_compress": { + "lbaas_http_compression": "cc_httpcompression" + }, + "dev_nofastl4": { + "lbaas_fastl4": "" + } +} diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index d59fe1d21..c68947dfd 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -302,8 +302,7 @@ 'f5_default_oneconnect_profile', default='/Common/oneconnect', help='Default oneconnect profile for HTTP virtual 
servers' - ) - , + ), cfg.StrOpt( 'os_tenant_name', @@ -1662,22 +1661,40 @@ def _validate_bigip_version(self, bigip, hostname): @is_connected def create_l7policy(self, l7policy, service): """Create lb l7policy""" - LOG.debug("Creating l7policy") - self._common_service_handler(service) + if self.lbaas_builder.is_esd(l7policy.get('name')): + try: + self.lbaas_builder.assure_esds_applied(service) + finally: + self.update_service_status(service) + else: + LOG.debug("Creating l7policy") + self._common_service_handler(service) @serialized('update_l7policy') @is_connected def update_l7policy(self, old_l7policy, l7policy, service): """Update lb l7policy""" - LOG.debug("Updating l7policy") - self._common_service_handler(service) + if self.lbaas_builder.is_esd(l7policy.get('name')): + try: + self.lbaas_builder.assure_esds_applied(service) + finally: + self.update_service_status(service) + else: + LOG.debug("Updating l7policy") + self._common_service_handler(service) @serialized('delete_l7policy') @is_connected def delete_l7policy(self, l7policy, service): """Delete lb l7policy""" - LOG.debug("Deleting l7policy") - self._common_service_handler(service) + if self.lbaas_builder.is_esd(l7policy.get('name')): + try: + self.lbaas_builder.assure_esds_applied(service) + finally: + self.update_service_status(service) + else: + LOG.debug("Deleting l7policy") + self._common_service_handler(service) @serialized('create_l7rule') @is_connected diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 20762e910..8e579a728 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -81,7 +81,7 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_l7policies_deleted(service) - self._assure_esds_applied(service) + self.assure_esds_applied(service) self._assure_listeners_deleted(service) @@ -511,16 +511,20 @@ 
def listener_exists(self, bigip, service): return True - def _assure_esds_applied(self, service): + def assure_esds_applied(self, service): - LOG.debug('1*****************************') if 'l7policies' not in service: return bigips = self.driver.get_config_bigips() l7policies = service['l7policies'] svcs = {'loadbalancer': service['loadbalancer'],'listeners': {}} - LOG.debug('2*****************************') + for listener in service.get('listeners'): + pool = None + if listener['default_pool_id']: + pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) + svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[]} + for l7policy in l7policies: if l7policy['provisioning_status'] != plugin_const.PENDING_DELETE: try: @@ -532,16 +536,9 @@ def _assure_esds_applied(self, service): listener = self.get_listener_by_id(service, l7policy.get('listener_id', '')) if listener.get('id') in listeners.keys(): svcs.get('listeners').get(listener.get('id')).get('esds').append(esd) - else: - # pool is needed to reset session persistence - pool = None - if listener['default_pool_id']: - pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) - svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[esd]} except Exception as err: LOG.debug('Error processing ESD :%s', err) - LOG.debug('3***************************** %s', svcs) self.listener_builder.apply_esds(svcs, bigips) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 30785fdb8..640de7222 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -226,8 +226,8 @@ def update_session_persistence(self, service, bigips): # For TCP listeners, must remove fastL4 profile before adding # adding http/oneconnect profiles. 
if persistence_type != 'SOURCE_IP': - if listener['protocol'] == 'TCP': - self._remove_profile(vip, 'fastL4', bigip) + #if listener['protocol'] == 'TCP': + #self._remove_profile(vip, 'fastL4', bigip) # Add default profiles @@ -639,21 +639,30 @@ def apply_esds(self, svcs, bigips): 'context': ctcp_context}) # http profiles if 'lbaas_http' in esd and not bool(http_profile): - http_profile = {'name': esd['lbaas_http'], - 'partition': 'Common', - 'context': 'all'} + if esd['lbaas_http'] == '': + http_profile = {} + else: + http_profile = {'name': esd['lbaas_http'], + 'partition': 'Common', + 'context': 'all'} # one connect profiles if 'lbaas_one_connect' in esd and not bool(oneconnect_profile) : - oneconnect_profile = {'name': esd['lbaas_one_connect'], - 'partition': 'Common', - 'context': 'all'} + if esd['lbaas_one_connect'] == '': + oneconnect_profile = {} + else: + oneconnect_profile = {'name': esd['lbaas_one_connect'], + 'partition': 'Common', + 'context': 'all'} # http compression profiles if 'lbaas_http_compression' in esd and not bool(compression_profile): - compression_profile = {'name': esd['lbaas_http_compression'], - 'partition': 'Common', - 'context': 'all'} + if esd['lbaas_http_compression'] == '': + compression_profile = {} + else: + compression_profile = {'name': esd['lbaas_http_compression'], + 'partition': 'Common', + 'context': 'all'} # SSL profiles if 'lbaas_cssl_profile' in esd: @@ -690,7 +699,7 @@ def apply_esds(self, svcs, bigips): if bool(fastl4): profiles.append(fastl4) else: - profiles = ctcp_profiles+ctcp_profiles + profiles = stcp_profiles + ctcp_profiles else: default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) @@ -707,6 +716,7 @@ def apply_esds(self, svcs, bigips): if bool(compression_profile): profiles.append(compression_profile) + LOG.debug('Torsten **********************: %s', profiles) if profiles: update_attrs['profiles'] = profiles diff --git 
a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index 96fca7683..b9f6a0d86 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -419,7 +419,7 @@ def _add_bigip_items(self, listener, vip): if 'session_persistence' in listener: persistence_type = listener['session_persistence'] if persistence_type == 'APP_COOKIE': - virtual_type = 'standard' + #virtual_type = 'standard' vip['persist'] = [{'name': 'app_cookie_' + vip['name']}] elif persistence_type == 'SOURCE_IP': From dc96e718bf79e2085d64084b9d10007d075da251 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Fri, 29 Sep 2017 12:28:41 +0100 Subject: [PATCH 033/109] More ESD updates - make changes fail in VS updates if incompatible, needs update to recover in case ESD fixes things --- .../lbaasv2/drivers/bigip/lbaas_builder.py | 62 ++++++++++--------- .../lbaasv2/drivers/bigip/listener_service.py | 18 +++--- .../lbaasv2/drivers/bigip/service_adapter.py | 3 +- 3 files changed, 44 insertions(+), 39 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 8e579a728..10032f5e5 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -512,36 +512,38 @@ def listener_exists(self, bigip, service): return True def assure_esds_applied(self, service): - - if 'l7policies' not in service: - return - - bigips = self.driver.get_config_bigips() - l7policies = service['l7policies'] - svcs = {'loadbalancer': service['loadbalancer'],'listeners': {}} - for listener in service.get('listeners'): - pool = None - if listener['default_pool_id']: - pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) - svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[]} - 
- for l7policy in l7policies: - if l7policy['provisioning_status'] != plugin_const.PENDING_DELETE: - try: - name = l7policy.get('name', None) - if name and self.is_esd(name): - esd = self.get_esd(name) - if esd is not None: - listeners = svcs.get('listeners') - listener = self.get_listener_by_id(service, l7policy.get('listener_id', '')) - if listener.get('id') in listeners.keys(): - svcs.get('listeners').get(listener.get('id')).get('esds').append(esd) - except Exception as err: - LOG.debug('Error processing ESD :%s', err) - - - self.listener_builder.apply_esds(svcs, bigips) - + try: + if 'l7policies' not in service: + return + + bigips = self.driver.get_config_bigips() + l7policies = service['l7policies'] + svcs = {'loadbalancer': service['loadbalancer'],'listeners': {}} + for listener in service.get('listeners'): + pool = None + if listener['default_pool_id']: + pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) + svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[]} + + for l7policy in l7policies: + if l7policy['provisioning_status'] != plugin_const.PENDING_DELETE: + try: + name = l7policy.get('name', None) + if name and self.is_esd(name): + esd = self.get_esd(name) + if esd is not None: + listeners = svcs.get('listeners') + listener = self.get_listener_by_id(service, l7policy.get('listener_id', '')) + if listener.get('id') in listeners.keys(): + svcs.get('listeners').get(listener.get('id')).get('esds').append(esd) + except Exception as err: + LOG.debug('Error processing ESD :%s', err) + + + self.listener_builder.apply_esds(svcs, bigips) + except Exception as err: + LOG.exception(err) + service['loadbalancer']['provisioning_status']=plugin_const.ERROR @utils.instrument_execution_time def _assure_l7policies_created(self, service): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 640de7222..528a4fd95 100644 --- 
a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -703,18 +703,20 @@ def apply_esds(self, svcs, bigips): else: default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - if bool(http_profile): - profiles.append(http_profile) - else: + if bool(http_profile): + profiles.append(http_profile) + else: + if listener['protocol'] != lb_const.PROTOCOL_TCP: profiles.append( default_profiles['http']) - if bool(oneconnect_profile): - profiles.append(oneconnect_profile) - else: + if bool(oneconnect_profile): + profiles.append(oneconnect_profile) + else: + if listener['protocol'] != lb_const.PROTOCOL_TCP: profiles.append(default_profiles['oneconnect']) - if bool(compression_profile): - profiles.append(compression_profile) + if bool(compression_profile): + profiles.append(compression_profile) LOG.debug('Torsten **********************: %s', profiles) if profiles: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index b9f6a0d86..3f14bbf62 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -440,7 +440,8 @@ def _add_bigip_items(self, listener, vip): default_profiles = utils.get_default_profiles(self.conf,listener['protocol']) profiles=[] for profile in default_profiles.values(): - profiles.append('/{}/{}'.format(profile.get('partition'), profile.get('name'))) + if listener['protocol'] != 'TCP': + profiles.append('/{}/{}'.format(profile.get('partition'), profile.get('name'))) vip['profiles'] = profiles From 47c82c0006762a44c86ff4e0e44e8c924dbb7b87 Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Fri, 29 Sep 2017 16:46:41 +0100 Subject: [PATCH 034/109] Refactor to apply esds as basis of listener update --- .../lbaasv2/drivers/bigip/icontrol_driver.py | 30 +- 
.../lbaasv2/drivers/bigip/lbaas_builder.py | 49 +- .../lbaasv2/drivers/bigip/listener_service.py | 448 +++++++----------- .../lbaasv2/drivers/bigip/service_adapter.py | 3 + 4 files changed, 182 insertions(+), 348 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index c68947dfd..7fb9856ab 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -1661,40 +1661,22 @@ def _validate_bigip_version(self, bigip, hostname): @is_connected def create_l7policy(self, l7policy, service): """Create lb l7policy""" - if self.lbaas_builder.is_esd(l7policy.get('name')): - try: - self.lbaas_builder.assure_esds_applied(service) - finally: - self.update_service_status(service) - else: - LOG.debug("Creating l7policy") - self._common_service_handler(service) + LOG.debug("Creating l7policy") + self._common_service_handler(service) @serialized('update_l7policy') @is_connected def update_l7policy(self, old_l7policy, l7policy, service): """Update lb l7policy""" - if self.lbaas_builder.is_esd(l7policy.get('name')): - try: - self.lbaas_builder.assure_esds_applied(service) - finally: - self.update_service_status(service) - else: - LOG.debug("Updating l7policy") - self._common_service_handler(service) + LOG.debug("Updating l7policy") + self._common_service_handler(service) @serialized('delete_l7policy') @is_connected def delete_l7policy(self, l7policy, service): """Delete lb l7policy""" - if self.lbaas_builder.is_esd(l7policy.get('name')): - try: - self.lbaas_builder.assure_esds_applied(service) - finally: - self.update_service_status(service) - else: - LOG.debug("Deleting l7policy") - self._common_service_handler(service) + LOG.debug("Deleting l7policy") + self._common_service_handler(service) @serialized('create_l7rule') @is_connected diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py 
b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 10032f5e5..3b06bc758 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -43,7 +43,7 @@ def __init__(self, conf, driver, l2_service=None): self.driver = driver self.l2_service = l2_service self.service_adapter = driver.service_adapter - self.listener_builder = listener_service.ListenerServiceBuilder( + self.listener_builder = listener_service.ListenerServiceBuilder(self, self.service_adapter, driver.cert_manager, conf.f5_parent_ssl_profile) @@ -81,7 +81,7 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_l7policies_deleted(service) - self.assure_esds_applied(service) + self._assure_listeners_deleted(service) @@ -133,11 +133,6 @@ def _assure_listeners_created(self, service): "networks": networks} - has_esd=False - l7_profiles = listener.get('l7_policies', []) - for policy in l7_profiles: - if self.is_esd(policy.get('name', None)): - has_esd = True default_pool_id = listener.get('default_pool_id', '') @@ -148,10 +143,11 @@ def _assure_listeners_created(self, service): if listener['provisioning_status'] == plugin_const.PENDING_UPDATE: try: - self.listener_builder.update_listener(svc, bigips, has_esd=has_esd) + self.listener_builder.update_listener(svc, bigips) except Exception as err: loadbalancer['provisioning_status'] = plugin_const.ERROR listener['provisioning_status'] = plugin_const.ERROR + LOG.exception(err) raise f5_ex.VirtualServerUpdateException(err.message) elif listener['provisioning_status'] != \ @@ -242,11 +238,14 @@ def _assure_pools_configured(self, service): # update virtual sever pool name, session persistence self.listener_builder.update_session_persistence( svc, bigips) + + except HTTPError as err: if err.response.status_code != 409: pool['provisioning_status'] = plugin_const.ERROR loadbalancer['provisioning_status'] = ( plugin_const.ERROR) + LOG.exception(err) raise 
f5_ex.PoolCreationException(err.message) except Exception as err: @@ -511,40 +510,6 @@ def listener_exists(self, bigip, service): return True - def assure_esds_applied(self, service): - try: - if 'l7policies' not in service: - return - - bigips = self.driver.get_config_bigips() - l7policies = service['l7policies'] - svcs = {'loadbalancer': service['loadbalancer'],'listeners': {}} - for listener in service.get('listeners'): - pool = None - if listener['default_pool_id']: - pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) - svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[]} - - for l7policy in l7policies: - if l7policy['provisioning_status'] != plugin_const.PENDING_DELETE: - try: - name = l7policy.get('name', None) - if name and self.is_esd(name): - esd = self.get_esd(name) - if esd is not None: - listeners = svcs.get('listeners') - listener = self.get_listener_by_id(service, l7policy.get('listener_id', '')) - if listener.get('id') in listeners.keys(): - svcs.get('listeners').get(listener.get('id')).get('esds').append(esd) - except Exception as err: - LOG.debug('Error processing ESD :%s', err) - - - self.listener_builder.apply_esds(svcs, bigips) - except Exception as err: - LOG.exception(err) - service['loadbalancer']['provisioning_status']=plugin_const.ERROR - @utils.instrument_execution_time def _assure_l7policies_created(self, service): if 'l7policies' not in service: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 528a4fd95..ab9c23346 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -16,6 +16,8 @@ from oslo_log import log as logging +from neutron.plugins.common import constants as plugin_const + from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper from f5_openstack_agent.lbaasv2.drivers.bigip import 
ssl_profile from neutron_lbaas.services.loadbalancer import constants as lb_const @@ -38,8 +40,10 @@ class ListenerServiceBuilder(object): defined in service object to a BIG-IP® virtual server. """ - def __init__(self, service_adapter, cert_manager, parent_ssl_profile=None): + def __init__(self, lbaas_builder, service_adapter, cert_manager, parent_ssl_profile=None): #pydevd.settrace('10.29.12.100', port=22100, stdoutToServer=True, stderrToServer=True) + + self.lbaas_builder = lbaas_builder self.cert_manager = cert_manager self.parent_ssl_profile = parent_ssl_profile self.vs_helper = resource_helper.BigIPResourceHelper( @@ -76,11 +80,14 @@ def create_listener(self, service, bigips): if err.response.status_code == 409: LOG.debug("Virtual server already exists updating") try: - self.vs_helper.update(bigip, vip) + self.update_listener(service, bigips) + #self.vs_helper.update(bigip, vip) except Exception as e: LOG.warn("Update triggered in create failed, this could be due to timing issues in assure_service") LOG.warn('VS info %s',service['listener']) + LOG.exception(e) LOG.warn('Exception %s',e) + raise e else: LOG.exception("Virtual server creation error: %s" % @@ -162,7 +169,7 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False): # add ssl profile to virtual server self._add_profile(vip, name, bigip, context='clientside') - def update_listener(self, service, bigips,has_esd=False): + def update_listener(self, service, bigips): u"""Update Listener from a single BIG-IP® system. Updates virtual servers that represents a Listener object. @@ -171,11 +178,8 @@ def update_listener(self, service, bigips,has_esd=False): and load balancer definition. :param bigips: Array of BigIP class instances to update. 
""" - vip = self.service_adapter.get_virtual(service) - - if has_esd and service['listener']['protocol'] == lb_const.PROTOCOL_TCP: - vip['profiles'] = ["/Common/tcp"] + vip = self.apply_esds(service) network_id = service['loadbalancer']['network_id'] for bigip in bigips: @@ -225,18 +229,17 @@ def update_session_persistence(self, service, bigips): for bigip in bigips: # For TCP listeners, must remove fastL4 profile before adding # adding http/oneconnect profiles. - if persistence_type != 'SOURCE_IP': - #if listener['protocol'] == 'TCP': - #self._remove_profile(vip, 'fastL4', bigip) - - # Add default profiles - - profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - - - for profile in profiles.values(): - self._add_profile(vip, profile.get('name'), bigip) - + # if persistence_type != 'SOURCE_IP': + # #if listener['protocol'] == 'TCP': + # #self._remove_profile(vip, 'fastL4', bigip) + # + # # Add default profiles + # + # profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) + # + # + # for profile in profiles.values(): + # self._add_profile(vip, profile.get('name'), bigip) if persistence_type == 'APP_COOKIE' and \ @@ -585,283 +588,164 @@ def _remove_irule(self, vs, irule_name, bigip, rule_partition='Common'): LOG.debug("Removed iRule {0} for virtual sever {1}". 
format(irule_name, vs_name)) - def apply_esds(self, svcs, bigips): - loadbalancer = svcs.get('loadbalancer') - listeners = svcs.get('listeners', {}).values() - for svc in listeners: - svc['loadbalancer'] = loadbalancer - esds = svc.get('esds', []) - listener = svc.get('listener', []) - - fastl4 = {'partition':'Common','name':'fastL4','context':'all'} - stcp_profiles = [] - ctcp_profiles = [] - cssl_profiles = [] - sssl_profiles = [] - http_profile = {} - oneconnect_profile = {} - compression_profile = {} - persistence_profiles = [] - - policies = [] - irules = [] - # get virtual server name - update_attrs = self.service_adapter.get_virtual_name(svc) - - for esd in esds: - # start with server tcp profile, only add if not already got some - ctcp_context = 'all' - - if 'lbaas_fastl4' in esd: - if esd['lbaas_fastl4']=='': - fastl4= {} - - - - if len(stcp_profiles)==0: - if 'lbaas_stcp' in esd: - # set serverside tcp profile - stcp_profiles.append({'name': esd['lbaas_stcp'], - 'partition': 'Common', - 'context': 'serverside'}) - # restrict client profile - ctcp_context = 'clientside' - - - if len(ctcp_profiles)==0: - # must define client profile; default to tcp if not in ESD - if 'lbaas_ctcp' in esd: - ctcp_profile = esd['lbaas_ctcp'] - else: - ctcp_profile = 'tcp' - ctcp_profiles.append({'name': ctcp_profile, - 'partition': 'Common', - 'context': ctcp_context}) - # http profiles - if 'lbaas_http' in esd and not bool(http_profile): - if esd['lbaas_http'] == '': - http_profile = {} - else: - http_profile = {'name': esd['lbaas_http'], - 'partition': 'Common', - 'context': 'all'} - - # one connect profiles - if 'lbaas_one_connect' in esd and not bool(oneconnect_profile) : - if esd['lbaas_one_connect'] == '': - oneconnect_profile = {} - else: - oneconnect_profile = {'name': esd['lbaas_one_connect'], - 'partition': 'Common', - 'context': 'all'} - - # http compression profiles - if 'lbaas_http_compression' in esd and not bool(compression_profile): - if 
esd['lbaas_http_compression'] == '': - compression_profile = {} - else: - compression_profile = {'name': esd['lbaas_http_compression'], - 'partition': 'Common', - 'context': 'all'} + def apply_esds(self, service): - # SSL profiles - if 'lbaas_cssl_profile' in esd: - cssl_profiles.append({'name': esd['lbaas_cssl_profile'], - 'partition': 'Common', - 'context': 'clientside'}) - if 'lbaas_sssl_profile' in esd: - sssl_profiles.append({'name': esd['lbaas_sssl_profile'], - 'partition': 'Common', - 'context': 'serverside'}) + LOG.debug("**************************") + LOG.debug(service) - # persistence - if 'lbaas_persist' in esd: - update_attrs['persist'] = [{'name': esd['lbaas_persist']}] - if 'lbaas_fallback_persist' in esd: - update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] + listener = service['listener'] - # iRules - if 'lbaas_irule' in esd: - for irule in esd['lbaas_irule']: - irules.append('/Common/' + irule) + l7policies = listener.get('l7_policies') - # L7 policies - if 'lbaas_policy' in esd: - for policy in esd['lbaas_policy']: - policies.append({'name': policy, 'partition': 'Common'}) + if l7policies is None: + return + esds=[] + # pool = None + # if listener['default_pool_id']: + # pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) + # svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[]} - profiles=[] - if listener['protocol'] == lb_const.PROTOCOL_TCP: - if bool(fastl4): - profiles.append(fastl4) - else: - profiles = stcp_profiles + ctcp_profiles - else: - default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - if bool(http_profile): - profiles.append(http_profile) - else: - if listener['protocol'] != lb_const.PROTOCOL_TCP: - profiles.append( default_profiles['http']) + fastl4 = {'partition':'Common','name':'fastL4','context':'all'} + stcp_profiles = [] + ctcp_profiles = [] + cssl_profiles = [] + sssl_profiles = [] + http_profile = {} + 
oneconnect_profile = {} + compression_profile = {} + persistence_profiles = [] + + policies = [] + irules = [] + # get virtual server name + + update_attrs = self.service_adapter.get_virtual(service) + + + + for l7policy in l7policies: + name = l7policy.get('name', None) + if name and self.lbaas_builder.is_esd(name) and l7policy.get('provisioning_status')!= plugin_const.PENDING_DELETE: + esd = self.lbaas_builder.get_esd(name) + if esd is not None: - if bool(oneconnect_profile): - profiles.append(oneconnect_profile) + # start with server tcp profile, only add if not already got some + ctcp_context = 'all' + + if 'lbaas_fastl4' in esd: + if esd['lbaas_fastl4']=='': + fastl4= {} + + + + if len(stcp_profiles)==0: + if 'lbaas_stcp' in esd: + # set serverside tcp profile + stcp_profiles.append({'name': esd['lbaas_stcp'], + 'partition': 'Common', + 'context': 'serverside'}) + # restrict client profile + ctcp_context = 'clientside' + + + if len(ctcp_profiles)==0: + # must define client profile; default to tcp if not in ESD + if 'lbaas_ctcp' in esd: + ctcp_profile = esd['lbaas_ctcp'] + else: + ctcp_profile = 'tcp' + ctcp_profiles.append({'name': ctcp_profile, + 'partition': 'Common', + 'context': ctcp_context}) + # http profiles + if 'lbaas_http' in esd and not bool(http_profile): + if esd['lbaas_http'] == '': + http_profile = {} + else: + http_profile = {'name': esd['lbaas_http'], + 'partition': 'Common', + 'context': 'all'} + + # one connect profiles + if 'lbaas_one_connect' in esd and not bool(oneconnect_profile) : + if esd['lbaas_one_connect'] == '': + oneconnect_profile = {} + else: + oneconnect_profile = {'name': esd['lbaas_one_connect'], + 'partition': 'Common', + 'context': 'all'} + + # http compression profiles + if 'lbaas_http_compression' in esd and not bool(compression_profile): + if esd['lbaas_http_compression'] == '': + compression_profile = {} + else: + compression_profile = {'name': esd['lbaas_http_compression'], + 'partition': 'Common', + 'context': 'all'} 
+ + # SSL profiles + if 'lbaas_cssl_profile' in esd: + cssl_profiles.append({'name': esd['lbaas_cssl_profile'], + 'partition': 'Common', + 'context': 'clientside'}) + if 'lbaas_sssl_profile' in esd: + sssl_profiles.append({'name': esd['lbaas_sssl_profile'], + 'partition': 'Common', + 'context': 'serverside'}) + + # persistence + if 'lbaas_persist' in esd: + update_attrs['persist'] = [{'name': esd['lbaas_persist']}] + if 'lbaas_fallback_persist' in esd: + update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] + + # iRules + if 'lbaas_irule' in esd: + for irule in esd['lbaas_irule']: + irules.append('/Common/' + irule) + + # L7 policies + if 'lbaas_policy' in esd: + for policy in esd['lbaas_policy']: + policies.append({'name': policy, 'partition': 'Common'}) + + profiles=[] + + if listener['protocol'] == lb_const.PROTOCOL_TCP: + if bool(fastl4): + profiles.append(fastl4) else: - if listener['protocol'] != lb_const.PROTOCOL_TCP: - profiles.append(default_profiles['oneconnect']) + profiles = stcp_profiles + ctcp_profiles + else: + default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - if bool(compression_profile): - profiles.append(compression_profile) + if bool(http_profile): + profiles.append(http_profile) + else: + if listener['protocol'] != lb_const.PROTOCOL_TCP: + profiles.append( default_profiles['http']) - LOG.debug('Torsten **********************: %s', profiles) - if profiles: - update_attrs['profiles'] = profiles + if bool(oneconnect_profile): + profiles.append(oneconnect_profile) + else: + if listener['protocol'] != lb_const.PROTOCOL_TCP: + profiles.append(default_profiles['oneconnect']) - update_attrs['rules'] = irules + if bool(compression_profile): + profiles.append(compression_profile) - update_attrs['policies'] = policies + LOG.debug('Torsten **********************: %s', profiles) + if profiles: + update_attrs['profiles'] = profiles - # udpate BIG-IPs - for bigip in bigips: - 
self.vs_helper.update(bigip, update_attrs) - - - - # def apply_esd(self, svc, esd, bigips): - # profiles = [] - # - # # get virtual server name - # update_attrs = self.service_adapter.get_virtual_name(svc) - # - # # start with server tcp profile - # if 'lbaas_stcp' in esd: - # # set serverside tcp profile - # profiles.append({'name': esd['lbaas_stcp'], - # 'partition': 'Common', - # 'context': 'serverside'}) - # # restrict client profile - # ctcp_context = 'clientside' - # else: - # # no serverside profile; use client profile for both - # ctcp_context = 'all' - # - # # must define client profile; default to tcp if not in ESD - # if 'lbaas_ctcp' in esd: - # ctcp_profile = esd['lbaas_ctcp'] - # else: - # ctcp_profile = 'tcp' - # profiles.append({'name': ctcp_profile, - # 'partition': 'Common', - # 'context': ctcp_context}) - # # http profiles - # if 'lbaas_http' in esd: - # profiles.append({'name': esd['lbaas_http'], - # 'partition': 'Common', - # 'context': 'all'}) - # - # # one connect profiles - # if 'lbaas_one_connect' in esd: - # profiles.append({'name': esd['lbaas_one_connect'], - # 'partition': 'Common', - # 'context': 'all'}) - # - # # http compression profiles - # if 'lbaas_http_compression' in esd: - # profiles.append({'name': esd['lbaas_http_compression'], - # 'partition': 'Common', - # 'context': 'all'}) - # - # # SSL profiles - # if 'lbaas_cssl_profile' in esd: - # profiles.append({'name': esd['lbaas_cssl_profile'], - # 'partition': 'Common', - # 'context': 'clientside'}) - # if 'lbaas_sssl_profile' in esd: - # profiles.append({'name': esd['lbaas_sssl_profile'], - # 'partition': 'Common', - # 'context': 'serverside'}) - # - # # persistence - # if 'lbaas_persist' in esd: - # update_attrs['persist'] = [{'name': esd['lbaas_persist']}] - # if 'lbaas_fallback_persist' in esd: - # update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] - # - # # always use defaults for non TCP listener - # listener = svc["listener"] - # if profiles and not 
listener['protocol'] == 'TCP': - # - # default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) - # - # if(len(default_profiles) >0 ): - # for profile in default_profiles: - # profiles.append({'name':profile.get('profile'),'partition':profile.get('partition'),'context':'all'}) - # - # if profiles: - # update_attrs['profiles'] = profiles - # - # # iRules - # if 'lbaas_irule' in esd: - # irules = [] - # for irule in esd['lbaas_irule']: - # irules.append('/Common/' + irule) - # update_attrs['rules'] = irules - # - # # L7 policies - # if 'lbaas_policy' in esd: - # policies = [] - # for policy in esd['lbaas_policy']: - # policies.append({'name': policy, 'partition': 'Common'}) - # update_attrs['policies'] = policies - # - # # udpate BIG-IPs - # for bigip in bigips: - # self.vs_helper.update(bigip, update_attrs) - # - # def remove_esd(self, svc, esd, bigips): - # # original service object definition of listener - # vs = self.service_adapter.get_virtual(svc) - # - # # add back SSL profile for TLS? - # tls = self.service_adapter.get_tls(svc) - # if tls: - # tls['name'] = vs['name'] - # tls['partition'] = vs['partition'] - # - # listener = svc["listener"] - # - # if listener['protocol'] == 'TCP': - # # Revert VS back to fastL4. Must do an update to replace - # # profiles instead of using add/remove profile. Leave http - # # profiles in place for non-TCP listeners. 
- # vs['profiles'] = ['/Common/fastL4'] - # - # # remove iRules - # if 'lbaas_irule' in esd: - # vs['rules'] = [] - # - # # remove policies - # if 'lbaas_policy' in esd: - # vs['policies'] = [] - # - # # reset persistence to original definition - # if 'pool' in svc: - # vip_persist = self.service_adapter.get_session_persistence(svc) - # vs.update(vip_persist) - # - # for bigip in bigips: - # try: - # # update VS back to original listener definition - # self.vs_helper.update(bigip, vs) - # - # # add back SSL profile for TLS - # if tls: - # self.add_ssl_profile(tls, bigip) - # except Exception as err: - # LOG.exception("Virtual server update error: %s" % err.message) - # raise + update_attrs['rules'] = update_attrs.get('rules',[])+irules + + update_attrs['policies'] = update_attrs.get('policies',[])+policies + + return update_attrs diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index 3f14bbf62..fd7ea051e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -357,6 +357,7 @@ def _map_virtual(self, loadbalancer, listener, pool=None): + vip["description"] = self.get_resource_description(listener) if "protocol" in listener: @@ -394,6 +395,8 @@ def _map_virtual(self, loadbalancer, listener, pool=None): if "pool" in listener: vip["pool"] = listener["pool"] + else: + vip["pool"] = None return vip From a77e7a2d7818085dcfc843561f6ede3cb6b448fc Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Mon, 2 Oct 2017 11:32:03 +0100 Subject: [PATCH 035/109] Fix one connect example --- etc/neutron/services/f5/esd/esd.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json index 10809cf26..9c728f190 100644 --- a/etc/neutron/services/f5/esd/esd.json +++ b/etc/neutron/services/f5/esd/esd.json @@ -12,7 +12,7 @@ "lbaas_http": 
"http_xforward" }, "dev_one": { - "lbaas_oneconnect": "cc_oneconnect" + "lbaas_one_connect": "cc_oneconnect" }, "dev_compress": { "lbaas_http_compression": "cc_httpcompression" From 86bfe02bcf89e233752bab8a8ee65b692448f92a Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Mon, 2 Oct 2017 11:34:53 +0100 Subject: [PATCH 036/109] Remove comments --- .../lbaasv2/drivers/bigip/listener_service.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index ab9c23346..7d114b45c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -590,9 +590,6 @@ def _remove_irule(self, vs, irule_name, bigip, rule_partition='Common'): def apply_esds(self, service): - LOG.debug("**************************") - LOG.debug(service) - listener = service['listener'] l7policies = listener.get('l7_policies') @@ -600,16 +597,6 @@ def apply_esds(self, service): if l7policies is None: return - esds=[] - - # pool = None - # if listener['default_pool_id']: - # pool = self.get_pool_by_id( service, listener.get('default_pool_id', '')) - # svcs.get('listeners')[listener.get('id')]={'listener':listener,'pool':pool,'esds':[]} - - - - fastl4 = {'partition':'Common','name':'fastL4','context':'all'} stcp_profiles = [] ctcp_profiles = [] @@ -641,8 +628,6 @@ def apply_esds(self, service): if esd['lbaas_fastl4']=='': fastl4= {} - - if len(stcp_profiles)==0: if 'lbaas_stcp' in esd: # set serverside tcp profile @@ -652,7 +637,6 @@ def apply_esds(self, service): # restrict client profile ctcp_context = 'clientside' - if len(ctcp_profiles)==0: # must define client profile; default to tcp if not in ESD if 'lbaas_ctcp' in esd: @@ -740,7 +724,6 @@ def apply_esds(self, service): if bool(compression_profile): profiles.append(compression_profile) - LOG.debug('Torsten **********************: 
%s', profiles) if profiles: update_attrs['profiles'] = profiles From 28cdbbfea0b7c6f75c310d199cb13e4e7abffe5a Mon Sep 17 00:00:00 2001 From: "Battye, Andrew" Date: Wed, 11 Oct 2017 09:31:49 +0100 Subject: [PATCH 037/109] =?UTF-8?q?Remove=20=C2=AE=20=20non=20ascii=20char?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/ISSUE_TEMPLATE.md | 8 +-- CONTRIBUTING.md | 4 +- README.rst | 10 +-- docs/index.rst | 6 +- docs/ref_agent-config-file.rst | 2 +- etc/init.d/f5-oslbaasv2-agent | 2 +- .../services/f5/f5-openstack-agent.ini | 68 +++++++++---------- .../lbaasv2/drivers/bigip/agent_manager.py | 4 +- .../lbaasv2/drivers/bigip/icontrol_driver.py | 8 +-- .../lbaasv2/drivers/bigip/l2_service.py | 2 +- .../lbaasv2/drivers/bigip/l3_binding.py | 4 +- .../lbaasv2/drivers/bigip/lbaas_builder.py | 4 +- .../lbaasv2/drivers/bigip/listener_service.py | 16 ++--- .../drivers/bigip/loadbalancer_service.py | 8 +-- .../lbaasv2/drivers/bigip/pool_service.py | 16 ++--- .../lbaasv2/drivers/bigip/resource_helper.py | 26 +++---- .../lbaasv2/drivers/bigip/selfips.py | 2 +- .../lbaasv2/drivers/bigip/service_adapter.py | 4 +- .../lbaasv2/drivers/bigip/virtual_address.py | 4 +- .../lbaasv2/drivers/bigip/vlan_binding.py | 4 +- test/send_to_driver/test_listener.py | 2 +- 21 files changed, 102 insertions(+), 102 deletions(-) diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md index 174a0962e..788bee9f4 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE.md @@ -22,10 +22,10 @@ #### Description - + #### Deployment - - - + + + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 48a25d422..7c1e3d5f3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -58,7 +58,7 @@ $ py.test --cov ./ --cov-report=html $ open htmlcov/index.html ``` -If you are running our functional tests you will need a real BIG-IP® to run +If you are running our functional tests you will need a real BIG-IP to run them against; you can get one of 
those pretty easily in [Amazon EC2](https://aws.amazon.com/marketplace/pp/B00JL3UASY/ref=srh_res_product_title?ie=UTF8&sr=0-10&qid=1449332167461). ## License @@ -77,4 +77,4 @@ See the License for the specific language governing permissions and limitations under the License. ### Contributor License Agreement -Individuals or business entities who contribute to this project must have completed and submitted the [F5® Contributor License Agreement](http://f5-openstack-docs.readthedocs.org/en/latest/cla_landing.html#cla-landing) to Openstack_CLA@f5.com prior to their code submission being included in this project. +Individuals or business entities who contribute to this project must have completed and submitted the [F5 Contributor License Agreement](http://f5-openstack-docs.readthedocs.org/en/latest/cla_landing.html#cla-landing) to Openstack_CLA@f5.com prior to their code submission being included in this project. diff --git a/README.rst b/README.rst index 5e21abfd0..c9b7a5b16 100644 --- a/README.rst +++ b/README.rst @@ -24,7 +24,7 @@ f5-openstack-agent Introduction ************ -The F5® agent translates from 'OpenStack' to 'F5®'. It uses the `f5-sdk `_ to translate OpenStack messaging calls -- such as those from the Neutron RPC messaging queue -- into iControl® REST calls to F5® technologies, such as BIG-IP®. +The F5 agent translates from 'OpenStack' to 'F5'. It uses the `f5-sdk `_ to translate OpenStack messaging calls -- such as those from the Neutron RPC messaging queue -- into iControl REST calls to F5 technologies, such as BIG-IP. Documentation ************* @@ -34,9 +34,9 @@ Documentation is published on Read the Docs, at http://f5-openstack-agent.readth Compatibility ************* -The F5® OpenStack agent is compatible with OpenStack releases from Liberty forward. If you are using Kilo or earlier, you'll need the `LBaaSv1 plugin `_. +The F5 OpenStack agent is compatible with OpenStack releases from Liberty forward. 
If you are using Kilo or earlier, you'll need the `LBaaSv1 plugin `_. -See the `F5® OpenStack Releases and Support Matrix `_ for more information. +See the `F5 OpenStack Releases and Support Matrix `_ for more information. Installation ************ @@ -59,7 +59,7 @@ Test **** Before you open a pull request, your code must have passing `pytest `__ unit tests. In addition, you should -include a set of functional tests written to use a real BIG-IP® device +include a set of functional tests written to use a real BIG-IP device for testing. Information on how to run our set of tests is included below. @@ -160,7 +160,7 @@ limitations under the License. Contributor License Agreement ============================= -Individuals or business entities who contribute to this project must have completed and submitted the `F5® Contributor License Agreement `_ to Openstack\_CLA@f5.com prior to their code submission being included in this project. +Individuals or business entities who contribute to this project must have completed and submitted the `F5 Contributor License Agreement `_ to Openstack\_CLA@f5.com prior to their code submission being included in this project. .. |Build Status| image:: https://travis-ci.org/F5Networks/f5-openstack-agent.svg?branch=liberty diff --git a/docs/index.rst b/docs/index.rst index 252cd882f..d60465e14 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,6 +1,6 @@ .. _home: -F5® OpenStack Agent +F5 OpenStack Agent =================== |Build Status| |Docs Build Status| @@ -28,13 +28,13 @@ Installation .. include:: topic_install-f5-agent.rst :start-line: 3 -For more information about using F5® technologies in OpenStack with Neutron LBaaSv2, please see the :ref:`f5-openstack-lbaasv2-driver documentation `. +For more information about using F5 technologies in OpenStack with Neutron LBaaSv2, please see the :ref:`f5-openstack-lbaasv2-driver documentation `. 
Configuration and Usage ----------------------- -See the :ref:`F5® OpenStack LBaaSv2 documentation `. +See the :ref:`F5 OpenStack LBaaSv2 documentation `. .. |Build Status| image:: https://travis-ci.org/F5Networks/f5-openstack-agent.svg?branch=liberty diff --git a/docs/ref_agent-config-file.rst b/docs/ref_agent-config-file.rst index 62944b424..d87e8b5a2 100644 --- a/docs/ref_agent-config-file.rst +++ b/docs/ref_agent-config-file.rst @@ -4,7 +4,7 @@ Agent Configuration File ------------------------ -A sample F5® OpenStack agent configuration file is shown below. The file can be found at ``/etc/neutron/services/f5/f5-openstack-agent.ini``. When setting up your own F5® agent(s), be sure to use the correct information for your environment. +A sample F5 OpenStack agent configuration file is shown below. The file can be found at ``/etc/neutron/services/f5/f5-openstack-agent.ini``. When setting up your own F5 agent(s), be sure to use the correct information for your environment. .. literalinclude:: ../etc/neutron/services/f5/f5-openstack-agent.ini diff --git a/etc/init.d/f5-oslbaasv2-agent b/etc/init.d/f5-oslbaasv2-agent index 5959aad0a..1e10b26e6 100755 --- a/etc/init.d/f5-oslbaasv2-agent +++ b/etc/init.d/f5-oslbaasv2-agent @@ -8,7 +8,7 @@ # Default-Start: 2 3 4 5 # Default-Stop: 0 1 6 # Short-Description: f5-openstack-agent -# Description: Provides the F5® OpenStack agent to configure BIG-IP® +# Description: Provides the F5 OpenStack agent to configure BIG-IP ### END INIT INFO PROJECT_NAME=neutron diff --git a/etc/neutron/services/f5/f5-openstack-agent.ini b/etc/neutron/services/f5/f5-openstack-agent.ini index 0a95754f7..45239e715 100644 --- a/etc/neutron/services/f5/f5-openstack-agent.ini +++ b/etc/neutron/services/f5/f5-openstack-agent.ini @@ -50,17 +50,17 @@ periodic_interval = 10 # Environment Settings ############################################################################### # -# Since many TMOS® object names must start with an alpha character +# Since many 
TMOS object names must start with an alpha character # the environment_prefix is used to prefix all service objects. # -# Objects created on the BIG-IP® by this agent will have their names prefixed +# Objects created on the BIG-IP by this agent will have their names prefixed # by an environment string. This allows you set this string. The default is # 'Project'. # # WARNING - you should only set this before creating any objects. If you change # it with established objects, the objects created with an alternative prefix, # will no longer be associated with this agent and all objects in neutron -# and on the the BIG-IP® associated with the old environment will need to be managed +# and on the the BIG-IP associated with the old environment will need to be managed # manually. # # environment_prefix = 'Project' @@ -183,10 +183,10 @@ f5_external_physical_mappings = default:1.1:True # Some systems require the need to bind and prune VLANs ids # allowed to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # tagged VLANs. When a VLAN tagged network is added to a -# specific BIG-IP® device, the facing switch port will need -# to allow traffic for that VLAN tag through to the BIG-IP®'s +# specific BIG-IP device, the facing switch port will need +# to allow traffic for that VLAN tag through to the BIG-IP's # port for traffic to flow. # # What is required is a software hook which allows the binding. @@ -197,16 +197,16 @@ f5_external_physical_mappings = default:1.1:True # vlan_binding_driver = f5.oslbaasv1agent.drivers.bigip.vlan_binding.NullBinding # # The interface_port_static_mappings allows for a JSON encoded dictionary -# mapping BIG-IP® devices and interfaces to corresponding ports. A port id can be +# mapping BIG-IP devices and interfaces to corresponding ports. A port id can be # any string which is meaningful to a vlan_binding_driver. 
It can be a # switch_id and port, or it might be a neutron port_id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM interfaces will be collect # for each device and neutron will be queried to see if which # device port_ids correspond to known neutron ports. If they do, # automatic entries for all mapped port_ids will be made referencing -# the BIG-IP® device name and interface and the neutron port_ids. +# the BIG-IP device name and interface and the neutron port_ids. # # interface_port_static_mappings = {"device_name_1":{"interface_ida":"port_ida","interface_idb":"port_idb"}, {"device_name_2":{"interface_ida":"port_ida","interface_idb":"port_idb"}} # @@ -216,7 +216,7 @@ f5_external_physical_mappings = default:1.1:True # # Device Tunneling (VTEP) Self IPs # -# This is the name of a BIG-IP® self IP address to use for VTEP addresses. +# This is the name of a BIG-IP self IP address to use for VTEP addresses. # # If no gre or vxlan tunneling is required, these settings should be # commented out or set to None. @@ -265,10 +265,10 @@ f5_populate_static_arp = False # # Device Tunneling (VTEP) self IPs # -# This is a boolean entry which determines if the BIG-IP® will use +# This is a boolean entry which determines if the BIG-IP will use # L2 Population service to update its fdb tunnel entries. This needs # to be set up in accordance with the way the other tunnel agents are -# set up. If the BIG-IP® agent and other tunnel agents don't match +# set up. If the BIG-IP agent and other tunnel agents don't match # the tunnel setup will not work properly. 
# l2_population = True @@ -303,13 +303,13 @@ l2_population = True # L3 Segmentation Mode Settings ############################################################################### # -# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP® +# Global Routed Mode - No L2 or L3 Segmentation on BIG-IP # # This setting will cause the agent to assume that all VIPs # and pool members will be reachable via global device -# L3 routes, which must be already provisioned on the BIG-IP®s. +# L3 routes, which must be already provisioned on the BIG-IPs. # -# In f5_global_routed_mode, BIG-IP® will not assume L2 +# In f5_global_routed_mode, BIG-IP will not assume L2 # adjacentcy to any neutron network, therefore no # L2 segementation between tenant services in the data plane # will be provisioned by the agent. Because the routing @@ -320,22 +320,22 @@ l2_population = True # # WARNING: setting this mode to True will override # the use_namespaces, setting it to False, because only -# one global routing space will used on the BIG-IP®. This +# one global routing space will used on the BIG-IP. This # means overlapping IP addresses between tenants is no # longer supported. # # WARNING: setting this mode to True will override # the f5_snat_mode, setting it to True, because pool members -# will never be considered L2 adjacent to the BIG-IP® by +# will never be considered L2 adjacent to the BIG-IP by # the agent. All member access will be via L3 routing, which -# will need to be set up on the BIG-IP® before LBaaS provisions +# will need to be set up on the BIG-IP before LBaaS provisions # resources on behalf of tenants. # # WARNING: setting this mode to True will override the # f5_snat_addresses_per_subnet, setting it to 0 (zero). # This will force all VIPs to use AutoMap SNAT for which # enough Self IP will need to be pre-provisioned on the -# BIG-IP® to handle all pool member connections. The SNAT, +# BIG-IP to handle all pool member connections. 
The SNAT, # an L3 mechanism, will all be global without reference # to any specific tenant SNAT pool. # @@ -344,7 +344,7 @@ l2_population = True # because no L2 information will be taken from # neutron, thus making the assumption that all VIP # L3 addresses will be globally routable without -# segmentation at L2 on the BIG-IP®. +# segmentation at L2 on the BIG-IP. # f5_global_routed_mode = True # @@ -399,14 +399,14 @@ f5_route_domain_strictness = False # This setting will force the use of SNATs. # # If this is set to False, a SNAT will not -# be created (routed mode) and the BIG-IP® +# be created (routed mode) and the BIG-IP # will attempt to set up a floating self IP # as the subnet's default gateway address. # and a wild card IP forwarding virtual # server will be set up on member's network. # Setting this to False will mean Neutron # floating self IPs will not longer work -# if the same BIG-IP® device is not being used +# if the same BIG-IP device is not being used # as the Neutron Router implementation. # # This setting will be forced to True if @@ -444,16 +444,16 @@ f5_common_external_networks = True # separated list where if the name is a neutron # network id used for a vip or a pool member, # the network should not be created or deleted -# on the BIG-IP®, but rather assumed that the value +# on the BIG-IP, but rather assumed that the value # is the name of the network already created in # the Common partition with all L3 addresses # assigned to route domain 0. This is useful # for shared networks which are already defined -# on the BIG-IP® prior to LBaaS configuration. The +# on the BIG-IP prior to LBaaS configuration. 
The # network should not be managed by the LBaaS agent, # but can be used for VIPs or pool members # -# If your Internet VLAN on your BIG-IP® is named +# If your Internet VLAN on your BIG-IP is named # /Common/external, and that corresponds to # Neutron uuid: 71718972-78e2-449e-bb56-ce47cc9d2680 # then the entry would look like: @@ -472,7 +472,7 @@ f5_common_external_networks = True # Some systems require the need to bind L3 addresses # to specific ports, often for security. # -# An example would be if a LBaaS iControl® endpoint is using +# An example would be if a LBaaS iControl endpoint is using # untagged VLANs and is a nova guest instance. By # default, neutron will attempt to apply security rule # for anti-spoofing which will not allow just any L3 @@ -492,7 +492,7 @@ f5_common_external_networks = True # vary between providers. They may look like a neutron port id # and a nova guest instance id. # -# In addition to any static mappings, when the iControl® endpoints +# In addition to any static mappings, when the iControl endpoints # are initialized, all their TMM MAC addresses will be collect # and neutron will be queried to see if the MAC addresses # correspond to known neutron ports. If they do, automatic entries @@ -511,7 +511,7 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # # ############################################################################### -# Device Driver - iControl® Driver Setting +# Device Driver - iControl Driver Setting ############################################################################### # # icontrol_hostname is valid for external device type only. @@ -524,17 +524,17 @@ f5_bigip_lbaas_device_driver = f5_openstack_agent.lbaasv2.drivers.bigip.icontrol # is not standalone, all devices in the sync failover # device group for the hostname specified must have # their management IP address reachable to the agent. 
-# If order to access devices' iControl® interfaces via +# If order to access devices' iControl interfaces via # self IPs, you should specify them as a comma # separated list below. # icontrol_hostname = 10.190.7.232 # -# If you are using vCMP® with VLANs, you will need to configure -# your vCMP® host addresses, in addition to the guests addresses. -# vCMP® Host access is necessary for provisioning VLANs to a guest. -# Use icontrol_hostname for vCMP® guests and icontrol_vcmp_hostname -# for vCMP® hosts. The plug-in will automatically determine +# If you are using vCMP with VLANs, you will need to configure +# your vCMP host addresses, in addition to the guests addresses. +# vCMP Host access is necessary for provisioning VLANs to a guest. +# Use icontrol_hostname for vCMP guests and icontrol_vcmp_hostname +# for vCMP hosts. The plug-in will automatically determine # which host corresponds to each guest. # # icontrol_vcmp_hostname = 192.168.1.245 diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 36064ea13..40511d106 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -235,7 +235,7 @@ def __init__(self, conf): else: self.agent_host = conf.host - # Load the iControl® driver. + # Load the iControl driver. self._load_driver(conf) # Initialize agent configurations @@ -264,7 +264,7 @@ def __init__(self, conf): self.admin_state_up = True - # Set iControl® driver context for RPC. + # Set iControl driver context for RPC. 
self.lbdriver.set_context(self.context) # Setup RPC: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 7fb9856ab..893baf3f7 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -69,7 +69,7 @@ NS_PREFIX = 'qlbaas-' __VERSION__ = '0.1.1' -# configuration objects specific to iControl® driver +# configuration objects specific to iControl driver # XXX see /etc/neutron/services/f5/f5-openstack-agent.ini OPTS = [ # XXX maybe we should make this a dictionary cfg.StrOpt( @@ -354,7 +354,7 @@ def __init__(self, conf, registerOpts=True): self.connected = False # overrides base, same value self.driver_name = 'f5-lbaasv2-icontrol' - # BIG-IP® containers + # BIG-IP containers self.__bigips = {} self.__traffic_groups = [] self.agent_configurations = {} # overrides base, same value @@ -537,7 +537,7 @@ def _init_bigip_hostnames(self): self.hostnames = sorted(self.hostnames) def _init_bigips(self): - # Connect to all BIG-IP®s + # Connect to all BIG-IPs if self.connected: return try: @@ -1642,7 +1642,7 @@ def _init_traffic_groups(self, bigip): self.__traffic_groups.sort() def _validate_bigip_version(self, bigip, hostname): - # Ensure the BIG-IP® has sufficient version + # Ensure the BIG-IP has sufficient version major_version = self.system_helper.get_major_version(bigip) if major_version < f5const.MIN_TMOS_MAJOR_VERSION: raise f5ex.MajorVersionValidateFailed( diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py index 87421d21a..279d3c448 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py @@ -34,7 +34,7 @@ def _get_tunnel_name(network): - # BIG-IP® object name for a tunnel + # BIG-IP object name for a tunnel tunnel_type = network['provider:network_type'] 
tunnel_id = network['provider:segmentation_id'] return 'tunnel-' + str(tunnel_type) + '-' + str(tunnel_id) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l3_binding.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l3_binding.py index f5e9a36ee..76a6fde11 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l3_binding.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l3_binding.py @@ -58,8 +58,8 @@ def __init__(self, conf, driver): LOG.debug('l3_binding_static_mappings not configured') def register_bigip_mac_addresses(self): - # Delayed binding BIG-IP® ports will be called - # after BIG-IP® endpoints are registered. + # Delayed binding BIG-IP ports will be called + # after BIG-IP endpoints are registered. if not self.__initialized__bigip_ports: for bigip in self.driver.get_all_bigips(): LOG.debug('Request Port information for MACs: %s' diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 3b06bc758..2a5804bf2 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -35,8 +35,8 @@ class LBaaSBuilder(object): - # F5® LBaaS Driver using iControl® for BIG-IP® to - # create objects (vips, pools) - not using an iApp®.""" + # F5 LBaaS Driver using iControl for BIG-IP to + # create objects (vips, pools) - not using an iApp.""" def __init__(self, conf, driver, l2_service=None): self.conf = conf diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 7d114b45c..dd4618373 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -33,11 +33,11 @@ class ListenerServiceBuilder(object): - u"""Create LBaaS v2 Listener on BIG-IP®s. + u"""Create LBaaS v2 Listener on BIG-IPs. 
Handles requests to create, update, delete LBaaS v2 listener - objects on one or more BIG-IP® systems. Maps LBaaS listener - defined in service object to a BIG-IP® virtual server. + objects on one or more BIG-IP systems. Maps LBaaS listener + defined in service object to a BIG-IP virtual server. """ def __init__(self, lbaas_builder, service_adapter, cert_manager, parent_ssl_profile=None): @@ -54,9 +54,9 @@ def __init__(self, lbaas_builder, service_adapter, cert_manager, parent_ssl_prof def create_listener(self, service, bigips): - u"""Create listener on set of BIG-IP®s. + u"""Create listener on set of BIG-IPs. - Create a BIG-IP® virtual server to represent an LBaaS + Create a BIG-IP virtual server to represent an LBaaS Listener object. :param service: Dictionary which contains a both a listener @@ -97,7 +97,7 @@ def create_listener(self, service, bigips): self.add_ssl_profile(tls, bigip) def get_listener(self, service, bigip): - u"""Retrieve BIG-IP® virtual from a single BIG-IP® system. + u"""Retrieve BIG-IP virtual from a single BIG-IP system. :param service: Dictionary which contains a both a listener and load balancer definition. @@ -110,7 +110,7 @@ def get_listener(self, service, bigip): return obj def delete_listener(self, service, bigips): - u"""Delete Listener from a set of BIG-IP® systems. + u"""Delete Listener from a set of BIG-IP systems. Delete virtual server that represents a Listener object. @@ -170,7 +170,7 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False): self._add_profile(vip, name, bigip, context='clientside') def update_listener(self, service, bigips): - u"""Update Listener from a single BIG-IP® system. + u"""Update Listener from a single BIG-IP system. Updates virtual servers that represents a Listener object. 
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/loadbalancer_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/loadbalancer_service.py index 6b8dc1342..27c820550 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/loadbalancer_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/loadbalancer_service.py @@ -27,16 +27,16 @@ class LoadBalancerServiceBuilder(object): - """Create loadbalancer related objects on BIG-IP®s + """Create loadbalancer related objects on BIG-IPs Handles requests to create and delete LBaaS v2 tenant partition - folders on one or more BIG-IP® systems. + folders on one or more BIG-IP systems. """ def __init__(self): self.folder_helper = BigIPResourceHelper(ResourceType.folder) def create_partition(self, service, bigips): - """Create tenant partition on set of BIG-IP®s. + """Create tenant partition on set of BIG-IPs. Creates a partition if it is not named "Common". @@ -50,7 +50,7 @@ def create_partition(self, service, bigips): self.folder_helper.create(bigip, folder) def delete_partition(self, service, bigips): - """Deletes partition from a set of BIG-IP® systems. + """Deletes partition from a set of BIG-IP systems. Deletes partition if it is not named "Common". diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index 8c2f7d1ee..dfdbcc952 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -28,10 +28,10 @@ class PoolServiceBuilder(object): - """Create LBaaS v2 pools and related objects on BIG-IP®s. + """Create LBaaS v2 pools and related objects on BIG-IPs. Handles requests to create, update, delete LBaaS v2 pools, - health monitors, and members on one or more BIG-IP® systems. + health monitors, and members on one or more BIG-IP systems. 
""" def __init__(self, service_adapter): @@ -44,9 +44,9 @@ def __init__(self, service_adapter): self.node_helper = BigIPResourceHelper(ResourceType.node) def create_pool(self, service, bigips): - """Create a pool on set of BIG-IP®s. + """Create a pool on set of BIG-IPs. - Creates a BIG-IP® pool to represent an LBaaS pool object. + Creates a BIG-IP pool to represent an LBaaS pool object. :param service: Dictionary which contains a both a pool and load balancer definition. @@ -57,9 +57,9 @@ def create_pool(self, service, bigips): self.pool_helper.create(bigip, pool) def delete_pool(self, service, bigips): - """Delete a pool on set of BIG-IP®s. + """Delete a pool on set of BIG-IPs. - Deletes a BIG-IP® pool defined by LBaaS pool object. + Deletes a BIG-IP pool defined by LBaaS pool object. :param service: Dictionary which contains a both a pool and load balancer definition. @@ -73,7 +73,7 @@ def delete_pool(self, service, bigips): partition=pool["partition"]) def update_pool(self, service, bigips): - """Update BIG-IP® pool. + """Update BIG-IP pool. :param service: Dictionary which contains a both a pool and load balancer definition. @@ -126,7 +126,7 @@ def update_healthmonitor(self, service, bigips): # Note: can't use BigIPResourceHelper class because members # are created within pool objects. Following member methods - # use the F5® SDK directly. + # use the F5 SDK directly. 
def create_member(self, service, bigips): pool = self.service_adapter.get_pool(service) member = self.service_adapter.get_member(service) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py index 9cbca8d0e..8f42de3d9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py @@ -22,7 +22,7 @@ class ResourceType(Enum): - u"""Defines supported BIG-IP® resource types.""" + u"""Defines supported BIG-IP resource types.""" nat = 1 pool = 2 @@ -66,9 +66,9 @@ class ResourceType(Enum): class BigIPResourceHelper(object): - u"""Helper class for creating, updating and deleting BIG-IP® resources. + u"""Helper class for creating, updating and deleting BIG-IP resources. - Reduces some of the boilerplate that surrounds using the F5® SDK. + Reduces some of the boilerplate that surrounds using the F5 SDK. Example usage: bigip = BigIP("10.1.1.1", "admin", "admin") pool = {"name": "pool1", @@ -84,13 +84,13 @@ def __init__(self, resource_type): self.resource_type = resource_type def create(self, bigip, model): - u"""Create/update resource (e.g., pool) on a BIG-IP® system. + u"""Create/update resource (e.g., pool) on a BIG-IP system. First checks to see if resource has been created and creates it if not. :param bigip: BigIP instance to use for creating resource. - :param model: Dictionary of BIG-IP® attributes to add resource. Must + :param model: Dictionary of BIG-IP attributes to add resource. Must include name and partition. :returns: created or updated resource object. """ @@ -105,7 +105,7 @@ def exists(self, bigip, name=None, partition=None): return resource.exists(name=name, partition=partition) def delete(self, bigip, name=None, partition=None): - u"""Delete a resource on a BIG-IP® system. + u"""Delete a resource on a BIG-IP system. Checks if resource exists and deletes it. 
Returns without error if resource does not exist. @@ -120,10 +120,10 @@ def delete(self, bigip, name=None, partition=None): obj.delete() def load(self, bigip, name=None, partition=None): - u"""Retrieve a BIG-IP® resource from a BIG-IP®. + u"""Retrieve a BIG-IP resource from a BIG-IP. Populates a resource object with attributes for instance on a - BIG-IP® system. + BIG-IP system. :param bigip: BigIP instance to use for creating resource. :param name: Name of resource to load. @@ -134,13 +134,13 @@ def load(self, bigip, name=None, partition=None): return resource.load(name=name, partition=partition) def update(self, bigip, model): - u"""Update a resource (e.g., pool) on a BIG-IP® system. + u"""Update a resource (e.g., pool) on a BIG-IP system. - Modifies a resource on a BIG-IP® system using attributes + Modifies a resource on a BIG-IP system using attributes defined in the model object. :param bigip: BigIP instance to use for creating resource. - :param model: Dictionary of BIG-IP® attributes to update resource. + :param model: Dictionary of BIG-IP attributes to update resource. Must include name and partition in order to identify resource. """ partition = None @@ -152,9 +152,9 @@ def update(self, bigip, model): return resource def get_resources(self, bigip, partition=None): - u"""Retrieve a collection BIG-IP® of resources from a BIG-IP®. + u"""Retrieve a collection BIG-IP of resources from a BIG-IP. - Generates a list of resources objects on a BIG-IP® system. + Generates a list of resources objects on a BIG-IP system. :param bigip: BigIP instance to use for creating resource. :param name: Name of resource to load. 
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py b/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py index 9cdbb0300..4b453f889 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/selfips.py @@ -144,7 +144,7 @@ def assure_bigip_selfip(self, bigip, service, subnetinfo): def _get_bigip_selfip_address(self, bigip, subnet): u"""Ensure a selfip address is allocated on Neutron network.""" - # Get ip address for selfip to use on BIG-IP®. + # Get ip address for selfip to use on BIG-IP. selfip_address = "" selfip_name = "local-" + bigip.device_name + "-" + subnet['id'] ports = self.driver.plugin_rpc.get_port_by_name(port_name=selfip_name) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index fd7ea051e..e14661304 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -28,9 +28,9 @@ class UnsupportedProtocolException(Exception): class ServiceModelAdapter(object): - """Class to translate LBaaS service objects to BIG-IP® model objects. + """Class to translate LBaaS service objects to BIG-IP model objects. - Creates BIG-IP® model objects (dictionary of resource attributes) given + Creates BIG-IP model objects (dictionary of resource attributes) given an LBaaS service objet. """ diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py b/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py index bb0b32409..aa41267b7 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/virtual_address.py @@ -24,9 +24,9 @@ class VirtualAddress(object): - u"""Class to translate LBaaS loadbalancer objects to BIG-IP® virtual address. + u"""Class to translate LBaaS loadbalancer objects to BIG-IP virtual address. 
- Creates BIG-IP® virtual address objects given an LBaaS service object. + Creates BIG-IP virtual address objects given an LBaaS service object. """ def __init__(self, adapter, loadbalancer): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/vlan_binding.py b/f5_openstack_agent/lbaasv2/drivers/bigip/vlan_binding.py index d13796150..562d96563 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/vlan_binding.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/vlan_binding.py @@ -44,8 +44,8 @@ def __init__(self, conf, driver): LOG.debug('interface_port_static_mappings not configured') def register_bigip_interfaces(self): - # Delayed binding BIG-IP® ports will be called - # after BIG-IP® endpoints are registered. + # Delayed binding BIG-IP ports will be called + # after BIG-IP endpoints are registered. if not self.__initialized__bigip_ports: for bigip in self.driver.get_all_bigips(): diff --git a/test/send_to_driver/test_listener.py b/test/send_to_driver/test_listener.py index 60fd642ee..79448a4e3 100644 --- a/test/send_to_driver/test_listener.py +++ b/test/send_to_driver/test_listener.py @@ -37,7 +37,7 @@ def test_create_listener(bigip): # create partition lb_service.prep_service(service, bigips) - # create BIG-IP® virtual servers + # create BIG-IP virtual servers listeners = service["listeners"] loadbalancer = service["loadbalancer"] From 7400a4f43d6b8f4a2ba5591e4b2d286b46a136ac Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 12 Oct 2017 17:01:57 +0200 Subject: [PATCH 038/109] Take care on TLS listener settings in esd apply mechanism --- dev_install | 6 ++ .../lbaasv2/drivers/bigip/listener_service.py | 96 ++++++++++++++++++- 2 files changed, 98 insertions(+), 4 deletions(-) create mode 100755 dev_install diff --git a/dev_install b/dev_install new file mode 100755 index 000000000..92c6c7829 --- /dev/null +++ b/dev_install @@ -0,0 +1,6 @@ +git init +python setup.py install + +python /var/lib/openstack/bin/f5-oslbaasv2-agent --config-file 
/etc/neutron/f5-oslbaasv2-agent.ini --config-file /etc/neutron/neutron.conf --log-file /var/log/neutron/f5-agent.log + +#python /var/lib/kolla/venv/bin/f5-oslbaasv2-purge --config-file /etc/neutron/f5-oslbaasv2-agent.ini --partition 234 \ No newline at end of file diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index dd4618373..29ac0e34d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -24,8 +24,6 @@ from requests import HTTPError from f5_openstack_agent.lbaasv2.drivers.bigip import utils -#import pydevd - LOG = logging.getLogger(__name__) @@ -41,7 +39,6 @@ class ListenerServiceBuilder(object): """ def __init__(self, lbaas_builder, service_adapter, cert_manager, parent_ssl_profile=None): - #pydevd.settrace('10.29.12.100', port=22100, stdoutToServer=True, stderrToServer=True) self.lbaas_builder = lbaas_builder self.cert_manager = cert_manager @@ -147,6 +144,7 @@ def add_ssl_profile(self, tls, bigip): container_ref = container["tls_container_id"] self.create_ssl_profile(container_ref, bigip, vip, False) + def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False): cert = self.cert_manager.get_certificate(container_ref) key = self.cert_manager.get_private_key(container_ref) @@ -323,6 +321,66 @@ def _create_app_cookie_persist_rule(self, cookiename): rule_text += "}\n\n" return rule_text + def _cc_create_app_cookie_persist_rule(self, cookiename): + """Create cookie persistence rule. + + :param cookiename: Name to substitute in rule. + """ + rule_text = """ + when RULE_INIT { + + # Cookie name prefix + set static::ck_pattern BIGipServer*, %s + + # Log debug to /var/log/ltm? 1=yes, 0=no) + set static::ck_debug 1 + + # Cookie encryption passphrase + # Change this to a custom string! + set static::ck_pass "abc123" + } + when HTTP_REQUEST { + + if {$static::ck_debug}{log local0. 
"Request cookie names: [HTTP::cookie names]"} + + # Check if the cookie names in the request match our string glob pattern + if {[set cookie_names [lsearch -all -inline [HTTP::cookie names] $static::ck_pattern]] ne ""}{ + + # We have at least one match so loop through the cookie(s) by name + if {$static::ck_debug}{log local0. "Matching cookie names: [HTTP::cookie names]"} + foreach cookie_name $cookie_names { + + # Decrypt the cookie value and check if the decryption failed (null return value) + if {[HTTP::cookie decrypt $cookie_name $static::ck_pass] eq ""}{ + + # Cookie wasn't encrypted, delete it + if {$static::ck_debug}{log local0. "Removing cookie as decryption failed for $cookie_name"} + HTTP::cookie remove $cookie_name + } + } + if {$static::ck_debug}{log local0. "Cookie header(s): [HTTP::header values Cookie]"} + } + } + when HTTP_RESPONSE { + + if {$static::ck_debug}{log local0. "Response cookie names: [HTTP::cookie names]"} + + # Check if the cookie names in the request match our string glob pattern + if {[set cookie_names [lsearch -all -inline [HTTP::cookie names] $static::ck_pattern]] ne ""}{ + + # We have at least one match so loop through the cookie(s) by name + if {$static::ck_debug}{log local0. "Matching cookie names: [HTTP::cookie names]"} + foreach cookie_name $cookie_names { + + # Encrypt the cookie value + HTTP::cookie encrypt $cookie_name $static::ck_pass + } + if {$static::ck_debug}{log local0. "Set-Cookie header(s): [HTTP::header values Set-Cookie]"} + } + } + """ % (cookiename) + return rule_text + def remove_session_persistence(self, service, bigips): """Resest persistence for virtual server instance. 
@@ -590,6 +648,7 @@ def _remove_irule(self, vs, irule_name, bigip, rule_partition='Common'): def apply_esds(self, service): + listener = service['listener'] l7policies = listener.get('l7_policies') @@ -613,7 +672,32 @@ def apply_esds(self, service): update_attrs = self.service_adapter.get_virtual(service) - + # get ssl certificates for listener + tls = self.service_adapter.get_tls(service) + # initialize client ssl profile with already existing certificates + if bool(tls): + if "default_tls_container_id" in tls: + container_ref = tls["default_tls_container_id"] + def_name = self.cert_manager.get_name(container_ref, + self.service_adapter.prefix) + cssl_profiles.append({'name': def_name, + 'partition': 'Common', + 'context': 'clientside'}) + + if "sni_containers" in tls and tls["sni_containers"]: + for container in tls["sni_containers"]: + if 'tls_container_id' in container: + sni_ref = container['tls_container_id'] + sni_name = self.cert_manager.get_name(sni_ref, + self.service_adapter.prefix) + cssl_profiles.append({'name': sni_name, + 'partition': 'Common', + 'context': 'clientside'}) + + # print "********************** start esdlog ******************************" + # print update_attrs + # print tls + # print "********************** end esdlog ******************************" for l7policy in l7policies: name = l7policy.get('name', None) @@ -715,6 +799,10 @@ def apply_esds(self, service): if listener['protocol'] != lb_const.PROTOCOL_TCP: profiles.append( default_profiles['http']) + if bool(cssl_profiles): + for cssl_profile in cssl_profiles: + profiles.append(cssl_profile) + if bool(oneconnect_profile): profiles.append(oneconnect_profile) else: From f15dd0dd9b84a4e08325842dad11a53b22c4993d Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 19 Oct 2017 09:10:36 +0200 Subject: [PATCH 039/109] Implementation of SSL client profile handling in case of listener update. 
- Create Profiles in F5 in case new profiles are attached either as default or sni profile - Attach new profiles to listeners - Delete unreferenced profiles --- dev_install | 2 +- .../lbaasv2/drivers/bigip/icontrol_driver.py | 2 +- .../lbaasv2/drivers/bigip/lbaas_builder.py | 17 ++-- .../lbaasv2/drivers/bigip/listener_service.py | 98 +++++++++++++++++-- 4 files changed, 101 insertions(+), 18 deletions(-) diff --git a/dev_install b/dev_install index 92c6c7829..c179ee0b9 100755 --- a/dev_install +++ b/dev_install @@ -1,4 +1,4 @@ -git init +#git init python setup.py install python /var/lib/openstack/bin/f5-oslbaasv2-agent --config-file /etc/neutron/f5-oslbaasv2-agent.ini --config-file /etc/neutron/neutron.conf --log-file /var/log/neutron/f5-agent.log diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 893baf3f7..ce379b2d2 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -1272,7 +1272,7 @@ def _common_service_handler(self, service, 'do_not_delete_subnets': []} - # pdb.set_trace() + #pdb.set_trace() self.lbaas_builder.assure_service(service, traffic_group, all_subnet_hints) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 2a5804bf2..767531d6c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -126,14 +126,17 @@ def _assure_listeners_created(self, service): loadbalancer = service["loadbalancer"] networks = service["networks"] bigips = self.driver.get_config_bigips() - + old_listener = service.get('old_listener') for listener in listeners: - svc = {"loadbalancer": loadbalancer, - "listener": listener, - "networks": networks} - - - + if (old_listener != None and old_listener.get('id') == listener.get('id')): + svc = 
{"loadbalancer": loadbalancer, + "listener": listener, + "old_listener": old_listener, + "networks": networks} + else: + svc = {"loadbalancer": loadbalancer, + "listener": listener, + "networks": networks} default_pool_id = listener.get('default_pool_id', '') if default_pool_id: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 29ac0e34d..2699f6b4e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +# import pdb from oslo_log import log as logging @@ -129,7 +130,7 @@ def delete_listener(self, service, bigips): # delete ssl profiles self.remove_ssl_profiles(tls, bigip) - def add_ssl_profile(self, tls, bigip): + def add_ssl_profile(self, tls, bigip, add_to_vip=True): # add profile to virtual server vip = {'name': tls['name'], 'partition': tls['partition']} @@ -137,15 +138,15 @@ def add_ssl_profile(self, tls, bigip): if "default_tls_container_id" in tls: container_ref = tls["default_tls_container_id"] self.create_ssl_profile( - container_ref, bigip, vip, True) + container_ref, bigip, vip, True, add_to_vip) if "sni_containers" in tls and tls["sni_containers"]: for container in tls["sni_containers"]: container_ref = container["tls_container_id"] - self.create_ssl_profile(container_ref, bigip, vip, False) + self.create_ssl_profile(container_ref, bigip, vip, False, add_to_vip) - def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False): + def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_to_vip=True): cert = self.cert_manager.get_certificate(container_ref) key = self.cert_manager.get_private_key(container_ref) name = self.cert_manager.get_name(container_ref, @@ -165,7 +166,8 @@ def create_ssl_profile(self, container_ref, 
bigip, vip, sni_default=False): del key # add ssl profile to virtual server - self._add_profile(vip, name, bigip, context='clientside') + if add_to_vip: + self._add_profile(vip, name, bigip, context='clientside') def update_listener(self, service, bigips): u"""Update Listener from a single BIG-IP system. @@ -177,12 +179,94 @@ def update_listener(self, service, bigips): :param bigips: Array of BigIP class instances to update. """ + u""" + ATTENTION: The hole impl. is a hack. + For ssl profile settings the order is very important: + 1. A new ssl profile is created but not applied to the listener + 2. The esd_apply configures the listener with the new profile (so the old one will be detached) + 3. The update will apply the changes to the listener + 4. The remove_ssl is than be able to remove unneeded ssl profiles because they got detached in 3. + """ + + # check for ssl client cert changes + old_default = None + old_sni_containers = None + new_default = None + new_sni_containers = None + vip = self.service_adapter.get_virtual(service) + + old_listener = service.get('old_listener') + if old_listener != None: + listener = service.get('listener') + if old_listener.get('default_tls_container_id') != listener.get('default_tls_container_id'): + old_default = old_listener.get('default_tls_container_id') + new_default = listener.get('default_tls_container_id') + + # determine sni delta with set substraction + old_snis = old_listener.get('sni_containers') + new_snis = listener.get('sni_containers') + old_ids = [] + new_ids = [] + for old in old_snis: + old_ids.append(old.get('tls_container_id')) + for new in new_snis: + new_ids.append(new.get('tls_container_id')) + new_sni_containers = self._make_sni_tls(vip, list(set(new_ids) - set(old_ids))) + old_sni_containers = self._make_sni_tls(vip, list(set(old_ids) - set(new_ids))) + + # create old and new tls listener configurations + # create new ssl-profiles on F5 BUT DO NOT APPLY them to listener + old_tls = None + if (new_default 
!= None or new_sni_containers['sni_containers']): + new_tls = self.service_adapter.get_tls(service) + new_tls = self._make_default_tls(vip, new_tls.get('default_tls_container_id')) + + if old_default != None: + old_tls = self._make_default_tls(vip, old_default) + + for bigip in bigips: + # create ssl profile but do not apply + if bool(new_tls): + try: + self.add_ssl_profile(new_tls, bigip, False) + except: + pass + if new_sni_containers['sni_containers']: + try: + self.add_ssl_profile(new_sni_containers, bigip, False) + except: + pass + + + # process esd's AND create new client ssl config for listener vip = self.apply_esds(service) + # apply changes to listener AND remove not needed ssl profiles on F5 network_id = service['loadbalancer']['network_id'] for bigip in bigips: self.service_adapter.get_vlan(vip, bigip, network_id) self.vs_helper.update(bigip, vip) + # delete ssl profiles + if bool(old_tls): + try: + self.remove_ssl_profiles(old_tls, bigip) + except: + pass + if old_sni_containers['sni_containers']: + try: + self.remove_ssl_profiles(old_sni_containers, bigip) + except: + pass + + + def _make_default_tls(self, vip, id): + return {'name': vip['name'], 'partition': vip['partition'], 'default_tls_container_id': id} + + def _make_sni_tls(self, vip, ids): + containers = {'name': vip['name'], 'partition': vip['partition'], 'sni_containers': []} + for id in ids: + containers['sni_containers'].append({'tls_container_id': id}) + return containers def update_listener_pool(self, service, name, bigips): """Update virtual server's default pool attribute. 
@@ -694,10 +778,6 @@ def apply_esds(self, service): 'partition': 'Common', 'context': 'clientside'}) - # print "********************** start esdlog ******************************" - # print update_attrs - # print tls - # print "********************** end esdlog ******************************" for l7policy in l7policies: name = l7policy.get('name', None) From f5e0dc0e83647ad16decf4281578ab2538689fb4 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 20 Oct 2017 15:27:12 +0200 Subject: [PATCH 040/109] Create and use SSL profile with right Default SNI Flag --- etc/neutron/services/f5/esd/esd.json | 23 ++++++----- .../lbaasv2/drivers/bigip/icontrol_driver.py | 2 + .../lbaasv2/drivers/bigip/listener_service.py | 41 +++++++++++++------ setup.py | 1 + 4 files changed, 45 insertions(+), 22 deletions(-) diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json index 9c728f190..e79f87eab 100644 --- a/etc/neutron/services/f5/esd/esd.json +++ b/etc/neutron/services/f5/esd/esd.json @@ -1,23 +1,26 @@ { "proxy_protocol_2edF_v1_0": { + "lbaas_fastl4": "", + "lbaas_ctcp": "tcp", "lbaas_irule": ["proxy_protocol_2edF_v1_0"] }, "standard_tcp_a3de_v1_0": { + "lbaas_fastl4": "", "lbaas_ctcp": "tcp" }, "x_forward_5b6e_v1_0": { - "lbaas_http": "http_xforward" + "lbaas_irule": ["cc_x_forward_5b6e_v1_0"] }, - "dev_x": { - "lbaas_http": "http_xforward" + "one_connect_dd5c_v1_0": { + "lbaas_one_connect": "oneconnect" }, - "dev_one": { - "lbaas_one_connect": "cc_oneconnect" + "no_one_connect_3caB_v1_0": { + "lbaas_one_connect": "" }, - "dev_compress": { - "lbaas_http_compression": "cc_httpcompression" + "http_compression_e4a2_v1_0": { + "lbaas_http_compression": "cc_http_compression_e4a2_v1_0" }, - "dev_nofastl4": { - "lbaas_fastl4": "" + "cookie_encryption_b82a_v1_0": { + "lbaas_irule": ["cc_cookie_encryption_b82a_v1_0"] } -} +} \ No newline at end of file diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py 
b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index ce379b2d2..eb4a9cce1 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -15,6 +15,8 @@ # limitations under the License. # +#import pdb + import datetime import hashlib import json diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 2699f6b4e..cd721e615 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -13,7 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# import pdb + +#import pdb from oslo_log import log as logging @@ -29,8 +30,6 @@ LOG = logging.getLogger(__name__) - - class ListenerServiceBuilder(object): u"""Create LBaaS v2 Listener on BIG-IPs. @@ -92,6 +91,7 @@ def create_listener(self, service, bigips): err.message) raise if tls: + #pdb.set_trace() self.add_ssl_profile(tls, bigip) def get_listener(self, service, bigip): @@ -159,7 +159,16 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t name, cert, key, - sni_default=sni_default, + sni_default=True, + parent_profile=self.parent_ssl_profile) + + # upload cert/key and create SSL profile + ssl_profile.SSLProfileHelper.create_client_ssl_profile( + bigip, + name + '_NotDefault', + cert, + key, + sni_default=False, parent_profile=self.parent_ssl_profile) finally: del cert @@ -167,7 +176,10 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t # add ssl profile to virtual server if add_to_vip: - self._add_profile(vip, name, bigip, context='clientside') + f5name = name + if not sni_default: + f5name += '_NotDefault' + self._add_profile(vip, f5name, bigip, context='clientside') def update_listener(self, service, bigips): u"""Update Listener from a 
single BIG-IP system. @@ -194,8 +206,10 @@ def update_listener(self, service, bigips): new_default = None new_sni_containers = None vip = self.service_adapter.get_virtual(service) - old_listener = service.get('old_listener') + + #pdb.set_trace() + if old_listener != None: listener = service.get('listener') if old_listener.get('default_tls_container_id') != listener.get('default_tls_container_id'): @@ -217,7 +231,7 @@ def update_listener(self, service, bigips): # create old and new tls listener configurations # create new ssl-profiles on F5 BUT DO NOT APPLY them to listener old_tls = None - if (new_default != None or new_sni_containers['sni_containers']): + if (new_default != None or (new_sni_containers != None and new_sni_containers['sni_containers'])): new_tls = self.service_adapter.get_tls(service) new_tls = self._make_default_tls(vip, new_tls.get('default_tls_container_id')) @@ -226,12 +240,12 @@ def update_listener(self, service, bigips): for bigip in bigips: # create ssl profile but do not apply - if bool(new_tls): + if new_tls != None: try: self.add_ssl_profile(new_tls, bigip, False) except: pass - if new_sni_containers['sni_containers']: + if new_sni_containers != None and new_sni_containers['sni_containers']: try: self.add_ssl_profile(new_sni_containers, bigip, False) except: @@ -247,12 +261,12 @@ def update_listener(self, service, bigips): self.service_adapter.get_vlan(vip, bigip, network_id) self.vs_helper.update(bigip, vip) # delete ssl profiles - if bool(old_tls): + if old_tls != None: try: self.remove_ssl_profiles(old_tls, bigip) except: pass - if old_sni_containers['sni_containers']: + if old_sni_containers != None and old_sni_containers['sni_containers']: try: self.remove_ssl_profiles(old_sni_containers, bigip) except: @@ -513,6 +527,8 @@ def remove_ssl_profiles(self, tls, bigip): i = container_ref.rindex("/") + 1 name = self.service_adapter.prefix + container_ref[i:] self._remove_ssl_profile(name, bigip) + self._remove_ssl_profile(name + 
'_NotDefault', bigip) + if "sni_containers" in tls and tls["sni_containers"]: for container in tls["sni_containers"]: @@ -520,6 +536,7 @@ def remove_ssl_profiles(self, tls, bigip): i = container_ref.rindex("/") + 1 name = self.service_adapter.prefix + container_ref[i:] self._remove_ssl_profile(name, bigip) + self._remove_ssl_profile(name + '_NotDefault', bigip) def _remove_ssl_profile(self, name, bigip): """Delete profile. @@ -773,7 +790,7 @@ def apply_esds(self, service): if 'tls_container_id' in container: sni_ref = container['tls_container_id'] sni_name = self.cert_manager.get_name(sni_ref, - self.service_adapter.prefix) + self.service_adapter.prefix) + '_NotDefault' cssl_profiles.append({'name': sni_name, 'partition': 'Common', 'context': 'clientside'}) diff --git a/setup.py b/setup.py index 71d0955da..79d332a92 100644 --- a/setup.py +++ b/setup.py @@ -24,6 +24,7 @@ author_email="f5_openstack_agent@f5.com", data_files=[('/etc/neutron/services/f5', ['etc/neutron/services/f5/f5-openstack-agent.ini']), ('/etc/neutron/services/f5/esd', ['etc/neutron/services/f5/esd/demo.json']), + ('/etc/neutron/services/f5/esd', ['etc/neutron/services/f5/esd/esd.json']), ('/etc/init.d', ['etc/init.d/f5-oslbaasv2-agent']), ('/usr/lib/systemd/system', ['lib/systemd/system/f5-openstack-agent.service']), ('/usr/bin/f5', ['bin/debug_bundler.py'])], From 4985d8846edcc6523e0f02773cb0a588c923444b Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 25 Oct 2017 12:13:42 +0200 Subject: [PATCH 041/109] Don't create L7Policies for ESD's in F5 when an L7Policy+Rule is added to a listener which already had ESD's applied --- .../lbaasv2/drivers/bigip/l7policy_service.py | 26 ++++++++++++------- .../lbaasv2/drivers/bigip/lbaas_builder.py | 4 ++- .../lbaasv2/drivers/bigip/listener_service.py | 15 +++++------ 3 files changed, 25 insertions(+), 20 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py 
index 3fe394637..1c3bbe5ee 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py @@ -29,12 +29,15 @@ from f5_openstack_agent.lbaasv2.drivers.bigip.vs_builder import \ VirtualServerBuilder +#import pdb + LOG = logging.getLogger(__name__) class L7PolicyService(object): """Handles requests to create, update, delete L7 policies on BIG-IPs.""" - def __init__(self, conf): + def __init__(self, lbaas_builder, conf): + self.lbaas_builder = lbaas_builder self.conf = conf def create_l7policy(self, l7policy, service_object, bigips): @@ -55,7 +58,7 @@ def create_l7policy(self, l7policy, service_object, bigips): # create L7 policy try: l7policy_adapter = L7PolicyServiceAdapter(self.conf) - policies = self.build_policy(l7policy, lbaas_service) + policies = self.build_l7policy(l7policy, lbaas_service) if policies['l7policies']: f5_l7policy = l7policy_adapter.translate(policies) stack.append(L7PolicyBuilder(event, f5_l7policy)) @@ -109,7 +112,7 @@ def update_l7policy(self, l7policy, service_object, bigips): try: l7policy_adapter = L7PolicyServiceAdapter(self.conf) - policies = self.build_policy(l7policy, lbaas_service) + policies = self.build_l7policy(l7policy, lbaas_service) if policies['l7policies']: f5_l7policy = l7policy_adapter.translate(policies) stack.append(L7PolicyBuilder(event, f5_l7policy)) @@ -157,8 +160,7 @@ def update_l7rule(self, l7rule, service_object, bigips): # re-create policy with updated rule self.update_l7policy(l7policy, service_object, bigips) - @staticmethod - def build_policy(l7policy, lbaas_service): + def build_l7policy(self, l7policy, lbaas_service): # build data structure for service adapter input LOG.debug("L7PolicyService: service") import pprint @@ -174,11 +176,15 @@ def build_policy(l7policy, lbaas_service): for policy_id in listener['l7_policies']: policy = lbaas_service.get_l7policy(policy_id['id']) if policy: - os_policies['l7policies'].append(policy) - for 
rule in policy['rules']: - l7rule = lbaas_service.get_l7rule(rule['id']) - if l7rule: - os_policies['l7rules'].append(l7rule) + is_esd = False + if policy['name'] and self.lbaas_builder.is_esd(policy['name']): + is_esd = True + if not is_esd: + os_policies['l7policies'].append(policy) + for rule in policy['rules']: + l7rule = lbaas_service.get_l7rule(rule['id']) + if l7rule: + os_policies['l7rules'].append(l7rule) LOG.debug(pprint.pformat(os_policies, indent=4)) return os_policies diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 767531d6c..74b22abd9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -31,6 +31,8 @@ from requests import HTTPError +#import pdb + LOG = logging.getLogger(__name__) @@ -50,7 +52,7 @@ def __init__(self, conf, driver, l2_service=None): self.pool_builder = pool_service.PoolServiceBuilder( self.service_adapter ) - self.l7service = l7policy_service.L7PolicyService(conf) + self.l7service = l7policy_service.L7PolicyService(self, conf) self.esd = None @utils.instrument_execution_time diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index cd721e615..7c8c66d42 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -14,7 +14,7 @@ # limitations under the License. 
# -#import pdb +import pdb from oslo_log import log as logging @@ -751,6 +751,7 @@ def apply_esds(self, service): listener = service['listener'] + default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) l7policies = listener.get('l7_policies') @@ -763,7 +764,7 @@ def apply_esds(self, service): cssl_profiles = [] sssl_profiles = [] http_profile = {} - oneconnect_profile = {} + oneconnect_profile = default_profiles.get('oneconnect') compression_profile = {} persistence_profiles = [] @@ -796,6 +797,7 @@ def apply_esds(self, service): 'context': 'clientside'}) + pdb.set_trace() for l7policy in l7policies: name = l7policy.get('name', None) if name and self.lbaas_builder.is_esd(name) and l7policy.get('provisioning_status')!= plugin_const.PENDING_DELETE: @@ -836,8 +838,8 @@ def apply_esds(self, service): 'partition': 'Common', 'context': 'all'} - # one connect profiles - if 'lbaas_one_connect' in esd and not bool(oneconnect_profile) : + # one connect profiles if not already set + if 'lbaas_one_connect' in esd: if esd['lbaas_one_connect'] == '': oneconnect_profile = {} else: @@ -887,8 +889,6 @@ def apply_esds(self, service): profiles.append(fastl4) else: profiles = stcp_profiles + ctcp_profiles - else: - default_profiles = utils.get_default_profiles(self.service_adapter.conf, listener['protocol']) if bool(http_profile): profiles.append(http_profile) @@ -902,9 +902,6 @@ def apply_esds(self, service): if bool(oneconnect_profile): profiles.append(oneconnect_profile) - else: - if listener['protocol'] != lb_const.PROTOCOL_TCP: - profiles.append(default_profiles['oneconnect']) if bool(compression_profile): profiles.append(compression_profile) From 904864571087b7d961ec3ebe97c1aaeebc589c57 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 25 Oct 2017 13:10:53 +0200 Subject: [PATCH 042/109] Reduce Log output in l7policy build and do not pretty print result --- .../lbaasv2/drivers/bigip/l7policy_service.py | 11 ++++++----- 
.../lbaasv2/drivers/bigip/lbaas_builder.py | 1 - 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py index 1c3bbe5ee..f600b5b8d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l7policy_service.py @@ -163,10 +163,10 @@ def update_l7rule(self, l7rule, service_object, bigips): def build_l7policy(self, l7policy, lbaas_service): # build data structure for service adapter input LOG.debug("L7PolicyService: service") - import pprint - LOG.debug(pprint.pformat(lbaas_service.service_object, indent=4)) - LOG.debug("L7PolicyService: l7policy") - LOG.debug(pprint.pformat(l7policy, indent=4)) + #import pprint + #LOG.debug(pprint.pformat(lbaas_service.service_object, indent=4)) + #LOG.debug("L7PolicyService: l7policy") + #LOG.debug(pprint.pformat(l7policy, indent=4)) os_policies = {'l7rules': [], 'l7policies': []} @@ -186,7 +186,8 @@ def build_l7policy(self, l7policy, lbaas_service): if l7rule: os_policies['l7rules'].append(l7rule) - LOG.debug(pprint.pformat(os_policies, indent=4)) + #LOG.debug(pprint.pformat(os_policies, indent=4)) + LOG.debug(os_policies) return os_policies @staticmethod diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 74b22abd9..4f7870b87 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -528,7 +528,6 @@ def _assure_l7policies_created(self, service): name = l7policy.get('name', None) if name and self.is_esd(name): continue - else: self.l7service.create_l7policy( l7policy, service, bigips) From 8f025de64bc51261c03f591f577053d67271cba2 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 25 Oct 2017 13:14:01 +0200 Subject: [PATCH 043/109] Disable debugging --- 
f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 7c8c66d42..488ccd1fd 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -14,7 +14,7 @@ # limitations under the License. # -import pdb +#import pdb from oslo_log import log as logging @@ -797,7 +797,7 @@ def apply_esds(self, service): 'context': 'clientside'}) - pdb.set_trace() + #pdb.set_trace() for l7policy in l7policies: name = l7policy.get('name', None) if name and self.lbaas_builder.is_esd(name) and l7policy.get('provisioning_status')!= plugin_const.PENDING_DELETE: From 9b40e11ae13c05da984010ce76c11ad4a241953e Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 3 Nov 2017 10:35:30 +0100 Subject: [PATCH 044/109] Set Default TLS Cert also as Trusted Cert for client authentication. This can be used for SSO enablement in combination with specific iRule. Should have no impact in case client Auth. is not switched on on the listener. 
--- etc/neutron/services/f5/esd/esd.json | 3 +++ f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json index e79f87eab..8607ff749 100644 --- a/etc/neutron/services/f5/esd/esd.json +++ b/etc/neutron/services/f5/esd/esd.json @@ -22,5 +22,8 @@ }, "cookie_encryption_b82a_v1_0": { "lbaas_irule": ["cc_cookie_encryption_b82a_v1_0"] + }, + "sso_22b0_v1_0": { + "lbaas_irule": ["cc_sso_22b0_v1_0"] } } \ No newline at end of file diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py index 18018b5e3..4b9210dfc 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py @@ -73,7 +73,9 @@ def create_client_ssl_profile( partition='Common', certKeyChain=chain, sniDefault=sni_default, - defaultsFrom=parent_profile) + defaultsFrom=parent_profile, + clientCertCa=certfilename, + caFile=certfilename) except Exception as err: LOG.error("Error creating SSL profile: %s" % err.message) raise SSLProfileError(err.message) From 9b876713545e60e2685e252c99e79ae33d0a1695 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Mon, 6 Nov 2017 09:48:19 +0100 Subject: [PATCH 045/109] Revert Trusted Ca for Client Authentication. 
Doesn't work due to F5 error: Selected client SSL profiles do not match security policies for Virtual Server --- f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py index 4b9210dfc..18018b5e3 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py @@ -73,9 +73,7 @@ def create_client_ssl_profile( partition='Common', certKeyChain=chain, sniDefault=sni_default, - defaultsFrom=parent_profile, - clientCertCa=certfilename, - caFile=certfilename) + defaultsFrom=parent_profile) except Exception as err: LOG.error("Error creating SSL profile: %s" % err.message) raise SSLProfileError(err.message) From 72206b15333c09cb52a6dcc94794ec5ebc449d86 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 7 Nov 2017 12:39:00 +0100 Subject: [PATCH 046/109] Fix for TCP Listeners: Don't apply oneconnect when FastL4 --- .../lbaasv2/drivers/bigip/listener_service.py | 108 +++++++++--------- 1 file changed, 57 insertions(+), 51 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 488ccd1fd..832cb4865 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -206,50 +206,52 @@ def update_listener(self, service, bigips): new_default = None new_sni_containers = None vip = self.service_adapter.get_virtual(service) - old_listener = service.get('old_listener') #pdb.set_trace() - if old_listener != None: - listener = service.get('listener') - if old_listener.get('default_tls_container_id') != listener.get('default_tls_container_id'): - old_default = old_listener.get('default_tls_container_id') - new_default = listener.get('default_tls_container_id') - - 
# determine sni delta with set substraction - old_snis = old_listener.get('sni_containers') - new_snis = listener.get('sni_containers') - old_ids = [] - new_ids = [] - for old in old_snis: - old_ids.append(old.get('tls_container_id')) - for new in new_snis: - new_ids.append(new.get('tls_container_id')) - new_sni_containers = self._make_sni_tls(vip, list(set(new_ids) - set(old_ids))) - old_sni_containers = self._make_sni_tls(vip, list(set(old_ids) - set(new_ids))) - - # create old and new tls listener configurations - # create new ssl-profiles on F5 BUT DO NOT APPLY them to listener - old_tls = None - if (new_default != None or (new_sni_containers != None and new_sni_containers['sni_containers'])): - new_tls = self.service_adapter.get_tls(service) - new_tls = self._make_default_tls(vip, new_tls.get('default_tls_container_id')) - - if old_default != None: - old_tls = self._make_default_tls(vip, old_default) - - for bigip in bigips: - # create ssl profile but do not apply - if new_tls != None: - try: - self.add_ssl_profile(new_tls, bigip, False) - except: - pass - if new_sni_containers != None and new_sni_containers['sni_containers']: - try: - self.add_ssl_profile(new_sni_containers, bigip, False) - except: - pass + listener = service.get('listener') + if listener.get('protocol') == 'TERMINATED_HTTPS': + old_listener = service.get('old_listener') + if old_listener != None: + listener = service.get('listener') + if old_listener.get('default_tls_container_id') != listener.get('default_tls_container_id'): + old_default = old_listener.get('default_tls_container_id') + new_default = listener.get('default_tls_container_id') + + # determine sni delta with set substraction + old_snis = old_listener.get('sni_containers') + new_snis = listener.get('sni_containers') + old_ids = [] + new_ids = [] + for old in old_snis: + old_ids.append(old.get('tls_container_id')) + for new in new_snis: + new_ids.append(new.get('tls_container_id')) + new_sni_containers = self._make_sni_tls(vip, 
list(set(new_ids) - set(old_ids))) + old_sni_containers = self._make_sni_tls(vip, list(set(old_ids) - set(new_ids))) + + # create old and new tls listener configurations + # create new ssl-profiles on F5 BUT DO NOT APPLY them to listener + old_tls = None + if (new_default != None or (new_sni_containers != None and new_sni_containers['sni_containers'])): + new_tls = self.service_adapter.get_tls(service) + new_tls = self._make_default_tls(vip, new_tls.get('default_tls_container_id')) + + if old_default != None: + old_tls = self._make_default_tls(vip, old_default) + + for bigip in bigips: + # create ssl profile but do not apply + if new_tls != None: + try: + self.add_ssl_profile(new_tls, bigip, False) + except: + pass + if new_sni_containers != None and new_sni_containers['sni_containers']: + try: + self.add_ssl_profile(new_sni_containers, bigip, False) + except: + pass # process esd's AND create new client ssl config for listener @@ -261,16 +263,17 @@ def update_listener(self, service, bigips): self.service_adapter.get_vlan(vip, bigip, network_id) self.vs_helper.update(bigip, vip) # delete ssl profiles - if old_tls != None: - try: - self.remove_ssl_profiles(old_tls, bigip) - except: - pass - if old_sni_containers != None and old_sni_containers['sni_containers']: - try: - self.remove_ssl_profiles(old_sni_containers, bigip) - except: - pass + if listener.get('protocol') == 'TERMINATED_HTTPS': + if old_tls != None: + try: + self.remove_ssl_profiles(old_tls, bigip) + except: + pass + if old_sni_containers != None and old_sni_containers['sni_containers']: + try: + self.remove_ssl_profiles(old_sni_containers, bigip) + except: + pass def _make_default_tls(self, vip, id): @@ -887,6 +890,7 @@ def apply_esds(self, service): if listener['protocol'] == lb_const.PROTOCOL_TCP: if bool(fastl4): profiles.append(fastl4) + oneconnect_profile = None else: profiles = stcp_profiles + ctcp_profiles @@ -913,4 +917,6 @@ def apply_esds(self, service): update_attrs['policies'] = 
update_attrs.get('policies',[])+policies + LOG.info("APPLY_ESD: Listener after ESDs got applied: %s", update_attrs) + return update_attrs From 4b1bbb157eb7557b648330af7304494a8cb357c1 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 14 Nov 2017 14:47:24 +0100 Subject: [PATCH 047/109] Use intermediate certificates as chain and for caClientTrust. Do not use OneConnect in case of Proxy Protocol and L7 protocol ESD --- etc/neutron/services/f5/esd/esd.json | 6 +- .../lbaasv2/drivers/bigip/barbican_cert.py | 22 ++++++++ .../lbaasv2/drivers/bigip/listener_service.py | 19 +++++-- .../lbaasv2/drivers/bigip/ssl_profile.py | 56 +++++++++++++++++-- 4 files changed, 93 insertions(+), 10 deletions(-) diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json index 8607ff749..9f4b4f9a4 100644 --- a/etc/neutron/services/f5/esd/esd.json +++ b/etc/neutron/services/f5/esd/esd.json @@ -2,11 +2,13 @@ "proxy_protocol_2edF_v1_0": { "lbaas_fastl4": "", "lbaas_ctcp": "tcp", - "lbaas_irule": ["proxy_protocol_2edF_v1_0"] + "lbaas_irule": ["proxy_protocol_2edF_v1_0"], + "lbaas_one_connect": "" }, "standard_tcp_a3de_v1_0": { "lbaas_fastl4": "", - "lbaas_ctcp": "tcp" + "lbaas_ctcp": "tcp", + "lbaas_one_connect": "" }, "x_forward_5b6e_v1_0": { "lbaas_irule": ["cc_x_forward_5b6e_v1_0"] diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py b/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py index b6ddb9242..343aba524 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py @@ -165,3 +165,25 @@ def get_name(self, container_ref, prefix): i = container_ref.rindex("/") + 1 return prefix + container_ref[i:] + + def get_container(self, container_ref): + """Retrieves container object from certificate manager. + + :param string container_ref: Reference to container stored in a + certificate manager. 
+ :returns string: Container Object + """ + return self.barbican.containers.get(container_ref) + + def get_intermediates(self, container_ref): + """Retrieves intermediates from barbican certificate. + + :param string container_ref: Reference to container stored in a + certificate manager. + :returns string: Intermediate payload data. + """ + container = self.barbican.containers.get(container_ref) + if (container.intermediates and container.intermediates.payload): + return container.intermediates.payload + else: + return None \ No newline at end of file diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 832cb4865..1384a314e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -91,7 +91,6 @@ def create_listener(self, service, bigips): err.message) raise if tls: - #pdb.set_trace() self.add_ssl_profile(tls, bigip) def get_listener(self, service, bigip): @@ -149,9 +148,13 @@ def add_ssl_profile(self, tls, bigip, add_to_vip=True): def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_to_vip=True): cert = self.cert_manager.get_certificate(container_ref) key = self.cert_manager.get_private_key(container_ref) + intermediate = self.cert_manager.get_intermediates(container_ref) name = self.cert_manager.get_name(container_ref, self.service_adapter.prefix) + container = self.cert_manager.get_container(container_ref) + caClientTrust = bool(container.name and container.name.startswith('CATrust')) + try: # upload cert/key and create SSL profile ssl_profile.SSLProfileHelper.create_client_ssl_profile( @@ -159,20 +162,28 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t name, cert, key, + intermediate, sni_default=True, - parent_profile=self.parent_ssl_profile) + parent_profile=self.parent_ssl_profile, + caClientTrust=caClientTrust + ) # upload 
cert/key and create SSL profile ssl_profile.SSLProfileHelper.create_client_ssl_profile( bigip, - name + '_NotDefault', + name, cert, key, + intermediate, sni_default=False, - parent_profile=self.parent_ssl_profile) + parent_profile=self.parent_ssl_profile, + caClientTrust=caClientTrust + ) + finally: del cert del key + del intermediate # add ssl profile to virtual server if add_to_vip: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py index 18018b5e3..052a8d17c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py @@ -28,14 +28,19 @@ class SSLProfileHelper(object): @staticmethod def create_client_ssl_profile( - bigip, name, cert, key, sni_default=False, parent_profile=None): + bigip, name, cert, key, intermediate=None, sni_default=False, parent_profile=None, caClientTrust=False): uploader = bigip.shared.file_transfer.uploads cert_registrar = bigip.tm.sys.crypto.certs + intermediate_registrar = bigip.tm.sys.crypto.certs key_registrar = bigip.tm.sys.crypto.keys ssl_client_profile = bigip.tm.ltm.profile.client_ssls.client_ssl + profilename = name + if not sni_default: + profilename = name + '_NotDefault' + # No need to create if it exists - if ssl_client_profile.exists(name=name, partition='Common'): + if ssl_client_profile.exists(name=profilename, partition='Common'): return # Check that parent profile exists; use default if not. @@ -45,12 +50,17 @@ def create_client_ssl_profile( certfilename = name + '.crt' keyfilename = name + '.key' + # we need both names because uploader fiddles around with names + intermediatefilename = name + '.chain' + intermediatecrtfilename = intermediatefilename + '.crt' try: # In-memory upload -- data not written to local file system but # is saved as a file on the BIG-IP. 
uploader.upload_bytes(cert, certfilename) uploader.upload_bytes(key, keyfilename) + if intermediate: + uploader.upload_bytes(intermediate, intermediatefilename) # import certificate param_set = {} @@ -65,15 +75,53 @@ def create_client_ssl_profile( '/var/config/rest/downloads/', keyfilename) key_registrar.exec_cmd('install', **param_set) + if intermediate: + # import intermediates + param_set = {} + param_set['name'] = intermediatefilename + param_set['from-local-file'] = os.path.join( + '/var/config/rest/downloads/', intermediatefilename) + intermediate_registrar.exec_cmd('install', **param_set) + # create ssl-client profile from cert/key pair - chain = [{'name': name, + if intermediate: + chain = [{'name': name, + 'cert': '/Common/' + certfilename, + 'key': '/Common/' + keyfilename, + 'chain': '/Common/' + intermediatecrtfilename}] + # create ssl-client profile from cert/key pair + else: + chain = [{'name': name, 'cert': '/Common/' + certfilename, 'key': '/Common/' + keyfilename}] - ssl_client_profile.create(name=name, + + + if caClientTrust and intermediate: + ssl_client_profile.create(name=profilename, + partition='Common', + certKeyChain=chain, + sniDefault=sni_default, + defaultsFrom=parent_profile, + clientCertCa=intermediatecrtfilename, + caFile=intermediatecrtfilename) + LOG.info("Creating SSL profile WITH caClientTrust and WITH intermediate %s", chain) + elif (not caClientTrust) and intermediate: + ssl_client_profile.create(name=profilename, partition='Common', certKeyChain=chain, sniDefault=sni_default, defaultsFrom=parent_profile) + LOG.info("Creating SSL profile WITHOUT caClientTrust and WITH intermediate %s", chain) + elif (not caClientTrust) and (not intermediate): + ssl_client_profile.create(name=profilename, + partition='Common', + certKeyChain=chain, + sniDefault=sni_default, + defaultsFrom=parent_profile) + LOG.info("Creating SSL profile WITHOUT caClientTrust and WITHOUT intermediate %s", chain) + else: + LOG.error("ERROR: Cannot create a SSL 
profile WITH caClientTrust and WITHOUT intermediate") + raise SSLProfileError("ERROR: Cannot create a SSL profile WITH caClientTrust and WITHOUT intermediate") except Exception as err: LOG.error("Error creating SSL profile: %s" % err.message) raise SSLProfileError(err.message) From 1be78b542f0b0df2363ceaadf2b0ac3318b3aea0 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Mon, 20 Nov 2017 15:08:54 +0100 Subject: [PATCH 048/109] Ignore exceptions when SSL Profiles are created or deleted or attached to listener to make sure that all F5 instances will have at least the same configuration state afterwards even when it's error state. --- .../lbaasv2/drivers/bigip/listener_service.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 1384a314e..838cb4678 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -91,7 +91,12 @@ def create_listener(self, service, bigips): err.message) raise if tls: - self.add_ssl_profile(tls, bigip) + # Don't stop processing in case of errors. Otherwise the other F5's won't get the same vs + try: + self.add_ssl_profile(tls, bigip) + except: + pass + def get_listener(self, service, bigip): u"""Retrieve BIG-IP virtual from a single BIG-IP system. @@ -127,7 +132,11 @@ def delete_listener(self, service, bigips): partition=vip["partition"]) # delete ssl profiles - self.remove_ssl_profiles(tls, bigip) + # Don't stop processing in case of errors. 
Otherwise the other F5's might have a different configuration + try: + self.remove_ssl_profiles(tls, bigip) + except: + pass def add_ssl_profile(self, tls, bigip, add_to_vip=True): # add profile to virtual server From c92cf3feac4031e4f4e1ce167e870386ba0bd4b7 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 21 Nov 2017 15:53:06 +0100 Subject: [PATCH 049/109] Fix handling when Listener Certificate is also used as SNI certificate. --- .../lbaasv2/drivers/bigip/listener_service.py | 42 ++++++++++++++----- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 838cb4678..42748203f 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -69,6 +69,7 @@ def create_listener(self, service, bigips): service['listener']['operating_status'] = lb_const.ONLINE network_id = service['loadbalancer']['network_id'] + error = None for bigip in bigips: self.service_adapter.get_vlan(vip, bigip, network_id) try: @@ -85,7 +86,6 @@ def create_listener(self, service, bigips): LOG.exception(e) LOG.warn('Exception %s',e) raise e - else: LOG.exception("Virtual server creation error: %s" % err.message) @@ -94,8 +94,13 @@ def create_listener(self, service, bigips): # Don't stop processing in case of errors. 
Otherwise the other F5's won't get the same vs try: self.add_ssl_profile(tls, bigip) - except: - pass + except Exception as err: + LOG.error("Error adding SSL Profile to listener: {0}".format(err)) + error = err if error is None else error + + if error: + service['listener']['provisioning_status'] = 'ERROR' + raise error def get_listener(self, service, bigip): @@ -125,7 +130,7 @@ def delete_listener(self, service, bigips): if tls: tls['name'] = vip['name'] tls['partition'] = vip['partition'] - + error = None for bigip in bigips: self.vs_helper.delete(bigip, name=vip["name"], @@ -135,14 +140,18 @@ def delete_listener(self, service, bigips): # Don't stop processing in case of errors. Otherwise the other F5's might have a different configuration try: self.remove_ssl_profiles(tls, bigip) - except: - pass + except Exception as err: + LOG.error("Error adding SSL Profile to listener: {0}".format(err)) + error = err if error is None else error + + if error: + raise error def add_ssl_profile(self, tls, bigip, add_to_vip=True): # add profile to virtual server vip = {'name': tls['name'], 'partition': tls['partition']} - + error = None if "default_tls_container_id" in tls: container_ref = tls["default_tls_container_id"] self.create_ssl_profile( @@ -150,8 +159,14 @@ def add_ssl_profile(self, tls, bigip, add_to_vip=True): if "sni_containers" in tls and tls["sni_containers"]: for container in tls["sni_containers"]: - container_ref = container["tls_container_id"] - self.create_ssl_profile(container_ref, bigip, vip, False, add_to_vip) + try: + container_ref = container["tls_container_id"] + self.create_ssl_profile(container_ref, bigip, vip, False, add_to_vip) + except Exception as err: + LOG.error("Error creating SSL Profile for listener: {0}".format(err)) + error = err if error is None else error + if error: + raise error def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_to_vip=True): @@ -278,10 +293,15 @@ def update_listener(self, service, bigips): 
vip = self.apply_esds(service) # apply changes to listener AND remove not needed ssl profiles on F5 + error = None network_id = service['loadbalancer']['network_id'] for bigip in bigips: self.service_adapter.get_vlan(vip, bigip, network_id) - self.vs_helper.update(bigip, vip) + try: + self.vs_helper.update(bigip, vip) + except Exception as err: + LOG.error("Error changing listener: {0}".format(err)) + error = err if error is None else error # delete ssl profiles if listener.get('protocol') == 'TERMINATED_HTTPS': if old_tls != None: @@ -295,6 +315,8 @@ def update_listener(self, service, bigips): except: pass + if error: + raise error def _make_default_tls(self, vip, id): return {'name': vip['name'], 'partition': vip['partition'], 'default_tls_container_id': id} From 67032466a875145505d3fbe24fe21d03e586f98c Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 12 Dec 2017 16:31:19 +0100 Subject: [PATCH 050/109] Added esd for http/https redirect and sso required --- etc/neutron/services/f5/esd/esd.json | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json index 9f4b4f9a4..a095e4db8 100644 --- a/etc/neutron/services/f5/esd/esd.json +++ b/etc/neutron/services/f5/esd/esd.json @@ -27,5 +27,11 @@ }, "sso_22b0_v1_0": { "lbaas_irule": ["cc_sso_22b0_v1_0"] + }, + "sso_required_f544_v1_0": { + "lbaas_irule": ["cc_sso_required_f544_v1_0"] + }, + "http_redirect_a26c_v1_0": { + "lbaas_irule": ["cc_http_redirect_a26c_v1_0"] } } \ No newline at end of file From cae95601000c0513c3b6bd50ae70cdad8364869b Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 16 Jan 2018 11:19:20 +0100 Subject: [PATCH 051/109] LBaaS: Fix bug where virtual servers got deployed across F5 guest groups when an F5 agent hosting objects is down and changes to objects which are alredy deployed by that agent were made. 
LBaaS Staging: Enabled cc_clientssl as parent ssl profile Enabled environment_group_numer to avoid deployemtns across f5 groups --- etc/neutron/services/f5/f5-openstack-agent.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/neutron/services/f5/f5-openstack-agent.ini b/etc/neutron/services/f5/f5-openstack-agent.ini index 45239e715..e2e66cd45 100644 --- a/etc/neutron/services/f5/f5-openstack-agent.ini +++ b/etc/neutron/services/f5/f5-openstack-agent.ini @@ -585,4 +585,4 @@ os_project_domain_name = default # inherit settings from the parent you define. This must be an existing profile, # and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. -f5_parent_ssl_profile = clientssl +f5_parent_ssl_profile = cc_clientssl From 39a55a90cf47e1e3c537aec302860053c72712eb Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 19 Jan 2018 15:02:31 +0100 Subject: [PATCH 052/109] Fix for F5 upgrade to release 12.1.3. vlan creation fails because a thrown Exception type got changed with that HF. 
--- f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index 9205b3163..5fc0788bb 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -15,6 +15,7 @@ import constants_v2 as const from f5.bigip.tm.net.vlan import TagModeDisallowedForTMOSVersion +from icontrol.exceptions import iControlUnexpectedHTTPError import netaddr import os import urllib @@ -420,6 +421,12 @@ def create_vlan(self, bigip, model): LOG.warn(e.message) payload.pop('tagMode') i.create(**payload) + # ccloud: 12.1.3 throws a different exception in case QinQ isn't allowed + except iControlUnexpectedHTTPError as ie: + # Providing the tag-mode is not supported + LOG.info(ie.message) + payload.pop('tagMode') + i.create(**payload) if not partition == const.DEFAULT_PARTITION: self.add_vlan_to_domain_by_id(bigip, name, partition, From a7cfe92cb308a8c5e93dd0c2ec45850f711f033f Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 19 Jan 2018 15:18:19 +0100 Subject: [PATCH 053/109] Fix for F5 upgrade to release 12.1.3. vlan creation fails because a thrown Exception type got changed with that HF. 
Now catching generic type of Exception to be safe against potential changes --- f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index 5fc0788bb..63aaa7396 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -422,7 +422,7 @@ def create_vlan(self, bigip, model): payload.pop('tagMode') i.create(**payload) # ccloud: 12.1.3 throws a different exception in case QinQ isn't allowed - except iControlUnexpectedHTTPError as ie: + except Exception as ie: # Providing the tag-mode is not supported LOG.info(ie.message) payload.pop('tagMode') From 6f0fb17669cf47863a0e04fe617b9a1cf3a96a0a Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 30 Jan 2018 10:05:49 +0100 Subject: [PATCH 054/109] Fix error in VS IP retrieval in case of SNAT --- .../lbaasv2/drivers/bigip/lbaas_builder.py | 2 - .../lbaasv2/drivers/bigip/network_helper.py | 109 +++++++++++++++++- .../lbaasv2/drivers/bigip/network_service.py | 2 + .../lbaasv2/drivers/bigip/snats.py | 2 + 4 files changed, 110 insertions(+), 5 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 4f7870b87..c706fda5d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -83,8 +83,6 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_l7policies_deleted(service) - - self._assure_listeners_deleted(service) self._assure_loadbalancer_deleted(service) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index 63aaa7396..44a2a5b4e 100644 --- 
a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -15,7 +15,6 @@ import constants_v2 as const from f5.bigip.tm.net.vlan import TagModeDisallowedForTMOSVersion -from icontrol.exceptions import iControlUnexpectedHTTPError import netaddr import os import urllib @@ -24,6 +23,7 @@ from oslo_log import helpers as log_helpers from oslo_log import log as logging from requests.exceptions import HTTPError +#from f5_openstack_agent.lbaasv2.drivers.bigip.utils import strip_domain_address LOG = logging.getLogger(__name__) @@ -615,14 +615,21 @@ def get_virtual_service_insertion( partition=const.DEFAULT_PARTITION): """Returns list of virtual server addresses""" vs = bigip.tm.ltm.virtuals - virtual_servers = vs.get_collection(partition=partition) + filter = "$filter=partition%20eq%20" + partition + # The filtering for partition of origin call below doesn't work. Therefore a new filtering is used + #virtual_servers = vs.get_collection(partition=partition) + virtual_servers = vs.get_collection(requests_params={'params': filter}) virtual_services = [] for virtual_server in virtual_servers: name = virtual_server.name virtual_address = {name: {}} dest = os.path.basename(virtual_server.destination) - (vip_addr, vip_port) = self.split_addr_port(dest) + # Don't take vs with snap pools instead of real ip's + if (virtual_server.sourceAddressTranslation and virtual_server.sourceAddressTranslation['type'] == 'snat'): + continue + else: + (vip_addr, vip_port) = self.split_addr_port(dest) virtual_address[name]['address'] = vip_addr virtual_address[name]['netmask'] = virtual_server.mask @@ -632,6 +639,26 @@ def get_virtual_service_insertion( return virtual_services + @log_helpers.log_method_call + def get_snat_addresses( + self, + bigip, + partition=const.DEFAULT_PARTITION): + """Returns list of snat addresses""" + filter = "$filter=partition%20eq%20" + partition + + snat_addrs = [] + try: + snats = 
bigip.tm.ltm.snat_translations.get_collection(requests_params={'params': filter}) + for snat in snats: + snat_addrs.append(snat.address) + + except Exception as e: + LOG.error('get_snat_addresses', + 'could not get addresses due to: %s' + % e.message) + return snat_addrs + @log_helpers.log_method_call def get_node_addresses(self, bigip, partition=const.DEFAULT_PARTITION): """Get the addresses of nodes within the partition.""" @@ -643,6 +670,82 @@ def get_node_addresses(self, bigip, partition=const.DEFAULT_PARTITION): return node_addrs + # Dummy method to check functionality from a standalone python script. + # The origin is in network_service as privta emethod descared + # def ips_exist_on_subnet(self, bigip, service, subnet, route_domain): + # # Does the big-ip have any IP addresses on this subnet? + # LOG.debug("_ips_exist_on_subnet entry %s rd %s" + # % (str(subnet['cidr']), route_domain)) + # route_domain = str(route_domain) + # ipsubnet = netaddr.IPNetwork(subnet['cidr']) + # + # # Are there any virtual addresses on this subnet? + # folder = service['loadbalancer']['tenant_id'] + # virtual_services = self.get_virtual_service_insertion( + # bigip, + # partition=folder + # ) + # for virt_serv in virtual_services: + # print virt_serv + # (_, dest) = virt_serv.items()[0] + # LOG.debug(" _ips_exist_on_subnet: checking vip %s" + # % str(dest['address'])) + # if len(dest['address'].split('%')) > 1: + # vip_route_domain = dest['address'].split('%')[1] + # else: + # vip_route_domain = '0' + # if vip_route_domain != route_domain: + # continue + # vip_addr = strip_domain_address(dest['address']) + # if netaddr.IPAddress(vip_addr) in ipsubnet: + # LOG.debug(" _ips_exist_on_subnet: found") + # return True + # + # # If there aren't any virtual addresses, are there + # # snat addresses on this subnet? 
+ # snats = self.get_snat_addresses( + # bigip, + # partition=folder + # ) + # for snat in snats: + # LOG.debug(" _ips_exist_on_subnet: checking snat %s" + # % str(snat)) + # if len(snat.split('%')) > 1: + # snat_route_domain = snat.split('%')[1] + # else: + # snat_route_domain = '0' + # if snat_route_domain != route_domain: + # continue + # snat_addr = strip_domain_address(snat) + # if netaddr.IPAddress(snat_addr) in ipsubnet: + # LOG.debug(" _ips_exist_on_subnet: found") + # return True + # + # # If there aren't any virtual addresses and snats, are there + # # node addresses on this subnet? + # nodes = self.get_node_addresses( + # bigip, + # partition=folder + # ) + # for node in nodes: + # LOG.debug(" _ips_exist_on_subnet: checking node %s" + # % str(node)) + # if len(node.split('%')) > 1: + # node_route_domain = node.split('%')[1] + # else: + # node_route_domain = '0' + # if node_route_domain != route_domain: + # continue + # node_addr = strip_domain_address(node) + # if netaddr.IPAddress(node_addr) in ipsubnet: + # LOG.debug(" _ips_exist_on_subnet: found") + # return True + # + # LOG.debug(" _ips_exist_on_subnet exit %s" + # % str(subnet['cidr'])) + # # nothing found + # return False + @log_helpers.log_method_call def add_fdb_entry( self, diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 98f937c55..303d11fc6 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -13,6 +13,8 @@ # limitations under the License. 
# +#import pdb + import itertools import netaddr import re diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index a33d642a9..b5c5d129d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -26,6 +26,8 @@ ResourceType from oslo_log import log as logging +#import pdb + LOG = logging.getLogger(__name__) From 41e82afa3ff6a05ed7eaf8bdae2ae70bb7771727 Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Fri, 9 Mar 2018 17:32:53 +0100 Subject: [PATCH 055/109] do periodic orphan cleaning every 6 hours --- .../lbaasv2/drivers/bigip/agent_manager.py | 38 +++++++++++++++++++ .../lbaasv2/drivers/bigip/icontrol_driver.py | 15 +++++++- .../lbaasv2/drivers/bigip/snats.py | 3 ++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 40511d106..beb4dabda 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -220,6 +220,7 @@ def __init__(self, conf): # Create the cache of provisioned services self.cache = LogicalServiceCache() self.last_resync = datetime.datetime.now() + self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(hours=5,minutes=50) self.needs_resync = False self.plugin_rpc = None self.pending_services = {} @@ -439,6 +440,43 @@ def periodic_resync(self, context): if self.sync_state(): self.needs_resync = True + # ccloud: check 6 hour timeout to clean orphaned snat pools + @periodic_task.periodic_task + def periodic_clean_orphans(self, context): + now = datetime.datetime.now() + LOG.debug("%s: periodic_clean_orphans called." 
% now) + + # call every 6 hours + if (now - self.last_clean_orphans).seconds > 60 * 60 * 6: + self.last_clean_orphans = now + self.clean_orphaned_snat_objects() + + # ccloud: clean orphaned snat pools + @log_helpers.log_method_call + def clean_orphaned_snat_objects(self): + cleaned = False + LOG.debug("sapcc: cleaning orphaned snat objects") + + try: + snat_pools = self.lbdriver.get_all_snat_pools() + if snat_pools: + self.purge_orphaned_snat_pools(snat_pools) + + except Exception as e: + LOG.error("sapcc: Unable to purge snat_pools: %s" % e.message) + cleaned = True + + return cleaned + + # ccloud: try purging all snat pools + @log_helpers.log_method_call + def purge_orphaned_snat_pools(self, snat_pools): + for pool in snat_pools: + try: + pool.delete() + except Exception as e: + LOG.debug("Failed purging snat pool: %s" % e.message) + @periodic_task.periodic_task(spacing=30) def update_operating_status(self, context): if not self.plugin_rpc: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index eb4a9cce1..37b3ea786 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -898,6 +898,19 @@ def delete_health_monitor(self, health_monitor, service): LOG.debug("Deleting health monitor") return self._common_service_handler(service) + # sapcc: get all snat pools + @serialized('get_all_snat_pools') + @is_connected + def get_all_snat_pools(self): + LOG.debug('getting all snat pools on BIG-IPs') + + snat_pools = [] + if self.network_builder: + for bigip in self.get_all_bigips(): + snat_pools += self.network_builder.bigip_snat_manager.get_snats(bigip) + + return snat_pools + @is_connected def get_stats(self, service): lb_stats = {} @@ -945,7 +958,7 @@ def remove_orphans(self, all_loadbalancers): bigip.system.purge_orphaned_folders_contents(existing_tenants) for bigip in self.get_all_bigips(): - 
bigip.system.purge_orphaned_folders(existing_tenants) + bigip.system.purge_orphaned_folders(existing_tenants)\ def fdb_add(self, fdb): # Add (L2toL3) forwarding database entries diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index b5c5d129d..77b58a47f 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -70,6 +70,9 @@ def _get_snat_traffic_group(self, tenant_id): LOG.error('Invalid f5_ha_type:%s' % self.driver.conf.f5_ha_type) return '' + def get_snats(self, bigip): + return self.snatpool_manager.get_resources(bigip) + def get_snat_addrs(self, subnetinfo, tenant_id, snat_count): # Get the ip addresses for snat """ subnet = subnetinfo['subnet'] From df64d3c051a944d1babc6d87c535a412215577e6 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 23 Mar 2018 16:14:09 +0100 Subject: [PATCH 056/109] Revert "do periodic orphan cleaning every 6 hours" This reverts commit 41e82af --- .../lbaasv2/drivers/bigip/agent_manager.py | 38 ------------------- .../lbaasv2/drivers/bigip/icontrol_driver.py | 15 +------- .../lbaasv2/drivers/bigip/snats.py | 3 -- 3 files changed, 1 insertion(+), 55 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index beb4dabda..40511d106 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -220,7 +220,6 @@ def __init__(self, conf): # Create the cache of provisioned services self.cache = LogicalServiceCache() self.last_resync = datetime.datetime.now() - self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(hours=5,minutes=50) self.needs_resync = False self.plugin_rpc = None self.pending_services = {} @@ -440,43 +439,6 @@ def periodic_resync(self, context): if self.sync_state(): self.needs_resync = True - # ccloud: check 6 
hour timeout to clean orphaned snat pools - @periodic_task.periodic_task - def periodic_clean_orphans(self, context): - now = datetime.datetime.now() - LOG.debug("%s: periodic_clean_orphans called." % now) - - # call every 6 hours - if (now - self.last_clean_orphans).seconds > 60 * 60 * 6: - self.last_clean_orphans = now - self.clean_orphaned_snat_objects() - - # ccloud: clean orphaned snat pools - @log_helpers.log_method_call - def clean_orphaned_snat_objects(self): - cleaned = False - LOG.debug("sapcc: cleaning orphaned snat objects") - - try: - snat_pools = self.lbdriver.get_all_snat_pools() - if snat_pools: - self.purge_orphaned_snat_pools(snat_pools) - - except Exception as e: - LOG.error("sapcc: Unable to purge snat_pools: %s" % e.message) - cleaned = True - - return cleaned - - # ccloud: try purging all snat pools - @log_helpers.log_method_call - def purge_orphaned_snat_pools(self, snat_pools): - for pool in snat_pools: - try: - pool.delete() - except Exception as e: - LOG.debug("Failed purging snat pool: %s" % e.message) - @periodic_task.periodic_task(spacing=30) def update_operating_status(self, context): if not self.plugin_rpc: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 37b3ea786..eb4a9cce1 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -898,19 +898,6 @@ def delete_health_monitor(self, health_monitor, service): LOG.debug("Deleting health monitor") return self._common_service_handler(service) - # sapcc: get all snat pools - @serialized('get_all_snat_pools') - @is_connected - def get_all_snat_pools(self): - LOG.debug('getting all snat pools on BIG-IPs') - - snat_pools = [] - if self.network_builder: - for bigip in self.get_all_bigips(): - snat_pools += self.network_builder.bigip_snat_manager.get_snats(bigip) - - return snat_pools - @is_connected def get_stats(self, 
service): lb_stats = {} @@ -958,7 +945,7 @@ def remove_orphans(self, all_loadbalancers): bigip.system.purge_orphaned_folders_contents(existing_tenants) for bigip in self.get_all_bigips(): - bigip.system.purge_orphaned_folders(existing_tenants)\ + bigip.system.purge_orphaned_folders(existing_tenants) def fdb_add(self, fdb): # Add (L2toL3) forwarding database entries diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index 77b58a47f..b5c5d129d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -70,9 +70,6 @@ def _get_snat_traffic_group(self, tenant_id): LOG.error('Invalid f5_ha_type:%s' % self.driver.conf.f5_ha_type) return '' - def get_snats(self, bigip): - return self.snatpool_manager.get_resources(bigip) - def get_snat_addrs(self, subnetinfo, tenant_id, snat_count): # Get the ip addresses for snat """ subnet = subnetinfo['subnet'] From 86494bb678a95fcf15c52101007ce1783f026412 Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Fri, 23 Mar 2018 17:12:38 +0100 Subject: [PATCH 057/109] This reverts commit df64d3c and fixes SNAT orphan cleaning --- .../lbaasv2/drivers/bigip/agent_manager.py | 33 +++++++++++++++++++ .../lbaasv2/drivers/bigip/icontrol_driver.py | 31 ++++++++++++++++- .../lbaasv2/drivers/bigip/snats.py | 3 ++ 3 files changed, 66 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 40511d106..32f3f6302 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -220,6 +220,7 @@ def __init__(self, conf): # Create the cache of provisioned services self.cache = LogicalServiceCache() self.last_resync = datetime.datetime.now() + self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(hours=5,minutes=50) self.needs_resync = 
False self.plugin_rpc = None self.pending_services = {} @@ -439,6 +440,38 @@ def periodic_resync(self, context): if self.sync_state(): self.needs_resync = True + # ccloud: check 6 hour timeout to clean orphaned snat pools + @periodic_task.periodic_task + def periodic_clean_orphans(self, context): + now = datetime.datetime.now() + + # call every 6 hours + if (now - self.last_clean_orphans).seconds > 60 * 60 * 6: + self.last_clean_orphans = now + self.clean_orphaned_snat_objects() + + # ccloud: clean orphaned snat pools + @log_helpers.log_method_call + def clean_orphaned_snat_objects(self): + virtual_addresses = self.lbdriver.get_all_virtual_addresses() + snat_pools = self.lbdriver.get_all_snat_pools() + + for va in virtual_addresses: + snat_obj = self.find_in_collection(va.name.replace('Project_', 'lb_'), snat_pools) + if snat_obj is not None: + snat_pools.remove(snat_obj) + + for orphaned_snat in snat_pools: + orphaned_snat.delete() + LOG.debug("sapcc: purging orphaned snat pool %s" % orphaned_snat.name) + + # ccloud: try purging all snat pools + def find_in_collection(self, name, collection): + for item in collection: + if item is not None and item.name == name: + return item + return None + @periodic_task.periodic_task(spacing=30) def update_operating_status(self, context): if not self.plugin_rpc: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index eb4a9cce1..938f56141 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -375,6 +375,8 @@ def __init__(self, conf, registerOpts=True): resource_helper.ResourceType.virtual) self.pool_manager = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.pool) + self.va_manager = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual_address) if self.conf.trace_service_requests: path = '/var/log/neutron/service/' @@ 
-898,6 +900,33 @@ def delete_health_monitor(self, health_monitor, service): LOG.debug("Deleting health monitor") return self._common_service_handler(service) + # sapcc: get all snat pools + @serialized('get_all_snat_pools') + @is_connected + def get_all_snat_pools(self, partition=None): + LOG.debug('getting all snat pools on BIG-IPs') + + snat_pools = [] + if self.network_builder: + for bigip in self.get_all_bigips(): + snat_pools += self.network_builder.bigip_snat_manager.get_snats(bigip, partition) + + return snat_pools + + # sapcc: get all virtual_addresses + @serialized('get_all_virtual_addresses') + @is_connected + def get_all_virtual_addresses(self): + LOG.debug('getting all virtual addresses on BIG-IPs') + + virtual_address_s = [] + if self.network_builder: + for bigip in self.get_all_bigips(): + virtual_address_s += self.va_manager.get_resources(bigip) + + return virtual_address_s + + @is_connected def get_stats(self, service): lb_stats = {} @@ -945,7 +974,7 @@ def remove_orphans(self, all_loadbalancers): bigip.system.purge_orphaned_folders_contents(existing_tenants) for bigip in self.get_all_bigips(): - bigip.system.purge_orphaned_folders(existing_tenants) + bigip.system.purge_orphaned_folders(existing_tenants)\ def fdb_add(self, fdb): # Add (L2toL3) forwarding database entries diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index b5c5d129d..c9117c3b4 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -70,6 +70,9 @@ def _get_snat_traffic_group(self, tenant_id): LOG.error('Invalid f5_ha_type:%s' % self.driver.conf.f5_ha_type) return '' + def get_snats(self, bigip, partition=None): + return self.snatpool_manager.get_resources(bigip, partition) + def get_snat_addrs(self, subnetinfo, tenant_id, snat_count): # Get the ip addresses for snat """ subnet = subnetinfo['subnet'] From 92509376fc34a99bb681d2045e8813962c6714c5 Mon Sep 
17 00:00:00 2001 From: Andrew Karpow Date: Tue, 27 Mar 2018 14:26:52 +0200 Subject: [PATCH 058/109] fixed logging error --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 32f3f6302..36dba741e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -462,8 +462,8 @@ def clean_orphaned_snat_objects(self): snat_pools.remove(snat_obj) for orphaned_snat in snat_pools: - orphaned_snat.delete() LOG.debug("sapcc: purging orphaned snat pool %s" % orphaned_snat.name) + orphaned_snat.delete() # ccloud: try purging all snat pools def find_in_collection(self, name, collection): From 051011624d85dbdab712ca93d3a316f9dde9fa7c Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Wed, 28 Mar 2018 16:42:08 +0200 Subject: [PATCH 059/109] added holzhammer command to f5 cli: cleans partition and resyncs it --- .../drivers/bigip/cli/actions/base_action.py | 8 +-- .../drivers/bigip/cli/actions/holzhammer.py | 41 ++++++++++++++ .../lbaasv2/drivers/bigip/cli/f5_cli_utils.py | 54 ++++++++++++------- 3 files changed, 82 insertions(+), 21 deletions(-) create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py index 58ef8618d..e9fd4459a 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py @@ -35,10 +35,12 @@ class BaseAction(object): def __init__(self,namespace): + # append appends config paths to defaults... 
not what we intend + if len(namespace.config) > 2: + self.config_files = namespace.config[2:] + else: + self.config_files = namespace.config - self.lb_id = namespace.lb_id - self.project_id = namespace.project_id - self.config_files = namespace.config self.conf = cfg.CONF config_files = [] diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py new file mode 100644 index 000000000..c48c6a5dc --- /dev/null +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py @@ -0,0 +1,41 @@ +import base_action + +from sync_all import SyncAll +from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + +class Holzhammer(base_action.BaseAction): + + def __init__(self, namespace): + self.sure = namespace.sure + self.project_id = namespace.project_id + self.sh = system_helper.SystemHelper() + super(Holzhammer, self).__init__(namespace) + + def execute(self): + if self.sure is None: + print("Please be sure by appending --i-am-sure-what-i-am-doing") + exit(1) + + if self.project_id is None: + print("Please specify an Project id with --project-id") + exit(1) + + for bigip in self.driver.get_all_bigips(): + try: + print("Cleaning Partition %s" % "Project_" + self.project_id) + self.sh.purge_folder_contents(bigip, "Project_" + self.project_id) + except Exception as err: + print(err.message) + + # Crude hack, but it works :D + + # I wanted to reuse the code of SyncAll but don't want to initalize a new SyncAll class instance, + # which would connect to F5 and reinitalize objects etc... 
+ # Instead, I just cast this Holzhammer instance to the SyncAll Class and re-execute meself :) + Syncer = self + self.__class__ = SyncAll + Syncer.execute() + diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py index 48cbcc0ba..37777a751 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py @@ -1,4 +1,6 @@ import argparse + +import sys import urllib3 import requests from requests.packages.urllib3.exceptions import InsecureRequestWarning @@ -10,40 +12,56 @@ from oslo_utils import importutils - ACTION_MODULE = 'f5_openstack_agent.lbaasv2.drivers.bigip.cli.actions.' -class Execute(argparse.Action): - def __init__(self, option_strings, dest, nargs=None, **kwargs): - super(Execute, self).__init__(option_strings, dest, **kwargs) - self.actions = {"sync":"sync.Sync","sync-all":"sync_all.SyncAll","delete":"delete.Delete"} +class Execute(argparse._SubParsersAction): + def __init__(self, option_strings, **kwargs): + super(Execute, self).__init__(option_strings, **kwargs) + self.actions = {"sync":"sync.Sync","sync-all":"sync_all.SyncAll","delete":"delete.Delete","holzhammer":"holzhammer.Holzhammer"} def __call__(self, parser, namespace, values, option_string=None): - action = self.actions.get(values) + super(Execute, self).__call__(parser, namespace, values, option_string) + action = self.actions.get(values[0]) if action: instance = importutils.import_object(ACTION_MODULE+action,namespace) - - instance.execute() - def main(): - parser = argparse.ArgumentParser(prog='f5_utils', description='Operations utilities for F5 LBAAS driver.') - parser.add_argument('command', - help='command to execute',action=Execute,choices=["sync", "sync-all", "delete"]) - - parser.add_argument('--lb-id',dest='lb_id', - help='router id',action='store') - - parser.add_argument('--project-id',dest='project_id', - help='project 
id',action='store') + parser = argparse.ArgumentParser(prog='f5_utils', description='Operations utilities for F5 LBAAS driver.') parser.add_argument('--config-file', dest='config', action='append', default=["/etc/neutron/f5-oslbaasv2-agent.ini", "/etc/neutron/neutron.conf"], help='Configuration files') + parser.add_argument('--log',dest='log', action='store_true', help='Enable openstack log output') + subparsers = parser.add_subparsers(title='command', description='valid subcommands', + help='command to execute', action=Execute, dest='subcommand') + + parser_sync = subparsers.add_parser('sync', help='sync a specific load balancer') + parser_sync.add_argument('--lb-id',dest='lb_id', + help='router id',action='store') + + parser_sync_all = subparsers.add_parser('sync-all', help='sync all load balancer') + parser_sync_all.add_argument('--project-id',dest='project_id', + help='project id',action='store') + + + parser_delete = subparsers.add_parser('delete', help='delete a specific load balancer') + parser_delete.add_argument('--lb-id',dest='lb_id', + help='router id',action='store') + + parser_holzhammer = subparsers.add_parser('holzhammer', help='purge and resync project') + parser_holzhammer.add_argument('--i-am-sure-what-i-am-doing', action='store_true', dest='sure', + help='declaration of liability') + parser_holzhammer.add_argument('--project-id',dest='project_id', + help='project id',action='store') + parser.parse_args() + + +if __name__ == "__main__": + sys.exit(main()) From 41e862a1091ec4cc309fe4563ed1bd7f91a5804c Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Fri, 6 Apr 2018 19:09:32 +0200 Subject: [PATCH 060/109] fixed purge_folder and fixed tag-mode exceptions --- f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py | 1 - f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py 
b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index 44a2a5b4e..673b7657b 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -409,7 +409,6 @@ def create_vlan(self, bigip, model): payload = {'name': interface} if tag: payload['tagged'] = True - payload['tagMode'] = "service" else: payload['untagged'] = True diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py index 2bbaa1b00..fa137f44d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py @@ -140,6 +140,7 @@ def purge_folder_contents(self, bigip, folder): ltm_types = [ ResourceType.virtual, ResourceType.virtual_address, + ResourceType.l7policy, ResourceType.pool, ResourceType.http_monitor, ResourceType.https_monitor, @@ -149,6 +150,7 @@ def purge_folder_contents(self, bigip, folder): ResourceType.snat, ResourceType.snatpool, ResourceType.snat_translation, + ResourceType.universal_persistence, ResourceType.rule ] for ltm_type in ltm_types: From 100088e3221570e3d4f1c247484be2060274dd54 Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Fri, 6 Apr 2018 19:09:58 +0200 Subject: [PATCH 061/109] fixed f5 util, added druckhammer command that completley purges F5 --- .../drivers/bigip/cli/actions/delete.py | 1 + .../drivers/bigip/cli/actions/druckhammer.py | 111 ++++++++++++++++++ .../drivers/bigip/cli/actions/holzhammer.py | 23 ++-- .../lbaasv2/drivers/bigip/cli/actions/sync.py | 1 + .../drivers/bigip/cli/actions/sync_all.py | 1 + .../lbaasv2/drivers/bigip/cli/f5_cli_utils.py | 17 ++- 6 files changed, 139 insertions(+), 15 deletions(-) create mode 100644 f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/druckhammer.py diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py index 
7b8cffccd..8b7367488 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/delete.py @@ -11,6 +11,7 @@ class Delete(base_action.BaseAction): def __init__(self, namespace): + self.lb_id = namespace.lb_id super(Delete, self).__init__(namespace) def execute(self): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/druckhammer.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/druckhammer.py new file mode 100644 index 000000000..01c3b888b --- /dev/null +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/druckhammer.py @@ -0,0 +1,111 @@ +import base_action + +from sync_all import SyncAll +from f5_openstack_agent.lbaasv2.drivers.bigip import system_helper, resource_helper +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + +class Druckhammer(base_action.BaseAction): + + def __init__(self, namespace): + self.exempt_folders = ['/', 'Common', 'Drafts'] + self.sure = namespace.sure + self.sync = namespace.sync + self.project_id = None + self.sh = system_helper.SystemHelper() + self.rd_manager = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.route_domain) + self.si_manager = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.selfip) + self.vlan_manger = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.vlan) + super(Druckhammer, self).__init__(namespace) + + def execute(self): + if not self.sure: + print("Please be sure by appending --i-am-sure-what-i-am-doing") + exit(1) + + for bigip in self.driver.get_all_bigips(): + for folder in self.sh.get_folders(bigip): + if folder in self.exempt_folders: + continue + + try: + print("Purging Folder \"%s\"" % folder) + self.sh.purge_folder_contents(bigip, folder) + self.sh.purge_folder(bigip, folder) + except Exception as err: + print("Failed purging folder %s: %s" % (folder, err.message)) + + for route in self.get_routes(bigip): + try: + 
print("Purging route \"%s\"" % route.name) + route.delete() + except Exception as err: + print("Failed purging route %s: %s" % (route.name, err.message)) + + + for selfip in self.get_selfips(bigip): + try: + print("Purging selfip \"%s\"" % selfip.name) + selfip.delete() + except Exception as err: + print("Failed purging selfip %s: %s" % (selfip.name, err.message)) + + for route_domain in self.get_routedomains(bigip): + try: + print("Purging route_domain \"%s\"" % route_domain.name) + route_domain.delete() + except Exception as err: + print("Failed purging route_domain %s: %s" % (route_domain.name, err.message)) + + + for vlan in self.get_vlans(bigip): + try: + print("Purging vlan \"%s\"" % vlan.name) + vlan.delete() + except Exception as err: + print("Failed purging vlan %s: %s" % (vlan.name, err.message)) + + if self.sync: + # Crude hack, but it works :D + # I wanted to reuse the code of SyncAll but don't want to initalize a new SyncAll class instance, + # which would connect to F5 and reinitalize objects etc... 
+ # Instead, I just cast this druckhammer instance to the SyncAll Class and re-execute meself :) + Syncer = self + self.__class__ = SyncAll + Syncer.execute() + + def get_selfips(self, bigip): + selfips = [] + for selfip in self.si_manager.get_resources(bigip, "Common"): + if selfip.name.startswith("local-" + bigip.device_name): + selfips.append(selfip) + + return selfips + + def get_routedomains(self, bigip): + routedomains = [] + for routedomain in self.rd_manager.get_resources(bigip, "Common"): + if routedomain.name.startswith("rd-"): + routedomains.append(routedomain) + + return routedomains + + def get_routes(self, bigip): + routes = [] + for route in bigip.tm.net.routes.get_collection(): + if route.name.startswith("rt-"): + routes.append(route) + + return routes + + def get_vlans(self, bigip): + vlans = [] + for vlan in self.vlan_manger.get_resources(bigip, "Common"): + if vlan.name.startswith("vlan-"): + vlans.append(vlan) + + return vlans diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py index c48c6a5dc..bfaf2a9e6 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/holzhammer.py @@ -9,16 +9,12 @@ class Holzhammer(base_action.BaseAction): def __init__(self, namespace): - self.sure = namespace.sure + self.sync = namespace.sync self.project_id = namespace.project_id self.sh = system_helper.SystemHelper() super(Holzhammer, self).__init__(namespace) def execute(self): - if self.sure is None: - print("Please be sure by appending --i-am-sure-what-i-am-doing") - exit(1) - if self.project_id is None: print("Please specify an Project id with --project-id") exit(1) @@ -30,12 +26,11 @@ def execute(self): except Exception as err: print(err.message) - # Crude hack, but it works :D - - # I wanted to reuse the code of SyncAll but don't want to initalize a new SyncAll class instance, - # 
which would connect to F5 and reinitalize objects etc... - # Instead, I just cast this Holzhammer instance to the SyncAll Class and re-execute meself :) - Syncer = self - self.__class__ = SyncAll - Syncer.execute() - + if self.sync: + # Crude hack, but it works :D + # I wanted to reuse the code of SyncAll but don't want to initalize a new SyncAll class instance, + # which would connect to F5 and reinitalize objects etc... + # Instead, I just cast this Holzhammer instance to the SyncAll Class and re-execute meself :) + Syncer = self + self.__class__ = SyncAll + Syncer.execute() diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py index 2183723ae..6995cab4c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync.py @@ -11,6 +11,7 @@ class Sync(base_action.BaseAction): def __init__(self, namespace): + self.lb_id = namespace.lb_id super(Sync, self).__init__(namespace) def execute(self): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py index 10354804f..9f795cab0 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py @@ -11,6 +11,7 @@ class SyncAll(base_action.BaseAction): def __init__(self, namespace): + self.project_id = namespace.project_id super(SyncAll, self).__init__(namespace) def execute(self): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py index 37777a751..abd9f68b5 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/f5_cli_utils.py @@ -17,7 +17,13 @@ class Execute(argparse._SubParsersAction): def __init__(self, option_strings, **kwargs): super(Execute, 
self).__init__(option_strings, **kwargs) - self.actions = {"sync":"sync.Sync","sync-all":"sync_all.SyncAll","delete":"delete.Delete","holzhammer":"holzhammer.Holzhammer"} + self.actions = { + "sync":"sync.Sync", + "sync-all":"sync_all.SyncAll", + "delete":"delete.Delete", + "holzhammer":"holzhammer.Holzhammer", + "druckhammer":"druckhammer.Druckhammer" + } def __call__(self, parser, namespace, values, option_string=None): super(Execute, self).__call__(parser, namespace, values, option_string) @@ -57,9 +63,18 @@ def main(): parser_holzhammer = subparsers.add_parser('holzhammer', help='purge and resync project') parser_holzhammer.add_argument('--i-am-sure-what-i-am-doing', action='store_true', dest='sure', help='declaration of liability') + parser_holzhammer.add_argument('--no-sync', action='store_false', dest='sync', + help='disable resync of project') parser_holzhammer.add_argument('--project-id',dest='project_id', help='project id',action='store') + parser_druckhammer = subparsers.add_parser('druckhammer', help='purge all bigip ' + 'partitions/vlans/selfip/routes/routedomains') + parser_druckhammer.add_argument('--i-am-sure-what-i-am-doing', action='store_true', dest='sure', + help='declaration of liability') + parser_druckhammer.add_argument('--sync', action='store_true', dest='sync', + help='resync all LB from agent/neutron') + parser.parse_args() From 7b7d504c3ef21352a47cd4fa8f1d27665f777b48 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 3 Jul 2018 10:07:03 +0200 Subject: [PATCH 062/109] Dev/mitaka m3 (#1) * Avoid different route IDs on HA pair members, Better Orphan handling * Avoid different route IDs on HA pair members, Better Orphan handling #2 * Fix Folder name retrieval * Minor changes to periodic_sync behaviour. Do NOT allow pool members without route domain in case global routing is switched off * Fix first start for orphan cleaner. 
Do it based on env_group_number or randomly so that not all agents use same start * Add some more logging if an attempt for a member crud action without route domain is made * Log messages and minor fixes for orphan handling * Orphan cache logging * Allow removal of orphan nodes even they are linked as members in pools (delete member too). Clean orphans every 30 minutes for testing * remove orphan objects after 2 attempts * Fix for F5 agent. Report and delete orphan cache * New orphan cleanup interval * New orphan cache interval * Fix error message * Fix : maximum recursion depth exceeded in case Loadbalancer orphan deletion --- .gitignore | 1 + .../lbaasv2/drivers/bigip/agent_manager.py | 414 +++++++++++--- .../drivers/bigip/cli/actions/sync_all.py | 4 +- .../lbaasv2/drivers/bigip/cluster_manager.py | 9 +- .../lbaasv2/drivers/bigip/icontrol_driver.py | 518 +++++++++++++++++- .../lbaasv2/drivers/bigip/lbaas_driver.py | 111 ++-- .../lbaasv2/drivers/bigip/network_helper.py | 15 +- .../lbaasv2/drivers/bigip/network_service.py | 30 +- .../lbaasv2/drivers/bigip/plugin_rpc.py | 92 +++- .../lbaasv2/drivers/bigip/pool_service.py | 6 + .../lbaasv2/drivers/bigip/resource_helper.py | 14 +- .../lbaasv2/drivers/bigip/system_helper.py | 6 +- .../lbaasv2/drivers/bigip/tenants.py | 65 ++- 13 files changed, 1095 insertions(+), 190 deletions(-) diff --git a/.gitignore b/.gitignore index 76373e102..29a7a38ad 100644 --- a/.gitignore +++ b/.gitignore @@ -115,3 +115,4 @@ logs # vim *~ *.swp +*.iml diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 36dba741e..96927d857 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -17,6 +17,7 @@ import datetime import uuid +from random import randint from oslo_config import cfg from oslo_log import helpers as log_helpers @@ -37,6 +38,7 @@ from 
f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 from f5_openstack_agent.lbaasv2.drivers.bigip import plugin_rpc from f5_openstack_agent.lbaasv2.drivers.bigip import utils +from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex LOG = logging.getLogger(__name__) @@ -220,13 +222,26 @@ def __init__(self, conf): # Create the cache of provisioned services self.cache = LogicalServiceCache() self.last_resync = datetime.datetime.now() - self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(hours=5,minutes=50) + # try to avoid orphan cleaning right after start and schedule differently on agents + if self.conf.environment_group_number: + start = int(self.conf.environment_group_number) + else: + start = randint(1, 3) + # run orphan cleanup every 3 hours + self.orphans_clean_interval = 60 + # schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... + t = [60, 40, 20] + if start < 1: + start = 1 + self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[start-1] + 5) + self.needs_resync = False self.plugin_rpc = None self.pending_services = {} self.service_resync_interval = conf.service_resync_interval - LOG.debug('setting service resync intervl to %d seconds' % + + LOG.debug('Setting service resync interval to %d seconds' % self.service_resync_interval) # Set the agent ID @@ -415,40 +430,58 @@ def initialize_service_hook(self, started_by): @periodic_task.periodic_task(spacing=10) def periodic_resync(self, context): - """Resync tunnels/service state.""" - now = datetime.datetime.now() - LOG.debug("%s: periodic_resync called." % now) - - # Only force resync if the agent thinks it is - # synchronized and the resync timer has exired - if (now - self.last_resync).seconds > self.service_resync_interval: - if not self.needs_resync: - self.needs_resync = True - LOG.debug( - 'Forcing resync of services on resync timer (%d seconds).' 
- % self.service_resync_interval) - self.cache.services = {} - self.last_resync = now - self.lbdriver.flush_cache() - LOG.debug("periodic_sync: service_resync_interval expired: %s" - % str(self.needs_resync)) - # resync if we need to - if self.needs_resync: - self.needs_resync = False - if self.tunnel_sync(): - self.needs_resync = True - if self.sync_state(): - self.needs_resync = True + try: + """Resync tunnels/service state.""" + now = datetime.datetime.now() + LOG.debug("%s: periodic_resync called." % now) + + # Only force resync if the agent thinks it is + # synchronized and the resync timer has exired + if (now - self.last_resync).seconds > self.service_resync_interval: + LOG.info("ccloud - periodic_resync: Running sync tasks") + if not self.needs_resync: + self.needs_resync = True + LOG.debug( + 'Forcing resync of services on resync timer (%d seconds).' + % self.service_resync_interval) + self.cache.services = {} + self.last_resync = now + self.lbdriver.flush_cache() + LOG.debug("periodic_sync: service_resync_interval expired: %s" + % str(self.needs_resync)) + else: + LOG.info("ccloud - periodic_resync: Skipped because resync interval not expired. 
Waiting another {0} seconds".format((self.service_resync_interval - (now - self.last_resync ).seconds))) + # resync if we need to + if self.needs_resync: + LOG.info('periodic_resync: Forcing resync of services.') + self.needs_resync = False + if self.tunnel_sync(): + self.needs_resync = True + if self.sync_state(): + self.needs_resync = True + # clean any objects orphaned on devices and persist config + if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval)) < now: + LOG.info("ccloud - periodic_resync: Start cleaning orphan objects from F5 device") + self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) + if self.clean_orphaned_and_save_device_config(): + self.needs_resync = True + LOG.info("ccloud - periodic_resync: Finished cleaning orphan objects from F5 device. Remaining objects --> {0}".format(self.lbdriver.get_orphans_cache())) + else: + LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" + .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) - now).seconds)) + LOG.info("ccloud - periodic_resync: Resync took {0} seconds".format((datetime.datetime.now() - now).seconds)) + else: + LOG.info("ccloud - periodic_resync: Resync not needed! 
Discarding ...") - # ccloud: check 6 hour timeout to clean orphaned snat pools - @periodic_task.periodic_task - def periodic_clean_orphans(self, context): - now = datetime.datetime.now() + except Exception as e: + LOG.exception(("ccloud - Exception in periodic resync happend: " + str(e.message))) + pass - # call every 6 hours - if (now - self.last_clean_orphans).seconds > 60 * 60 * 6: - self.last_clean_orphans = now - self.clean_orphaned_snat_objects() + def clean_orphaned_and_save_device_config(self): + # clean orphan snats, resync not needed because they are unknown to neutron + self.clean_orphaned_snat_objects() + # clean all other orphans and trigger resync if needed + return self.clean_orphaned_objects_and_save_device_config() # ccloud: clean orphaned snat pools @log_helpers.log_method_call @@ -586,7 +619,7 @@ def sync_state(self): % list(known_services)) except Exception as e: - LOG.error("Unable to retrieve ready service: %s" % e.message) + LOG.warning("Unable to retrieve service. Service might be deleted in between: %s" % e.message) resync = True return resync @@ -600,61 +633,58 @@ def validate_service(self, lb_id): lb_id ) self.cache.put(service, self.agent_host) - if not self.lbdriver.service_exists(service) or self._service_has_provisioning_error(service): - LOG.info('active loadbalancer %s is not on BIG-IP or has Error state...syncing' - % lb_id) - - if self.lbdriver.service_rename_required(service): - self.lbdriver.service_object_teardown(service) - LOG.error('active loadbalancer %s is configured with ' - 'non-unique names on BIG-IP...rename in ' - 'progress.' 
- % lb_id) - LOG.error('removing the service objects that are ' - 'incorrectly named') - else: - LOG.debug('service rename not required') - + if not self.lbdriver.service_exists(service) or \ + self.has_provisioning_status_of_error(service): + LOG.info("active loadbalancer '{}' is not on BIG-IP" + " or has error state...syncing".format(lb_id)) self.lbdriver.sync(service) else: - LOG.debug("Found service definition for %s, state is ACTIVE, move on" % (lb_id)) + LOG.debug("Found service definition for '{}', state is ACTIVE" + " move on.".format(lb_id)) + except f5_ex.InvalidNetworkType as exc: + LOG.warning(exc.msg) except q_exception.NeutronException as exc: LOG.error("NeutronException: %s" % exc.msg) except Exception as exc: LOG.exception("Service validation error: %s" % exc.message) - def _service_has_provisioning_error(self, service): - - loadbalancer = service['loadbalancer'] - - if loadbalancer["provisioning_status"] == plugin_const.ERROR: - return True - - for listener in service['listeners']: - - if listener["provisioning_status"] == plugin_const.ERROR: - return True - - for pool in service['pools']: - if pool["provisioning_status"] == plugin_const.ERROR: - return True - - for healthmonitor in service['healthmonitors']: - if healthmonitor["provisioning_status"] == plugin_const.ERROR: - return True - - for l7policies in service['l7policies']: - if l7policies["provisioning_status"] == plugin_const.ERROR: - return True - - for l7_rules in service['l7policy_rules']: - if l7_rules["provisioning_status"] == plugin_const.ERROR: - return True - - - return False - - + @staticmethod + def has_provisioning_status_of_error(service): + """Determine if a service is in an ERROR/DEGRADED status. + + This staticmethod will go through a service object and determine if it + has an ERROR status anywhere within the object. 
+ """ + expected_tree = dict(loadbalancer=dict, members=list, pools=list, + listeners=list, healthmonitors=list, + l7policies=list, l7policy_rules=list) + error_status = False # assume we're in the clear unless otherwise... + loadbalancer = service.get('loadbalancer', dict()) + + def handle_error(error_status, obj): + provisioning_status = obj.get('provisioning_status') + if provisioning_status == plugin_const.ERROR: + obj_id = obj.get('id', 'unknown') + LOG.warning("Service object has object of type(id) {}({})" + " that is in '{}' status.".format( + item, obj_id, plugin_const.ERROR)) + error_status = True + return error_status + + for item in expected_tree: + obj = service.get(item, expected_tree[item]()) + if expected_tree[item] == dict and isinstance(service[item], dict): + error_status = handle_error(error_status, obj) + elif expected_tree[item] == list and \ + isinstance(obj, list): + for item in obj: + if len(item) == 1: + # {'networks': [{'id': {}}]} + item = item[item.keys()[0]] + error_status = handle_error(error_status, item) + if error_status: + loadbalancer['provisioning_status'] = plugin_const.ERROR + return error_status @utils.instrument_execution_time def refresh_service(self, lb_id): @@ -709,12 +739,222 @@ def destroy_service(self, lb_id): self.cache.remove_by_loadbalancer_id(lb_id) @log_helpers.log_method_call - def remove_orphans(self, all_loadbalancers): + def clean_orphaned_objects_and_save_device_config(self): + + cleaned = False try: - self.lbdriver.remove_orphans(all_loadbalancers) - except Exception as exc: - LOG.error("Exception: removing orphans: %s" % exc.message) + # + # Global cluster refresh tasks + # + + # global_agent = self.plugin_rpc.get_clusterwide_agent( + # self.conf.environment_prefix, + # self.conf.environment_group_number + # ) + # + # if 'host' not in global_agent: + # LOG.debug('No global agent available to sync config') + # return True + + # ccloud: Set the agent to the cluster wide one as we have onley one per cluster at 
the moment + + global_agent = {} + global_agent['host'] = self.agent_host + + if global_agent['host'] == self.agent_host: + LOG.debug('this agent is the global config agent') + # We're the global agent perform global cluster tasks + + # Ask BIG-IP for all deployed loadbalancers (virtual addresses) + lbs = self.lbdriver.get_all_deployed_loadbalancers( + purge_orphaned_folders=True) + if lbs: + self.purge_orphaned_loadbalancers(lbs) + + # Ask the BIG-IP for all deployed listeners to make + # sure we are not orphaning listeners which have + # valid loadbalancers in a OK state + listeners = self.lbdriver.get_all_deployed_listeners() + if listeners: + self.purge_orphaned_listeners(listeners) + + policies = self.lbdriver.get_all_deployed_l7_policys() + if policies: + self.purge_orphaned_l7_policys(policies) + + # Ask the BIG-IP for all deployed pools not associated + # to a virtual server + pools = self.lbdriver.get_all_deployed_pools() + if pools: + self.purge_orphaned_pools(pools) + self.purge_orphaned_nodes(pools) + + # Ask the BIG-IP for all deployed monitors not associated + # to a pool + monitors = self.lbdriver.get_all_deployed_health_monitors() + if monitors: + self.purge_orphaned_health_monitors(monitors) + + else: + LOG.debug('the global agent is %s' % (global_agent['host'])) + cleaned = False + + cleaned = True + # serialize config and save to disk + self.lbdriver.backup_configuration() + except Exception as e: + LOG.error("Unable to sync state: %s" % e.message) + cleaned = True + + return cleaned + + @log_helpers.log_method_call + def purge_orphaned_loadbalancers(self, lbs): + """Gets 'unknown' loadbalancers from Neutron and purges them + + Provisioning status of 'unknown' on loadbalancers means that the object + does not exist in Neutron. These should be deleted to consolidate + hanging objects. 
+ """ + lbs_status = self.plugin_rpc.validate_loadbalancers_state( + list(lbs.keys())) + LOG.debug('validate_loadbalancers_state returned: %s' + % lbs_status) + lbs_removed = False + for lbid in lbs_status: + # If the statu is Unknown, it no longer exists + # in Neutron and thus should be removed from the BIG-IP + if lbs_status[lbid] in ['Unknown']: + LOG.debug('removing orphaned loadbalancer %s' + % lbid) + # This will remove pools, virtual servers and + # virtual addresses + self.lbdriver.purge_orphaned_loadbalancer( + tenant_id=lbs[lbid]['tenant_id'], + loadbalancer_id=lbid, + hostnames=lbs[lbid]['hostnames']) + lbs_removed = True + if lbs_removed: + # If we have removed load balancers, then scrub + # for tenant folders we can delete because they + # no longer contain loadbalancers. + self.lbdriver.get_all_deployed_loadbalancers( + purge_orphaned_folders=True) + + @log_helpers.log_method_call + def purge_orphaned_listeners(self, listeners): + """Deletes the hanging listeners from the deleted loadbalancers""" + listener_status = self.plugin_rpc.validate_listeners_state( + list(listeners.keys())) + LOG.debug('validated_pools_state returned: %s' + % listener_status) + for listenerid in listener_status: + # If the listener status is Unknown, it no longer exists + # in Neutron and thus should be removed from BIG-IP + if listener_status[listenerid] in ['Unknown']: + LOG.debug('removing orphaned listener %s' + % listenerid) + self.lbdriver.purge_orphaned_listener( + tenant_id=listeners[listenerid]['tenant_id'], + listener_id=listenerid, + hostnames=listeners[listenerid]['hostnames']) + + @log_helpers.log_method_call + def purge_orphaned_l7_policys(self, policies): + """Deletes hanging l7_policies from the deleted listeners""" + policies_used = set() + listeners = self.lbdriver.get_all_deployed_listeners( + expand_subcollections=True) + for li_id in listeners: + policy = listeners[li_id]['l7_policy'] + if policy: + policy = policy.split('/')[2] + 
policies_used.add(policy) + has_l7policies = \ + self.plugin_rpc.validate_l7policys_state_by_listener( + listeners.keys()) + # Ask Neutron for the status of all deployed l7_policys + for policy_key in policies: + policy = policies.get(policy_key) + purged = False + if policy_key not in policies_used: + LOG.debug("policy '{}' no longer referenced by a listener: " + "({})".format(policy_key, policies_used)) + self.lbdriver.purge_orphaned_l7_policy( + tenant_id=policy['tenant_id'], + l7_policy_id=policy_key, + hostnames=policy['hostnames']) + purged = True + elif not has_l7policies.get(policy['id'], False): + # should always be present on Neutron DB! + LOG.debug("policy '{}' no longer present in Neutron's DB: " + "({})".format(policy_key, has_l7policies)) + self.lbdriver.purge_orphaned_l7_policy( + tenant_id=policy['tenant_id'], + l7_policy_id=policy_key, + hostnames=policy['hostnames'], + listener_id=li_id) + purged = True + if purged: + LOG.info("purging orphaned l7policy {} as it's no longer in " + "Neutron".format(policy_key)) + + @log_helpers.log_method_call + def purge_orphaned_nodes(self, pools): + """Deletes hanging nodes from the deleted listeners""" + pools_members = self.plugin_rpc.get_pools_members( + list(pools.keys())) + + tenant_members = dict() + for pool_id, pool in pools.iteritems(): + tenant_id = pool['tenant_id'] + members = pools_members.get(pool_id, list()) + + if tenant_id not in tenant_members: + tenant_members[tenant_id] = members + else: + tenant_members[tenant_id].extend(members) + + self.lbdriver.purge_orphaned_nodes(tenant_members) + + @log_helpers.log_method_call + def purge_orphaned_pools(self, pools): + """Deletes hanging pools from the deleted listeners""" + # Ask Neutron for the status of all deployed pools + pools_status = self.plugin_rpc.validate_pools_state( + list(pools.keys())) + LOG.debug('validated_pools_state returned: %s' + % pools_status) + for poolid in pools_status: + # If the pool status is Unknown, it no longer exists + 
# in Neutron and thus should be removed from BIG-IP + if pools_status[poolid] in ['Unknown']: + LOG.debug('removing orphaned pool %s' % poolid) + self.lbdriver.purge_orphaned_pool( + tenant_id=pools[poolid]['tenant_id'], + pool_id=poolid, + hostnames=pools[poolid]['hostnames']) + + @log_helpers.log_method_call + def purge_orphaned_health_monitors(self, monitors): + """Deletes hanging Health Monitors from the deleted Pools""" + # ask Neutron for for the status of all deployed monitors... + monitors_used = set() + pools = self.lbdriver.get_all_deployed_pools() + LOG.debug("pools found: {}".format(pools)) + for pool_id in pools: + monitorid = pools.get(pool_id).get('monitors', 'None') + monitors_used.add(monitorid) + LOG.debug('health monitors in use: {}'.format(monitors_used)) + for monitorid in monitors: + if monitorid not in monitors_used: + LOG.debug("purging healthmonitor {} as it is not " + "in ({})".format(monitorid, monitors_used)) + self.lbdriver.purge_orphaned_health_monitor( + tenant_id=monitors[monitorid]['tenant_id'], + monitor_id=monitorid, + hostnames=monitors[monitorid]['hostnames']) @log_helpers.log_method_call def create_loadbalancer(self, context, loadbalancer, service): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py index 9f795cab0..3e3db7750 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py @@ -16,7 +16,7 @@ def __init__(self, namespace): def execute(self): - services = self.manager.plugin_rpc. 
get_all_loadbalancers(host=self.manager.agent_host) + services = self.manager.plugin_rpc.get_all_loadbalancers(host=self.manager.agent_host) if self.project_id is not None: print("Syncing all LBs in project {}".format(self.project_id)) @@ -25,7 +25,7 @@ def execute(self): for service in services: - if self.project_id is None or service['tenant_id']==self.project_id : + if self.project_id is None or service['tenant_id'] == self.project_id : detailed_service = self.manager.plugin_rpc.get_service_by_loadbalancer_id(service['lb_id']) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cluster_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cluster_manager.py index e102e1dfe..f0969efdb 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cluster_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cluster_manager.py @@ -61,8 +61,13 @@ def get_traffic_groups(self, bigip): def save_config(self, bigip): try: - c = bigip.tm.sys.config - c.save() + # invalid for the version of f5-sdk in requirements + # c = bigip.tm.sys.config + # c.save() + bigip.tm.util.bash.exec_cmd( + command='run', + utilCmdArgs="-c 'tmsh save sys config'" + ) except HTTPError as err: LOG.error("Error saving config." "Repsponse status code: %s. 
Response " diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 938f56141..a0b53af41 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -22,7 +22,9 @@ import json import logging as std_logging import os +import urllib import urllib2 +from requests import HTTPError from eventlet import greenthread from time import strftime @@ -378,6 +380,9 @@ def __init__(self, conf, registerOpts=True): self.va_manager = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.virtual_address) + self.orphan_cache = {} + self.orphan_cache_last_reset = datetime.datetime.now() + if self.conf.trace_service_requests: path = '/var/log/neutron/service/' if not os.path.exists(path): @@ -791,6 +796,502 @@ def flush_cache(self): bigip.assured_tenant_snat_subnets = {} bigip.assured_gateway_subnets = [] + @serialized('get_all_deployed_loadbalancers') + @is_connected + def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): + LOG.debug('getting all deployed loadbalancers on BIG-IPs') + deployed_lb_dict = {} + for bigip in self.get_all_bigips(): + folders = self.system_helper.get_folders(bigip) + for folder in folders: + tenant_id = folder[len(self.service_adapter.prefix):] + if str(folder).startswith(self.service_adapter.prefix): + resource = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual_address) + deployed_lbs = resource.get_resources(bigip, folder) + if deployed_lbs: + for lb in deployed_lbs: + if lb.name.startswith(self.service_adapter.prefix): + lb_id = lb.name[len(self.service_adapter.prefix):] + if lb_id in deployed_lb_dict: + deployed_lb_dict[lb_id][ + 'hostnames'].append(bigip.hostname) + else: + deployed_lb_dict[lb_id] = { + 'id': lb_id, + 'tenant_id': tenant_id, + 'hostnames': [bigip.hostname] + } + else: + # delay to assure we are not in the tenant 
creation + # process before a virtual address is created. + greenthread.sleep(10) + deployed_lbs = resource.get_resources(bigip, folder) + if deployed_lbs: + for lb in deployed_lbs: + if lb.name.startswith(self.service_adapter.prefix): + lb_id = lb.name[len(self.service_adapter.prefix):] + deployed_lb_dict[lb_id] = \ + {'id': lb_id, 'tenant_id': tenant_id} + else: + # Orphaned folder! + if purge_orphaned_folders: + try: + self.system_helper.purge_folder_contents( + bigip, folder) + self.system_helper.purge_folder( + bigip, folder) + LOG.error('orphaned folder %s on %s' % + (folder, bigip.hostname)) + except Exception as exc: + LOG.error('Error purging folder %s: %s' % + (folder, str(exc))) + return deployed_lb_dict + + @serialized('get_all_deployed_listeners') + @is_connected + def get_all_deployed_listeners(self, expand_subcollections=False): + LOG.debug('getting all deployed listeners on BIG-IPs') + deployed_virtual_dict = {} + for bigip in self.get_all_bigips(): + folders = self.system_helper.get_folders(bigip) + for folder in folders: + tenant_id = folder[len(self.service_adapter.prefix):] + if str(folder).startswith(self.service_adapter.prefix): + resource = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual) + deployed_listeners = resource.get_resources( + bigip, folder, expand_subcollections) + if deployed_listeners: + for virtual in deployed_listeners: + virtual_id = \ + virtual.name[len(self.service_adapter.prefix):] + l7_policy = '' + if hasattr(virtual, 'policiesReference') and \ + 'items' in virtual.policiesReference: + l7_policy = \ + virtual.policiesReference['items'][0] + l7_policy = l7_policy['fullPath'] + if virtual_id in deployed_virtual_dict: + deployed_virtual_dict[virtual_id][ + 'hostnames'].append(bigip.hostname) + else: + deployed_virtual_dict[virtual_id] = { + 'id': virtual_id, + 'tenant_id': tenant_id, + 'hostnames': [bigip.hostname], + 'l7_policy': l7_policy + } + return deployed_virtual_dict + + def 
_is_orphan(self, device_name, id): + # clear cache every 48 hours + if (self.orphan_cache_last_reset + datetime.timedelta(hours=48)) < datetime.datetime.now(): + LOG.info('ccloud: Orphan objects cache cleared to avoid orphan orphans :-)') + self.orphan_cache_last_reset = datetime.datetime.now() + self.orphan_cache.clear() + # check if orphan can be deleted or rise counter by 1 + if not id or not device_name: + return False + else: + key = device_name + '-' + id + if key in self.orphan_cache: + if self.orphan_cache[key] >= 2: + LOG.info('ccloud: Orphan object %s marked for deletion %d times. Object will be deleted NOW' % (key, self.orphan_cache[key])) + return True + else: + self.orphan_cache[key] += 1 + LOG.info('ccloud: Orphan object %s marked for deletion %d times' % (key, self.orphan_cache[key])) + else: + self.orphan_cache[key] = 1 + LOG.info('ccloud: Orphan object %s marked for deletion %d times' % (key, self.orphan_cache[key])) + return False + + def _remove_from_orphan_cache(self, device_name, id): + if id and device_name: + key = device_name + '-' + id + if key in self.orphan_cache: + LOG.info('ccloud: Orphan object %s got deleted and is removed from orphan cache' % key) + try: + del self.orphan_cache[key] + except Exception: + pass + return + + def get_orphans_cache(self): + return self.orphan_cache + + @serialized('purge_orphaned_nodes') + @is_connected + @log_helpers.log_method_call + def purge_orphaned_nodes(self, tenant_members): + node_helper = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.node) + pool_helper = resource_helper.BigIPResourceHelper(resource_helper.ResourceType.pool) + for bigip in self.get_all_bigips(): + for tenant_id, members in tenant_members.iteritems(): + partition = self.service_adapter.prefix + tenant_id + nodes = node_helper.get_resources(bigip, partition=partition) + node_dict = {n.name: n for n in nodes} + # extract member addresses (they don't have rd info) into array + member_addresses = [] + for m in 
members: + member_addresses.append(m['address']) + # get all pool members from f5 to get rid of the ones not defined in openstack + pools = pool_helper.get_resources(bigip, partition=partition) + orphaned_members = [] + for pool in pools: + xmembers = pool.members_s.get_collection() + for xmember in xmembers: + if xmember.address.split('%')[0] not in member_addresses: + orphaned_members.append(xmember) + # all nodes are orphaned which aren't referenced as member + orphaned_nodes = [] + for n in nodes: + if (not self.conf.f5_global_routed_mode) and ('%' not in n.address): + orphaned_nodes.append(n.name) + elif not n.address.split('%')[0] in member_addresses: + orphaned_nodes.append(n.name) + + for node_name in orphaned_nodes: + try: + if self._is_orphan(bigip.device_name, node_name): + for omember in orphaned_members: + if omember.address == node_name: + omember.delete() + node_helper.delete(bigip, name=urllib.quote(node_name), + partition=partition) + self._remove_from_orphan_cache(bigip.device_name, node_name) + except HTTPError as error: + if error.response.status_code == 400: + LOG.error(error.response) + + # for member in members: + # + # rd = self.network_builder.find_subnet_route_domain( + # tenant_id, member.get('subnet_id', None)) + # node_name = "{}%{}".format(member['address'], rd) + # node_dict.pop(node_name, None) + # + # for node_name, node in node_dict.iteritems(): + # try: + # node_helper.delete(bigip, name=urllib.quote(node_name), + # partition=partition) + # except HTTPError as error: + # if error.response.status_code == 400: + # LOG.error(error.response) + + @serialized('get_all_deployed_pools') + @is_connected + def get_all_deployed_pools(self): + LOG.debug('getting all deployed pools on BIG-IPs') + deployed_pool_dict = {} + for bigip in self.get_all_bigips(): + folders = self.system_helper.get_folders(bigip) + for folder in folders: + tenant_id = folder[len(self.service_adapter.prefix):] + if str(folder).startswith(self.service_adapter.prefix): 
+ resource = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.pool) + deployed_pools = resource.get_resources(bigip, folder) + if deployed_pools: + for pool in deployed_pools: + pool_id = \ + pool.name[len(self.service_adapter.prefix):] + monitor_id = '' + if hasattr(pool, 'monitor'): + monitor = pool.monitor.split('/')[2].strip() + monitor_id = \ + monitor[len(self.service_adapter.prefix):] + LOG.debug( + 'pool {} has monitor {}'.format( + pool.name, monitor)) + else: + LOG.debug( + 'pool {} has no healthmonitors'.format( + pool.name)) + if pool_id in deployed_pool_dict: + deployed_pool_dict[pool_id][ + 'hostnames'].append(bigip.hostname) + else: + deployed_pool_dict[pool_id] = { + 'id': pool_id, + 'tenant_id': tenant_id, + 'hostnames': [bigip.hostname], + 'monitors': monitor_id + } + return deployed_pool_dict + + @serialized('purge_orphaned_pool') + @is_connected + @log_helpers.log_method_call + def purge_orphaned_pool(self, tenant_id=None, pool_id=None, + hostnames=list()): + node_helper = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.node) + for bigip in self.get_all_bigips(): + if bigip.hostname in hostnames: + try: + pool_name = self.service_adapter.prefix + pool_id + partition = self.service_adapter.prefix + tenant_id + pool = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.pool).load( + bigip, pool_name, partition) + members = pool.members_s.get_collection() + if self._is_orphan(bigip.device_name, pool_id): + pool.delete() + self._remove_from_orphan_cache(bigip.device_name, pool_id) + for member in members: + node_name = member.address + try: + if self._is_orphan(bigip.device_name, node_name): + node_helper.delete(bigip, + name=urllib.quote(node_name), + partition=partition) + self._remove_from_orphan_cache(bigip.device_name, node_name) + except HTTPError as e: + if e.response.status_code == 404: + pass + if e.response.status_code == 400: + LOG.warn("Failed to delete node -- in use") + else: + 
LOG.exception("Failed to delete node") + except HTTPError as err: + if err.response.status_code == 404: + LOG.debug('pool %s not on BIG-IP %s.' + % (pool_id, bigip.hostname)) + except Exception as exc: + LOG.exception('Exception purging pool %s' % str(exc)) + + @serialized('get_all_deployed_monitors') + @is_connected + def get_all_deployed_health_monitors(self): + """Retrieve a list of all Health Monitors deployed""" + LOG.debug('getting all deployed monitors on BIG-IP\'s') + monitor_types = ['http_monitor', 'https_monitor', 'tcp_monitor', + 'ping_monitor'] + deployed_monitor_dict = {} + adapter_prefix = self.service_adapter.prefix + for bigip in self.get_all_bigips(): + folders = self.system_helper.get_folders(bigip) + for folder in folders: + tenant_id = folder[len(adapter_prefix):] + if str(folder).startswith(adapter_prefix): + resources = map( + lambda x: resource_helper.BigIPResourceHelper( + getattr(resource_helper.ResourceType, x)), + monitor_types) + for resource in resources: + deployed_monitors = resource.get_resources( + bigip, folder) + if deployed_monitors: + for monitor in deployed_monitors: + monitor_id = monitor.name[len(adapter_prefix):] + if monitor_id in deployed_monitor_dict: + deployed_monitor_dict[monitor_id][ + 'hostnames'].append(bigip.hostname) + else: + deployed_monitor_dict[monitor_id] = { + 'id': monitor_id, + 'tenant_id': tenant_id, + 'hostnames': [bigip.hostname] + } + return deployed_monitor_dict + + @serialized('purge_orphaned_health_monitor') + @is_connected + @log_helpers.log_method_call + def purge_orphaned_health_monitor(self, tenant_id=None, monitor_id=None, + hostnames=list()): + """Purge all monitors that exist on the BIG-IP but not in Neutron""" + resource_types = [ + resource_helper.BigIPResourceHelper(x) for x in [ + resource_helper.ResourceType.http_monitor, + resource_helper.ResourceType.https_monitor, + resource_helper.ResourceType.ping_monitor, + resource_helper.ResourceType.tcp_monitor]] + for bigip in 
self.get_all_bigips(): + if bigip.hostname in hostnames: + try: + monitor_name = self.service_adapter.prefix + monitor_id + partition = self.service_adapter.prefix + tenant_id + monitor = None + for monitor_type in resource_types: + try: + monitor = monitor_type.load(bigip, monitor_name, + partition) + break + except HTTPError as err: + if err.response.status_code == 404: + continue + if self._is_orphan(bigip.device_name, monitor_id): + monitor.delete() + self._remove_from_orphan_cache(bigip.device_name, monitor_id) + except TypeError as err: + if 'NoneType' in err: + LOG.exception("Could not find monitor {}".format( + monitor_name)) + except Exception as exc: + LOG.exception('Exception purging monitor %s' % str(exc)) + + @serialized('get_all_deployed_l7_policys') + @is_connected + def get_all_deployed_l7_policys(self): + """Retrieve a dict of all l7policies deployed + + The dict returned will have the following format: + {policy_bigip_id_0: {'id': policy_id_0, + 'tenant_id': tenant_id, + 'hostnames': [hostnames_0]} + ... 
+ } + Where hostnames is the list of BIG-IP hostnames impacted, and the + policy_id is the policy_bigip_id without 'wrapper_policy_' + """ + LOG.debug('getting all deployed l7_policys on BIG-IP\'s') + deployed_l7_policys_dict = {} + for bigip in self.get_all_bigips(): + folders = self.system_helper.get_folders(bigip) + for folder in folders: + tenant_id = folder[len(self.service_adapter.prefix):] + if str(folder).startswith(self.service_adapter.prefix): + resource = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.l7policy) + deployed_l7_policys = resource.get_resources( + bigip, folder) + if deployed_l7_policys: + for l7_policy in deployed_l7_policys: + l7_policy_id = l7_policy.name + if l7_policy_id in deployed_l7_policys_dict: + my_dict = \ + deployed_l7_policys_dict[l7_policy_id] + my_dict['hostnames'].append(bigip.hostname) + else: + po_id = l7_policy_id.replace( + 'wrapper_policy_', '') + deployed_l7_policys_dict[l7_policy_id] = { + 'id': po_id, + 'tenant_id': tenant_id, + 'hostnames': [bigip.hostname] + } + return deployed_l7_policys_dict + + @serialized('purge_orphaned_l7_policy') + @is_connected + @log_helpers.log_method_call + def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None, + hostnames=list(), listener_id=None): + """Purge all l7_policys that exist on the BIG-IP but not in Neutron""" + for bigip in self.get_all_bigips(): + if bigip.hostname in hostnames: + error = None + try: + l7_policy_name = l7_policy_id + partition = self.service_adapter.prefix + tenant_id + if listener_id and partition: + if self.service_adapter.prefix not in listener_id: + listener_id = \ + self.service_adapter.prefix + listener_id + li_resource = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual).load( + bigip, listener_id, partition) + li_resource.update(policies=[]) + l7_policy = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.l7policy).load( + bigip, l7_policy_name, partition) + if 
self._is_orphan(bigip.device_name, l7_policy_id): + l7_policy.delete() + self._remove_from_orphan_cache(bigip.device_name, l7_policy_id) + except HTTPError as err: + if err.response.status_code == 404: + LOG.debug('l7_policy %s not on BIG-IP %s.' + % (l7_policy_id, bigip.hostname)) + else: + error = err + except Exception as exc: + error = err + if error: + kwargs = dict( + tenant_id=tenant_id, l7_policy_id=l7_policy_id, + hostname=bigip.hostname, listener_id=listener_id) + LOG.exception('Exception: purge_orphaned_l7_policy({}) ' + '"{}"'.format(kwargs, exc)) + + @serialized('purge_orphaned_loadbalancer') + @is_connected + @log_helpers.log_method_call + def purge_orphaned_loadbalancer(self, tenant_id=None, + loadbalancer_id=None, hostnames=list()): + for bigip in self.get_all_bigips(): + if bigip.hostname in hostnames: + try: + va_name = self.service_adapter.prefix + loadbalancer_id + partition = self.service_adapter.prefix + tenant_id + va = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual_address).load( + bigip, va_name, partition) + # get virtual services (listeners) + # referencing this virtual address + vses = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual).get_resources( + bigip, partition) + vs_dest_compare = '/' + partition + '/' + va.name + for vs in vses: + if str(vs.destination).startswith(vs_dest_compare): + if hasattr(vs, 'pool'): + pool = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.pool).load( + bigip, os.path.basename(vs.pool), + partition) + vs_name = vs.name + if self._is_orphan(bigip.device_name, vs_name): + vs.delete() + self._remove_from_orphan_cache(bigip.device_name, vs_name) + pool_name = pool.name + if self._is_orphan(bigip.device_name, pool_name): + pool.delete() + self._remove_from_orphan_cache(bigip.device_name, pool_name) + else: + vs_name = vs.name + if self._is_orphan(bigip.device_name, vs_name): + vs.delete() + 
self._remove_from_orphan_cache(bigip.device_name, vs_name) + if self._is_orphan(bigip.device_name, va_name): + resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual_address).delete( + bigip, va_name, partition) + self._remove_from_orphan_cache(bigip.device_name, va_name) + except HTTPError as err: + if err.response.status_code == 404: + LOG.debug('loadbalancer %s not on BIG-IP %s.' + % (loadbalancer_id, bigip.hostname)) + except Exception as exc: + LOG.exception('Exception purging loadbalancer %s' + % str(exc)) + + @serialized('purge_orphaned_listener') + @is_connected + @log_helpers.log_method_call + def purge_orphaned_listener( + self, tenant_id=None, listener_id=None, hostnames=[]): + for bigip in self.get_all_bigips(): + if bigip.hostname in hostnames: + try: + listener_name = self.service_adapter.prefix + listener_id + partition = self.service_adapter.prefix + tenant_id + listener = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.virtual).load( + bigip, listener_name, partition) + if self._is_orphan(bigip.device_name, listener_id): + listener.delete() + self._remove_from_orphan_cache(bigip.device_name, listener_id) + except HTTPError as err: + if err.response.status_code == 404: + LOG.debug('listener %s not on BIG-IP %s.' 
+ % (listener_id, bigip.hostname)) + except Exception as exc: + LOG.exception('Exception purging listener %s' % str(exc)) + @serialized('create_loadbalancer') @is_connected def create_loadbalancer(self, loadbalancer, service): @@ -959,23 +1460,6 @@ def get_stats(self, service): finally: return lb_stats - @serialized('remove_orphans') - def remove_orphans(self, all_loadbalancers): - """Remove out-of-date configuration on big-ips """ - existing_tenants = [] - existing_lbs = [] - for loadbalancer in all_loadbalancers: - existing_tenants.append(loadbalancer['tenant_id']) - existing_lbs.append(loadbalancer['lb_id']) - - for bigip in self.get_all_bigips(): - bigip.pool.purge_orphaned_pools(existing_lbs) - for bigip in self.get_all_bigips(): - bigip.system.purge_orphaned_folders_contents(existing_tenants) - - for bigip in self.get_all_bigips(): - bigip.system.purge_orphaned_folders(existing_tenants)\ - def fdb_add(self, fdb): # Add (L2toL3) forwarding database entries self.remove_ips_from_fdb_update(fdb) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py index 413922d76..e57ba1a9e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py @@ -15,13 +15,13 @@ class LBaaSBaseDriver(object): - """Abstract base LBaaS Driver class for interfacing with Agent Manager """ + """Abstract base LBaaS Driver class for interfacing with Agent Manager.""" def __init__(self, conf): # XXX 'conf' appears to be unused - '''Maybe we can remove this method altogether? Or maybe it's for future + """Maybe we can remove this method altogether? Or maybe it's for future. subclassing... 
- ''' + """ self.agent_id = None self.plugin_rpc = None # XXX overridden in the only known subclass self.connected = False # XXX overridden in the only known subclass @@ -30,15 +30,18 @@ def __init__(self, conf): # XXX 'conf' appears to be unused self.agent_configurations = {} # XXX overridden in subclass def set_context(self, context): - """Set the global context object for the lbaas driver """ + """Set the global context object for the lbaas driver.""" raise NotImplementedError() + def set_plugin_rpc(self, plugin_rpc): + """Provide LBaaS Plugin RPC access.""" + def post_init(self): """Run after agent is fully connected """ raise NotImplementedError() - def set_tunnel_rpc(self, tunnel_rpc): # XXX into this class? - """Provide FDB Connector RPC access """ + def set_tunnel_rpc(self, tunnel_rpc): + """Provide FDB Connector RPC access.""" raise NotImplementedError() def set_l2pop_rpc(self, l2pop_rpc): @@ -50,7 +53,7 @@ def connect(self): raise NotImplementedError() def flush_cache(self): - """Remove all cached items """ + """Remove all cached items.""" raise NotImplementedError() def backup_configuration(self): @@ -58,112 +61,130 @@ def backup_configuration(self): raise NotImplementedError() def get_stats(self, service): - """Get Stats for a loadbalancer Service """ + """Get Stats for a loadbalancer Service.""" raise NotImplementedError() + def get_all_deployed_loadbalancers(self, purge_orphaned_folders=True): + """Get all Loadbalancers defined on devices.""" + raise NotImplemented + + def purge_orphaned_loadbalancer(self, tenant_id, loadbalancer_id, + hostnames): + """Remove all loadbalancers without references in Neutron.""" + raise NotImplemented + def exists(self, service): - """Check If LBaaS Service is Defined on Driver Target """ + """Check If LBaaS Service is Defined on Driver Target.""" raise NotImplementedError() def sync(self, service): """Force Sync a Service on Driver Target """ raise NotImplementedError() - def remove_orphans(self, known_services): - 
"""Remove Unknown Service from Driver Target """ - raise NotImplementedError() - - def create_vip(self, vip, service): - """LBaaS Create VIP """ - raise NotImplementedError() - - def update_vip(self, old_vip, vip, service): - """LBaaS Update VIP """ - raise NotImplementedError() - - def delete_vip(self, vip, service): - """LBaaS Delete VIP """ - raise NotImplementedError() - def create_pool(self, pool, service): - """LBaaS Delete VIP """ + """LBaaS Create Pool.""" raise NotImplementedError() def update_pool(self, old_pool, pool, service): - """LBaaS Update Pool """ + """LBaaS Update Pool.""" raise NotImplementedError() def delete_pool(self, pool, service): - """LBaaS Delete Pool """ + """LBaaS Delete Pool.""" raise NotImplementedError() def create_member(self, member, service): - """LBaaS Create Member """ + """LBaaS Create Member.""" raise NotImplementedError() def update_member(self, old_member, member, service): - """LBaaS Update Member """ + """LBaaS Update Member.""" raise NotImplementedError() def delete_member(self, member, service): - """LBaaS Delete Member """ + """LBaaS Delete Member.""" raise NotImplementedError() def create_pool_health_monitor(self, health_monitor, pool, service): - """LBaaS Create Pool Health Monitor """ + """LBaaS Create Pool Health Monitor.""" raise NotImplementedError() def update_health_monitor(self, old_health_monitor, health_monitor, pool, service): - """LBaaS Update Health Monitor """ + """LBaaS Update Health Monitor.""" + raise NotImplementedError() + + def delete_health_monitor(self, health_monitor, pool, service): + """LBaaS Delete Health Monior.""" raise NotImplementedError() def delete_pool_health_monitor(self, health_monitor, pool, service): - """LBaaS Delete Health Monitor """ + """LBaaS Delete Health Monitor.""" + raise NotImplementedError() + + def get_all_deployed_health_monitors(self): + """Get listing of all deployed Health Monitors""" + raise NotImplementedError() + + def purge_orphaned_health_monitor(self, 
tenant_id=None, monitor_id=None, + hostnames=list()): + """LBaaS Purge Health Monitor.""" + raise NotImplementedError() + + def get_all_deployed_l7_policys(self): + """Get listing of all deployed Health Monitors""" + raise NotImplementedError() + + def purge_orphaned_l7_policy(self, tenant_id=None, monitor_id=None, + hostnames=list()): + """LBaaS Purge Health Monitor.""" raise NotImplementedError() def tunnel_update(self, **kwargs): - """Neutron Core Tunnel Update """ + """Neutron Core Tunnel Update.""" raise NotImplementedError() def tunnel_sync(self): - """Neutron Core Tunnel Sync Messages """ + """Neutron Core Tunnel Sync Messages.""" raise NotImplementedError() def fdb_add(self, fdb_entries): - """L2 Population FDB Add """ + """L2 Population FDB Add.""" raise NotImplementedError() def fdb_remove(self, fdb_entries): - """L2 Population FDB Remove """ + """L2 Population FDB Remove.""" raise NotImplementedError() def fdb_update(self, fdb_entries): - """L2 Population FDB Update """ + """L2 Population FDB Update.""" raise NotImplementedError() def create_l7policy(self, l7policy, service): - """LBaaS Create l7policy """ + """LBaaS Create l7policy.""" raise NotImplementedError() def update_l7policy(self, old_l7policy, l7policy, service): - """LBaaS Update l7policy """ + """LBaaS Update l7policy.""" raise NotImplementedError() def delete_l7policy(self, l7policy, service): - """LBaaS Delete l7policy """ + """LBaaS Delete l7policy.""" raise NotImplementedError() def create_l7rule(self, l7rule, service): - """LBaaS Create l7rule """ + """LBaaS Create l7rule.""" raise NotImplementedError() def update_l7rule(self, old_l7rule, l7rule, service): - """LBaaS Update l7rule """ + """LBaaS Update l7rule.""" raise NotImplementedError() def delete_l7rule(self, l7rule, service): - """LBaaS Delete l7rule """ + """LBaaS Delete l7rule.""" + raise NotImplementedError() + + def get_orphans_cache(self): raise NotImplementedError() diff --git 
a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index 673b7657b..f6e38ba2f 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -193,7 +193,11 @@ def route_domain_exists(self, bigip, partition=const.DEFAULT_PARTITION, name=Non if domain_id: name += '_aux_' + str(domain_id) - return r.exists(name=name, partition=partition) + if r.exists(name=name, partition=partition): + return r.load(name=name, partition=partition) + else: + return None + @log_helpers.log_method_call def get_route_domain(self, bigip, partition=const.DEFAULT_PARTITION, name=None): @@ -245,14 +249,19 @@ def _get_next_domain_id(self, bigip): @log_helpers.log_method_call def create_route_domain(self, bigip, partition=const.DEFAULT_PARTITION, name=None, - strictness=False, is_aux=False): + strictness=False, is_aux=False, rd_id=None): name = self._get_route_domain_name(name) rd = bigip.tm.net.route_domains.route_domain if not name: name = partition - id = self._get_next_domain_id(bigip) + + # ccloud: use an given id to avoid inconsistencies across bigip pair members + if rd_id is None: + id = self._get_next_domain_id(bigip) + else: + id = rd_id if is_aux: name += '_aux_' + str(id) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 303d11fc6..c75aeb7e4 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -234,9 +234,9 @@ def _assure_subnet_gateway(self,service): def _annotate_service_route_domains(self, service): # Add route domain notation to pool member and vip addresses. 
+ # ccloud: don't allow creation of members without route domain in case of NOT global routed mode setting tenant_id = service['loadbalancer']['tenant_id'] self.update_rds_cache(tenant_id) - if 'members' in service: for member in service['members']: if 'address' in member: @@ -253,14 +253,25 @@ def _annotate_service_route_domains(self, service): member['subnet_id'] )) if member_network: - self.assign_route_domain( - tenant_id, member_network, member_subnet) - rd_id = ( - '%' + str(member_network['route_domain_id']) - ) - member['address'] += rd_id + self.assign_route_domain(tenant_id, member_network, member_subnet) + if 'route_domain_id' in member_network and member_network['route_domain_id']: + rd_id = ( + '%' + str(member_network['route_domain_id']) + ) + if rd_id != '%0': + member['address'] += rd_id + else: + raise f5_ex.RouteDomainQueryException('ccloud: NETWORK-RDCHECK1 Global routing disabled but route domain ID 0 was found. Discarding ...') + else: + raise f5_ex.RouteDomainQueryException('ccloud: NETWORK-RDCHECK2 Global routing disabled but route domain ID could not be found for pool member. Discarding ...') + else: + raise f5_ex.RouteDomainQueryException('ccloud: NETWORK-RDCHECK3 Global routing disabled but NO member network can be found for pool member. Discarding ...') else: - member['address'] += '%0' + if not self.conf.f5_global_routed_mode: + raise f5_ex.RouteDomainQueryException('ccloud: NETWORK-RDCHECK4 Global routing disabled but NO member network ID given for pool member. 
Discarding ...') + else: + member['address'] += '%0' + LOG.info("ccloud: NETWORK-RDCHECK5 Using default Route Domain because of global routing %s" % member['address']) if 'vip_address' in service['loadbalancer']: loadbalancer = service['loadbalancer'] @@ -766,7 +777,8 @@ def update_bigip_member_l2(self, bigip, loadbalancer, member): self.l2_service.add_bigip_fdbs( bigip, net_folder, fdb_info, member) else: - LOG.warning('LBaaS member, %s, is not associated with Neutron ' + #ccloud: reduced to info, external(non project) member IP's never get an port in neutron + LOG.info('LBaaS member, %s, is not associated with Neutron ' 'port. No fdb entries will be created for this ' 'member.' % member['address']) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py index 889463802..ac12335f6 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py @@ -481,7 +481,7 @@ def get_active_loadbalancers(self, env=None, group=None, host=None): ) except messaging.MessageDeliveryFailure: LOG.error("agent->plugin RPC exception caught: ", - "get_all_loadbalancers") + "get_active_loadbalancers") return loadbalancers @@ -505,7 +505,7 @@ def get_pending_loadbalancers(self, env=None, group=None, host=None): ) except messaging.MessageDeliveryFailure: LOG.error("agent->plugin RPC exception caught: ", - "get_all_loadbalancers") + "get_pending_loadbalancers") return loadbalancers @@ -533,3 +533,91 @@ def get_loadbalancers_by_network(self, network_id, env=None,group=None,host=None return loadbalancers + @log_helpers.log_method_call + def validate_loadbalancers_state(self, loadbalancers): + """Get the status of a list of loadbalancers IDs in Neutron""" + lb_status = {} + try: + lb_status = self._call( + self.context, + self._make_msg('validate_loadbalancers_state', + loadbalancers=loadbalancers, + host=self.host), + topic=self.topic + ) + except 
messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "validate_loadbalancers_state") + + return lb_status + + @log_helpers.log_method_call + def validate_listeners_state(self, listeners): + """Get the status of a list of listener IDs in Neutron""" + listener_status = {} + try: + listener_status = self._call( + self.context, + self._make_msg('validate_listeners_state', + listeners=listeners, + host=self.host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "validate_listeners_state") + + return listener_status + + @log_helpers.log_method_call + def validate_pools_state(self, pools): + """Get the status of a list of pools IDs in Neutron""" + pool_status = {} + try: + pool_status = self._call( + self.context, + self._make_msg('validate_pools_state', + pools=pools, + host=self.host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "validate_pools_state") + + return pool_status + + @log_helpers.log_method_call + def get_pools_members(self, pools): + """Get the members of a list of pools IDs in Neutron.""" + pools_members = {} + try: + pools_members = self._call( + self.context, + self._make_msg('get_pools_members', + pools=pools, + host=self.host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "get_pools_members") + + return pools_members + + @log_helpers.log_method_call + def validate_l7policys_state_by_listener(self, listeners): + """Get the status of a list of l7policys IDs in Neutron""" + l7policy_status = {} + try: + l7policy_status = self._call( + self.context, + self._make_msg('validate_l7policys_state_by_listener', + listeners=listeners), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "validate_l7policys_state_by_listener") + + return
l7policy_status diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index dfdbcc952..949f190da 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -130,6 +130,8 @@ def update_healthmonitor(self, service, bigips): def create_member(self, service, bigips): pool = self.service_adapter.get_pool(service) member = self.service_adapter.get_member(service) + if '%' not in member['address'] or '%0' in member['address']: + LOG.error("ccloud: POOL-RDCHECK1 - trying to create member with address: %s", member['address']) for bigip in bigips: part = pool["partition"] p = self.pool_helper.load(bigip, @@ -141,6 +143,8 @@ def delete_member(self, service, bigips): pool = self.service_adapter.get_pool(service) member = self.service_adapter.get_member(service) + if '%' not in member['address'] or '%0' in member['address']: + LOG.error("ccloud: POOL-RDCHECK2 - trying to delete member with address: %s", member['address']) part = pool["partition"] for bigip in bigips: p = self.pool_helper.load(bigip, @@ -171,6 +175,8 @@ def update_member(self, service, bigips): pool = self.service_adapter.get_pool(service) member = self.service_adapter.get_member(service) + if '%' not in member['address'] or '%0' in member['address']: + LOG.error("ccloud: POOL-RDCHECK3 - trying to update member with address: %s", member['address']) part = pool["partition"] for bigip in bigips: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py index 8f42de3d9..5bc50a228 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/resource_helper.py @@ -151,7 +151,8 @@ def update(self, bigip, model): return resource - def get_resources(self,
bigip, partition=None): + def get_resources(self, bigip, partition=None, + expand_subcollections=False): u"""Retrieve a collection BIG-IP of resources from a BIG-IP. Generates a list of resources objects on a BIG-IP system. @@ -169,10 +170,15 @@ def get_resources(self, bigip, partition=None): raise err if collection: + params = {'params': ''} if partition: - params = { - 'params': get_filter(bigip, 'partition', 'eq', partition) - } + params['params'] = get_filter( + bigip, 'partition', 'eq', partition) + if expand_subcollections and \ + isinstance(params['params'], dict): + params['params']['expandSubcollections'] = 'true' + elif expand_subcollections: + params['params'] += '&expandSubCollections=true' resources = collection.get_collection(requests_params=params) else: resources = collection.get_collection() diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py index fa137f44d..49f6dc53c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/system_helper.py @@ -48,9 +48,11 @@ def folder_exists(self, bigip, folder): def get_folders(self, bigip): f_collection = [] - folders = bigip.tm.sys.folders.get_collection() + # example: bigip.tm.sys.folders.get_collection(requests_params={'params': '$select=name'}) + folders = bigip.tm.sys.folders.get_collection(requests_params={'params': '$select=name'}) for folder in folders: - f_collection.append(folder.name) + if 'name' in folder: + f_collection.append(folder['name']) return f_collection diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py index b662eb983..0a43f27e6 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py @@ -73,22 +73,54 @@ def assure_tenant_created(self, service): (tenant_id)) # create tenant route domain + + # ccloud: change of rd 
creation to avoid different id's on the bigip pair members if self.conf.use_namespaces: + # Determine which bigip needs a rd and if the rd is already created somewhere else so that id should be used + route_domain_id = None + bigiprds = [] for bigip in self.driver.get_all_bigips(): - if not self.network_helper.route_domain_exists(bigip, - const.DEFAULT_PARTITION,network_id): - try: - self.network_helper.create_route_domain( - bigip, - "Common",network_id, - self.conf.f5_route_domain_strictness) - except Exception as err: - LOG.exception(err.message) - raise f5ex.RouteDomainCreationException( - "Failed to create route domain for " - "tenant in %s" % (const.DEFAULT_PARTITION)) - + bigip_route_domain = self.network_helper.route_domain_exists(bigip, const.DEFAULT_PARTITION,network_id) + bigip_route_domain_id = bigip_route_domain.id if bigip_route_domain else None + # rd already created but not different between bigips (maybe not created on all of them) + if bigip_route_domain_id and route_domain_id is None: + route_domain_id = bigip_route_domain_id + # rd already created on different bigips with DIFFERENT id --> ERROR + elif bigip_route_domain_id and route_domain_id and bigip_route_domain_id != route_domain_id: + LOG.error("Route Domain Failure: RD for network %s is defined with ID %s on one and with %s on another Bigip" + % (network_id, bigip_route_domain_id, route_domain_id)) + raise f5ex.RouteDomainCreationException("Route Domain Failure: RD for network %s is defined with ID %s on one and with %s on another Bigip" + % (network_id, bigip_route_domain_id, route_domain_id)) + # rd not created anywhere until now + elif bigip_route_domain_id is None: + bigiprds.append(bigip) + # now we have the bigip's with the missing rd and an rd id in case it's created on one of the bigip's + # create rd in bigips where it's missing either with the given id or a new one to be determined + for bigip in bigiprds: + try: + bigip_route_domain =
self.network_helper.create_route_domain( + bigip, + partition=const.DEFAULT_PARTITION, + name=network_id, + strictness=self.conf.f5_route_domain_strictness, + is_aux=False, + rd_id=route_domain_id) + # use newly created id for next bigip + if route_domain_id is None: + route_domain_id = bigip_route_domain.id + # something went wrong + elif bigip_route_domain.id != route_domain_id: + LOG.error("Route Domain Failure: Attempt to create RD for network %s with ID %s on one and with %s on another Bigip" + % (network_id, bigip_route_domain_id, route_domain_id)) + raise f5ex.RouteDomainCreationException("Route Domain Failure: RD for network %s is defined with ID %s on one and with %s on another Bigip" + % (network_id, bigip_route_domain_id, route_domain_id)) + # error within rd creation procedure + except Exception as err: + LOG.exception(err.message) + raise f5ex.RouteDomainCreationException("Failed to create route domain for network %s in tenant in %s" % (network_id, const.DEFAULT_PARTITION)) + LOG.debug("Allocated route domain for network %s for tenant %s" + % (network_id, tenant_id)) def assure_tenant_cleanup(self, service, all_subnet_hints): """Delete tenant partition.""" @@ -116,14 +148,13 @@ def _remove_tenant_replication_mode(self, bigip, tenant_id, network_id): "Common", network_id) except Exception as err: - LOG.error("Failed to delete route domain %s. " + LOG.info("Failed to delete route domain %s. " "%s. Manual intervention might be required." % (network_id, err.message)) try: self.system_helper.delete_folder(bigip, partition) except Exception as err: - LOG.error( + LOG.info( "Folder deletion exception for tenant partition %s occurred. " - "Manual cleanup might be required." % (tenant_id)) - LOG.exception("%s" % err.message) + "Manual cleanup might be required." 
% (tenant_id)) \ No newline at end of file From 1f4790a3f469ca595c6b46e482d29914ce078035 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 3 Jul 2018 11:29:37 +0200 Subject: [PATCH 063/109] Fix: Orphan pool handling. Delete pool only if available --- .../lbaasv2/drivers/bigip/icontrol_driver.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index a0b53af41..50f33c913 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -1247,10 +1247,10 @@ def purge_orphaned_loadbalancer(self, tenant_id=None, if self._is_orphan(bigip.device_name, vs_name): vs.delete() self._remove_from_orphan_cache(bigip.device_name, vs_name) - pool_name = pool.name - if self._is_orphan(bigip.device_name, pool_name): - pool.delete() - self._remove_from_orphan_cache(bigip.device_name, pool_name) + pool_name = pool.name + if self._is_orphan(bigip.device_name, pool_name): + pool.delete() + self._remove_from_orphan_cache(bigip.device_name, pool_name) else: vs_name = vs.name if self._is_orphan(bigip.device_name, vs_name): From 9cfaa674d3a3464608483478525aa62721569040 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 4 Jul 2018 06:35:05 +0200 Subject: [PATCH 064/109] Disable orphan cleaning as safety measure until data inconsistencies got fixed --- .../lbaasv2/drivers/bigip/agent_manager.py | 3 ++- .../lbaasv2/drivers/bigip/icontrol_driver.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 96927d857..12ca02bb2 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -460,7 +460,8 @@ def periodic_resync(self, context): 
if self.sync_state(): self.needs_resync = True # clean any objects orphaned on devices and persist config - if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval)) < now: + # wtn orphan + if False: #(self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval)) < now: LOG.info("ccloud - periodic_resync: Start cleaning orphan objects from F5 device") self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) if self.clean_orphaned_and_save_device_config(): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 50f33c913..313c82f58 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -799,6 +799,8 @@ def flush_cache(self): @serialized('get_all_deployed_loadbalancers') @is_connected def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): + # wtn orphan + purge_orphaned_folders = False LOG.debug('getting all deployed loadbalancers on BIG-IPs') deployed_lb_dict = {} for bigip in self.get_all_bigips(): @@ -837,15 +839,13 @@ def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): # Orphaned folder! 
if purge_orphaned_folders: try: - self.system_helper.purge_folder_contents( - bigip, folder) - self.system_helper.purge_folder( - bigip, folder) - LOG.error('orphaned folder %s on %s' % - (folder, bigip.hostname)) + if self._is_orphan(bigip.device_name, folder): + self.system_helper.purge_folder_contents(bigip, folder) + self.system_helper.purge_folder(bigip, folder) + self._remove_from_orphan_cache(bigip.device_name, folder) + LOG.warning('ccloud: orphan folder purged %s on %s' % (folder, bigip.hostname)) except Exception as exc: - LOG.error('Error purging folder %s: %s' % - (folder, str(exc))) + LOG.error('Error purging folder %s: %s' % (folder, str(exc))) return deployed_lb_dict @serialized('get_all_deployed_listeners') From a7f83542f9c491504b4d04474f0ffe447c9ff516 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 6 Jul 2018 11:28:49 +0200 Subject: [PATCH 065/109] F5 agent: Enable snap pool cleaning again --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 12ca02bb2..e12db9523 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -460,8 +460,7 @@ def periodic_resync(self, context): if self.sync_state(): self.needs_resync = True # clean any objects orphaned on devices and persist config - # wtn orphan - if False: #(self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval)) < now: + if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval)) < now: LOG.info("ccloud - periodic_resync: Start cleaning orphan objects from F5 device") self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) if self.clean_orphaned_and_save_device_config(): @@ -481,8 +480,10 @@ def 
periodic_resync(self, context): def clean_orphaned_and_save_device_config(self): # clean orphan snats, resync not needed because they are unknown to neutron self.clean_orphaned_snat_objects() + # wtn orphan + return True # clean all other orphans and trigger resync if needed - return self.clean_orphaned_objects_and_save_device_config() + # return self.clean_orphaned_objects_and_save_device_config() # ccloud: clean orphaned snat pools @log_helpers.log_method_call From acb2315ad11cea32aa43369e71cf464f418c64de Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 11 Jul 2018 10:12:22 +0200 Subject: [PATCH 066/109] Fix wrong naming for capacity_score attribute --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index e12db9523..2ca1a5821 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -406,7 +406,7 @@ def _report_state(self): ) ) self.agent_state['configurations'][ - 'environment_capaciy_score'] = env_score + 'environment_capacity_score'] = env_score else: self.agent_state['configurations'][ 'environment_capacity_score'] = 0 From cbb5ae4bcdd9863755f4d765ced7a8096cd3fd7c Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 11 Jul 2018 13:58:47 +0200 Subject: [PATCH 067/109] Scheduling: Add virtuals and virtual_address as metrics for capacity calculation --- f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 313c82f58..1d2122ec6 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -2135,6 +2135,12 @@ def 
get_ssltps(self, bigip=None, global_statistics=None): def get_node_count(self, bigip=None, global_statistics=None): return len(bigip.tm.ltm.nodes.get_collection()) + def get_virtual_address_count(self, bigip=None, global_statistics=None): + return len(bigip.tm.ltm.virtual_address_s.get_collection()) + + def get_virtual_server_count(self, bigip=None, global_statistics=None): + return len(bigip.tm.ltm.virtuals.get_collection()) + def get_clientssl_profile_count(self, bigip=None, global_statistics=None): return ssl_profile.SSLProfileHelper.get_client_ssl_profile_count(bigip) From af5030aacc32b266d21271bead531201c4cb910e Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 11 Jul 2018 15:45:54 +0200 Subject: [PATCH 068/109] enable orphan cleanup by configuration switch ccloud_orphans_cleanup_interval . --- .../lbaasv2/drivers/bigip/agent_manager.py | 46 +++++++++++-------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 2ca1a5821..aa9868c20 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -116,6 +116,12 @@ help=( 'Amount of time to wait for a pending service to become active') ), + cfg.IntOpt( + 'ccloud_orphans_cleanup_interval', + default=0, + help=( + 'Rescheduling interval for orphan cleanup in hours') + ), ] @@ -227,8 +233,16 @@ def __init__(self, conf): start = int(self.conf.environment_group_number) else: start = randint(1, 3) - # run orphan cleanup every 3 hours - self.orphans_clean_interval = 60 + + # get orphan cleanup interval and set to a value between 0 and 24 if nonsense given + orphan_interval = int(self.conf.ccloud_orphans_cleanup_interval) + if orphan_interval < 0: + orphan_interval = 0 + elif orphan_interval > 24: + orphan_interval = 24 + + # define interval in minutes + self.orphans_clean_interval = 60 * orphan_interval # 
schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... t = [60, 40, 20] if start < 1: @@ -451,6 +465,7 @@ def periodic_resync(self, context): % str(self.needs_resync)) else: LOG.info("ccloud - periodic_resync: Skipped because resync interval not expired. Waiting another {0} seconds".format((self.service_resync_interval - (now - self.last_resync ).seconds))) + # resync if we need to if self.needs_resync: LOG.info('periodic_resync: Forcing resync of services.') @@ -459,32 +474,28 @@ def periodic_resync(self, context): self.needs_resync = True if self.sync_state(): self.needs_resync = True - # clean any objects orphaned on devices and persist config + self.clean_orphaned_snat_objects() + else: + LOG.info("ccloud - periodic_resync: Resync not needed! Discarding ...") + + if self.orphans_clean_interval > 0: if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval)) < now: - LOG.info("ccloud - periodic_resync: Start cleaning orphan objects from F5 device") + LOG.info("ccloud - orphans: Start cleaning orphan objects from F5 device") self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) - if self.clean_orphaned_and_save_device_config(): - self.needs_resync = True - LOG.info("ccloud - periodic_resync: Finished cleaning orphan objects from F5 device. Remaining objects --> {0}".format(self.lbdriver.get_orphans_cache())) + self.needs_resync = self.clean_orphaned_objects_and_save_device_config() + LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. Remaining objects --> {0}".format(self.lbdriver.get_orphans_cache())) else: LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. 
Waiting another {0} seconds" .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) - now).seconds)) - LOG.info("ccloud - periodic_resync: Resync took {0} seconds".format((datetime.datetime.now() - now).seconds)) else: - LOG.info("ccloud - periodic_resync: Resync not needed! Discarding ...") + LOG.info("ccloud - orphans: No orphan cleaning enabled. Only SNAT pool orphan handling might be done") + + LOG.info("ccloud - periodic_resync: Resync took {0} seconds".format((datetime.datetime.now() - now).seconds)) except Exception as e: LOG.exception(("ccloud - Exception in periodic resync happend: " + str(e.message))) pass - def clean_orphaned_and_save_device_config(self): - # clean orphan snats, resync not needed because they are unknown to neutron - self.clean_orphaned_snat_objects() - # wtn orphan - return True - # clean all other orphans and trigger resync if needed - # return self.clean_orphaned_objects_and_save_device_config() - # ccloud: clean orphaned snat pools @log_helpers.log_method_call def clean_orphaned_snat_objects(self): @@ -500,7 +511,6 @@ def clean_orphaned_snat_objects(self): LOG.debug("sapcc: purging orphaned snat pool %s" % orphaned_snat.name) orphaned_snat.delete() - # ccloud: try purging all snat pools def find_in_collection(self, name, collection): for item in collection: if item is not None and item.name == name: From 0d8cb5d516f7938177705a07ccee5f3c35d4a8b4 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 12 Jul 2018 13:52:27 +0200 Subject: [PATCH 069/109] Orphans: Configure Interval and TESTRUN mode for orphan cleanup --- .../lbaasv2/drivers/bigip/agent_manager.py | 23 ++++++++------- .../lbaasv2/drivers/bigip/icontrol_driver.py | 29 +++++++++++++++---- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index aa9868c20..426ab13d9 100644 --- 
a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -235,19 +235,20 @@ def __init__(self, conf): start = randint(1, 3) # get orphan cleanup interval and set to a value between 0 and 24 if nonsense given - orphan_interval = int(self.conf.ccloud_orphans_cleanup_interval) - if orphan_interval < 0: - orphan_interval = 0 - elif orphan_interval > 24: - orphan_interval = 24 + orphans_interval = int(self.conf.ccloud_orphans_cleanup_interval) + if orphans_interval < 0: + orphans_interval = 0 + elif orphans_interval > 24: + orphans_interval = 24 # define interval in minutes - self.orphans_clean_interval = 60 * orphan_interval + self.orphans_cleanup_interval = 60 * orphans_interval # schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... t = [60, 40, 20] if start < 1: start = 1 self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[start-1] + 5) + LOG.info('ccloud: Orphan cleanup interval = %s', self.orphans_cleanup_interval) self.needs_resync = False self.plugin_rpc = None @@ -478,17 +479,17 @@ def periodic_resync(self, context): else: LOG.info("ccloud - periodic_resync: Resync not needed! Discarding ...") - if self.orphans_clean_interval > 0: - if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval)) < now: + if self.orphans_cleanup_interval > 0: + if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) < now: LOG.info("ccloud - orphans: Start cleaning orphan objects from F5 device") - self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) + self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) self.needs_resync = self.clean_orphaned_objects_and_save_device_config() LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. 
Remaining objects --> {0}".format(self.lbdriver.get_orphans_cache())) else: LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" - .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_clean_interval) - now).seconds)) + .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) - now).seconds)) else: - LOG.info("ccloud - orphans: No orphan cleaning enabled. Only SNAT pool orphan handling might be done") + LOG.info("ccloud - orphans: No orphan cleaning enabled. Only SNAT pool orphan handling will be done") LOG.info("ccloud - periodic_resync: Resync took {0} seconds".format((datetime.datetime.now() - now).seconds)) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 1d2122ec6..67af68077 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -317,6 +317,11 @@ 'trace_service_requests', default=False, help='Log service object.' 
+ ), + cfg.BoolOpt( + 'ccloud_orphan_cleanup_testrun', + default=True, + help='Simulate orphan cleaning without real deletion if set to True' ) ] @@ -382,6 +387,8 @@ def __init__(self, conf, registerOpts=True): self.orphan_cache = {} self.orphan_cache_last_reset = datetime.datetime.now() + self.orphan_cleanup_testrun = self.conf.ccloud_orphan_cleanup_testrun + LOG.info('ccloud: Orphan cleanup testrun = %s', self.orphan_cleanup_testrun) if self.conf.trace_service_requests: path = '/var/log/neutron/service/' @@ -799,8 +806,6 @@ def flush_cache(self): @serialized('get_all_deployed_loadbalancers') @is_connected def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): - # wtn orphan - purge_orphaned_folders = False LOG.debug('getting all deployed loadbalancers on BIG-IPs') deployed_lb_dict = {} for bigip in self.get_all_bigips(): @@ -895,16 +900,28 @@ def _is_orphan(self, device_name, id): return False else: key = device_name + '-' + id + if key in self.orphan_cache: if self.orphan_cache[key] >= 2: - LOG.info('ccloud: Orphan object %s marked for deletion %d times. Object will be deleted NOW' % (key, self.orphan_cache[key])) - return True + if self.orphan_cleanup_testrun: + LOG.info('ccloud: Orphan TESTRUN: object %s marked for deletion %d times. Object would have be deleted NOW' % (key, self.orphan_cache[key])) + del self.orphan_cache[key] + return False + else: + LOG.info('ccloud: Orphan object %s marked for deletion %d times. 
Object will be deleted NOW' % (key, self.orphan_cache[key])) + return True else: self.orphan_cache[key] += 1 - LOG.info('ccloud: Orphan object %s marked for deletion %d times' % (key, self.orphan_cache[key])) + if self.orphan_cleanup_testrun: + LOG.info('ccloud: Orphan TESTRUN %s marked for deletion %d times' % (key, self.orphan_cache[key])) + else: + LOG.info('ccloud: Orphan object %s marked for deletion %d times' % (key, self.orphan_cache[key])) else: self.orphan_cache[key] = 1 - LOG.info('ccloud: Orphan object %s marked for deletion %d times' % (key, self.orphan_cache[key])) + if self.orphan_cleanup_testrun: + LOG.info('ccloud: Orphan TESTRUN object %s marked for deletion %d times' % (key, self.orphan_cache[key])) + else: + LOG.info('ccloud: Orphan object %s marked for deletion %d times' % (key, self.orphan_cache[key])) return False def _remove_from_orphan_cache(self, device_name, id): From fce10bd6ff98015e9e98a23b73c7232e9352bafb Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 13 Jul 2018 08:56:25 +0200 Subject: [PATCH 070/109] LBaaS: Fix params for orphan cleanup --- .../lbaasv2/drivers/bigip/agent_manager.py | 21 ++++++++++++------- .../lbaasv2/drivers/bigip/icontrol_driver.py | 8 +------ 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 426ab13d9..f3af2e0da 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -116,12 +116,17 @@ help=( 'Amount of time to wait for a pending service to become active') ), - cfg.IntOpt( + cfg.FloatOpt( 'ccloud_orphans_cleanup_interval', - default=0, + default=0.0, help=( 'Rescheduling interval for orphan cleanup in hours') ), + cfg.BoolOpt( + 'ccloud_orphans_cleanup_testrun', + default=True, + help='Simulate orphan cleaning without real deletion if set to True' + ) ] @@ -235,11 +240,11 @@ def 
__init__(self, conf): start = randint(1, 3) # get orphan cleanup interval and set to a value between 0 and 24 if nonsense given - orphans_interval = int(self.conf.ccloud_orphans_cleanup_interval) - if orphans_interval < 0: - orphans_interval = 0 - elif orphans_interval > 24: - orphans_interval = 24 + orphans_interval = float(self.conf.ccloud_orphans_cleanup_interval) + if orphans_interval < 0.0: + orphans_interval = 0.0 + elif orphans_interval > 24.0: + orphans_interval = 24.0 # define interval in minutes self.orphans_cleanup_interval = 60 * orphans_interval @@ -248,7 +253,9 @@ def __init__(self, conf): if start < 1: start = 1 self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[start-1] + 5) + LOG.info('ccloud: Orphan cleanup testrun = %s', self.conf.ccloud_orphans_cleanup_testrun) LOG.info('ccloud: Orphan cleanup interval = %s', self.orphans_cleanup_interval) + LOG.info('ccloud: Orphan cleanup first run will start at %s UTC', self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) self.needs_resync = False self.plugin_rpc = None diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 67af68077..b386f0373 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -317,11 +317,6 @@ 'trace_service_requests', default=False, help='Log service object.' 
- ), - cfg.BoolOpt( - 'ccloud_orphan_cleanup_testrun', - default=True, - help='Simulate orphan cleaning without real deletion if set to True' ) ] @@ -387,8 +382,7 @@ def __init__(self, conf, registerOpts=True): self.orphan_cache = {} self.orphan_cache_last_reset = datetime.datetime.now() - self.orphan_cleanup_testrun = self.conf.ccloud_orphan_cleanup_testrun - LOG.info('ccloud: Orphan cleanup testrun = %s', self.orphan_cleanup_testrun) + self.orphan_cleanup_testrun = self.conf.ccloud_orphans_cleanup_testrun if self.conf.trace_service_requests: path = '/var/log/neutron/service/' From 98164c23fd04f8f27c474a0ff17828749b28fef5 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 13 Jul 2018 11:30:54 +0200 Subject: [PATCH 071/109] LBaaS: Added params for orphan cleanup --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index f3af2e0da..08432b86d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -249,10 +249,11 @@ def __init__(self, conf): # define interval in minutes self.orphans_cleanup_interval = 60 * orphans_interval # schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... 
- t = [60, 40, 20] + x = self.orphans_cleanup_interval / 3 + t = [x*3, x*2, x] if start < 1: start = 1 - self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[start-1] + 5) + self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[start-1] - 5) LOG.info('ccloud: Orphan cleanup testrun = %s', self.conf.ccloud_orphans_cleanup_testrun) LOG.info('ccloud: Orphan cleanup interval = %s', self.orphans_cleanup_interval) LOG.info('ccloud: Orphan cleanup first run will start at %s UTC', self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) From 3077edf69ef34daf49d7ecf2e0fff4053202ee8c Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 17 Jul 2018 14:44:57 +0200 Subject: [PATCH 072/109] Try to fix orphan node cleaner (cyclic dependency) --- .../lbaasv2/drivers/bigip/agent_manager.py | 2 +- .../lbaasv2/drivers/bigip/icontrol_driver.py | 17 ++++++++++++----- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 08432b86d..87301a8b7 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -492,7 +492,7 @@ def periodic_resync(self, context): LOG.info("ccloud - orphans: Start cleaning orphan objects from F5 device") self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) self.needs_resync = self.clean_orphaned_objects_and_save_device_config() - LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. Remaining objects --> {0}".format(self.lbdriver.get_orphans_cache())) + LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. 
{0} objects remaining --> {1}".format(len(self.lbdriver.get_orphans_cache()), self.lbdriver.get_orphans_cache())) else: LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) - now).seconds)) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index b386f0373..162d52369 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -936,11 +936,11 @@ def get_orphans_cache(self): @is_connected @log_helpers.log_method_call def purge_orphaned_nodes(self, tenant_members): - node_helper = resource_helper.BigIPResourceHelper( - resource_helper.ResourceType.node) - pool_helper = resource_helper.BigIPResourceHelper(resource_helper.ResourceType.pool) for bigip in self.get_all_bigips(): for tenant_id, members in tenant_members.iteritems(): + node_helper = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.node) + pool_helper = resource_helper.BigIPResourceHelper(resource_helper.ResourceType.pool) partition = self.service_adapter.prefix + tenant_id nodes = node_helper.get_resources(bigip, partition=partition) node_dict = {n.name: n for n in nodes} @@ -970,12 +970,19 @@ def purge_orphaned_nodes(self, tenant_members): for omember in orphaned_members: if omember.address == node_name: omember.delete() + node_helper = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.node) node_helper.delete(bigip, name=urllib.quote(node_name), partition=partition) self._remove_from_orphan_cache(bigip.device_name, node_name) except HTTPError as error: if error.response.status_code == 400: LOG.error(error.response) + except Exception as err: + LOG.debug('ccloud: orphaned nodes --> {}'.format(orphaned_nodes)) + LOG.debug('ccloud: 
orphaned members --> {}'.format(orphaned_members)) + LOG.exception(err) + raise err # for member in members: # @@ -1038,11 +1045,11 @@ def get_all_deployed_pools(self): @log_helpers.log_method_call def purge_orphaned_pool(self, tenant_id=None, pool_id=None, hostnames=list()): - node_helper = resource_helper.BigIPResourceHelper( - resource_helper.ResourceType.node) for bigip in self.get_all_bigips(): if bigip.hostname in hostnames: try: + node_helper = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.node) pool_name = self.service_adapter.prefix + pool_id partition = self.service_adapter.prefix + tenant_id pool = resource_helper.BigIPResourceHelper( From 61d046ceb3a0023a9875bcf582ca573d7c14cabe Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 18 Jul 2018 10:08:18 +0200 Subject: [PATCH 073/109] LBaaS: Fix cyclic reference bug inside orphan member deletion --- .../lbaasv2/drivers/bigip/icontrol_driver.py | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 162d52369..32fe7c7d0 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -936,11 +936,13 @@ def get_orphans_cache(self): @is_connected @log_helpers.log_method_call def purge_orphaned_nodes(self, tenant_members): + + node_helper = resource_helper.BigIPResourceHelper( + resource_helper.ResourceType.node) + pool_helper = resource_helper.BigIPResourceHelper(resource_helper.ResourceType.pool) + for bigip in self.get_all_bigips(): for tenant_id, members in tenant_members.iteritems(): - node_helper = resource_helper.BigIPResourceHelper( - resource_helper.ResourceType.node) - pool_helper = resource_helper.BigIPResourceHelper(resource_helper.ResourceType.pool) partition = self.service_adapter.prefix + tenant_id nodes = 
node_helper.get_resources(bigip, partition=partition) node_dict = {n.name: n for n in nodes} @@ -966,12 +968,17 @@ def purge_orphaned_nodes(self, tenant_members): for node_name in orphaned_nodes: try: - if self._is_orphan(bigip.device_name, node_name): + if self._is_orphan(bigip.device_name, node_name, True): + # determine members to delete + del_members = [] for omember in orphaned_members: if omember.address == node_name: - omember.delete() - node_helper = resource_helper.BigIPResourceHelper( - resource_helper.ResourceType.node) + del_members.append(omember) + # delete members and remove them from orphans list + for del_member in del_members: + orphaned_members.remove(del_member) + del_member.delete() + # delete node node_helper.delete(bigip, name=urllib.quote(node_name), partition=partition) self._remove_from_orphan_cache(bigip.device_name, node_name) From f424758d823a11b03980149d6e11adf1ad920df5 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 18 Jul 2018 10:57:48 +0200 Subject: [PATCH 074/109] Fix --- f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 32fe7c7d0..1a3c4e387 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -968,7 +968,7 @@ def purge_orphaned_nodes(self, tenant_members): for node_name in orphaned_nodes: try: - if self._is_orphan(bigip.device_name, node_name, True): + if self._is_orphan(bigip.device_name, node_name): # determine members to delete del_members = [] for omember in orphaned_members: From a282abdc37af31ec79c2c1727e59a4286068c709 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 19 Jul 2018 14:14:15 +0200 Subject: [PATCH 075/109] LBaaS: Get orphan members with wrong port numbers --- .../lbaasv2/drivers/bigip/icontrol_driver.py | 
127 +++++++++--------- 1 file changed, 66 insertions(+), 61 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 1a3c4e387..78202fdc5 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -67,6 +67,9 @@ from f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address import \ VirtualAddress +import re +import time + LOG = logging.getLogger(__name__) @@ -898,7 +901,7 @@ def _is_orphan(self, device_name, id): if key in self.orphan_cache: if self.orphan_cache[key] >= 2: if self.orphan_cleanup_testrun: - LOG.info('ccloud: Orphan TESTRUN: object %s marked for deletion %d times. Object would have be deleted NOW' % (key, self.orphan_cache[key])) + LOG.info('ccloud: Orphan TESTRUN: object %s marked for deletion %d times. Object would have been deleted NOW' % (key, self.orphan_cache[key])) del self.orphan_cache[key] return False else: @@ -936,75 +939,77 @@ def get_orphans_cache(self): @is_connected @log_helpers.log_method_call def purge_orphaned_nodes(self, tenant_members): - + # This algotithm is not able to determine nodes and members with an rd which isn't right fot the tenant, but + # it detects at least nodes and members without rd in case of a non global routed scenario node_helper = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.node) pool_helper = resource_helper.BigIPResourceHelper(resource_helper.ResourceType.pool) - for bigip in self.get_all_bigips(): for tenant_id, members in tenant_members.iteritems(): partition = self.service_adapter.prefix + tenant_id nodes = node_helper.get_resources(bigip, partition=partition) - node_dict = {n.name: n for n in nodes} - # extract member addresses (they don't have rd info) into array - member_addresses = [] - for m in members: - member_addresses.append(m['address']) - # get all pool members from f5 to get rid of the ones not 
defined in openstack pools = pool_helper.get_resources(bigip, partition=partition) - orphaned_members = [] + allf5members = [] + orphan_members = [] + orphan_nodes = [] + # All f5 members without rd or which are not part of os members are orphan for pool in pools: - xmembers = pool.members_s.get_collection() - for xmember in xmembers: - if xmember.address.split('%')[0] not in member_addresses: - orphaned_members.append(xmember) - # all nodes are orphaned which aren't referenced as member - orphaned_nodes = [] - for n in nodes: - if (not self.conf.f5_global_routed_mode) and ('%' not in n.address): - orphaned_nodes.append(n.name) - elif not n.address.split('%')[0] in member_addresses: - orphaned_nodes.append(n.name) - - for node_name in orphaned_nodes: - try: - if self._is_orphan(bigip.device_name, node_name): - # determine members to delete - del_members = [] - for omember in orphaned_members: - if omember.address == node_name: - del_members.append(omember) - # delete members and remove them from orphans list - for del_member in del_members: - orphaned_members.remove(del_member) - del_member.delete() - # delete node - node_helper.delete(bigip, name=urllib.quote(node_name), - partition=partition) - self._remove_from_orphan_cache(bigip.device_name, node_name) - except HTTPError as error: - if error.response.status_code == 400: - LOG.error(error.response) - except Exception as err: - LOG.debug('ccloud: orphaned nodes --> {}'.format(orphaned_nodes)) - LOG.debug('ccloud: orphaned members --> {}'.format(orphaned_members)) - LOG.exception(err) - raise err - - # for member in members: - # - # rd = self.network_builder.find_subnet_route_domain( - # tenant_id, member.get('subnet_id', None)) - # node_name = "{}%{}".format(member['address'], rd) - # node_dict.pop(node_name, None) - # - # for node_name, node in node_dict.iteritems(): - # try: - # node_helper.delete(bigip, name=urllib.quote(node_name), - # partition=partition) - # except HTTPError as error: - # if 
error.response.status_code == 400: - # LOG.error(error.response) + f5members = pool.members_s.get_collection() + # create cross pool membership list for verifying if node is used somewhere as member + allf5members.extend(f5members) + for f5member in f5members: + orphan_members.append(f5member) + for member in members: + # check with or without rd + if (not self.conf.f5_global_routed_mode): + if re.match(r"({0})(%\d+)(:{1})".format(member['address'], member['protocol_port']), f5member.name): + orphan_members.remove(f5member) + break + else: + if re.match(r"({0})(:{1})".format(member['address'], member['protocol_port']), f5member.name): + orphan_members.remove(f5member) + break + # All f5 nodes without rd or which are not used inside any membership are orphan + for node in nodes: + orphan_nodes.append(node.address) + # Node with no route id is orphan + if (not self.conf.f5_global_routed_mode) and ('%' not in node.address): + continue + for f5member in allf5members: + if node.address == f5member.address: + orphan_nodes.remove(node.address) + break + + # Log the determined orphans + orphan_member_names = [] + for omember in orphan_members: + orphan_member_names.append(omember.name) + LOG.debug('ccloud: Deleting orphan nodes --> {0}'.format(orphan_nodes)) + LOG.debug('ccloud: Deleting orphan members --> {0}'.format(orphan_member_names)) + + # Delete orphan members + for member in orphan_members: + member_name = member.name + if self._is_orphan(bigip.device_name, member_name): + try: + member.delete() + self._remove_from_orphan_cache(bigip.device_name, member_name) + time.sleep(1) + except HTTPError as error: + LOG.warning("ccloud: Failed to delete orphan member %s: %s", (member_name, error.response)) + except Exception as err: + LOG.error("ccloud: Error - Failed to delete orphan member %s: %s", (member_name, err.response)) + # Delete orphan nodes + for node in orphan_nodes: + if self._is_orphan(bigip.device_name, node): + try: + node_helper.delete(bigip, 
name=urllib.quote(node), partition=partition) + self._remove_from_orphan_cache(bigip.device_name, node) + time.sleep(1) + except HTTPError as error: + LOG.warning("ccloud: Failed to delete orphan node %s: %s", (node, error.response)) + except Exception as err: + LOG.error("ccloud: Error - Failed to delete orphan member %s: %s", (node, err.response)) @serialized('get_all_deployed_pools') @is_connected From f576bc66d11ea285b37314edecc18ed0c82b6c02 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 20 Jul 2018 09:35:29 +0200 Subject: [PATCH 076/109] Fix 'Module not callable' because of module time double import --- .../lbaasv2/drivers/bigip/icontrol_driver.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 78202fdc5..994bb0888 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -68,8 +68,6 @@ VirtualAddress import re -import time - LOG = logging.getLogger(__name__) @@ -984,8 +982,10 @@ def purge_orphaned_nodes(self, tenant_members): orphan_member_names = [] for omember in orphan_members: orphan_member_names.append(omember.name) - LOG.debug('ccloud: Deleting orphan nodes --> {0}'.format(orphan_nodes)) - LOG.debug('ccloud: Deleting orphan members --> {0}'.format(orphan_member_names)) + if len(orphan_nodes) > 0: + LOG.debug('ccloud: Deleting orphan nodes --> {0}'.format(orphan_nodes)) + if len(orphan_member_names) > 0: + LOG.debug('ccloud: Deleting orphan members --> {0}'.format(orphan_member_names)) # Delete orphan members for member in orphan_members: @@ -994,7 +994,6 @@ def purge_orphaned_nodes(self, tenant_members): try: member.delete() self._remove_from_orphan_cache(bigip.device_name, member_name) - time.sleep(1) except HTTPError as error: LOG.warning("ccloud: Failed to delete orphan member %s: %s", (member_name, 
error.response)) except Exception as err: @@ -1005,11 +1004,11 @@ def purge_orphaned_nodes(self, tenant_members): try: node_helper.delete(bigip, name=urllib.quote(node), partition=partition) self._remove_from_orphan_cache(bigip.device_name, node) - time.sleep(1) except HTTPError as error: LOG.warning("ccloud: Failed to delete orphan node %s: %s", (node, error.response)) except Exception as err: LOG.error("ccloud: Error - Failed to delete orphan member %s: %s", (node, err.response)) + return True @serialized('get_all_deployed_pools') @is_connected From 78c393195bae6e69564dbf1e1db5683652b80485 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 24 Jul 2018 11:17:00 +0200 Subject: [PATCH 077/109] LBaaS: Don't abort orphan cleanup in case of a snat pool deletion error. Schedule lb's to overloaded agent in case partition is already scheduled. Clean orphan cache every 24 h --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 7 +++++-- .../lbaasv2/drivers/bigip/icontrol_driver.py | 5 +++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 87301a8b7..c5742c958 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -502,7 +502,7 @@ def periodic_resync(self, context): LOG.info("ccloud - periodic_resync: Resync took {0} seconds".format((datetime.datetime.now() - now).seconds)) except Exception as e: - LOG.exception(("ccloud - Exception in periodic resync happend: " + str(e.message))) + LOG.exception("ccloud - Exception in periodic resync happend: " + str(e.message)) pass # ccloud: clean orphaned snat pools @@ -518,7 +518,10 @@ def clean_orphaned_snat_objects(self): for orphaned_snat in snat_pools: LOG.debug("sapcc: purging orphaned snat pool %s" % orphaned_snat.name) - orphaned_snat.delete() + try: + orphaned_snat.delete() + except Exception as 
e: + LOG.warning("sapcc: attempt made to purge orphaned snat pool which might be in use: " + str(e.message)) def find_in_collection(self, name, collection): for item in collection: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 994bb0888..cc2048476 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -885,11 +885,12 @@ def get_all_deployed_listeners(self, expand_subcollections=False): return deployed_virtual_dict def _is_orphan(self, device_name, id): - # clear cache every 48 hours - if (self.orphan_cache_last_reset + datetime.timedelta(hours=48)) < datetime.datetime.now(): + # clear cache every x hours + if (self.orphan_cache_last_reset + datetime.timedelta(hours=24)) < datetime.datetime.now(): LOG.info('ccloud: Orphan objects cache cleared to avoid orphan orphans :-)') self.orphan_cache_last_reset = datetime.datetime.now() self.orphan_cache.clear() + # check if orphan can be deleted or rise counter by 1 if not id or not device_name: return False From be5ef3e5386206162f0a4950480ea6ae9ff6f622 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 27 Jul 2018 11:41:28 +0200 Subject: [PATCH 078/109] LBaaS: Disable orphan cleanup on f5 device if missing loadbalancer agent bindings exist in neutron db. Otherwise we might delete active objects in data path. 
--- .../lbaasv2/drivers/bigip/agent_manager.py | 33 +++++++++++++++++-- .../lbaasv2/drivers/bigip/plugin_rpc.py | 23 +++++++++++++ 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index c5742c958..2cea2371d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -491,7 +491,8 @@ def periodic_resync(self, context): if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) < now: LOG.info("ccloud - orphans: Start cleaning orphan objects from F5 device") self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) - self.needs_resync = self.clean_orphaned_objects_and_save_device_config() + if self.clean_orphaned_objects_and_save_device_config(): + self.needs_resync = True LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(self.lbdriver.get_orphans_cache()), self.lbdriver.get_orphans_cache())) else: LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. 
Waiting another {0} seconds" @@ -768,6 +769,34 @@ def clean_orphaned_objects_and_save_device_config(self): cleaned = False try: + + unbound_loadbalancers = self.plugin_rpc.get_loadbalancers_without_agent_binding() + + if len(unbound_loadbalancers) > 0: + # create list with tenant and id for error logging + ubl = [[lb['tenant_id'], lb['id']] for lb in unbound_loadbalancers] + # verify if unbound lb's reside on this agent to give an idea for correction + # the host or unknown will be added to the unbounds + all_lbs = self.lbdriver.get_all_deployed_loadbalancers(purge_orphaned_folders=False) + for lbu in ubl: + if lbu[1] in all_lbs: + lbu.append("{0}".format(self.agent_host)) + + # Abort cleanup in case of non testrun, otherwise report errors and continue with testrun + if not self.conf.ccloud_orphans_cleanup_testrun: + LOG.error("ccloud: {2} Loadbalancers without an agent binding found. Orphan cleanup process aborted!!! Agent name: {1}. " + "Manual intervention needed to clarify state of unbound loadbalancers and where they should belong to. " + "If an agent name is given as 3rd argument the agent has detected that it is hosting the LB but binding in neutron DB is missing. " + "If no agent name is given, this agent doesn't host the LB, but maybe another one." + "The following loadbalancers without binding were found [tenant.id, lb.id, ]: {0}".format(ubl, self.agent_host, len(unbound_loadbalancers))) + return False + else: + LOG.error("ccloud: {2} Loadbalancers without an agent binding found.Orphan cleanup testrun will continue. Agent name: {1}. " + "Manual intervention needed to clarify state of unbound loadbalancers and where they should belong to. " + "If an agent name is given as 3rd argument the agent has detected that it is hosting the LB but binding in neutron DB is missing. " + "If no agent name is given, this agent doesn't host the LB, but maybe another one." 
+ "The following loadbalancers without binding were found [tenant.id, lb.id, ]: {0}".format(ubl, self.agent_host, len(unbound_loadbalancers))) + # # Global cluster refresh tasks # @@ -828,7 +857,7 @@ def clean_orphaned_objects_and_save_device_config(self): # serialize config and save to disk self.lbdriver.backup_configuration() except Exception as e: - LOG.error("Unable to sync state: %s" % e.message) + LOG.error("Unable to clean_orphaned_objects_and_save_device_config: %s" % e.message) cleaned = True return cleaned diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py index ac12335f6..d7a247bd9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py @@ -509,6 +509,29 @@ def get_pending_loadbalancers(self, env=None, group=None, host=None): return loadbalancers + @log_helpers.log_method_call + @utils.instrument_execution_time + def get_loadbalancers_without_agent_binding(self, env=None, group=None): + """Retrieve a list of loadbalancers without an agent binding in Neutron.""" + unbound_loadbalancers = [] + + if not env: + env = self.env + + try: + unbound_loadbalancers = self._call( + self.context, + self._make_msg('get_loadbalancers_without_agent_binding', + env=env, + group=group), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "get_loadbalancers_without_agent_binding") + + return unbound_loadbalancers + @log_helpers.log_method_call def get_loadbalancers_by_network(self, network_id, env=None,group=None,host=None): """Retrieve a list of loadbalancers for a network.""" From 6a4cffc1d3a837717945d4b4c67820d60ab35c2e Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Fri, 27 Jul 2018 14:08:00 +0200 Subject: [PATCH 079/109] LBaaS: Better message output for Missing Bindings --- .../lbaasv2/drivers/bigip/agent_manager.py | 13 +++++++++++-- 1 file changed, 
11 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 2cea2371d..15a7efb57 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -778,20 +778,29 @@ def clean_orphaned_objects_and_save_device_config(self): # verify if unbound lb's reside on this agent to give an idea for correction # the host or unknown will be added to the unbounds all_lbs = self.lbdriver.get_all_deployed_loadbalancers(purge_orphaned_folders=False) + hosted = 0 for lbu in ubl: if lbu[1] in all_lbs: + hosted += 1 lbu.append("{0}".format(self.agent_host)) + if hosted > 0: + LOG.error("ccloud: MissingAgentBinding: {0} Loadbalancers with a missing neutron agent binding are hosted on this agent. " + "The binding to this agent {1} has to be repaired in neutron DB".format(hosted, self.agent_host)) + else: + LOG.warning("ccloud: MissingAgentBinding: NO Loadbalancers with a missing neutron agent binding are hosted on this agent {1}. " + "These LBs might be deleted in neutron DB if no other agent is hosting them.".format(hosted, self.agent_host)) + # Abort cleanup in case of non testrun, otherwise report errors and continue with testrun if not self.conf.ccloud_orphans_cleanup_testrun: - LOG.error("ccloud: {2} Loadbalancers without an agent binding found. Orphan cleanup process aborted!!! Agent name: {1}. " + LOG.error("ccloud: MissingAgentBinding: {2} Loadbalancers without an agent binding found. Orphan cleanup process aborted!!! Agent name: {1}. " "Manual intervention needed to clarify state of unbound loadbalancers and where they should belong to. " "If an agent name is given as 3rd argument the agent has detected that it is hosting the LB but binding in neutron DB is missing. " "If no agent name is given, this agent doesn't host the LB, but maybe another one." 
"The following loadbalancers without binding were found [tenant.id, lb.id, ]: {0}".format(ubl, self.agent_host, len(unbound_loadbalancers))) return False else: - LOG.error("ccloud: {2} Loadbalancers without an agent binding found.Orphan cleanup testrun will continue. Agent name: {1}. " + LOG.error("ccloud: MissingAgentBinding: {2} Loadbalancers without an agent binding found.Orphan cleanup testrun will continue. Agent name: {1}. " "Manual intervention needed to clarify state of unbound loadbalancers and where they should belong to. " "If an agent name is given as 3rd argument the agent has detected that it is hosting the LB but binding in neutron DB is missing. " "If no agent name is given, this agent doesn't host the LB, but maybe another one." From 7e5f765f217c92e7496a7cbcec924aa13ec1738f Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 7 Aug 2018 09:24:26 +0200 Subject: [PATCH 080/109] LBaaS: Lower the level of some error messages in F5 agent to warning/info --- .../lbaasv2/drivers/bigip/agent_manager.py | 6 +++++- .../lbaasv2/drivers/bigip/icontrol_driver.py | 10 +++++----- f5_openstack_agent/lbaasv2/drivers/bigip/snats.py | 5 ++++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 15a7efb57..68972433f 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -591,8 +591,12 @@ def sync_state(self): LOG.debug("currently known loadbalancer ids before sync are: %s" % list(known_services)) + # ccloud: Get rid of 'Cached service not found in neutron database' message + # Clear cache entry if not found in neutron. 
In case of a temp issue + # lb will be added again with next sync for deleted_lb in owned_services - all_loadbalancer_ids: - LOG.error("Cached service not found in neutron database") + self.cache.remove_by_loadbalancer_id(deleted_lb) + LOG.info("ccloud: Cached service not found in neutron database. Clearing cache for LB_id %s" % deleted_lb) # self.destroy_service(deleted_lb) # Validate each service we own, i.e. loadbalancers to which this diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index cc2048476..58e04f4f1 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -1682,7 +1682,7 @@ def _service_exists(self, service): for bigip in self.get_config_bigips(): # Does the tenant folder exist? if not self.system_helper.folder_exists(bigip, folder_name): - LOG.error("Folder %s does not exists on bigip: %s" % + LOG.warning("Folder %s does not exists on bigip: %s" % (folder_name, bigip.hostname)) return False @@ -1690,7 +1690,7 @@ def _service_exists(self, service): virtual_address = VirtualAddress(self.service_adapter, loadbalancer) if not virtual_address.exists(bigip): - LOG.error("Virtual address %s(%s) does not " + LOG.warning("Virtual address %s(%s) does not " "exists on bigip: %s" % (virtual_address.name, virtual_address.address, bigip.hostname)) @@ -1705,7 +1705,7 @@ def _service_exists(self, service): if not self.vs_manager.exists(bigip, name=virtual_server['name'], partition=folder_name): - LOG.error("Virtual /%s/%s not found on bigip: %s" % + LOG.warning("Virtual /%s/%s not found on bigip: %s" % (virtual_server['name'], folder_name, bigip.hostname)) return False @@ -1719,7 +1719,7 @@ def _service_exists(self, service): bigip, name=bigip_pool['name'], partition=folder_name): - LOG.error("Pool /%s/%s not found on bigip: %s" % + LOG.warning("Pool /%s/%s not found on bigip: %s" % 
(bigip_pool['name'], folder_name, bigip.hostname)) return False @@ -1731,7 +1731,7 @@ def _service_exists(self, service): monitor_ep = self._get_monitor_endpoint(bigip, svc) if not monitor_ep.exists(name=monitor['name'], partition=folder_name): - LOG.error("Monitor /%s/%s not found on bigip: %s" % + LOG.warning("Monitor /%s/%s not found on bigip: %s" % (monitor['name'], folder_name, bigip.hostname)) return False diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py index c9117c3b4..6d1758cb9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/snats.py @@ -350,7 +350,10 @@ def _delete_bigip_snats(self, bigip, subnetinfo, tenant_id, lb_id=None): except HTTPError as err: LOG.error("Update SNAT pool failed %s" % err.message) except HTTPError as err: - LOG.error("Failed to load SNAT pool %s" % err.message) + if err.response.status_code == 404: + LOG.info("ccloud: Failed to load SNAT pool for deletion %s" % err.message) + else: + LOG.error("Failed to load SNAT pool %s" % err.message) # Check if subnet in use by any tenants/snatpools. If in use, # add subnet to hints list of subnets in use. 
From 9cd9f8bbf25d98f48ee1862d273f109247337c83 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 7 Aug 2018 15:18:08 +0200 Subject: [PATCH 081/109] Fix start time issue if more than 3 env groups exist (as in qa now) --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 68972433f..03ce83bbf 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -250,7 +250,7 @@ def __init__(self, conf): self.orphans_cleanup_interval = 60 * orphans_interval # schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... x = self.orphans_cleanup_interval / 3 - t = [x*3, x*2, x] + t = [x*3, x*2, x, x*3, x*2, x, x*3, x*2, x, x*3, x*2, x] if start < 1: start = 1 self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[start-1] - 5) From 7df7c87e8f394137adc870e4191446b53f0978e5 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 31 Oct 2018 15:30:10 +0100 Subject: [PATCH 082/109] Fix creation of vs without rd. Make orphan cleanup more robust if partitions are missing on one group member. Fix orphan persistence profiles on F5 when listener gets deleted before its default pool with persistence setting is deleted. 
--- etc/neutron/services/f5/esd/esd.json | 6 ++++ .../lbaasv2/drivers/bigip/icontrol_driver.py | 10 +++++-- .../lbaasv2/drivers/bigip/lbaas_builder.py | 29 ++++++++++++------- .../lbaasv2/drivers/bigip/network_service.py | 28 +++++++++++------- 4 files changed, 50 insertions(+), 23 deletions(-) diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json index a095e4db8..3383e1baa 100644 --- a/etc/neutron/services/f5/esd/esd.json +++ b/etc/neutron/services/f5/esd/esd.json @@ -5,6 +5,12 @@ "lbaas_irule": ["proxy_protocol_2edF_v1_0"], "lbaas_one_connect": "" }, + "proxy_protocol_V2_e8f6_v1_0": { + "lbaas_fastl4": "", + "lbaas_ctcp": "tcp", + "lbaas_irule": ["cc_proxy_protocol_V2_e8f6_v1_0"], + "lbaas_one_connect": "" + }, "standard_tcp_a3de_v1_0": { "lbaas_fastl4": "", "lbaas_ctcp": "tcp", diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 58e04f4f1..950f1f6f2 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -945,9 +945,15 @@ def purge_orphaned_nodes(self, tenant_members): pool_helper = resource_helper.BigIPResourceHelper(resource_helper.ResourceType.pool) for bigip in self.get_all_bigips(): for tenant_id, members in tenant_members.iteritems(): + partition = self.service_adapter.prefix + tenant_id - nodes = node_helper.get_resources(bigip, partition=partition) - pools = pool_helper.get_resources(bigip, partition=partition) + try: + nodes = node_helper.get_resources(bigip, partition=partition) + pools = pool_helper.get_resources(bigip, partition=partition) + except Exception as err: + LOG.info('ccloud: Error in node or pool retrieval for partition %s: %s', (partition, err.response)) + continue + allf5members = [] orphan_members = [] orphan_nodes = [] diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py 
b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index c706fda5d..cc0de9c95 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -46,9 +46,9 @@ def __init__(self, conf, driver, l2_service=None): self.l2_service = l2_service self.service_adapter = driver.service_adapter self.listener_builder = listener_service.ListenerServiceBuilder(self, - self.service_adapter, - driver.cert_manager, - conf.f5_parent_ssl_profile) + self.service_adapter, + driver.cert_manager, + conf.f5_parent_ssl_profile) self.pool_builder = pool_service.PoolServiceBuilder( self.service_adapter ) @@ -180,8 +180,7 @@ def _assure_pools_created(self, service): for pool in pools: if pool['provisioning_status'] != plugin_const.PENDING_DELETE: - svc = {"loadbalancer": loadbalancer, - "pool": pool} + svc = {"loadbalancer": loadbalancer, "pool": pool} svc['members'] = self._get_pool_members(service, pool['id']) try: @@ -222,8 +221,7 @@ def _assure_pools_configured(self, service): for pool in pools: if pool['provisioning_status'] != plugin_const.PENDING_DELETE: - svc = {"loadbalancer": loadbalancer, - "pool": pool} + svc = {"loadbalancer": loadbalancer, "pool": pool} svc['members'] = self._get_pool_members(service, pool['id']) try: @@ -333,12 +331,12 @@ def _assure_members(self, service, all_subnet_hints): "pool": pool} if 'port' not in member and \ - member['provisioning_status'] != plugin_const.PENDING_DELETE: + member['provisioning_status'] != plugin_const.PENDING_DELETE: LOG.warning("Member definition does not include Neutron port") # delete member if pool is being deleted - if member['provisioning_status'] == plugin_const.PENDING_DELETE or\ - pool['provisioning_status'] == plugin_const.PENDING_DELETE: + if member['provisioning_status'] == plugin_const.PENDING_DELETE or \ + pool['provisioning_status'] == plugin_const.PENDING_DELETE: try: self.pool_builder.delete_member(svc, bigips) except Exception as err: @@ 
-431,6 +429,7 @@ def _assure_pools_deleted(self, service): except Exception as err: pool['provisioning_status'] = plugin_const.ERROR raise f5_ex.PoolDeleteException(err.message) + @utils.instrument_execution_time def _assure_listeners_deleted(self, service): if 'listeners' not in service: @@ -444,6 +443,14 @@ def _assure_listeners_deleted(self, service): if listener['provisioning_status'] == plugin_const.PENDING_DELETE: svc = {"loadbalancer": loadbalancer, "listener": listener} + # ccloud: try to delete persistence which might be attached to listener + # ignore errors, persistence might be used somewhere else if pool is used more than once as default + try: + self.listener_builder.remove_session_persistence( + svc, bigips) + except Exception: + pass + # delete the listener try: self.listener_builder.delete_listener(svc, bigips) except Exception as err: @@ -711,4 +718,4 @@ def get_esd(self, name): return None def is_esd(self, name): - return self.esd.get_esd(name) is not None + return self.esd.get_esd(name) is not None \ No newline at end of file diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index c75aeb7e4..53671db86 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -275,17 +275,25 @@ def _annotate_service_route_domains(self, service): if 'vip_address' in service['loadbalancer']: loadbalancer = service['loadbalancer'] - if 'network_id' in loadbalancer: - lb_network = self.service_adapter.get_network_from_service( - service, loadbalancer['network_id']) - vip_subnet = self.service_adapter.get_subnet_from_service( - service, loadbalancer['vip_subnet_id']) - self.assign_route_domain( - tenant_id, lb_network, vip_subnet) - rd_id = '%' + str(lb_network['route_domain_id']) - service['loadbalancer']['vip_address'] += rd_id + if 'network_id' in loadbalancer and loadbalancer['network_id']: + 
lb_network = self.service_adapter.get_network_from_service(service, loadbalancer['network_id']) + vip_subnet = self.service_adapter.get_subnet_from_service(service, loadbalancer['vip_subnet_id']) + self.assign_route_domain(tenant_id, lb_network, vip_subnet) + if 'route_domain_id' in lb_network and lb_network['route_domain_id']: + rd_id = '%' + str(lb_network['route_domain_id']) + if rd_id != '%0': + loadbalancer['vip_address'] += rd_id + else: + raise f5_ex.RouteDomainQueryException('ccloud: NETWORK-RDCHECK5 Global routing disabled but route domain ID 0 was found. Discarding ...') + else: + raise f5_ex.RouteDomainQueryException('ccloud: NETWORK-RDCHECK6 Global routing disabled but route domain ID could not be found for virtual_address member. Discarding ...') else: - service['loadbalancer']['vip_address'] += '%0' + if not self.conf.f5_global_routed_mode: + raise f5_ex.RouteDomainQueryException('ccloud: NETWORK-RDCHECK7 Global routing disabled but NO vip_address network ID given. Discarding ...') + else: + loadbalancer['vip_address'] += '%0' + LOG.info("ccloud: NETWORK-RDCHECK8 Using default Route Domain because of global routing %s" % loadbalancer['vip_address']) + def is_common_network(self, network): return self.l2_service.is_common_network(network) From 2e52a66ea9ec1f44afa5806a7173ce384cebf090 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 27 Nov 2018 15:02:45 +0100 Subject: [PATCH 083/109] LBaaS: Clean orphan cache more reliable after 24 hours --- .../lbaasv2/drivers/bigip/agent_manager.py | 3 ++- .../lbaasv2/drivers/bigip/icontrol_driver.py | 9 +++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 03ce83bbf..113f8d014 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -493,7 +493,8 @@ def periodic_resync(self, context): 
self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) if self.clean_orphaned_objects_and_save_device_config(): self.needs_resync = True - LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(self.lbdriver.get_orphans_cache()), self.lbdriver.get_orphans_cache())) + orphan_cache = self.lbdriver.get_orphans_cache() + LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(orphan_cache), orphan_cache)) else: LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) - now).seconds)) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 950f1f6f2..ce7e1925e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -884,13 +884,16 @@ def get_all_deployed_listeners(self, expand_subcollections=False): } return deployed_virtual_dict - def _is_orphan(self, device_name, id): - # clear cache every x hours + def _maintain_orphan_cache(self): + # clear cache every 24 hours to delete outdated objects if (self.orphan_cache_last_reset + datetime.timedelta(hours=24)) < datetime.datetime.now(): LOG.info('ccloud: Orphan objects cache cleared to avoid orphan orphans :-)') self.orphan_cache_last_reset = datetime.datetime.now() self.orphan_cache.clear() + def _is_orphan(self, device_name, id): + + self._maintain_orphan_cache() # check if orphan can be deleted or rise counter by 1 if not id or not device_name: return False @@ -931,7 +934,9 @@ def _remove_from_orphan_cache(self, device_name, id): pass return + def get_orphans_cache(self): + self._maintain_orphan_cache() 
return self.orphan_cache @serialized('purge_orphaned_nodes') From ddf6b854de9d52b5695826fce3db53f383521d45 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 16 Jan 2019 11:31:51 +0100 Subject: [PATCH 084/109] Fix error inside orphan cache log message Only one line fixed so that orphan cache is read upfront log message --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 113f8d014..b11dd0cec 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -493,7 +493,7 @@ def periodic_resync(self, context): self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) if self.clean_orphaned_objects_and_save_device_config(): self.needs_resync = True - orphan_cache = self.lbdriver.get_orphans_cache() + orphan_cache = self.lbdriver.get_orphans_cache() LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(orphan_cache), orphan_cache)) else: LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. 
Waiting another {0} seconds" From a138100072458e3951a813e04e1c3cc892241670 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Mon, 17 Dec 2018 16:24:41 +0100 Subject: [PATCH 085/109] Better robustness to sync ha pair members which are out of sync due to downtime --- etc/neutron/services/f5/esd/esd.json | 4 + .../lbaasv2/drivers/bigip/agent_manager.py | 51 +++--- .../lbaasv2/drivers/bigip/listener_service.py | 4 +- .../lbaasv2/drivers/bigip/pool_service.py | 152 +++++++++++++----- .../lbaasv2/drivers/bigip/tenants.py | 5 +- 5 files changed, 155 insertions(+), 61 deletions(-) diff --git a/etc/neutron/services/f5/esd/esd.json b/etc/neutron/services/f5/esd/esd.json index 3383e1baa..e1e2914aa 100644 --- a/etc/neutron/services/f5/esd/esd.json +++ b/etc/neutron/services/f5/esd/esd.json @@ -11,6 +11,10 @@ "lbaas_irule": ["cc_proxy_protocol_V2_e8f6_v1_0"], "lbaas_one_connect": "" }, + "fastl4_protocol_keepalive_dd2b_v1_0": { + "lbaas_fastl4": "cc_fastl4", + "lbaas_one_connect": "" + }, "standard_tcp_a3de_v1_0": { "lbaas_fastl4": "", "lbaas_ctcp": "tcp", diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index b11dd0cec..f576d3f75 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -165,6 +165,7 @@ def size(self): def put(self, service, agent_host): """Add a service to the cache.""" + LOG.warning("cloud: Loadbalancer put into cache %s", service['loadbalancer']['id']) port_id = service['loadbalancer'].get('vip_port_id', None) loadbalancer_id = service['loadbalancer']['id'] tenant_id = service['loadbalancer']['tenant_id'] @@ -464,10 +465,9 @@ def periodic_resync(self, context): LOG.info("ccloud - periodic_resync: Running sync tasks") if not self.needs_resync: self.needs_resync = True - LOG.debug( - 'Forcing resync of services on resync timer (%d seconds).' 
- % self.service_resync_interval) + LOG.debug('Forcing resync of services on resync timer (%d seconds).' % self.service_resync_interval) self.cache.services = {} + LOG.warning("ccloud: cache cleared: service cache = ".format(self.cache.services)) self.last_resync = now self.lbdriver.flush_cache() LOG.debug("periodic_sync: service_resync_interval expired: %s" @@ -483,7 +483,10 @@ def periodic_resync(self, context): self.needs_resync = True if self.sync_state(): self.needs_resync = True - self.clean_orphaned_snat_objects() + try: + self.clean_orphaned_snat_objects() + except Exception as e: + LOG.warning("ccloud - Couldn't clear orphan snat objects because of : " + str(e.message)) else: LOG.info("ccloud - periodic_resync: Resync not needed! Discarding ...") @@ -510,20 +513,25 @@ def periodic_resync(self, context): # ccloud: clean orphaned snat pools @log_helpers.log_method_call def clean_orphaned_snat_objects(self): - virtual_addresses = self.lbdriver.get_all_virtual_addresses() - snat_pools = self.lbdriver.get_all_snat_pools() + try: + virtual_addresses = self.lbdriver.get_all_virtual_addresses() + snat_pools = self.lbdriver.get_all_snat_pools() + + for va in virtual_addresses: + snat_obj = self.find_in_collection(va.name.replace('Project_', 'lb_'), snat_pools) + if snat_obj is not None: + snat_pools.remove(snat_obj) + + for orphaned_snat in snat_pools: + LOG.debug("sapcc: purging orphaned snat pool %s" % orphaned_snat.name) + try: + orphaned_snat.delete() + except Exception as e: + LOG.warning("sapcc: attempt made to purge orphaned snat pool which might be in use: " + str(e.message)) - for va in virtual_addresses: - snat_obj = self.find_in_collection(va.name.replace('Project_', 'lb_'), snat_pools) - if snat_obj is not None: - snat_pools.remove(snat_obj) + except Exception as e: + LOG.warning("Unable to clean snat objects: %s" % e.message) - for orphaned_snat in snat_pools: - LOG.debug("sapcc: purging orphaned snat pool %s" % orphaned_snat.name) - try: - 
orphaned_snat.delete() - except Exception as e: - LOG.warning("sapcc: attempt made to purge orphaned snat pool which might be in use: " + str(e.message)) def find_in_collection(self, name, collection): for item in collection: @@ -560,6 +568,8 @@ def tunnel_sync(self): @utils.instrument_execution_time def sync_state(self): """Sync state of BIG-IP with that of the neutron database.""" + LOG.debug("manager:sync_state: Resync loadbalancers via validate_service") + resync = False known_services = set() @@ -591,6 +601,9 @@ def sync_state(self): % list(active_loadbalancer_ids)) LOG.debug("currently known loadbalancer ids before sync are: %s" % list(known_services)) + LOG.debug("ccloud: plugin got all loadbalancer ids as: %s" + % list(all_loadbalancer_ids)) + LOG.debug("ccloud: cache : {}".format(self.cache)) # ccloud: Get rid of 'Cached service not found in neutron database' message # Clear cache entry if not found in neutron. In case of a temp issue @@ -604,6 +617,7 @@ def sync_state(self): # agent is bound, that does not exist in our service cache. 
for lb_id in all_loadbalancer_ids: if not self.cache.get_by_loadbalancer_id(lb_id): + LOG.debug("ccloud: validate_service: %s" % lb_id) self.validate_service(lb_id) # This produces a list of loadbalancers with pending tasks to @@ -657,12 +671,12 @@ def sync_state(self): @log_helpers.log_method_call @utils.instrument_execution_time def validate_service(self, lb_id): - try: service = self.plugin_rpc.get_service_by_loadbalancer_id( lb_id ) self.cache.put(service, self.agent_host) + LOG.info("ccloud: validate_service - get service from rpc '{}' for sync".format(service)) if not self.lbdriver.service_exists(service) or \ self.has_provisioning_status_of_error(service): LOG.info("active loadbalancer '{}' is not on BIG-IP" @@ -723,6 +737,7 @@ def refresh_service(self, lb_id): lb_id ) self.cache.put(service, self.agent_host) + LOG.info("ccloud: refresh_service - get service from rpc '{}' for sync".format(service)) if self.lbdriver.sync(service): self.needs_resync = True except q_exception.NeutronException as exc: @@ -914,7 +929,7 @@ def purge_orphaned_listeners(self, listeners): """Deletes the hanging listeners from the deleted loadbalancers""" listener_status = self.plugin_rpc.validate_listeners_state( list(listeners.keys())) - LOG.debug('validated_pools_state returned: %s' + LOG.debug('validated_listeners_state returned: %s' % listener_status) for listenerid in listener_status: # If the listener status is Unknown, it no longer exists diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 42748203f..ec6861770 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -78,7 +78,7 @@ def create_listener(self, service, bigips): if err.response.status_code == 409: LOG.debug("Virtual server already exists updating") try: - self.update_listener(service, bigips) + self.update_listener(service, [bigip]) 
#self.vs_helper.update(bigip, vip) except Exception as e: LOG.warn("Update triggered in create failed, this could be due to timing issues in assure_service") @@ -855,6 +855,8 @@ def apply_esds(self, service): if 'lbaas_fastl4' in esd: if esd['lbaas_fastl4']=='': fastl4= {} + else: + fastl4 = {'partition': 'Common', 'name': esd['lbaas_fastl4'], 'context': 'all'} if len(stcp_profiles)==0: if 'lbaas_stcp' in esd: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index 949f190da..a9c596687 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -53,8 +53,17 @@ def create_pool(self, service, bigips): :param bigips: Array of BigIP class instances to create pool. """ pool = self.service_adapter.get_pool(service) + ex = None for bigip in bigips: - self.pool_helper.create(bigip, pool) + try: + self.pool_helper.create(bigip, pool) + LOG.warning("Pool created: %s", pool['name']) + except HTTPError as err: + LOG.info("Pool creation FAILED: %s", pool['name']) + ex = err + if ex: + raise err + def delete_pool(self, service, bigips): """Delete a pool on set of BIG-IPs. @@ -66,11 +75,19 @@ def delete_pool(self, service, bigips): :param bigips: Array of BigIP class instances to delete pool. """ pool = self.service_adapter.get_pool(service) - + ex = None for bigip in bigips: - self.pool_helper.delete(bigip, - name=pool["name"], - partition=pool["partition"]) + try: + self.pool_helper.delete(bigip, + name=pool["name"], + partition=pool["partition"]) + LOG.info("Pool deleted: %s", pool['name']) + except HTTPError as err: + LOG.info("Pool deletion FAILED: %s", pool['name']) + ex = err + if ex: + raise err + def update_pool(self, service, bigips): """Update BIG-IP pool. @@ -80,8 +97,17 @@ def update_pool(self, service, bigips): :param bigips: Array of BigIP class instances to create pool. 
""" pool = self.service_adapter.get_pool(service) + ex = None for bigip in bigips: - self.pool_helper.update(bigip, pool) + try: + self.pool_helper.update(bigip, pool) + LOG.info("Pool updated FAILED: %s", pool['name']) + except HTTPError as err: + LOG.info("Pool update FAILED: %s", pool['name']) + ex = err + if ex: + raise err + def create_healthmonitor(self, service, bigips): # create member @@ -89,11 +115,19 @@ def create_healthmonitor(self, service, bigips): hm_helper = self._get_monitor_helper(service) pool = self.service_adapter.get_pool(service) + ex = None for bigip in bigips: - hm_helper.create(bigip, hm) + try: + hm_helper.create(bigip, hm) + # update pool with new health monitor + self.pool_helper.update(bigip, pool) + LOG.info("Health Monitor created: %s", hm['name']) + except HTTPError as err: + LOG.info("Health Monitor creation FAILED: %s", hm['name']) + ex = err + if ex: + raise err - # update pool with new health monitor - self.pool_helper.update(bigip, pool) def delete_healthmonitor(self, service, bigips): # delete health monitor @@ -104,25 +138,39 @@ def delete_healthmonitor(self, service, bigips): pool = self.service_adapter.get_pool(service) pool["monitor"] = "" + ex = None for bigip in bigips: - # need to first remove monitor reference from pool - self.pool_helper.update(bigip, pool) - - # after updating pool, delete monitor - hm_helper.delete(bigip, - name=hm["name"], - partition=hm["partition"]) + try: + # need to first remove monitor reference from pool + self.pool_helper.update(bigip, pool) + # after updating pool, delete monitor + hm_helper.delete(bigip, + name=hm["name"], + partition=hm["partition"]) + LOG.info("Health Monitor deleted: %s", hm['name']) + except HTTPError as err: + LOG.info("Health Monitor deletion FAILED: %s", hm['name']) + ex = err + if ex: + raise err def update_healthmonitor(self, service, bigips): hm = self.service_adapter.get_healthmonitor(service) hm_helper = self._get_monitor_helper(service) pool = 
self.service_adapter.get_pool(service) + ex = None for bigip in bigips: - hm_helper.update(bigip, hm) - - # update pool with new health monitor - self.pool_helper.update(bigip, pool) + try: + hm_helper.update(bigip, hm) + # update pool with new health monitor + self.pool_helper.update(bigip, pool) + LOG.info("Health Monitor updated: %s", hm['name']) + except HTTPError as err: + LOG.info("Health Monitor update FAILED: %s", hm['name']) + ex = err + if ex: + raise err # Note: can't use BigIPResourceHelper class because members # are created within pool objects. Following member methods @@ -132,13 +180,22 @@ def create_member(self, service, bigips): member = self.service_adapter.get_member(service) if '%' not in member['address'] or '%0' in member['address']: LOG.error("ccloud: POOL-RDCHECK1 - trying to create member with address: %s", member['address']) + + ex = None for bigip in bigips: - part = pool["partition"] - p = self.pool_helper.load(bigip, - name=pool["name"], - partition=part) - m = p.members_s.members - m.create(**member) + try: + part = pool["partition"] + p = self.pool_helper.load(bigip, + name=pool["name"], + partition=part) + m = p.members_s.members + m.create(**member) + LOG.info("Member created: %s", member['address']) + except HTTPError as err: + LOG.info("Member creation FAILED: %s", member['address']) + ex = err + if ex: + raise err def delete_member(self, service, bigips): pool = self.service_adapter.get_pool(service) @@ -146,6 +203,8 @@ def delete_member(self, service, bigips): if '%' not in member['address'] or '%0' in member['address']: LOG.error("ccloud: POOL-RDCHECK2 - trying to create member with address: %s", member['address']) part = pool["partition"] + + ex = None for bigip in bigips: p = self.pool_helper.load(bigip, name=pool["name"], @@ -158,38 +217,53 @@ def delete_member(self, service, bigips): m = m.load(name=urllib.quote(member["name"]), partition=part) - m.delete() try: + m.delete() + LOG.info("Member deleted: %s", 
member['address']) + node = self.service_adapter.get_member_node(service) self.node_helper.delete(bigip, name=urllib.quote(node["name"]), partition=node["partition"]) + LOG.info("Node deleted: %s", node["name"]) + except HTTPError as err: # Possilbe error if node is shared with another member. # If so, ignore the error. if err.response.status_code == 400: LOG.debug(err.message) else: - raise + LOG.info("Member or Node deletion FAILED: %s", member['address']) + ex = err + if ex: + raise err def update_member(self, service, bigips): pool = self.service_adapter.get_pool(service) member = self.service_adapter.get_member(service) if '%' not in member['address'] or '%0' in member['address']: LOG.error("ccloud: POOL-RDCHECK3 - trying to create member with address: %s", member['address']) - part = pool["partition"] - for bigip in bigips: - p = self.pool_helper.load(bigip, - name=pool["name"], - partition=part) - m = p.members_s.members - if m.exists(name=urllib.quote(member["name"]), partition=part): - m = m.load(name=urllib.quote(member["name"]), - partition=part) - member.pop("address", None) - m.modify(**member) + ex = None + for bigip in bigips: + try: + p = self.pool_helper.load(bigip, + name=pool["name"], + partition=part) + + m = p.members_s.members + if m.exists(name=urllib.quote(member["name"]), partition=part): + m = m.load(name=urllib.quote(member["name"]), + partition=part) + member.pop("address", None) + m.modify(**member) + #LOG.info("Member updated: %s", member['address']) + except HTTPError as err: + #LOG.info("Member update FAILED: %s", member['address']) + ex = err + if ex: + raise err def _get_monitor_helper(self, service): monitor_type = self.service_adapter.get_monitor_type(service) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py index 0a43f27e6..130292d3b 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py @@ 
-149,12 +149,11 @@ def _remove_tenant_replication_mode(self, bigip, tenant_id, network_id): network_id) except Exception as err: LOG.info("Failed to delete route domain %s. " - "%s. Manual intervention might be required." - % (network_id, err.message)) + "Manual intervention might be required." % (network_id)) try: self.system_helper.delete_folder(bigip, partition) except Exception as err: LOG.info( - "Folder deletion exception for tenant partition %s occurred. " + "Folder deletion failed for tenant partition %s. " "Manual cleanup might be required." % (tenant_id)) \ No newline at end of file From b1d05fb0340f6101cb0b84736876c8da35440922 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 16 Jan 2019 14:24:30 +0100 Subject: [PATCH 086/109] Improved handling for inactive agents to support upgrade of F5 devices better. Fixed wrong health monitor timer settings and ignored http method. --- .../services/f5/f5-openstack-agent.ini | 1 + .../lbaasv2/drivers/bigip/agent.py | 2 +- .../lbaasv2/drivers/bigip/agent_manager.py | 483 +++++---- .../lbaasv2/drivers/bigip/constants_v2.py | 4 + .../lbaasv2/drivers/bigip/icontrol_driver.py | 951 ++++++++++++------ .../lbaasv2/drivers/bigip/lbaas_builder.py | 4 +- .../lbaasv2/drivers/bigip/lbaas_driver.py | 32 +- .../lbaasv2/drivers/bigip/network_service.py | 68 +- .../lbaasv2/drivers/bigip/plugin_rpc.py | 79 ++ .../lbaasv2/drivers/bigip/pool_service.py | 85 +- .../lbaasv2/drivers/bigip/service_adapter.py | 41 +- 11 files changed, 1194 insertions(+), 556 deletions(-) diff --git a/etc/neutron/services/f5/f5-openstack-agent.ini b/etc/neutron/services/f5/f5-openstack-agent.ini index e2e66cd45..a63ac03e9 100644 --- a/etc/neutron/services/f5/f5-openstack-agent.ini +++ b/etc/neutron/services/f5/f5-openstack-agent.ini @@ -586,3 +586,4 @@ os_project_domain_name = default # and if it does not exist on your BIG-IP system the agent will use the default # profile, clientssl. 
f5_parent_ssl_profile = cc_clientssl +f5_parent_https_monitor = /Common/cc_https \ No newline at end of file diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py index 61ffc9437..648425001 100755 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent.py @@ -72,7 +72,7 @@ def start(self): self.manager.run_periodic_tasks, None, None - ) # Hmmm.... "tg"? + ) # tg = olso_service thread group to run periodic tasks super(F5AgentService, self).start() diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index f576d3f75..ca904ff96 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -16,7 +16,7 @@ # import datetime -import uuid +import sys from random import randint from oslo_config import cfg @@ -28,12 +28,12 @@ from oslo_utils import importutils from neutron.agent import rpc as agent_rpc -from neutron.common import constants as plugin_const -from neutron.common import exceptions as q_exception from neutron.common import topics from neutron import context as ncontext from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron_lbaas.services.loadbalancer import constants as lb_const +from neutron_lib import constants as plugin_const +from neutron_lib import exceptions as q_exception from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 from f5_openstack_agent.lbaasv2.drivers.bigip import plugin_rpc @@ -44,6 +44,16 @@ # XXX OPTS is used in (at least) agent.py Maybe move/rename to agent.py OPTS = [ + cfg.IntOpt( + 'periodic_interval', + default=10, + help='Seconds between periodic task runs' + ), + cfg.BoolOpt( + 'start_agent_admin_state_up', + default=True, + help='Should the agent force its admin_state_up to True on boot' + ), cfg.StrOpt( # XXX should we use this with 
internal classes? 'f5_bigip_lbaas_device_driver', # XXX maybe remove "device" and "f5"? default=('f5_openstack_agent.lbaasv2.drivers.bigip.icontrol_driver.' @@ -116,6 +126,12 @@ help=( 'Amount of time to wait for a pending service to become active') ), + cfg.IntOpt( + 'f5_errored_services_timeout', + default=60, + help=( + 'Amount of time to wait for a errored service to become active') + ), cfg.FloatOpt( 'ccloud_orphans_cleanup_interval', default=0.0, @@ -129,6 +145,8 @@ ) ] +PERIODIC_TASK_INTERVAL = 60 + class LogicalServiceCache(object): """Manage a cache of known services.""" @@ -165,7 +183,6 @@ def size(self): def put(self, service, agent_host): """Add a service to the cache.""" - LOG.warning("cloud: Loadbalancer put into cache %s", service['loadbalancer']['id']) port_id = service['loadbalancer'].get('vip_port_id', None) loadbalancer_id = service['loadbalancer']['id'] tenant_id = service['loadbalancer']['tenant_id'] @@ -226,11 +243,15 @@ def __init__(self, conf): """Initialize LbaasAgentManager.""" super(LbaasAgentManager, self).__init__(conf) LOG.debug("Initializing LbaasAgentManager") + LOG.debug("runtime environment: %s" % sys.version) self.conf = conf self.context = ncontext.get_admin_context_without_session() self.serializer = None + global PERIODIC_TASK_INTERVAL + PERIODIC_TASK_INTERVAL = self.conf.periodic_interval + # Create the cache of provisioned services self.cache = LogicalServiceCache() self.last_resync = datetime.datetime.now() @@ -260,20 +281,25 @@ def __init__(self, conf): LOG.info('ccloud: Orphan cleanup first run will start at %s UTC', self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) self.needs_resync = False + # used after recovering of errored devices + self.forced_resync = False + self.forced_resync_tries = 0 self.plugin_rpc = None + self.tunnel_rpc = None + self.l2_pop_rpc = None + self.state_rpc = None self.pending_services = {} self.service_resync_interval = conf.service_resync_interval - - 
LOG.debug('Setting service resync interval to %d seconds' % + LOG.debug('setting service resync intervl to %d seconds' % self.service_resync_interval) # Set the agent ID if self.conf.agent_id: self.agent_host = self.conf.agent_id - LOG.debug('setting agent host to %s' % self.agent_host) else: self.agent_host = conf.host + LOG.debug('setting agent host to %s' % self.agent_host) # Load the iControl driver. self._load_driver(conf) @@ -291,7 +317,9 @@ def __init__(self, conf): if len(nv) > 1: agent_configurations[nv[0]] = nv[1] - # Initialize agent-state + # Initialize agent-state to a default values + self.admin_state_up = self.conf.start_agent_admin_state_up + self.agent_state = { 'binary': constants_v2.AGENT_BINARY_NAME, 'host': self.agent_host, @@ -302,20 +330,34 @@ def __init__(self, conf): 'start_flag': True } - self.admin_state_up = True - - # Set iControl driver context for RPC. - self.lbdriver.set_context(self.context) - - # Setup RPC: + # Setup RPC for communications to and from controller self._setup_rpc() - # Allow driver to run post init process not that the RPC is all setup. - self.lbdriver.post_init() + # Set driver context for RPC. 
+ self.lbdriver.set_context(self.context) + # Allow the driver to make callbacks to the LBaaS driver plugin + self.lbdriver.set_plugin_rpc(self.plugin_rpc) + # Allow the driver to update tunnel endpoints + self.lbdriver.set_tunnel_rpc(self.tunnel_rpc) + # Allow the driver to update forwarding records in the SDN + self.lbdriver.set_l2pop_rpc(self.l2_pop_rpc) + # Allow the driver to force and agent state report to the controller + self.lbdriver.set_agent_report_state(self._report_state) # Set the flag to resync tunnels/services self.needs_resync = True + # Mark this agent admin_state_up per startup policy + if(self.admin_state_up): + self.plugin_rpc.set_agent_admin_state(self.admin_state_up) + + # Start state reporting of agent to Neutron + report_interval = self.conf.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) + def _load_driver(self, conf): self.lbdriver = None @@ -325,21 +367,7 @@ def _load_driver(self, conf): self.lbdriver = importutils.import_object( conf.f5_bigip_lbaas_device_driver, self.conf) - - if self.lbdriver.initialized: - if not self.conf.agent_id: - # If not set statically, add the driver agent env hash - agent_hash = str( - uuid.uuid5(uuid.NAMESPACE_DNS, - self.conf.environment_prefix + - '.' + self.lbdriver.hostnames[0]) - ) - self.agent_host = conf.host + ":" + agent_hash - LOG.debug('setting agent host to %s' % self.agent_host) - else: - LOG.error('Driver did not initialize. 
Fix the driver config ' - 'and restart the agent.') - return + return except ImportError as ie: msg = ('Error importing loadbalancer device driver: %s error %s' % (conf.f5_bigip_lbaas_device_driver, repr(ie))) @@ -348,12 +376,20 @@ def _load_driver(self, conf): def _setup_rpc(self): - # LBaaS Plugin API + # + # Setting up outbound (callbacks) communications from agent + # + + # setup the topic to send oslo messages RPC calls + # from this agent to the controller topic = constants_v2.TOPIC_PROCESS_ON_HOST_V2 if self.conf.environment_specific_plugin: topic = topic + '_' + self.conf.environment_prefix LOG.debug('agent in %s environment will send callbacks to %s' % (self.conf.environment_prefix, topic)) + + # create our class we will use to send callbacks to the controller + # for processing by the driver plugin self.plugin_rpc = plugin_rpc.LBaaSv2PluginRPC( topic, self.context, @@ -362,67 +398,91 @@ def _setup_rpc(self): self.agent_host ) - # Allow driver to make callbacks using the - # same RPC proxy as the manager - self.lbdriver.set_plugin_rpc(self.plugin_rpc) + # + # Setting up outbound communcations with the neutron agent extension + # + self.state_rpc = agent_rpc.PluginReportStateAPI(topic) - self._setup_state_rpc(topic) + # + # Setting up all inbound notifications and outbound callbacks + # for standard neutron agent services: + # + # tunnel_sync - used to advertise the driver VTEP endpoints + # and optionally learn about other VTEP endpoints + # + # update - used to get updates to agent state triggered by + # the controller, like setting admin_state_up + # the agent + # + # l2_populateion - used to get updates on neutron SDN topology + # changes + # + # We only establish notification if we care about L2/L3 updates + # - # Setup message queues to listen for updates from - # Neutron. 
if not self.conf.f5_global_routed_mode: - # Core plugin - self.lbdriver.set_tunnel_rpc(agent_rpc.PluginApi(topics.PLUGIN)) + # notifications when tunnel endpoints get added + self.tunnel_rpc = agent_rpc.PluginApi(topics.PLUGIN) + + # define which controler notifications the agent comsumes consumers = [[constants_v2.TUNNEL, topics.UPDATE]] + + # if we are dynamically changing tunnel peers, + # register to recieve and send notificatoins via RPC if self.conf.l2_population: - # L2 Populate plugin Callbacks API - self.lbdriver.set_l2pop_rpc( - l2pop_rpc.L2populationAgentNotifyAPI()) + # communications of notifications from the + # driver to neutron for SDN topology changes + self.l2_pop_rpc = l2pop_rpc.L2populationAgentNotifyAPI() + # notification of SDN topology updates from the + # controller by adding to the general consumer list consumers.append( [topics.L2POPULATION, topics.UPDATE, self.agent_host] ) + # kick off the whole RPC process by creating + # a connection to the message bus self.endpoints = [self] - self.connection = agent_rpc.create_consumers( self.endpoints, topics.AGENT, consumers ) - def _setup_state_rpc(self, topic): - # Agent state API - self.state_rpc = agent_rpc.PluginReportStateAPI(topic) - report_interval = self.conf.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - heartbeat.start(interval=report_interval) - - def _report_state(self): - + def _report_state(self, force_resync=False): try: - # assure the agent is connected - # FIXME: what happens if we can't connect. 
- if not self.lbdriver.connected: - self.lbdriver.connect() - - service_count = self.cache.size - self.agent_state['configurations']['services'] = service_count - if hasattr(self.lbdriver, 'service_queue'): - self.agent_state['configurations']['request_queue_depth'] = ( - len(self.lbdriver.service_queue) - ) + if force_resync: + self.needs_resync = True + self.cache.services = {} + self.lbdriver.flush_cache() + # use the admin_state_up to notify the + # controller if all backend devices + # are functioning properly. If not + # automatically set the admin_state_up + # for this agent to False + if self.lbdriver: + if not self.lbdriver.backend_integrity(): + self.needs_resync = True + self.cache.services = {} + self.lbdriver.flush_cache() + self.plugin_rpc.set_agent_admin_state(False) + self.admin_state_up = False + else: + # if we are transitioning from down to up, + # change the controller state for this agent + if not self.admin_state_up: + self.plugin_rpc.set_agent_admin_state(True) + self.admin_state_up = True - # Add configuration from icontrol_driver. - if self.lbdriver.agent_configurations: + if self.lbdriver: self.agent_state['configurations'].update( - self.lbdriver.agent_configurations + self.lbdriver.get_agent_configurations() ) - # Compute the capacity score. 
+ # add the capacity score, used by the scheduler + # for horizontal scaling of an environment, from + # the driver if self.conf.capacity_policy: env_score = ( self.lbdriver.generate_capacity_score( @@ -430,7 +490,7 @@ def _report_state(self): ) ) self.agent_state['configurations'][ - 'environment_capacity_score'] = env_score + 'environment_capaciy_score'] = env_score else: self.agent_state['configurations'][ 'environment_capacity_score'] = 0 @@ -438,9 +498,14 @@ def _report_state(self): LOG.debug("reporting state of agent as: %s" % self.agent_state) self.state_rpc.report_state(self.context, self.agent_state) self.agent_state.pop('start_flag', None) + LOG.debug("ccloud: reporting state of agent succesfully done") + except Exception as e: LOG.exception(("Failed to report state: " + str(e.message))) + # callback from oslo messaging letting us know we are properly + # connected to the message bus so we can register for inbound + # messages to this agent def initialize_service_hook(self, started_by): """Create service hook to listen for messanges on agent topic.""" node_topic = "%s_%s.%s" % (constants_v2.TOPIC_LOADBALANCER_AGENT_V2, @@ -450,14 +515,69 @@ def initialize_service_hook(self, started_by): endpoints = [started_by.manager] started_by.conn.create_consumer( node_topic, endpoints, fanout=False) - self.sync_state() - @periodic_task.periodic_task(spacing=10) + @periodic_task.periodic_task(spacing=PERIODIC_TASK_INTERVAL, run_immediately=True) + def connect_driver(self, context): + """Trigger driver connect attempts to all devices.""" + if self.lbdriver: + self.lbdriver.connect() + + @periodic_task.periodic_task(spacing=(PERIODIC_TASK_INTERVAL/2)) + def recover_errored_devices(self, context): + """Try to reconnect to errored devices.""" + if self.lbdriver: + LOG.debug("running periodic task to recover disconnected BIG-IPs") + recovered = self.lbdriver.recover_errored_devices() + # clear the cache to resync everything in case of a recovery + if recovered: + 
self.needs_resync = True + self.forced_resync = True + + # Taken from actual mitaka for documentation purpose. Functionality also disabled in f5 neutron driver + # Disabled because it makes no sense to move all Objects from a device group to another only because agent is down. + # Changes can't be made to objects hosted by agents but traffic won't be affected. + # Movement would affect traffic because of ARP update issues in ASR + @periodic_task.periodic_task( + spacing=constants_v2.UPDATE_OPERATING_STATUS_INTERVAL) + def scrub_dead_agents_in_env_and_group(self, context): + """Triggering a dead agent scrub on the controller.""" + LOG.debug("ccloud: scrubbing - running periodic scrub_dead_agents_in_env_and_group for EnvGroup %s", self.conf.environment_group_number) + if not self.plugin_rpc: + return + + self.plugin_rpc.scrub_dead_agents(self.conf.environment_prefix, + self.conf.environment_group_number) + + @periodic_task.periodic_task( + spacing=constants_v2.UPDATE_OPERATING_STATUS_INTERVAL) + def update_operating_status(self, context): + """Update pool member operational status from devices to controller.""" + if not self.plugin_rpc: + return + + active_loadbalancers = \ + self.plugin_rpc.get_active_loadbalancers(host=self.agent_host) + for loadbalancer in active_loadbalancers: + if self.agent_host == loadbalancer['agent_host']: + try: + lb_id = loadbalancer['lb_id'] + LOG.debug( + 'getting operating status for loadbalancer %s.', lb_id) + svc = self.plugin_rpc.get_service_by_loadbalancer_id( + lb_id) + self.lbdriver.update_operating_status(svc) + + except Exception as e: + LOG.exception('Error updating status %s.', e.message) + + # setup a period task to decide if it is time empty the local service + # cache and resync service definitions form the controller + @periodic_task.periodic_task(spacing=PERIODIC_TASK_INTERVAL) def periodic_resync(self, context): + + """Determine if it is time to resync services from controller.""" try: - """Resync tunnels/service 
state.""" now = datetime.datetime.now() - LOG.debug("%s: periodic_resync called." % now) # Only force resync if the agent thinks it is # synchronized and the resync timer has exired @@ -473,7 +593,15 @@ def periodic_resync(self, context): LOG.debug("periodic_sync: service_resync_interval expired: %s" % str(self.needs_resync)) else: - LOG.info("ccloud - periodic_resync: Skipped because resync interval not expired. Waiting another {0} seconds".format((self.service_resync_interval - (now - self.last_resync ).seconds))) + LOG.info("ccloud - periodic_resync: Waiting another {0} seconds for a timer triggered resync".format((self.service_resync_interval - (now - self.last_resync ).seconds))) + + # use forced resync switch which is only set by recovering of errored F5 to guarantee sync + if self.forced_resync: + self.forced_resync_tries += 1 + self.needs_resync = True + self.cache.services = {} + self.lbdriver.flush_cache() + LOG.info("ccloud - periodic_resync: Resync enforced because of recovering of errored F5 device") # resync if we need to if self.needs_resync: @@ -483,6 +611,9 @@ def periodic_resync(self, context): self.needs_resync = True if self.sync_state(): self.needs_resync = True + if not self.needs_resync or self.forced_resync_tries > 2: + self.forced_resync = False + self.forced_resync_tries = 0 try: self.clean_orphaned_snat_objects() except Exception as e: @@ -539,63 +670,32 @@ def find_in_collection(self, name, collection): return item return None - @periodic_task.periodic_task(spacing=30) - def update_operating_status(self, context): - if not self.plugin_rpc: - return - - active_loadbalancers = \ - self.plugin_rpc.get_active_loadbalancers(host=self.agent_host) - for loadbalancer in active_loadbalancers: - if self.agent_host == loadbalancer['agent_host']: - try: - lb_id = loadbalancer['lb_id'] - LOG.debug( - 'getting operating status for loadbalancer %s.', lb_id) - svc = self.plugin_rpc.get_service_by_loadbalancer_id( - lb_id) - 
self.lbdriver.update_operating_status(svc) - - except Exception as e: - LOG.exception('Error updating status %s.', e.message) - def tunnel_sync(self): - """Call into driver to advertise tunnels.""" + """Call into driver to advertise device tunnel endpoints.""" LOG.debug("manager:tunnel_sync: calling driver tunnel_sync") return self.lbdriver.tunnel_sync() @log_helpers.log_method_call @utils.instrument_execution_time def sync_state(self): - """Sync state of BIG-IP with that of the neutron database.""" - LOG.debug("manager:sync_state: Resync loadbalancers via validate_service") - + """Synchronize device configuration from controller state.""" resync = False - known_services = set() - owned_services = set() - for lb_id, service in self.cache.services.iteritems(): - known_services.add(lb_id) - if self.agent_host == service.agent_host: - owned_services.add(lb_id) - now = datetime.datetime.now() + if hasattr(self, 'lbdriver'): + if not self.lbdriver.backend_integrity(): + return resync + + known_services, owned_services = self._all_vs_known_services() try: # Get loadbalancers from the environment which are bound to # this agent. - active_loadbalancers = ( - self.plugin_rpc.get_active_loadbalancers(host=self.agent_host) - ) - active_loadbalancer_ids = set( - [lb['lb_id'] for lb in active_loadbalancers] - ) - - all_loadbalancers = ( - self.plugin_rpc.get_all_loadbalancers(host=self.agent_host) - ) - all_loadbalancer_ids = set( - [lb['lb_id'] for lb in all_loadbalancers] - ) + active_loadbalancers, active_loadbalancer_ids = \ + self._get_remote_loadbalancers('get_active_loadbalancers', + host=self.agent_host) + all_loadbalancers, all_loadbalancer_ids = \ + self._get_remote_loadbalancers('get_all_loadbalancers', + host=self.agent_host) LOG.debug("plugin produced the list of active loadbalancer ids: %s" % list(active_loadbalancer_ids)) @@ -615,59 +715,79 @@ def sync_state(self): # Validate each service we own, i.e. 
loadbalancers to which this # agent is bound, that does not exist in our service cache. - for lb_id in all_loadbalancer_ids: - if not self.cache.get_by_loadbalancer_id(lb_id): - LOG.debug("ccloud: validate_service: %s" % lb_id) - self.validate_service(lb_id) - - # This produces a list of loadbalancers with pending tasks to - # be performed. - pending_loadbalancers = ( - self.plugin_rpc.get_pending_loadbalancers(host=self.agent_host) - ) - pending_lb_ids = set( - [lb['lb_id'] for lb in pending_loadbalancers] - ) - LOG.debug( - "plugin produced the list of pending loadbalancer ids: %s" - % list(pending_lb_ids)) - - for lb_id in pending_lb_ids: - lb_pending = self.refresh_service(lb_id) - if lb_pending: - if lb_id not in self.pending_services: - self.pending_services[lb_id] = now - - time_added = self.pending_services[lb_id] - time_expired = ((now - time_added).seconds > - self.conf.f5_pending_services_timeout) - - if time_expired: - lb_pending = False - self.service_timeout(lb_id) + self._validate_services(all_loadbalancer_ids) - if not lb_pending: - del self.pending_services[lb_id] - - # If there are services in the pending cache resync - if self.pending_services: - resync = True + resync = self._refresh_pending_services() # Get a list of any cached service we now know after # refreshing services - known_services = set() - for (lb_id, service) in self.cache.services.iteritems(): - if self.agent_host == service.agent_host: - known_services.add(lb_id) + owned_services, known_services = self._all_vs_known_services() LOG.debug("currently known loadbalancer ids after sync: %s" % list(known_services)) except Exception as e: - LOG.warning("Unable to retrieve service. 
Service might be deleted in between: %s" % e.message) + LOG.exception("Unable to sync state: %s" % e.message) resync = True return resync + def _all_vs_known_services(self): + all_services = set() + known_services = set() + for lb_id, service in self.cache.services.iteritems(): + all_services.add(lb_id) + if self.agent_host == service.agent_host: + known_services.add(lb_id) + return all_services, known_services + + def _refresh_pending_services(self): + now = datetime.datetime.now() + resync = False + # This produces a list of loadbalancers with pending tasks to + # be performed. + pending_loadbalancers, pending_lb_ids = \ + self._get_remote_loadbalancers('get_pending_loadbalancers', + host=self.agent_host) + LOG.debug( + "plugin produced the list of pending loadbalancer ids: %s" + % list(pending_lb_ids)) + + for lb_id in list(pending_lb_ids): + lb_pending = self.refresh_service(lb_id) + if lb_pending: + if lb_id not in self.pending_services: + self.pending_services[lb_id] = now + + time_added = self.pending_services[lb_id] + has_expired = bool((now - time_added).seconds > + self.conf.f5_pending_services_timeout) + + if has_expired: + lb_pending = False + self.service_timeout(lb_id) + + if not lb_pending: + try: + del self.pending_services[lb_id] + except KeyError as e: + LOG.error("LB not found in pending services: {0}".format( + e.message)) + + # If there are services in the pending cache resync + if self.pending_services: + resync = True + return resync + + def _get_remote_loadbalancers(self, plugin_rpc_attr, host=None): + loadbalancers = getattr(self.plugin_rpc, plugin_rpc_attr)(host=host) + lb_ids = [lb['lb_id'] for lb in loadbalancers] + return tuple(loadbalancers), set(lb_ids) + + def _validate_services(self, lb_ids): + for lb_id in lb_ids: + if not self.cache.get_by_loadbalancer_id(lb_id): + self.validate_service(lb_id) + @log_helpers.log_method_call @utils.instrument_execution_time def validate_service(self, lb_id): @@ -675,16 +795,25 @@ def 
validate_service(self, lb_id): service = self.plugin_rpc.get_service_by_loadbalancer_id( lb_id ) - self.cache.put(service, self.agent_host) - LOG.info("ccloud: validate_service - get service from rpc '{}' for sync".format(service)) - if not self.lbdriver.service_exists(service) or \ - self.has_provisioning_status_of_error(service): - LOG.info("active loadbalancer '{}' is not on BIG-IP" - " or has error state...syncing".format(lb_id)) + try: + found = True + if (not self.lbdriver.service_exists(service)) or self.has_provisioning_status_of_error(service): + LOG.warning("Active loadbalancer '{}' is not on BIG-IP" + " or has error state".format(lb_id)) + found = False + else: + LOG.debug("Found service definition for '{}', state is ACTIVE" + " move on.".format(lb_id)) + except Exception as ex: + #iControlUnexpectedHTTPError + LOG.warning("ccloud: Service %s not found on BIGip because of exception %s " % (lb_id, ex.message)) + found = False + # really not found or Exception happend: Try to fix it + if not found: + LOG.info("ccloud: Start syncing loadbalancer '{}'".format(lb_id)) self.lbdriver.sync(service) - else: - LOG.debug("Found service definition for '{}', state is ACTIVE" - " move on.".format(lb_id)) + LOG.info("ccloud: Finished syncing loadbalancer '{}'".format(lb_id)) + self.cache.put(service, self.agent_host) except f5_ex.InvalidNetworkType as exc: LOG.warning(exc.msg) except q_exception.NeutronException as exc: @@ -1269,15 +1398,15 @@ def delete_health_monitor(self, context, health_monitor, service): def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" if payload['admin_state_up'] != self.admin_state_up: + LOG.info("agent administration status updated %s!", payload) self.admin_state_up = payload['admin_state_up'] - if self.admin_state_up: - # FIXME: This needs to be changed back to True - self.needs_resync = False + # the agent transitioned to down to up and the + # driver reports healthy, trash the cache + # and force 
an update to update agent scheduler + if self.lbdriver.backend_integrity() and self.admin_state_up: + self._report_state(True) else: - for loadbalancer_id in self.cache.get_loadbalancer_ids(): - LOG.debug("DESTROYING loadbalancer: " + loadbalancer_id) - # self.destroy_service(loadbalancer_id) - LOG.info("agent_updated by server side %s!", payload) + self._report_state(False) @log_helpers.log_method_call def tunnel_update(self, context, **kwargs): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/constants_v2.py b/f5_openstack_agent/lbaasv2/drivers/bigip/constants_v2.py index b6c002046..bd8d797b8 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/constants_v2.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/constants_v2.py @@ -16,6 +16,8 @@ # Service resync interval RESYNC_INTERVAL = 300 +UPDATE_OPERATING_STATUS_INTERVAL = 30 + # Topic for tunnel notifications between the plugin and agent TUNNEL = 'tunnel' @@ -55,3 +57,5 @@ DEVICE_HEALTH_SCORE_CPS_WEIGHT = 1 DEVICE_HEALTH_SCORE_CPS_PERIOD = 5 DEVICE_HEALTH_SCORE_CPS_MAX = 100 + +DEVICE_CONNECTION_TIMEOUT = 5 diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index ce7e1925e..21320f354 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -15,15 +15,12 @@ # limitations under the License. 
# -#import pdb - import datetime import hashlib import json import logging as std_logging import os import urllib -import urllib2 from requests import HTTPError from eventlet import greenthread @@ -31,7 +28,6 @@ from time import time from neutron.common.exceptions import InvalidConfigurationOption -from neutron.common.exceptions import NeutronException from neutron.plugins.common import constants as plugin_const from neutron_lbaas.services.loadbalancer import constants as lb_const @@ -308,7 +304,13 @@ default='/Common/oneconnect', help='Default oneconnect profile for HTTP virtual servers' ), - + # ccloud: new parent monitor profile for https monitors for tmos > v13.1.1.2 + # default parent is standard https monitor. New onle will be /Common/cc_https + cfg.StrOpt( + 'f5_parent_https_monitor', + default='/Common/https', + help='Parent monitor for https monitors.' + ), cfg.StrOpt( 'os_tenant_name', default=None, @@ -322,21 +324,21 @@ ] -def is_connected(method): - # Decorator to check we are connected before provisioning. +def is_operational(method): + # Decorator to check we are operational before provisioning. def wrapper(*args, **kwargs): instance = args[0] - if instance.connected: + if instance.operational: try: return method(*args, **kwargs) except IOError as ioe: LOG.error('IO Error detected: %s' % method.__name__) - instance.connect_bigips() # what's this do? + LOG.error(str(ioe)) raise ioe else: - LOG.error('Cannot execute %s. Not connected. Connecting.' + LOG.error('Cannot execute %s. Not operational. Re-initializing.' 
% method.__name__) - instance.connect_bigips() + instance._init_bigips() return wrapper @@ -355,14 +357,30 @@ def __init__(self, conf, registerOpts=True): self.hostnames = None self.device_type = conf.f5_device_type self.plugin_rpc = None # overrides base, same value - self.__last_connect_attempt = None - self.connected = False # overrides base, same value + self.agent_report_state = None # overrides base, same value + self.operational = False # overrides base, same value self.driver_name = 'f5-lbaasv2-icontrol' + # # BIG-IP containers + # + + # BIG-IPs which currectly active self.__bigips = {} + self.__last_connect_attempt = None + + # HA and traffic group validation + self.ha_validated = False + self.tg_initialized = False + # traffic groups discovered from BIG-IPs for service placement self.__traffic_groups = [] + + # base configurations to report to Neutron agent state reports self.agent_configurations = {} # overrides base, same value + self.agent_configurations['device_drivers'] = [self.driver_name] + self.agent_configurations['icontrol_endpoints'] = {} + + # service component managers self.tenant_manager = None self.cluster_manager = None self.system_helper = None @@ -371,9 +389,12 @@ def __init__(self, conf, registerOpts=True): self.vlan_binding = None self.l3_binding = None self.cert_manager = None # overrides register_OPTS + + # server helpers self.stat_helper = stat_helper.StatHelper() self.network_helper = network_helper.NetworkHelper() + # f5-sdk helpers self.vs_manager = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.virtual) self.pool_manager = resource_helper.BigIPResourceHelper( @@ -385,95 +406,64 @@ def __init__(self, conf, registerOpts=True): self.orphan_cache_last_reset = datetime.datetime.now() self.orphan_cleanup_testrun = self.conf.ccloud_orphans_cleanup_testrun - if self.conf.trace_service_requests: - path = '/var/log/neutron/service/' - if not os.path.exists(path): - os.makedirs(path) - self.file_name = path + 
strftime("%H%M%S-%m%d%Y") + '.json' - with open(self.file_name, 'w') as fp: - fp.write('[{}] ') - - if self.conf.f5_global_routed_mode: - LOG.info('WARNING - f5_global_routed_mode enabled.' - ' There will be no L2 or L3 orchestration' - ' or tenant isolation provisioned. All vips' - ' and pool members must be routable through' - ' pre-provisioned SelfIPs.') - self.conf.use_namespaces = False - self.conf.f5_snat_mode = True - self.conf.f5_snat_addresses_per_subnet = 0 - self.agent_configurations['tunnel_types'] = [] - self.agent_configurations['bridge_mappings'] = {} - else: - self.agent_configurations['tunnel_types'] = \ - self.conf.advertised_tunnel_types - for net_id in self.conf.common_network_ids: - LOG.debug('network %s will be mapped to /Common/%s' - % (net_id, self.conf.common_network_ids[net_id])) - - self.agent_configurations['common_networks'] = \ - self.conf.common_network_ids - LOG.debug('Setting static ARP population to %s' - % self.conf.f5_populate_static_arp) - self.agent_configurations['f5_common_external_networks'] = \ - self.conf.f5_common_external_networks - f5const.FDB_POPULATE_STATIC_ARP = self.conf.f5_populate_static_arp - - self.agent_configurations['device_drivers'] = [self.driver_name] - self._init_bigip_hostnames() - self._init_bigip_managers() - self.connect_bigips() - - # After we have a connection to the BIG-IPs, initialize vCMP - if self.network_builder: - self.network_builder.initialize_vcmp() - - self.agent_configurations['network_segment_physical_network'] = \ - self.conf.f5_network_segment_physical_network - - LOG.info('iControlDriver initialized to %d bigips with username:%s' - % (len(self.__bigips), self.conf.icontrol_username)) - LOG.info('iControlDriver dynamic agent configurations:%s' - % self.agent_configurations) - - # read enhanced services definitions - esd_dir = os.path.join(self.get_config_dir(), 'esd') - esd = EsdTagProcessor(esd_dir) try: - esd.process_esd(self.get_all_bigips()) - self.lbaas_builder.init_esd(esd) - 
except f5ex.esdJSONFileInvalidException as err: - LOG.error("Unable to initialize ESD. Error: %s.", err.message) - - self.initialized = True - - def connect_bigips(self): - self._init_bigips() - if self.conf.f5_global_routed_mode: - local_ips = [] - else: - try: - local_ips = self.network_builder.initialize_tunneling() - except Exception: - LOG.error("Error creating BigIP VTEPs in connect_bigips") - raise - - self._init_agent_config(local_ips) - - def post_init(self): - # run any post initialized tasks, now that the agent - # is fully connected - if self.vlan_binding: - LOG.debug( - 'Getting BIG-IP device interface for VLAN Binding') - self.vlan_binding.register_bigip_interfaces() - if self.l3_binding: - LOG.debug('Getting BIG-IP MAC Address for L3 Binding') - self.l3_binding.register_bigip_mac_addresses() + # debug logging of service requests recieved by driver + if self.conf.trace_service_requests: + path = '/var/log/neutron/service/' + if not os.path.exists(path): + os.makedirs(path) + self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json' + with open(self.file_name, 'w') as fp: + fp.write('[{}] ') + + # driver mode settings - GRM vs L2 adjacent + if self.conf.f5_global_routed_mode: + LOG.info('WARNING - f5_global_routed_mode enabled.' + ' There will be no L2 or L3 orchestration' + ' or tenant isolation provisioned. 
All vips' + ' and pool members must be routable through' + ' pre-provisioned SelfIPs.') + self.conf.use_namespaces = False + self.conf.f5_snat_mode = True + self.conf.f5_snat_addresses_per_subnet = 0 + self.agent_configurations['tunnel_types'] = [] + self.agent_configurations['bridge_mappings'] = {} + else: + self.agent_configurations['tunnel_types'] = \ + self.conf.advertised_tunnel_types + for net_id in self.conf.common_network_ids: + LOG.debug('network %s will be mapped to /Common/%s' + % (net_id, self.conf.common_network_ids[net_id])) + + self.agent_configurations['common_networks'] = \ + self.conf.common_network_ids + LOG.debug('Setting static ARP population to %s' + % self.conf.f5_populate_static_arp) + self.agent_configurations['f5_common_external_networks'] = \ + self.conf.f5_common_external_networks + f5const.FDB_POPULATE_STATIC_ARP = \ + self.conf.f5_populate_static_arp + + # parse the icontrol_hostname setting + self._init_bigip_hostnames() + # instantiate the managers + self._init_bigip_managers() + + self.initialized = True + LOG.debug('iControlDriver loaded successfully') + except Exception as exc: + LOG.error("exception in intializing driver %s" % str(exc)) + self._set_agent_status(False) - if self.network_builder: - self.network_builder.post_init() + def connect(self): + # initialize communications wiht BIG-IP via iControl + try: + self._init_bigips() + except Exception as exc: + LOG.error("exception in intializing communications to BIG-IPs %s" + % str(exc)) + self._set_agent_status(False) def _init_bigip_managers(self): @@ -547,19 +537,36 @@ def _init_bigip_hostnames(self): self.hostnames = [item.strip() for item in self.hostnames] self.hostnames = sorted(self.hostnames) + # initialize per host agent_configurations + for hostname in self.hostnames: + self.__bigips[hostname] = bigip = type('', (), {})() + bigip.hostname = hostname + bigip.status = 'creating' + bigip.status_message = 'creating BIG-IP from iControl hostnames' + bigip.device_interfaces 
= dict() + self.agent_configurations[ + 'icontrol_endpoints'][hostname] = {} + self.agent_configurations[ + 'icontrol_endpoints'][hostname]['failover_state'] = \ + 'undiscovered' + self.agent_configurations[ + 'icontrol_endpoints'][hostname]['status'] = 'unknown' + self.agent_configurations[ + 'icontrol_endpoints'][hostname]['status_message'] = '' + def _init_bigips(self): # Connect to all BIG-IPs - if self.connected: + if self.operational: + LOG.debug('iControl driver reports connection is operational') return + LOG.debug('initializing communications to BIG-IPs') try: + # setup logging options if not self.conf.debug: - sudslog = std_logging.getLogger('suds.client') - sudslog.setLevel(std_logging.FATAL) requests_log = std_logging.getLogger( "requests.packages.urllib3") requests_log.setLevel(std_logging.ERROR) requests_log.propagate = False - else: requests_log = std_logging.getLogger( "requests.packages.urllib3") @@ -568,172 +575,447 @@ def _init_bigips(self): self.__last_connect_attempt = datetime.datetime.now() - first_bigip = self._open_bigip(self.hostnames[0]) - self._init_bigip(first_bigip, self.hostnames[0], None) - self.__bigips[self.hostnames[0]] = first_bigip - - device_group_name = self._validate_ha(first_bigip) - self._init_traffic_groups(first_bigip) - - # connect to the rest of the devices - for hostname in self.hostnames[1:]: + for hostname in self.hostnames: + # connect to each BIG-IP and set it status bigip = self._open_bigip(hostname) - self._init_bigip(bigip, hostname, device_group_name) - self.__bigips[hostname] = bigip - - self.connected = True - - except NeutronException as exc: - LOG.error('Could not communicate with all ' + - 'iControl devices: %s' % exc.msg) - greenthread.sleep(5) # this should probably go away + if bigip.status == 'connected': + # set the status down until we assure initialized + bigip.status = 'initializing' + bigip.status_message = 'initializing HA viability' + LOG.debug('initializing HA viability %s' % hostname) + 
device_group_name = None + if not self.ha_validated: + device_group_name = self._validate_ha(bigip) + LOG.debug('HA validated from %s with DSG %s' % + (hostname, device_group_name)) + self.ha_validated = True + if not self.tg_initialized: + self._init_traffic_groups(bigip) + LOG.debug('learned traffic groups from %s as %s' % + (hostname, self.__traffic_groups)) + self.tg_initialized = True + LOG.debug('initializing bigip %s' % hostname) + self._init_bigip(bigip, hostname, device_group_name) + LOG.debug('initializing agent configurations %s' + % hostname) + self._init_agent_config(bigip) + # Assure basic BIG-IP HA is operational + LOG.debug('validating HA state for %s' % hostname) + bigip.status = 'validating_HA' + bigip.status_message = 'validating the current HA state' + if self._validate_ha_operational(bigip): + LOG.debug('setting status to active for %s' % hostname) + bigip.status = 'active' + bigip.status_message = 'BIG-IP ready for provisioning' + self._post_init() + else: + LOG.debug('setting status to error for %s' % hostname) + bigip.status = 'error' + bigip.status_message = 'BIG-IP is not operational' + self._set_agent_status(False) + else: + LOG.error('error opening BIG-IP %s - %s:%s' + % (hostname, bigip.status, bigip.status_message)) + self._set_agent_status(False) + except Exception as exc: + LOG.error('Invalid agent configuration: %s' % exc.message) raise + self._set_agent_status(force_resync=True) + + def _init_errored_bigips(self): + try: + errored_bigips = self.get_errored_bigips_hostnames() + recovered = False + if errored_bigips: + LOG.debug('attempting to recover %s BIG-IPs' % + len(errored_bigips)) + for hostname in errored_bigips: + # try to connect and set status + bigip = self._open_bigip(hostname) + if bigip.status == 'connected': + # set the status down until we assure initialized + bigip.status = 'initializing' + bigip.status_message = 'initializing HA viability' + LOG.debug('initializing HA viability %s' % hostname) + 
LOG.debug('proceeding to initialize %s' % hostname) + device_group_name = None + if not self.ha_validated: + device_group_name = self._validate_ha(bigip) + LOG.debug('HA validated from %s with DSG %s' % + (hostname, device_group_name)) + self.ha_validated = True + if not self.tg_initialized: + self._init_traffic_groups(bigip) + LOG.debug('known traffic groups initialized', + ' from %s as %s' % + (hostname, self.__traffic_groups)) + self.tg_initialized = True + LOG.debug('initializing bigip %s' % hostname) + self._init_bigip(bigip, hostname, device_group_name) + LOG.debug('initializing agent configurations %s' + % hostname) + self._init_agent_config(bigip) + + # Assure basic BIG-IP HA is operational + LOG.debug('validating HA state for %s' % hostname) + bigip.status = 'validating_HA' + bigip.status_message = \ + 'validating the current HA state' + if self._validate_ha_operational(bigip): + LOG.debug('setting status to active for %s' + % hostname) + bigip.status = 'active' + bigip.status_message = \ + 'BIG-IP ready for provisioning' + self._post_init() + self._set_agent_status(True) + else: + LOG.debug('setting status to error for %s' + % hostname) + bigip.status = 'error' + bigip.status_message = 'BIG-IP is not operational' + self._set_agent_status(False) + recovered = True + else: + LOG.debug('there are no disconnected BIG-IPs to recover') except Exception as exc: - LOG.error('Could not communicate with all ' + - 'iControl devices: %s' % exc.message) - greenthread.sleep(5) # this should probably go away + LOG.error('Invalid agent configuration: %s' % exc.message) raise + return recovered def _open_bigip(self, hostname): # Open bigip connection """ - LOG.info('Opening iControl connection to %s @ %s' % - (self.conf.icontrol_username, hostname)) - - return ManagementRoot(hostname, - self.conf.icontrol_username, - self.conf.icontrol_password) + try: + bigip = self.__bigips[hostname] + if bigip.status not in ['creating', 'error']: + LOG.debug('BIG-IP %s status invalid 
%s to open a connection' + % (hostname, bigip.status)) + return bigip + bigip.status = 'connecting' + bigip.status_message = 'requesting iControl endpoint' + LOG.info('opening iControl connection to %s @ %s' % + (self.conf.icontrol_username, hostname)) + bigip = ManagementRoot(hostname, + self.conf.icontrol_username, + self.conf.icontrol_password, + timeout=f5const.DEVICE_CONNECTION_TIMEOUT) + bigip.status = 'connected' + bigip.status_message = 'connected to BIG-IP' + self.__bigips[hostname] = bigip + return bigip + except Exception as exc: + LOG.error('could not communicate with ' + + 'iControl device: %s' % hostname) + # since no bigip object was created, create a dummy object + # so we can store the status and status_message attributes + errbigip = type('', (), {})() + errbigip.hostname = hostname + errbigip.status = 'error' + errbigip.status_message = str(exc)[:80] + self.__bigips[hostname] = errbigip + return errbigip def _init_bigip(self, bigip, hostname, check_group_name=None): # Prepare a bigip for usage - - major_version, minor_version = self._validate_bigip_version( - bigip, hostname) - - device_group_name = None - extramb = self.system_helper.get_provision_extramb(bigip) - if int(extramb) < f5const.MIN_EXTRA_MB: - raise f5ex.ProvisioningExtraMBValidateFailed( - 'Device %s BIG-IP not provisioned for ' - 'management LARGE.' 
% hostname) - - if self.conf.f5_ha_type == 'pair' and \ - self.cluster_manager.get_sync_status(bigip) == 'Standalone': - raise f5ex.BigIPClusterInvalidHA( - 'HA mode is pair and bigip %s in standalone mode' - % hostname) - - if self.conf.f5_ha_type == 'scalen' and \ - self.cluster_manager.get_sync_status(bigip) == 'Standalone': - raise f5ex.BigIPClusterInvalidHA( - 'HA mode is scalen and bigip %s in standalone mode' - % hostname) - - if self.conf.f5_ha_type != 'standalone': - device_group_name = self.cluster_manager.get_device_group(bigip) - if not device_group_name: + try: + major_version, minor_version = self._validate_bigip_version( + bigip, hostname) + + device_group_name = None + extramb = self.system_helper.get_provision_extramb(bigip) + if int(extramb) < f5const.MIN_EXTRA_MB: + raise f5ex.ProvisioningExtraMBValidateFailed( + 'Device %s BIG-IP not provisioned for ' + 'management LARGE.' % hostname) + + if self.conf.f5_ha_type == 'pair' and \ + self.cluster_manager.get_sync_status(bigip) == \ + 'Standalone': raise f5ex.BigIPClusterInvalidHA( - 'HA mode is %s and no sync failover ' - 'device group found for device %s.' - % (self.conf.f5_ha_type, hostname)) - if check_group_name and device_group_name != check_group_name: + 'HA mode is pair and bigip %s in standalone mode' + % hostname) + + if self.conf.f5_ha_type == 'scalen' and \ + self.cluster_manager.get_sync_status(bigip) == \ + 'Standalone': raise f5ex.BigIPClusterInvalidHA( - 'Invalid HA. Device %s is in device group' - ' %s but should be in %s.' - % (hostname, device_group_name, check_group_name)) - bigip.device_group_name = device_group_name + 'HA mode is scalen and bigip %s in standalone mode' + % hostname) + + if self.conf.f5_ha_type != 'standalone': + device_group_name = \ + self.cluster_manager.get_device_group(bigip) + if not device_group_name: + raise f5ex.BigIPClusterInvalidHA( + 'HA mode is %s and no sync failover ' + 'device group found for device %s.' 
+ % (self.conf.f5_ha_type, hostname)) + if check_group_name and device_group_name != check_group_name: + raise f5ex.BigIPClusterInvalidHA( + 'Invalid HA. Device %s is in device group' + ' %s but should be in %s.' + % (hostname, device_group_name, check_group_name)) + bigip.device_group_name = device_group_name - if self.network_builder: - for network in self.conf.common_network_ids.values(): - if not self.network_builder.vlan_exists(bigip, - network, - folder='Common'): - raise f5ex.MissingNetwork( - 'Common network %s on %s does not exist' - % (network, bigip.hostname)) - - bigip.device_name = self.cluster_manager.get_device_name(bigip) - bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip) - LOG.debug("Initialized BIG-IP %s with MAC addresses %s" % - (bigip.device_name, ', '.join(bigip.mac_addresses))) - bigip.device_interfaces = \ - self.system_helper.get_interface_macaddresses_dict(bigip) - bigip.assured_networks = {} - bigip.assured_tenant_snat_subnets = {} - bigip.assured_gateway_subnets = [] - - if self.conf.f5_ha_type != 'standalone': - self.cluster_manager.disable_auto_sync(device_group_name, bigip) - - # Turn off tunnel syncing... 
our VTEPs are local SelfIPs - if self.system_helper.get_tunnel_sync(bigip) == 'enable': - self.system_helper.set_tunnel_sync(bigip, enabled=False) - - LOG.debug('Connected to iControl %s @ %s ver %s.%s' - % (self.conf.icontrol_username, hostname, - major_version, minor_version)) + if self.network_builder: + for network in self.conf.common_network_ids.values(): + if not self.network_builder.vlan_exists(bigip, + network, + folder='Common'): + raise f5ex.MissingNetwork( + 'Common network %s on %s does not exist' + % (network, bigip.hostname)) + bigip.device_name = self.cluster_manager.get_device_name(bigip) + bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip) + LOG.debug("Initialized BIG-IP %s with MAC addresses %s" % + (bigip.device_name, ', '.join(bigip.mac_addresses))) + bigip.device_interfaces = \ + self.system_helper.get_interface_macaddresses_dict(bigip) + bigip.assured_networks = {} + bigip.assured_tenant_snat_subnets = {} + bigip.assured_gateway_subnets = [] + + if self.conf.f5_ha_type != 'standalone': + self.cluster_manager.disable_auto_sync( + device_group_name, bigip) + + # validate VTEP SelfIPs + if not self.conf.f5_global_routed_mode: + self.network_builder.initialize_tunneling(bigip) + + # Turn off tunnel syncing between BIG-IP + # as our VTEPs properly use only local SelfIPs + if self.system_helper.get_tunnel_sync(bigip) == 'enable': + self.system_helper.set_tunnel_sync(bigip, enabled=False) + + LOG.debug('connected to iControl %s @ %s ver %s.%s' + % (self.conf.icontrol_username, hostname, + major_version, minor_version)) + except Exception as exc: + bigip.status = 'error' + bigip.status_message = str(exc)[:80] + raise return bigip - def _validate_ha(self, first_bigip): + def _post_init(self): + # After we have a connection to the BIG-IPs, initialize vCMP + # on all connected BIG-IPs + if self.network_builder: + self.network_builder.initialize_vcmp() + + self.agent_configurations['network_segment_physical_network'] = \ + 
self.conf.f5_network_segment_physical_network + + LOG.info('iControlDriver initialized to %d bigips with username:%s' + % (len(self.get_active_bigips()), + self.conf.icontrol_username)) + LOG.info('iControlDriver dynamic agent configurations:%s' + % self.agent_configurations) + + if self.vlan_binding: + LOG.debug( + 'getting BIG-IP device interface for VLAN Binding') + self.vlan_binding.register_bigip_interfaces() + + if self.l3_binding: + LOG.debug('getting BIG-IP MAC Address for L3 Binding') + self.l3_binding.register_bigip_mac_addresses() + + # endpoints = self.agent_configurations['icontrol_endpoints'] + # for ic_host in endpoints.keys(): + for hostbigip in self.get_all_bigips(): + + # hostbigip = self.__bigips[ic_host] + mac_addrs = [mac_addr for interface, mac_addr in + hostbigip.device_interfaces.items() + if interface != "mgmt"] + ports = self.plugin_rpc.get_ports_for_mac_addresses( + mac_addresses=mac_addrs) + if ports: + self.agent_configurations['nova_managed'] = True + else: + self.agent_configurations['nova_managed'] = False + + if self.network_builder: + self.network_builder.post_init() + + # read enhanced services definitions + esd_dir = os.path.join(self.get_config_dir(), 'esd') + esd = EsdTagProcessor(esd_dir) + try: + esd.process_esd(self.get_all_bigips()) + self.lbaas_builder.init_esd(esd) + #ccloud: self.service_adapter.init_esd(esd) + except f5ex.esdJSONFileInvalidException as err: + LOG.error("unable to initialize ESD. Error: %s.", err.message) + self._set_agent_status(False) + + def _validate_ha(self, bigip): # if there was only one address supplied and # this is not a standalone device, get the # devices trusted by this device. """ device_group_name = None if self.conf.f5_ha_type == 'standalone': if len(self.hostnames) != 1: + bigip.status = 'error' + bigip.status_message = \ + 'HA mode is standalone and %d hosts found.'\ + % len(self.hostnames) raise f5ex.BigIPClusterInvalidHA( 'HA mode is standalone and %d hosts found.' 
% len(self.hostnames)) + device_group_name = 'standalone' elif self.conf.f5_ha_type == 'pair': device_group_name = self.cluster_manager.\ - get_device_group(first_bigip) + get_device_group(bigip) if len(self.hostnames) != 2: mgmt_addrs = [] - devices = self.cluster_manager.devices(first_bigip, - device_group_name) + devices = self.cluster_manager.devices(bigip) for device in devices: mgmt_addrs.append( - self.cluster_manager.get_mgmt_addr_by_device(device)) + self.cluster_manager.get_mgmt_addr_by_device( + bigip, device)) self.hostnames = mgmt_addrs if len(self.hostnames) != 2: + bigip.status = 'error' + bigip.status_message = 'HA mode is pair and %d hosts found.' \ + % len(self.hostnames) raise f5ex.BigIPClusterInvalidHA( 'HA mode is pair and %d hosts found.' % len(self.hostnames)) elif self.conf.f5_ha_type == 'scalen': device_group_name = self.cluster_manager.\ - get_device_group(first_bigip) + get_device_group(bigip) if len(self.hostnames) < 2: mgmt_addrs = [] - devices = self.cluster_manager.devices(first_bigip, - device_group_name) + devices = self.cluster_manager.devices(bigip) for device in devices: mgmt_addrs.append( self.cluster_manager.get_mgmt_addr_by_device( - first_bigip, device)) + bigip, device) + ) self.hostnames = mgmt_addrs + if len(self.hostnames) < 2: + bigip.status = 'error' + bigip.status_message = 'HA mode is scale and 1 hosts found.' 
+ raise f5ex.BigIPClusterInvalidHA( + 'HA mode is pair and 1 hosts found.') return device_group_name - def _init_agent_config(self, local_ips): - # Init agent config - icontrol_endpoints = {} - for host in self.__bigips: - hostbigip = self.__bigips[host] - ic_host = {} - ic_host['version'] = self.system_helper.get_version(hostbigip) - ic_host['device_name'] = hostbigip.device_name - ic_host['platform'] = self.system_helper.get_platform(hostbigip) - ic_host['serial_number'] = self.system_helper.get_serial_number( - hostbigip) - icontrol_endpoints[host] = ic_host - - self.agent_configurations['tunneling_ips'] = local_ips - self.agent_configurations['icontrol_endpoints'] = icontrol_endpoints + def _validate_ha_operational(self, bigip): + if self.conf.f5_ha_type == 'standalone': + return True + else: + # how many active BIG-IPs are there? + active_bigips = self.get_active_bigips() + if active_bigips: + sync_status = self.cluster_manager.get_sync_status(bigip) + if sync_status in ['Disconnected', 'Sync Failure']: + if len(active_bigips) > 1: + # the device should not be in the disconnected state + return False + if len(active_bigips) > 1: + # it should be in the same sync-failover group + # as the rest of the active bigips + device_group_name = \ + self.cluster_manager.get_device_group(bigip) + for active_bigip in active_bigips: + adgn = self.cluster_manager.get_device_group( + active_bigip) + if not adgn == device_group_name: + return False + return True + else: + return True + def _init_agent_config(self, bigip): + # Init agent config + ic_host = {} + ic_host['version'] = self.system_helper.get_version(bigip) + ic_host['device_name'] = bigip.device_name + ic_host['platform'] = self.system_helper.get_platform(bigip) + ic_host['serial_number'] = self.system_helper.get_serial_number(bigip) + ic_host['status'] = bigip.status + ic_host['status_message'] = bigip.status_message + ic_host['failover_state'] = self.get_failover_state(bigip) + if hasattr(bigip, 'local_ip') and 
bigip.local_ip: + ic_host['local_ip'] = bigip.local_ip + else: + ic_host['local_ip'] = 'VTEP disabled' + self.agent_configurations['tunnel_types'] = list() + self.agent_configurations['icontrol_endpoints'][bigip.hostname] = \ + ic_host if self.network_builder: self.agent_configurations['bridge_mappings'] = \ self.network_builder.interface_mapping + def _set_agent_status(self, force_resync=False): + for hostname in self.__bigips: + bigip = self.__bigips[hostname] + self.agent_configurations[ + 'icontrol_endpoints'][bigip.hostname][ + 'status'] = bigip.status + self.agent_configurations[ + 'icontrol_endpoints'][bigip.hostname][ + 'status_message'] = bigip.status_message + # Policy - if any BIG-IP are active we're operational + if self.get_active_bigips(): + self.operational = True + else: + self.operational = False + if self.agent_report_state: + self.agent_report_state(force_resync=force_resync) + + def get_failover_state(self, bigip): + try: + if hasattr(bigip, 'tm'): + fs = bigip.tm.sys.dbs.db.load(name='failover.state') + bigip.failover_state = fs.value + return bigip.failover_state + else: + return 'error' + except Exception as exc: + LOG.exception('Error getting %s failover state' % bigip.hostname) + bigip.status = 'error' + bigip.status_message = str(exc)[:80] + self._set_agent_status(False) + return 'error' + + def get_agent_configurations(self): + for hostname in self.__bigips: + bigip = self.__bigips[hostname] + if bigip.status == 'active': + failover_state = self.get_failover_state(bigip) + self.agent_configurations[ + 'icontrol_endpoints'][bigip.hostname][ + 'failover_state'] = failover_state + else: + self.agent_configurations[ + 'icontrol_endpoints'][bigip.hostname][ + 'failover_state'] = 'unknown' + self.agent_configurations['icontrol_endpoints'][ + bigip.hostname]['status'] = bigip.status + self.agent_configurations['icontrol_endpoints'][ + bigip.hostname]['status_message'] = bigip.status_message + self.agent_configurations['operational'] = \ + 
self.operational + LOG.debug('agent configurations are: %s' % self.agent_configurations) + return dict(self.agent_configurations) + + def recover_errored_devices(self): + # trigger a retry on errored BIG-IPs + try: + return self._init_errored_bigips() + except Exception as exc: + LOG.error("Could not recover BIG-IPs: %s" % exc.message) + + def backend_integrity(self): + if self.operational: + return True + return False + def generate_capacity_score(self, capacity_policy=None): - """Generate the capacity score of connected devices """ + """Generate the capacity score of connected devices.""" if capacity_policy: highest_metric = 0.0 highest_metric_name = None @@ -746,14 +1028,17 @@ def generate_capacity_score(self, capacity_policy=None): metric_func = getattr(self, func_name) metric_value = 0 for bigip in bigips: - global_stats = \ - self.stat_helper.get_global_statistics(bigip) - value = int( - metric_func(bigip=bigip, - global_statistics=global_stats) - ) - LOG.debug('calling capacity %s on %s returned: %s' - % (func_name, bigip.hostname, value)) + if bigip.status == 'active': + global_stats = \ + self.stat_helper.get_global_statistics(bigip) + value = int( + metric_func(bigip=bigip, + global_statistics=global_stats) + ) + LOG.debug('calling capacity %s on %s returned: %s' + % (func_name, bigip.hostname, value)) + else: + value = 0 if value > metric_value: metric_value = value metric_capacity = float(metric_value) / float(max_capacity) @@ -788,6 +1073,10 @@ def set_l2pop_rpc(self, l2pop_rpc): if self.network_builder: self.network_builder.set_l2pop_rpc(l2pop_rpc) + def set_agent_report_state(self, report_state_callback): + """Set Agent Report State.""" + self.agent_report_state = report_state_callback + def service_exists(self, service): return self._service_exists(service) @@ -799,7 +1088,7 @@ def flush_cache(self): bigip.assured_gateway_subnets = [] @serialized('get_all_deployed_loadbalancers') - @is_connected + @is_operational def 
get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): LOG.debug('getting all deployed loadbalancers on BIG-IPs') deployed_lb_dict = {} @@ -849,7 +1138,7 @@ def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): return deployed_lb_dict @serialized('get_all_deployed_listeners') - @is_connected + @is_operational def get_all_deployed_listeners(self, expand_subcollections=False): LOG.debug('getting all deployed listeners on BIG-IPs') deployed_virtual_dict = {} @@ -940,7 +1229,7 @@ def get_orphans_cache(self): return self.orphan_cache @serialized('purge_orphaned_nodes') - @is_connected + @is_operational @log_helpers.log_method_call def purge_orphaned_nodes(self, tenant_members): # This algotithm is not able to determine nodes and members with an rd which isn't right fot the tenant, but @@ -1023,7 +1312,7 @@ def purge_orphaned_nodes(self, tenant_members): return True @serialized('get_all_deployed_pools') - @is_connected + @is_operational def get_all_deployed_pools(self): LOG.debug('getting all deployed pools on BIG-IPs') deployed_pool_dict = {} @@ -1064,7 +1353,7 @@ def get_all_deployed_pools(self): return deployed_pool_dict @serialized('purge_orphaned_pool') - @is_connected + @is_operational @log_helpers.log_method_call def purge_orphaned_pool(self, tenant_id=None, pool_id=None, hostnames=list()): @@ -1105,7 +1394,7 @@ def purge_orphaned_pool(self, tenant_id=None, pool_id=None, LOG.exception('Exception purging pool %s' % str(exc)) @serialized('get_all_deployed_monitors') - @is_connected + @is_operational def get_all_deployed_health_monitors(self): """Retrieve a list of all Health Monitors deployed""" LOG.debug('getting all deployed monitors on BIG-IP\'s') @@ -1140,7 +1429,7 @@ def get_all_deployed_health_monitors(self): return deployed_monitor_dict @serialized('purge_orphaned_health_monitor') - @is_connected + @is_operational @log_helpers.log_method_call def purge_orphaned_health_monitor(self, tenant_id=None, monitor_id=None, 
hostnames=list()): @@ -1176,7 +1465,7 @@ def purge_orphaned_health_monitor(self, tenant_id=None, monitor_id=None, LOG.exception('Exception purging monitor %s' % str(exc)) @serialized('get_all_deployed_l7_policys') - @is_connected + @is_operational def get_all_deployed_l7_policys(self): """Retrieve a dict of all l7policies deployed @@ -1218,7 +1507,7 @@ def get_all_deployed_l7_policys(self): return deployed_l7_policys_dict @serialized('purge_orphaned_l7_policy') - @is_connected + @is_operational @log_helpers.log_method_call def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None, hostnames=list(), listener_id=None): @@ -1259,7 +1548,7 @@ def purge_orphaned_l7_policy(self, tenant_id=None, l7_policy_id=None, '"{}"'.format(kwargs, exc)) @serialized('purge_orphaned_loadbalancer') - @is_connected + @is_operational @log_helpers.log_method_call def purge_orphaned_loadbalancer(self, tenant_id=None, loadbalancer_id=None, hostnames=list()): @@ -1311,7 +1600,7 @@ def purge_orphaned_loadbalancer(self, tenant_id=None, % str(exc)) @serialized('purge_orphaned_listener') - @is_connected + @is_operational @log_helpers.log_method_call def purge_orphaned_listener( self, tenant_id=None, listener_id=None, hostnames=[]): @@ -1334,20 +1623,20 @@ def purge_orphaned_listener( LOG.exception('Exception purging listener %s' % str(exc)) @serialized('create_loadbalancer') - @is_connected + @is_operational def create_loadbalancer(self, loadbalancer, service): """Create virtual server""" return self._common_service_handler(service) @serialized('update_loadbalancer') - @is_connected + @is_operational def update_loadbalancer(self, old_loadbalancer, loadbalancer, service): """Update virtual server""" # anti-pattern three args unused. 
return self._common_service_handler(service) @serialized('delete_loadbalancer') - @is_connected + @is_operational def delete_loadbalancer(self, loadbalancer, service): """Delete loadbalancer""" LOG.debug("Deleting loadbalancer") @@ -1357,14 +1646,14 @@ def delete_loadbalancer(self, loadbalancer, service): delete_event=True) @serialized('create_listener') - @is_connected + @is_operational def create_listener(self, listener, service): """Create virtual server""" LOG.debug("Creating listener") return self._common_service_handler(service) @serialized('update_listener') - @is_connected + @is_operational def update_listener(self, old_listener, listener, service): """Update virtual server""" LOG.debug("Updating listener") @@ -1372,71 +1661,70 @@ def update_listener(self, old_listener, listener, service): return self._common_service_handler(service) @serialized('delete_listener') - @is_connected + @is_operational def delete_listener(self, listener, service): """Delete virtual server""" LOG.debug("Deleting listener") return self._common_service_handler(service) @serialized('create_pool') - @is_connected + @is_operational def create_pool(self, pool, service): """Create lb pool""" LOG.debug("Creating pool") return self._common_service_handler(service) @serialized('update_pool') - @is_connected + @is_operational def update_pool(self, old_pool, pool, service): """Update lb pool""" LOG.debug("Updating pool") return self._common_service_handler(service) @serialized('delete_pool') - @is_connected + @is_operational def delete_pool(self, pool, service): """Delete lb pool""" LOG.debug("Deleting pool") return self._common_service_handler(service) @serialized('create_member') - @is_connected + @is_operational def create_member(self, member, service): """Create pool member""" LOG.debug("Creating member") return self._common_service_handler(service) @serialized('update_member') - @is_connected + @is_operational def update_member(self, old_member, member, service): """Update pool 
member""" LOG.debug("Updating member") return self._common_service_handler(service) @serialized('delete_member') - @is_connected + @is_operational def delete_member(self, member, service): """Delete pool member""" LOG.debug("Deleting member") return self._common_service_handler(service, delete_event=True) @serialized('create_health_monitor') - @is_connected + @is_operational def create_health_monitor(self, health_monitor, service): """Create pool health monitor""" LOG.debug("Creating health monitor") return self._common_service_handler(service) @serialized('update_health_monitor') - @is_connected - def update_health_monitor(self, old_health_monitor, - health_monitor, service): + @is_operational + def update_health_monitor(self, old_health_monitor, health_monitor, service): """Update pool health monitor""" LOG.debug("Updating health monitor") return self._common_service_handler(service) @serialized('delete_health_monitor') - @is_connected + @is_operational def delete_health_monitor(self, health_monitor, service): """Delete pool health monitor""" LOG.debug("Deleting health monitor") @@ -1444,7 +1732,7 @@ def delete_health_monitor(self, health_monitor, service): # sapcc: get all snat pools @serialized('get_all_snat_pools') - @is_connected + @is_operational def get_all_snat_pools(self, partition=None): LOG.debug('getting all snat pools on BIG-IPs') @@ -1457,7 +1745,7 @@ def get_all_snat_pools(self, partition=None): # sapcc: get all virtual_addresses @serialized('get_all_virtual_addresses') - @is_connected + @is_operational def get_all_virtual_addresses(self): LOG.debug('getting all virtual addresses on BIG-IPs') @@ -1469,7 +1757,7 @@ def get_all_virtual_addresses(self): return virtual_address_s - @is_connected + @is_operational def get_stats(self, service): lb_stats = {} stats = ['clientside.bitsIn', @@ -1552,7 +1840,7 @@ def tunnel_sync(self): return False @serialized('sync') - @is_connected + @is_operational def sync(self, service): """Sync service defintion to 
device""" @@ -1571,7 +1859,7 @@ def sync(self, service): LOG.debug("Attempted sync of deleted load balancer") @serialized('backup_configuration') - @is_connected + @is_operational def backup_configuration(self): # Save Configuration on Devices for bigip in self.get_all_bigips(): @@ -1680,7 +1968,7 @@ def service_object_teardown(self, service): m_obj.delete() def _service_exists(self, service): - # Returns whether the bigip has a pool for the service + # Returns whether the bigip has the service defined if not service['loadbalancer']: return False loadbalancer = service['loadbalancer'] @@ -1689,6 +1977,10 @@ def _service_exists(self, service): loadbalancer['tenant_id'] ) + if self.network_builder: + # append route domain to member address + self.network_builder._annotate_service_route_domains(service) + # Foreach bigip in the cluster: for bigip in self.get_config_bigips(): # Does the tenant folder exist? @@ -1721,7 +2013,7 @@ def _service_exists(self, service): bigip.hostname)) return False - # Ensure that each virtual service exists. + # Ensure that each pool exists. for pool in service['pools']: svc = {"loadbalancer": loadbalancer, "pool": pool} @@ -1734,7 +2026,38 @@ def _service_exists(self, service): (bigip_pool['name'], folder_name, bigip.hostname)) return False - + else: + deployed_pool = self.pool_manager.load( + bigip, + name=bigip_pool['name'], + partition=folder_name) + deployed_members = \ + deployed_pool.members_s.get_collection() + + # First check that number of members deployed + # is equal to the number in the service. 
+ if len(deployed_members) != len(pool['members']): + LOG.warning("Pool %s members member count mismatch " + "match: deployed %d != service %d" % + (bigip_pool['name'], len(deployed_members), + len(pool['members']))) + return False + + # Ensure each pool member exists + for member in service['members']: + if member['pool_id'] == pool['id']: + lb = self.lbaas_builder + pool = lb.get_pool_by_id( + service, member["pool_id"]) + svc = {"loadbalancer": loadbalancer, + "member": member, + "pool": pool} + if not lb.pool_builder.member_exists(svc, bigip): + LOG.warning("Pool member not found: %s" % + svc['member']) + return False + + # Ensure that each health monitor exists. for healthmonitor in service['healthmonitors']: svc = {"loadbalancer": loadbalancer, "healthmonitor": healthmonitor} @@ -1827,12 +2150,11 @@ def _common_service_handler(self, service, {'check_for_delete_subnets': {}, 'do_not_delete_subnets': []} - - #pdb.set_trace() + LOG.debug("ccloud: Pre assure service ***********************************************") self.lbaas_builder.assure_service(service, traffic_group, all_subnet_hints) - + LOG.debug("ccloud: Post assure service **********************************************") if self.network_builder: start_time = time() @@ -1874,8 +2196,7 @@ def _common_service_handler(self, service, return lb_pending def update_service_status(self, service, timed_out=False): - """Update status of objects in OpenStack """ - + """Update status of objects in controller.""" LOG.debug("_update_service_status") if not self.plugin_rpc: @@ -1907,7 +2228,7 @@ def update_service_status(self, service, timed_out=False): self._update_loadbalancer_status(service, timed_out) def _update_member_status(self, members, timed_out): - """Update member status in OpenStack """ + """Update member status in OpenStack.""" for member in members: if 'provisioning_status' in member: provisioning_status = member['provisioning_status'] @@ -2084,12 +2405,17 @@ def _update_loadbalancer_status(self, service, 
timed_out=False): else: LOG.error('Loadbalancer provisioning status is invalid') - @is_connected + @is_operational def update_operating_status(self, service): if 'members' in service: if self.network_builder: # append route domain to member address - self.network_builder._annotate_service_route_domains(service) + try: + self.network_builder._annotate_service_route_domains( + service) + except f5ex.InvalidNetworkType as exc: + LOG.warning(exc.msg) + return # get currrent member status self.lbaas_builder.update_operating_status(service) @@ -2110,8 +2436,9 @@ def get_active_bigip(self): return bigips[0] for bigip in bigips: - if self.cluster_manager.is_device_active(bigip): - return bigip + if hasattr(bigip, 'failover_state'): + if bigip.failover_state == 'active': + return bigip # if can't determine active, default to first one return bigips[0] @@ -2128,31 +2455,46 @@ def tenant_to_traffic_group(self, tenant_id): tg_index = int(hexhash, 16) % len(self.__traffic_groups) return self.__traffic_groups[tg_index] + # these functions should return only active BIG-IP + # not errored BIG-IPs. def get_bigip(self): - # Get one consistent big-ip - # As implemented I think this always returns the "first" bigip - # without any HTTP traffic? CONFIRMED: __bigips are mgmt_rts - hostnames = sorted(self.__bigips) - for i in range(len(hostnames)): # C-style make Pythonic. - try: - bigip = self.__bigips[hostnames[i]] # Calling devices?! 
- return bigip - except urllib2.URLError: - pass - raise urllib2.URLError('cannot communicate to any bigips') + hostnames = sorted(list(self.__bigips)) + for host in hostnames: + if hasattr(self.__bigips[host], 'status') and \ + self.__bigips[host].status == 'active': + return self.__bigips[host] def get_bigip_hosts(self): - # Get all big-ips hostnames under management - return self.__bigips + return_hosts = [] + for host in list(self.__bigips): + if hasattr(self.__bigips[host], 'status') and \ + self.__bigips[host].status == 'active': + return_hosts.append(host) + return sorted(return_hosts) def get_all_bigips(self): - # Get all big-ips under management - return self.__bigips.values() + return_bigips = [] + for host in list(self.__bigips): + if hasattr(self.__bigips[host], 'status') and \ + self.__bigips[host].status == 'active': + return_bigips.append(self.__bigips[host]) + return return_bigips def get_config_bigips(self): - # Return a list of big-ips that need to be configured. return self.get_all_bigips() + # these are the refactored methods + def get_active_bigips(self): + return self.get_all_bigips() + + def get_errored_bigips_hostnames(self): + return_hostnames = [] + for host in list(self.__bigips): + bigip = self.__bigips[host] + if hasattr(bigip, 'status') and bigip.status == 'error': + return_hostnames.append(host) + return return_hostnames + def get_inbound_throughput(self, bigip, global_statistics=None): return self.stat_helper.get_inbound_throughput( bigip, global_stats=global_statistics) @@ -2198,10 +2540,21 @@ def get_route_domain_count(self, bigip=None, global_statistics=None): return self.network_helper.get_route_domain_count(bigip) def _init_traffic_groups(self, bigip): - self.__traffic_groups = self.cluster_manager.get_traffic_groups(bigip) - if 'traffic-group-local-only' in self.__traffic_groups: - self.__traffic_groups.remove('traffic-group-local-only') - self.__traffic_groups.sort() + try: + LOG.debug('retrieving traffic groups from %s' % 
bigip.hostname) + self.__traffic_groups = \ + self.cluster_manager.get_traffic_groups(bigip) + if 'traffic-group-local-only' in self.__traffic_groups: + LOG.debug('removing reference to non-floating traffic group') + self.__traffic_groups.remove('traffic-group-local-only') + self.__traffic_groups.sort() + LOG.debug('service placement will done on traffic group(s): %s' + % self.__traffic_groups) + except Exception: + bigip.status = 'error' + bigip.status_message = \ + 'could not determine traffic groups for service placement' + raise def _validate_bigip_version(self, bigip, hostname): # Ensure the BIG-IP has sufficient version @@ -2220,42 +2573,42 @@ def _validate_bigip_version(self, bigip, hostname): return major_version, minor_version @serialized('create_l7policy') - @is_connected + @is_operational def create_l7policy(self, l7policy, service): """Create lb l7policy""" LOG.debug("Creating l7policy") self._common_service_handler(service) @serialized('update_l7policy') - @is_connected + @is_operational def update_l7policy(self, old_l7policy, l7policy, service): """Update lb l7policy""" LOG.debug("Updating l7policy") self._common_service_handler(service) @serialized('delete_l7policy') - @is_connected + @is_operational def delete_l7policy(self, l7policy, service): """Delete lb l7policy""" LOG.debug("Deleting l7policy") self._common_service_handler(service) @serialized('create_l7rule') - @is_connected + @is_operational def create_l7rule(self, pool, service): """Create lb l7rule""" LOG.debug("Creating l7rule") self._common_service_handler(service) @serialized('update_l7rule') - @is_connected + @is_operational def update_l7rule(self, old_l7rule, l7rule, service): """Update lb l7rule""" LOG.debug("Updating l7rule") self._common_service_handler(service) @serialized('delete_l7rule') - @is_connected + @is_operational def delete_l7rule(self, l7rule, service): """Delete lb l7rule""" LOG.debug("Deleting l7rule") @@ -2269,7 +2622,7 @@ def trace_service_requests(self, service): 
fp.write(']') def get_config_dir(self): - """Determines F5 agent configuration directory. + """Determine F5 agent configuration directory. Oslo cfg has a config_dir option, but F5 agent is not currently started with this option. To be complete, the code will check if diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index cc0de9c95..b369ca6ea 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -50,8 +50,8 @@ def __init__(self, conf, driver, l2_service=None): driver.cert_manager, conf.f5_parent_ssl_profile) self.pool_builder = pool_service.PoolServiceBuilder( - self.service_adapter - ) + self.service_adapter, + conf.f5_parent_https_monitor) self.l7service = l7policy_service.L7PolicyService(self, conf) self.esd = None diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py index e57ba1a9e..becfad643 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_driver.py @@ -36,8 +36,8 @@ def set_context(self, context): def set_plugin_rpc(self, plugin_rpc): """Provide LBaaS Plugin RPC access.""" - def post_init(self): - """Run after agent is fully connected """ + def set_agent_report_state(self, report_state_callback): + """Set Agent Report State.""" raise NotImplementedError() def set_tunnel_rpc(self, tunnel_rpc): @@ -45,21 +45,33 @@ def set_tunnel_rpc(self, tunnel_rpc): raise NotImplementedError() def set_l2pop_rpc(self, l2pop_rpc): - """Provide FDB Connector with L2 Population RPC access """ - raise NotImplementedError() - - def connect(self): - """Connect backend API endpoints """ + """Provide FDB Connector with L2 Population RPC access.""" raise NotImplementedError() def flush_cache(self): """Remove all cached items.""" raise NotImplementedError() + def 
backend_integrity(self): + """Return True, if the agent is be considered viable for services.""" + raise NotImplemented() + def backup_configuration(self): - """Persist backend configuratoins """ + """Persist backend configuratoins.""" raise NotImplementedError() + def generate_capacity_score(self, capacity_policy): + """Generate the capacity score of connected devices.""" + raise NotImplemented + + def update_operating_status(self): + """Update pool member operational status from devices to controller.""" + raise NotImplemented + + def recover_errored_devices(self): + """Trigger attempt to reconnect any errored devices.""" + raise NotImplemented + def get_stats(self, service): """Get Stats for a loadbalancer Service.""" raise NotImplementedError() @@ -73,12 +85,12 @@ def purge_orphaned_loadbalancer(self, tenant_id, loadbalancer_id, """Remove all loadbalancers without references in Neutron.""" raise NotImplemented - def exists(self, service): + def service_exists(self, service): """Check If LBaaS Service is Defined on Driver Target.""" raise NotImplementedError() def sync(self, service): - """Force Sync a Service on Driver Target """ + """Force Sync a Service on Driver Target.""" raise NotImplementedError() def create_pool(self, pool, service): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 53671db86..12fb72761 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -78,51 +78,46 @@ def set_l2pop_rpc(self, l2pop_rpc): def initialize_vcmp(self): self.l2_service.initialize_vcmp_manager() - def initialize_tunneling(self): + def initialize_tunneling(self, bigip): # setup tunneling vtep_folder = self.conf.f5_vtep_folder vtep_selfip_name = self.conf.f5_vtep_selfip_name - local_ips = [] - for bigip in self.driver.get_all_bigips(): - - bigip.local_ip = None + bigip.local_ip = None - if not 
vtep_folder or vtep_folder.lower() == 'none': - vtep_folder = 'Common' + if not vtep_folder or vtep_folder.lower() == 'none': + vtep_folder = 'Common' - if vtep_selfip_name and \ - not vtep_selfip_name.lower() == 'none': + if vtep_selfip_name and \ + not vtep_selfip_name.lower() == 'none': - # profiles may already exist - # create vxlan_multipoint_profile` - self.network_helper.create_vxlan_multipoint_profile( - bigip, - 'vxlan_ovs', - partition='Common') - # create l2gre_multipoint_profile - self.network_helper.create_l2gre_multipoint_profile( - bigip, - 'gre_ovs', - partition='Common') + # profiles may already exist + # create vxlan_multipoint_profile` + self.network_helper.create_vxlan_multipoint_profile( + bigip, + 'vxlan_ovs', + partition='Common') + # create l2gre_multipoint_profile + self.network_helper.create_l2gre_multipoint_profile( + bigip, + 'gre_ovs', + partition='Common') - # find the IP address for the selfip for each box - local_ip = self.bigip_selfip_manager.get_selfip_addr( - bigip, - vtep_selfip_name, - partition=vtep_folder - ) + # find the IP address for the selfip for each box + local_ip = self.bigip_selfip_manager.get_selfip_addr( + bigip, + vtep_selfip_name, + partition=vtep_folder + ) - if local_ip: - bigip.local_ip = local_ip - local_ips.append(local_ip) - else: - raise f5_ex.MissingVTEPAddress( - 'device %s missing vtep selfip %s' - % (bigip.device_name, - '/' + vtep_folder + '/' + - vtep_selfip_name)) - return local_ips + if local_ip: + bigip.local_ip = local_ip + else: + raise f5_ex.MissingVTEPAddress( + 'device %s missing vtep selfip %s' + % (bigip.device_name, + '/' + vtep_folder + '/' + + vtep_selfip_name)) def is_service_connected(self, service): networks = service.get('networks', {}) @@ -233,6 +228,7 @@ def _assure_subnet_gateway(self,service): LOG.exception(err) def _annotate_service_route_domains(self, service): + # wtn : subnet for member has to be subnet for vip # Add route domain notation to pool member and vip addresses. 
# ccloud: don't allow creation of members without route domain in case of NOT global routed mode setting tenant_id = service['loadbalancer']['tenant_id'] diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py index d7a247bd9..399bc6fe4 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/plugin_rpc.py @@ -532,6 +532,29 @@ def get_loadbalancers_without_agent_binding(self, env=None, group=None): return unbound_loadbalancers + @log_helpers.log_method_call + def get_errored_loadbalancers(self, env=None, group=None, host=None): + """Retrieve a list of errored loadbalancers for this agent.""" + loadbalancers = [] + + if not env: + env = self.env + + try: + loadbalancers = self._call( + self.context, + self._make_msg('get_errored_loadbalancers', + env=env, + group=group, + host=host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "get_errored_loadbalancers") + + return loadbalancers + @log_helpers.log_method_call def get_loadbalancers_by_network(self, network_id, env=None,group=None,host=None): """Retrieve a list of loadbalancers for a network.""" @@ -556,6 +579,62 @@ def get_loadbalancers_by_network(self, network_id, env=None,group=None,host=None return loadbalancers + @log_helpers.log_method_call + def set_agent_admin_state(self, admin_state_up): + """Set the admin_state_up of for this agent""" + succeeded = False + try: + succeeded = self._call( + self.context, + self._make_msg('set_agent_admin_state', + admin_state_up=admin_state_up, + host=self.host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "set_agent_admin_state") + + return succeeded + + @log_helpers.log_method_call + def scrub_dead_agents(self, env, group): + """Set the admin_state_up of for this agent""" + service = {} + try: + 
service = self._call( + self.context, + self._make_msg('scrub_dead_agents', + env=env, + group=group, + host=self.host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "scrub_dead_agents") + + return service + + @log_helpers.log_method_call + def get_clusterwide_agent(self, env, group): + """Determin which agent performce global tasks for the cluster""" + agent = {} + try: + agent = self._call( + self.context, + self._make_msg('get_clusterwide_agent', + env=env, + group=group, + host=self.host), + topic=self.topic + ) + except messaging.MessageDeliveryFailure: + LOG.error("agent->plugin RPC exception caught: ", + "scrub_dead_agents") + + return agent + @log_helpers.log_method_call def validate_loadbalancers_state(self, loadbalancers): """Get the status of a list of loadbalancers IDs in Neutron""" diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index a9c596687..d11a68a2e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -34,7 +34,7 @@ class PoolServiceBuilder(object): health monitors, and members on one or more BIG-IP systems. """ - def __init__(self, service_adapter): + def __init__(self, service_adapter, f5_parent_https_monitor=None): self.service_adapter = service_adapter self.http_mon_helper = BigIPResourceHelper(ResourceType.http_monitor) self.https_mon_helper = BigIPResourceHelper(ResourceType.https_monitor) @@ -42,6 +42,7 @@ def __init__(self, service_adapter): self.ping_mon_helper = BigIPResourceHelper(ResourceType.ping_monitor) self.pool_helper = BigIPResourceHelper(ResourceType.pool) self.node_helper = BigIPResourceHelper(ResourceType.node) + self.f5_parent_https_monitor = f5_parent_https_monitor def create_pool(self, service, bigips): """Create a pool on set of BIG-IPs. 
@@ -57,12 +58,24 @@ def create_pool(self, service, bigips): for bigip in bigips: try: self.pool_helper.create(bigip, pool) - LOG.warning("Pool created: %s", pool['name']) + LOG.info("Pool created: %s", pool['name']) except HTTPError as err: - LOG.info("Pool creation FAILED: %s", pool['name']) - ex = err + if err.response.status_code == 409: + LOG.info("Pool already exists...updating") + try: + self.pool_helper.update(bigip, pool) + LOG.info("Pool updated: %s", pool['name']) + except Exception as err: + ex = err + LOG.error("Pool creation/update FAILED for pool %s on %s: %s", + pool['name'], bigip, err.message) + else: + ex = err + LOG.error("Pool creation FAILED for pool %s on %s: %s", + pool['name'], bigip, err.message) + if ex: - raise err + raise ex def delete_pool(self, service, bigips): @@ -86,7 +99,7 @@ def delete_pool(self, service, bigips): LOG.info("Pool deletion FAILED: %s", pool['name']) ex = err if ex: - raise err + raise ex def update_pool(self, service, bigips): @@ -106,12 +119,14 @@ def update_pool(self, service, bigips): LOG.info("Pool update FAILED: %s", pool['name']) ex = err if ex: - raise err + raise ex def create_healthmonitor(self, service, bigips): # create member hm = self.service_adapter.get_healthmonitor(service) + #ccloud: set additional attributes like parent monitor in case of creation, might be ignored for update + self._set_monitor_attributes(service, hm) hm_helper = self._get_monitor_helper(service) pool = self.service_adapter.get_pool(service) @@ -123,10 +138,20 @@ def create_healthmonitor(self, service, bigips): self.pool_helper.update(bigip, pool) LOG.info("Health Monitor created: %s", hm['name']) except HTTPError as err: - LOG.info("Health Monitor creation FAILED: %s", hm['name']) - ex = err + if err.response.status_code == 409: + try: + hm_helper.update(bigip, hm) + LOG.info("Health Monitor upserted: %s", hm['name']) + except Exception as err: + ex = err + LOG.error("Failed to upsert monitor %s on %s: %s", + hm['name'], bigip, 
err.message) + else: + ex = err + LOG.error("Failed to upsert monitor %s on %s: %s", + hm['name'], bigip, err.message) if ex: - raise err + raise ex def delete_healthmonitor(self, service, bigips): @@ -152,7 +177,7 @@ def delete_healthmonitor(self, service, bigips): LOG.info("Health Monitor deletion FAILED: %s", hm['name']) ex = err if ex: - raise err + raise ex def update_healthmonitor(self, service, bigips): hm = self.service_adapter.get_healthmonitor(service) @@ -170,7 +195,7 @@ def update_healthmonitor(self, service, bigips): LOG.info("Health Monitor update FAILED: %s", hm['name']) ex = err if ex: - raise err + raise ex # Note: can't use BigIPResourceHelper class because members # are created within pool objects. Following member methods @@ -195,7 +220,7 @@ def create_member(self, service, bigips): LOG.info("Member creation FAILED: %s", member['address']) ex = err if ex: - raise err + raise ex def delete_member(self, service, bigips): pool = self.service_adapter.get_pool(service) @@ -226,7 +251,6 @@ def delete_member(self, service, bigips): name=urllib.quote(node["name"]), partition=node["partition"]) LOG.info("Node deleted: %s", node["name"]) - except HTTPError as err: # Possilbe error if node is shared with another member. # If so, ignore the error. 
@@ -236,7 +260,7 @@ def delete_member(self, service, bigips): LOG.info("Member or Node deletion FAILED: %s", member['address']) ex = err if ex: - raise err + raise ex def update_member(self, service, bigips): pool = self.service_adapter.get_pool(service) @@ -263,7 +287,7 @@ def update_member(self, service, bigips): #LOG.info("Member update FAILED: %s", member['address']) ex = err if ex: - raise err + raise ex def _get_monitor_helper(self, service): monitor_type = self.service_adapter.get_monitor_type(service) @@ -277,6 +301,35 @@ def _get_monitor_helper(self, service): hm = self.http_mon_helper return hm + def _set_monitor_attributes(self, service, monitor): + monitor_type = self.service_adapter.get_monitor_type(service) + if monitor_type == "HTTPS": + if self.f5_parent_https_monitor: + monitor['defaultsFrom'] = self.f5_parent_https_monitor + + def member_exists(self, service, bigip): + """Return True if a member exists in a pool. + + :param service: Has pool and member name/partition + :param bigip: BIG-IP to get member status from. + :return: Boolean + """ + pool = self.service_adapter.get_pool(service) + member = self.service_adapter.get_member(service) + part = pool["partition"] + try: + p = self.pool_helper.load(bigip, + name=pool["name"], + partition=part) + + m = p.members_s.members + if m.exists(name=urllib.quote(member["name"]), partition=part): + return True + except Exception as e: + # log error but continue on + LOG.error("Error checking member exists: %s", e.message) + return False + def get_member_status(self, service, bigip, status_keys): """Return status values for a single pool. 
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index e14661304..edaeb16cb 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -212,27 +212,38 @@ def _map_healthmonitor(self, loadbalancer, lbaas_healthmonitor): lbaas_healthmonitor["type"] == "HTTPS"): # url path - if "url_path" in lbaas_healthmonitor: - healthmonitor["send"] = ("GET " + - lbaas_healthmonitor["url_path"] + - " HTTP/1.0\\r\\n\\r\\n") - else: - healthmonitor["send"] = "GET / HTTP/1.0\\r\\n\\r\\n" + if "url_path" not in lbaas_healthmonitor or not lbaas_healthmonitor["url_path"]: + lbaas_healthmonitor["url_path"] = "/" + if "http_method" not in lbaas_healthmonitor or not lbaas_healthmonitor["http_method"]: + lbaas_healthmonitor["http_method"] = "GET" + + healthmonitor["send"] = (lbaas_healthmonitor["http_method"] + " " + + lbaas_healthmonitor["url_path"] + + " HTTP/1.0\\r\\n\\r\\n") # expected codes healthmonitor["recv"] = self._get_recv_text( lbaas_healthmonitor) # interval - delay - if "delay" in lbaas_healthmonitor: - healthmonitor["interval"] = lbaas_healthmonitor["delay"] - - # timeout - if "timeout" in lbaas_healthmonitor: - if "max_retries" in lbaas_healthmonitor: - timeout = (int(lbaas_healthmonitor["max_retries"]) * - int(lbaas_healthmonitor["timeout"])) - healthmonitor["timeout"] = timeout + if "delay" not in lbaas_healthmonitor or not lbaas_healthmonitor["delay"]: + lbaas_healthmonitor["delay"] = "5" + healthmonitor["interval"] = lbaas_healthmonitor["delay"] + # ccloud : ignore OS timeout because F5 treats stuff different + # timeout = delay * interval + 1 second + if "max_retries" not in lbaas_healthmonitor or not lbaas_healthmonitor["max_retries"]: + lbaas_healthmonitor["max_retries"] = "3" + + timeout = (int(lbaas_healthmonitor["max_retries"]) * + int(lbaas_healthmonitor["delay"])) + 1 + healthmonitor["timeout"] = 
timeout + + # timeout OLD logic + # if "timeout" in lbaas_healthmonitor: + # if "max_retries" in lbaas_healthmonitor: + # timeout = (int(lbaas_healthmonitor["max_retries"]) * + # int(lbaas_healthmonitor["timeout"])) + # healthmonitor["timeout"] = timeout return healthmonitor From 13156704b6a7dc5f4dbc1a87fc835a1343311977 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 17 Jan 2019 13:55:10 +0100 Subject: [PATCH 087/109] LBaaS: clean up messages and flow in periodic resync --- .../lbaasv2/drivers/bigip/agent_manager.py | 40 +++++++++---------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index ca904ff96..e28340d9e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -581,31 +581,29 @@ def periodic_resync(self, context): # Only force resync if the agent thinks it is # synchronized and the resync timer has exired - if (now - self.last_resync).seconds > self.service_resync_interval: - LOG.info("ccloud - periodic_resync: Running sync tasks") - if not self.needs_resync: - self.needs_resync = True - LOG.debug('Forcing resync of services on resync timer (%d seconds).' 
% self.service_resync_interval) - self.cache.services = {} - LOG.warning("ccloud: cache cleared: service cache = ".format(self.cache.services)) - self.last_resync = now - self.lbdriver.flush_cache() - LOG.debug("periodic_sync: service_resync_interval expired: %s" - % str(self.needs_resync)) - else: - LOG.info("ccloud - periodic_resync: Waiting another {0} seconds for a timer triggered resync".format((self.service_resync_interval - (now - self.last_resync ).seconds))) - # use forced resync switch which is only set by recovering of errored F5 to guarantee sync if self.forced_resync: self.forced_resync_tries += 1 self.needs_resync = True self.cache.services = {} self.lbdriver.flush_cache() - LOG.info("ccloud - periodic_resync: Resync enforced because of recovering of errored F5 device") + self.last_resync = now + LOG.debug("ccloud - periodic_resync: Forcing resync of ALL services because of a recovered F5 device") + elif (now - self.last_resync).seconds > self.service_resync_interval: + if not self.needs_resync: + self.needs_resync = True + self.cache.services = {} + self.lbdriver.flush_cache() + self.last_resync = now + LOG.debug('ccloud - periodic_sync: Forcing resync of ALL services on resync timer (%d seconds).' % self.service_resync_interval) + else: + LOG.debug('ccloud - periodic_sync: Forcing resync of NON CACHED services on resync timer (%d seconds).' 
% self.service_resync_interval) + else: + LOG.debug("ccloud - periodic_resync: Waiting minimum {0} seconds for next timer triggered resync".format((self.service_resync_interval - (now - self.last_resync ).seconds))) # resync if we need to if self.needs_resync: - LOG.info('periodic_resync: Forcing resync of services.') + LOG.info('ccloud: periodic_resync: Starting resync ...') self.needs_resync = False if self.tunnel_sync(): self.needs_resync = True @@ -619,21 +617,21 @@ def periodic_resync(self, context): except Exception as e: LOG.warning("ccloud - Couldn't clear orphan snat objects because of : " + str(e.message)) else: - LOG.info("ccloud - periodic_resync: Resync not needed! Discarding ...") + LOG.debug("ccloud - periodic_resync: Resync not needed! Discarding ...") if self.orphans_cleanup_interval > 0: if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) < now: - LOG.info("ccloud - orphans: Start cleaning orphan objects from F5 device") + LOG.debug("ccloud - orphans: Start cleaning orphan objects from F5 device") self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) if self.clean_orphaned_objects_and_save_device_config(): self.needs_resync = True orphan_cache = self.lbdriver.get_orphans_cache() - LOG.info("ccloud - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(orphan_cache), orphan_cache)) + LOG.debug("ccloud - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(orphan_cache), orphan_cache)) else: - LOG.info("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" + LOG.debug("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. 
Waiting another {0} seconds" .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) - now).seconds)) else: - LOG.info("ccloud - orphans: No orphan cleaning enabled. Only SNAT pool orphan handling will be done") + LOG.debug("ccloud - orphans: No orphan cleaning enabled. Only SNAT pool orphan handling will be done") LOG.info("ccloud - periodic_resync: Resync took {0} seconds".format((datetime.datetime.now() - now).seconds)) From 736a31d88ff0dcba80fcfa728c08ed06abef9dbe Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 17 Jan 2019 14:42:33 +0100 Subject: [PATCH 088/109] LBaaS: clean up messages in periodic resync #2 --- .../lbaasv2/drivers/bigip/agent_manager.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index e28340d9e..60f756b78 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -595,9 +595,11 @@ def periodic_resync(self, context): self.cache.services = {} self.lbdriver.flush_cache() self.last_resync = now - LOG.debug('ccloud - periodic_sync: Forcing resync of ALL services on resync timer (%d seconds).' % self.service_resync_interval) + LOG.debug('ccloud - periodic_resync: Forcing resync of ALL services on resync timer (%d seconds).' % self.service_resync_interval) else: - LOG.debug('ccloud - periodic_sync: Forcing resync of NON CACHED services on resync timer (%d seconds).' % self.service_resync_interval) + LOG.debug('ccloud - periodic_resync: Forcing resync of NON CACHED services on resync timer (%d seconds).' 
% self.service_resync_interval) + elif self.needs_resync: + LOG.debug('ccloud - periodic_resync: Starting requested resync of NON CACHED services.') else: LOG.debug("ccloud - periodic_resync: Waiting minimum {0} seconds for next timer triggered resync".format((self.service_resync_interval - (now - self.last_resync ).seconds))) @@ -621,22 +623,22 @@ def periodic_resync(self, context): if self.orphans_cleanup_interval > 0: if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) < now: - LOG.debug("ccloud - orphans: Start cleaning orphan objects from F5 device") + LOG.debug("ccloud - periodic_resync - orphans: Start cleaning orphan objects from F5 device") self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) if self.clean_orphaned_objects_and_save_device_config(): self.needs_resync = True orphan_cache = self.lbdriver.get_orphans_cache() - LOG.debug("ccloud - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(orphan_cache), orphan_cache)) + LOG.debug("ccloud - periodic_resync - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(orphan_cache), orphan_cache)) else: - LOG.debug("ccloud - periodic_resync: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" + LOG.debug("ccloud - periodic_resync - orphans: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) - now).seconds)) else: - LOG.debug("ccloud - orphans: No orphan cleaning enabled. Only SNAT pool orphan handling will be done") + LOG.debug("ccloud - periodic_resync - orphans: No orphan cleaning enabled. 
Only SNAT pool orphan handling will be done") LOG.info("ccloud - periodic_resync: Resync took {0} seconds".format((datetime.datetime.now() - now).seconds)) except Exception as e: - LOG.exception("ccloud - Exception in periodic resync happend: " + str(e.message)) + LOG.exception("ccloud - periodic_resync: Exception in periodic resync happend: " + str(e.message)) pass # ccloud: clean orphaned snat pools @@ -701,7 +703,6 @@ def sync_state(self): % list(known_services)) LOG.debug("ccloud: plugin got all loadbalancer ids as: %s" % list(all_loadbalancer_ids)) - LOG.debug("ccloud: cache : {}".format(self.cache)) # ccloud: Get rid of 'Cached service not found in neutron database' message # Clear cache entry if not found in neutron. In case of a temp issue From deb85381aaec4a0547b1fde569350cf598969d92 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 23 Jan 2019 09:27:36 +0100 Subject: [PATCH 089/109] Fix pool lb method for members with weight. Ratio methods get lost in many cases like changing monitors related to pool, ... 
(#2) --- f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index b369ca6ea..59e11100c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -67,12 +67,12 @@ def assure_service(self, service, traffic_group, all_subnet_hints): self._assure_listeners_created(service) - self._assure_pools_configured(service) - self._assure_monitors(service) self._assure_members(service, all_subnet_hints) + self._assure_pools_configured(service) + self._assure_pools_deleted(service) self._assure_l7policies_created(service) @@ -240,6 +240,9 @@ def _assure_pools_configured(self, service): self.listener_builder.update_session_persistence( svc, bigips) + # ccloud: update pool to set lb_method right + self.pool_builder.update_pool(svc, bigips) + except HTTPError as err: if err.response.status_code != 409: From 304ce001c2c8bbf8ffbacbd4ac04237f9861a71b Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 23 Jan 2019 17:09:01 +0100 Subject: [PATCH 090/109] Periodic schedule (#3) * Fix pool lb method for members with weight. Ratio methods get lost in many cases like changing monitors related to pool, ... 
* LBaaS: Delayed start for Full Cache sync and orphan cleanup --- .../lbaasv2/drivers/bigip/agent_manager.py | 55 +++++++++++++------ 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 60f756b78..fc33e9a60 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -255,11 +255,21 @@ def __init__(self, conf): # Create the cache of provisioned services self.cache = LogicalServiceCache() self.last_resync = datetime.datetime.now() - # try to avoid orphan cleaning right after start and schedule differently on agents + self.service_resync_interval = conf.service_resync_interval + LOG.debug('setting service resync intervl to %d seconds' % self.service_resync_interval) + + + # calculate last resync date in a way that not all the agents do it at a same time when they got redeployed + # with that first agent will resync after start_delay seconds, second after start_delay*2 secs, ... + max_grps = 3 if self.conf.environment_group_number: - start = int(self.conf.environment_group_number) + grp_nr = int(self.conf.environment_group_number) else: - start = randint(1, 3) + grp_nr = randint(1, max_grps) + + rsi = self.service_resync_interval + start_delay = int(rsi / max_grps) + self.last_resync = datetime.datetime.now() - datetime.timedelta(seconds=(start_delay*(max_grps-grp_nr)+max_grps)) # get orphan cleanup interval and set to a value between 0 and 24 if nonsense given orphans_interval = float(self.conf.ccloud_orphans_cleanup_interval) @@ -268,17 +278,29 @@ def __init__(self, conf): elif orphans_interval > 24.0: orphans_interval = 24.0 - # define interval in minutes - self.orphans_cleanup_interval = 60 * orphans_interval - # schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... 
- x = self.orphans_cleanup_interval / 3 - t = [x*3, x*2, x, x*3, x*2, x, x*3, x*2, x, x*3, x*2, x] - if start < 1: - start = 1 - self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[start-1] - 5) + self.orphans_cleanup_interval = 3600 * orphans_interval + orphan_delay = int(self.orphans_cleanup_interval / max_grps) + self.last_clean_orphans = self.last_resync - datetime.timedelta(seconds=(orphan_delay*(max_grps - grp_nr )+max_grps)) + + # + # + # + # - datetime.timedelta(minutes=t[grp_nr-1] - 5) + # + # + # + # + # # define interval in minutes + # self.orphans_cleanup_interval = 60 * orphans_interval + # # schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... + # x = self.orphans_cleanup_interval / 3 + # t = [x*3, x*2, x, x*3, x*2, x, x*3, x*2, x, x*3, x*2, x] + # if grp_nr < 1: + # grp_nr = 1 + # self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[grp_nr-1] - 5) LOG.info('ccloud: Orphan cleanup testrun = %s', self.conf.ccloud_orphans_cleanup_testrun) LOG.info('ccloud: Orphan cleanup interval = %s', self.orphans_cleanup_interval) - LOG.info('ccloud: Orphan cleanup first run will start at %s UTC', self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) + LOG.info('ccloud: Orphan cleanup first run will start at %s UTC', self.last_clean_orphans + datetime.timedelta(seconds=self.orphans_cleanup_interval)) self.needs_resync = False # used after recovering of errored devices @@ -290,9 +312,6 @@ def __init__(self, conf): self.state_rpc = None self.pending_services = {} - self.service_resync_interval = conf.service_resync_interval - LOG.debug('setting service resync intervl to %d seconds' % - self.service_resync_interval) # Set the agent ID if self.conf.agent_id: @@ -622,16 +641,16 @@ def periodic_resync(self, context): LOG.debug("ccloud - periodic_resync: Resync not needed! 
Discarding ...") if self.orphans_cleanup_interval > 0: - if (self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval)) < now: + if (now - self.last_clean_orphans).seconds > self.orphans_cleanup_interval: LOG.debug("ccloud - periodic_resync - orphans: Start cleaning orphan objects from F5 device") - self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) + self.last_clean_orphans = self.last_clean_orphans + datetime.timedelta(seconds=self.orphans_cleanup_interval) if self.clean_orphaned_objects_and_save_device_config(): self.needs_resync = True orphan_cache = self.lbdriver.get_orphans_cache() LOG.debug("ccloud - periodic_resync - orphans: Finished cleaning orphan objects from F5 device. {0} objects remaining --> {1}".format(len(orphan_cache), orphan_cache)) else: LOG.debug("ccloud - periodic_resync - orphans: Skipping cleaning orphan objects because cleanup interval not expired. Waiting another {0} seconds" - .format((self.last_clean_orphans + datetime.timedelta(minutes=self.orphans_cleanup_interval) - now).seconds)) + .format((self.last_clean_orphans + datetime.timedelta(seconds=self.orphans_cleanup_interval) - now).seconds)) else: LOG.debug("ccloud - periodic_resync - orphans: No orphan cleaning enabled. 
Only SNAT pool orphan handling will be done") From 5ad53e236fa8c31e55f5db1e5f20e284a08df158 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 23 Jan 2019 17:11:14 +0100 Subject: [PATCH 091/109] LBaaS: Delayed start for Full Cache sync and orphan cleanup --- .../lbaasv2/drivers/bigip/agent_manager.py | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index fc33e9a60..9c5c4af2e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -259,7 +259,7 @@ def __init__(self, conf): LOG.debug('setting service resync intervl to %d seconds' % self.service_resync_interval) - # calculate last resync date in a way that not all the agents do it at a same time when they got redeployed + # calculate last resync date in a way that not all the agents do it at a same time when they got redeployed # with that first agent will resync after start_delay seconds, second after start_delay*2 secs, ... max_grps = 3 if self.conf.environment_group_number: @@ -282,22 +282,6 @@ def __init__(self, conf): orphan_delay = int(self.orphans_cleanup_interval / max_grps) self.last_clean_orphans = self.last_resync - datetime.timedelta(seconds=(orphan_delay*(max_grps - grp_nr )+max_grps)) - # - # - # - # - datetime.timedelta(minutes=t[grp_nr-1] - 5) - # - # - # - # - # # define interval in minutes - # self.orphans_cleanup_interval = 60 * orphans_interval - # # schedule first run with 1 hour difference on every agent. Start first run after 5 minutes, 1h and 5 mins, ... 
- # x = self.orphans_cleanup_interval / 3 - # t = [x*3, x*2, x, x*3, x*2, x, x*3, x*2, x, x*3, x*2, x] - # if grp_nr < 1: - # grp_nr = 1 - # self.last_clean_orphans = datetime.datetime.now() - datetime.timedelta(minutes=t[grp_nr-1] - 5) LOG.info('ccloud: Orphan cleanup testrun = %s', self.conf.ccloud_orphans_cleanup_testrun) LOG.info('ccloud: Orphan cleanup interval = %s', self.orphans_cleanup_interval) LOG.info('ccloud: Orphan cleanup first run will start at %s UTC', self.last_clean_orphans + datetime.timedelta(seconds=self.orphans_cleanup_interval)) From 89a5c8be3db7c382d9940eafc111c4e47308fde6 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 24 Jan 2019 10:38:53 +0100 Subject: [PATCH 092/109] LBaaS: Fix Delayed start for Full Cache sync and orphan cleanup in QA --- .../lbaasv2/drivers/bigip/agent_manager.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 9c5c4af2e..f23a39a07 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -266,11 +266,18 @@ def __init__(self, conf): grp_nr = int(self.conf.environment_group_number) else: grp_nr = randint(1, max_grps) + # Hack for QA with more than 3 env_grps + if grp_nr > max_grps: + max_grps = max_grps*2 rsi = self.service_resync_interval start_delay = int(rsi / max_grps) self.last_resync = datetime.datetime.now() - datetime.timedelta(seconds=(start_delay*(max_grps-grp_nr)+max_grps)) + LOG.info('ccloud: Periodic resync interval = %s', self.service_resync_interval) + LOG.info('ccloud: Periodic resync triggered by timer of ALL objects will be done latest after %s UTC', self.last_resync + datetime.timedelta(seconds=self.service_resync_interval)) + + # get orphan cleanup interval and set to a value between 0 and 24 if nonsense given orphans_interval = 
float(self.conf.ccloud_orphans_cleanup_interval) if orphans_interval < 0.0: @@ -590,14 +597,14 @@ def periodic_resync(self, context): self.needs_resync = True self.cache.services = {} self.lbdriver.flush_cache() - self.last_resync = now + self.last_resync = self.last_resync + datetime.timedelta(seconds=self.service_resync_interval) LOG.debug("ccloud - periodic_resync: Forcing resync of ALL services because of a recovered F5 device") elif (now - self.last_resync).seconds > self.service_resync_interval: if not self.needs_resync: self.needs_resync = True self.cache.services = {} self.lbdriver.flush_cache() - self.last_resync = now + self.last_resync = self.last_resync + datetime.timedelta(seconds=self.service_resync_interval) LOG.debug('ccloud - periodic_resync: Forcing resync of ALL services on resync timer (%d seconds).' % self.service_resync_interval) else: LOG.debug('ccloud - periodic_resync: Forcing resync of NON CACHED services on resync timer (%d seconds).' % self.service_resync_interval) From a675cf53ee0bd3609c0f88f232c6c448cec69910 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 24 Jan 2019 15:48:58 +0100 Subject: [PATCH 093/109] Fix typo in capacity_score --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index f23a39a07..e9ec703d3 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -500,7 +500,7 @@ def _report_state(self, force_resync=False): ) ) self.agent_state['configurations'][ - 'environment_capaciy_score'] = env_score + 'environment_capacity_score'] = env_score else: self.agent_state['configurations'][ 'environment_capacity_score'] = 0 From b8a153607fad06f2f96adb552c05df7fb9d12f8f Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 13 Feb 2019 14:12:04 +0100 
Subject: [PATCH 094/109] Fix Bug for is_agent_down. --- .../lbaasv2/drivers/bigip/agent_manager.py | 2 +- .../drivers/bigip/cli/actions/base_action.py | 1 + .../lbaasv2/drivers/bigip/icontrol_driver.py | 13 +++++++++---- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index e9ec703d3..10768a891 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -256,7 +256,7 @@ def __init__(self, conf): self.cache = LogicalServiceCache() self.last_resync = datetime.datetime.now() self.service_resync_interval = conf.service_resync_interval - LOG.debug('setting service resync intervl to %d seconds' % self.service_resync_interval) + LOG.debug('setting service resync interval to %d seconds' % self.service_resync_interval) # calculate last resync date in a way that not all the agents do it at a same time when they got redeployed diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py index e9fd4459a..eacbf61e9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py @@ -65,6 +65,7 @@ def __init__(self,namespace): self.manager = manager.LbaasAgentManager(cfg.CONF) + self.manager.lbdriver.make_bigips_operational() self.driver = self.manager.lbdriver diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 21320f354..2a149bd48 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -336,7 +336,7 @@ def wrapper(*args, **kwargs): LOG.error(str(ioe)) raise ioe else: - LOG.error('Cannot execute %s. Not operational. 
Re-initializing.' + LOG.error('Cannot execute %s. Not operational. Re-initializing ...' % method.__name__) instance._init_bigips() return wrapper @@ -1087,6 +1087,12 @@ def flush_cache(self): bigip.assured_tenant_snat_subnets = {} bigip.assured_gateway_subnets = [] + # method is only needed for f5-utils cli calls like druckhammer, ... + @is_operational + def make_bigips_operational(self): + return + + @serialized('get_all_deployed_loadbalancers') @is_operational def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): @@ -2100,6 +2106,8 @@ def _common_service_handler(self, service, try: try: self.tenant_manager.assure_tenant_created(service) + LOG.debug(" _assure_tenant_created took %.5f secs" % + (time() - start_time)) except Exception as e: LOG.error("Tenant folder creation exception: %s", e.message) @@ -2108,9 +2116,6 @@ def _common_service_handler(self, service, plugin_const.ERROR raise e - LOG.debug(" _assure_tenant_created took %.5f secs" % - (time() - start_time)) - traffic_group = self.service_to_traffic_group(service) loadbalancer['traffic_group'] = traffic_group From 86e70aef8bd37c8e685519f6dcdb7287ca647453 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 14 Feb 2019 14:30:45 +0100 Subject: [PATCH 095/109] Supress ERROR message for pending loadbalancer refreshing --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 10768a891..1505515b4 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -779,8 +779,11 @@ def _refresh_pending_services(self): try: del self.pending_services[lb_id] except KeyError as e: - LOG.error("LB not found in pending services: {0}".format( - e.message)) + # ccloud: message makes no sense if lb got deleted in between of 
self._get_remote_loadbalancers and + # self.refresh_service(lb_id) + pass + #LOG.error("LB not found in pending services: {0}".format( + # e.message)) # If there are services in the pending cache resync if self.pending_services: From b9f7f5a4f9bd870bb1f6ca9993f96f1485a3243e Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Mon, 18 Feb 2019 16:55:14 +0100 Subject: [PATCH 096/109] Do not run periodic tasks and report state in case of cli_sync calls made by f5-utils --- .../lbaasv2/drivers/bigip/agent_manager.py | 51 ++++++++++++++----- .../drivers/bigip/cli/actions/base_action.py | 3 +- .../drivers/bigip/cli/actions/sync_all.py | 2 +- 3 files changed, 39 insertions(+), 17 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 1505515b4..5ebefe8b0 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -239,12 +239,13 @@ class LbaasAgentManager(periodic_task.PeriodicTasks): # b --> B target = oslo_messaging.Target(version='1.0') - def __init__(self, conf): + def __init__(self, conf, cli_sync=False): """Initialize LbaasAgentManager.""" super(LbaasAgentManager, self).__init__(conf) LOG.debug("Initializing LbaasAgentManager") LOG.debug("runtime environment: %s" % sys.version) + self.cli_sync = cli_sync self.conf = conf self.context = ncontext.get_admin_context_without_session() self.serializer = None @@ -351,22 +352,25 @@ def __init__(self, conf): self.lbdriver.set_tunnel_rpc(self.tunnel_rpc) # Allow the driver to update forwarding records in the SDN self.lbdriver.set_l2pop_rpc(self.l2_pop_rpc) - # Allow the driver to force and agent state report to the controller - self.lbdriver.set_agent_report_state(self._report_state) - # Set the flag to resync tunnels/services - self.needs_resync = True + # Disable state monitoring for utils calls like druckhammer, ... 
+ if not self.cli_sync: + # Allow the driver to force and agent state report to the controller + self.lbdriver.set_agent_report_state(self._report_state) - # Mark this agent admin_state_up per startup policy - if(self.admin_state_up): - self.plugin_rpc.set_agent_admin_state(self.admin_state_up) + # Set the flag to resync tunnels/services + self.needs_resync = True + + # Mark this agent admin_state_up per startup policy + if(self.admin_state_up): + self.plugin_rpc.set_agent_admin_state(self.admin_state_up) - # Start state reporting of agent to Neutron - report_interval = self.conf.AGENT.report_interval - if report_interval: - heartbeat = loopingcall.FixedIntervalLoopingCall( - self._report_state) - heartbeat.start(interval=report_interval) + # Start state reporting of agent to Neutron + report_interval = self.conf.AGENT.report_interval + if report_interval: + heartbeat = loopingcall.FixedIntervalLoopingCall( + self._report_state) + heartbeat.start(interval=report_interval) def _load_driver(self, conf): self.lbdriver = None @@ -528,12 +532,20 @@ def initialize_service_hook(self, started_by): @periodic_task.periodic_task(spacing=PERIODIC_TASK_INTERVAL, run_immediately=True) def connect_driver(self, context): + + if self.cli_sync: + return + """Trigger driver connect attempts to all devices.""" if self.lbdriver: self.lbdriver.connect() @periodic_task.periodic_task(spacing=(PERIODIC_TASK_INTERVAL/2)) def recover_errored_devices(self, context): + + if self.cli_sync: + return + """Try to reconnect to errored devices.""" if self.lbdriver: LOG.debug("running periodic task to recover disconnected BIG-IPs") @@ -550,6 +562,10 @@ def recover_errored_devices(self, context): @periodic_task.periodic_task( spacing=constants_v2.UPDATE_OPERATING_STATUS_INTERVAL) def scrub_dead_agents_in_env_and_group(self, context): + + if self.cli_sync: + return + """Triggering a dead agent scrub on the controller.""" LOG.debug("ccloud: scrubbing - running periodic scrub_dead_agents_in_env_and_group 
for EnvGroup %s", self.conf.environment_group_number) if not self.plugin_rpc: @@ -561,6 +577,10 @@ def scrub_dead_agents_in_env_and_group(self, context): @periodic_task.periodic_task( spacing=constants_v2.UPDATE_OPERATING_STATUS_INTERVAL) def update_operating_status(self, context): + + if self.cli_sync: + return + """Update pool member operational status from devices to controller.""" if not self.plugin_rpc: return @@ -585,6 +605,9 @@ def update_operating_status(self, context): @periodic_task.periodic_task(spacing=PERIODIC_TASK_INTERVAL) def periodic_resync(self, context): + if self.cli_sync: + return + """Determine if it is time to resync services from controller.""" try: now = datetime.datetime.now() diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py index eacbf61e9..e702acdd8 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/base_action.py @@ -63,8 +63,7 @@ def __init__(self,namespace): if namespace.log: common_config.setup_logging() - - self.manager = manager.LbaasAgentManager(cfg.CONF) + self.manager = manager.LbaasAgentManager(cfg.CONF, cli_sync=True) self.manager.lbdriver.make_bigips_operational() self.driver = self.manager.lbdriver diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py index 3e3db7750..db133def1 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/cli/actions/sync_all.py @@ -21,7 +21,7 @@ def execute(self): if self.project_id is not None: print("Syncing all LBs in project {}".format(self.project_id)) else: - print("Syncing all LBs hosted on agent {}".format(self.host)) + print("Syncing all LBs hosted on agent {}".format(self.manager.agent_host)) for service in services: From 
b13ef55de9acad52235ebdd153d8d09115a3641d Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 21 Feb 2019 11:36:56 +0100 Subject: [PATCH 097/109] Use mtu size from L2Network Layer for VLAN creation --- f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py | 9 +++++++++ .../lbaasv2/drivers/bigip/network_helper.py | 4 ++++ 2 files changed, 13 insertions(+) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py index 279d3c448..280c33519 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/l2_service.py @@ -255,6 +255,9 @@ def _assure_device_network_flat(self, network, bigip, network_folder): 'partition': network_folder, 'description': network['id'], 'route_domain_id': network['route_domain_id']} + if network['mtu']: + model['mtu'] = network['mtu'] + self.network_helper.create_vlan(bigip, model) except Exception as err: LOG.exception("%s", err.message) @@ -307,6 +310,9 @@ def _assure_device_network_vlan(self, network, bigip, network_folder): 'partition': network_folder, 'description': network['id'], 'route_domain_id': network['route_domain_id']} + if network['mtu']: + model['mtu'] = network['mtu'] + self.network_helper.create_vlan(bigip, model) except Exception as err: LOG.exception("%s", err.message) @@ -403,6 +409,9 @@ def _assure_vcmp_device_network(self, bigip, vlan): 'interface': vlan['interface'], 'description': vlan['network']['id'], 'route_domain_id': vlan['network']['route_domain_id']} + if vlan['network']['mtu']: + model['mtu'] = vlan['network']['mtu'] + try: self.network_helper.create_vlan(vcmp_host['bigip'], model) LOG.debug(('Created VLAN %s on vCMP Host %s' % diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index f6e38ba2f..5e1d5c596 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ 
b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -400,6 +400,7 @@ def create_vlan(self, bigip, model): description = model.get('description', None) route_domain_id = model.get('route_domain_id', const.DEFAULT_ROUTE_DOMAIN_ID) + mtu = model.get('mtu', None) if not name: return None v = bigip.tm.net.vlans.vlan @@ -412,6 +413,9 @@ def create_vlan(self, bigip, model): if description: payload['description'] = description + if mtu: + payload['mtu'] = mtu + obj = v.create(**payload) interface = model.get('interface', None) if interface: From 7085b7a3e267e56062b1c30fb26ddb6857eeecc6 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Mon, 18 Mar 2019 16:44:06 +0100 Subject: [PATCH 098/109] Do not update cache without service object --- f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index 5ebefe8b0..d0516cbfb 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -848,7 +848,8 @@ def validate_service(self, lb_id): LOG.info("ccloud: Start syncing loadbalancer '{}'".format(lb_id)) self.lbdriver.sync(service) LOG.info("ccloud: Finished syncing loadbalancer '{}'".format(lb_id)) - self.cache.put(service, self.agent_host) + if service: + self.cache.put(service, self.agent_host) except f5_ex.InvalidNetworkType as exc: LOG.warning(exc.msg) except q_exception.NeutronException as exc: From 0982a4f455490324375c8b999ff54b6b62f79b44 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Thu, 4 Apr 2019 10:49:46 +0200 Subject: [PATCH 099/109] LBaaS: Disarm messages for pool member status updates in case member are already gone. 
--- f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index d11a68a2e..69aa0e1d6 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -358,8 +358,7 @@ def get_member_status(self, service, bigip, status_keys): member_status = self.pool_helper.collect_stats( m, stat_keys=status_keys) else: - LOG.error("Unable to get member status. " - "Member %s does not exist.", member["name"]) + LOG.warning("Unable to get member status. Member %s does not exist.", member["name"]) except Exception as e: # log error but continue on From 50a82b2712b8ea7bf7a112741f7f2a03bdad030f Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 16 Apr 2019 16:11:29 +0200 Subject: [PATCH 100/109] LBaaS: Bugfix for default-pool/listener updates which prevents downtimes of the listener. Pool isn't removed and applied later, istead it's updated now. This cause 0 downtime. Related to issue: CCM-9985. Removed message in case of an upsert for pool members in case member already exists. 
--- .../lbaasv2/drivers/bigip/listener_service.py | 20 ++++++++----------- .../lbaasv2/drivers/bigip/pool_service.py | 4 +++- .../lbaasv2/drivers/bigip/service_adapter.py | 10 ++-------- 3 files changed, 13 insertions(+), 21 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index ec6861770..6542569db 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -290,7 +290,7 @@ def update_listener(self, service, bigips): # process esd's AND create new client ssl config for listener - vip = self.apply_esds(service) + self.apply_esds(service, vip) # apply changes to listener AND remove not needed ssl profiles on F5 error = None @@ -792,7 +792,7 @@ def _remove_irule(self, vs, irule_name, bigip, rule_partition='Common'): LOG.debug("Removed iRule {0} for virtual sever {1}". format(irule_name, vs_name)) - def apply_esds(self, service): + def apply_esds(self, service, vip): listener = service['listener'] @@ -817,8 +817,6 @@ def apply_esds(self, service): irules = [] # get virtual server name - update_attrs = self.service_adapter.get_virtual(service) - # get ssl certificates for listener tls = self.service_adapter.get_tls(service) # initialize client ssl profile with already existing certificates @@ -915,9 +913,9 @@ def apply_esds(self, service): # persistence if 'lbaas_persist' in esd: - update_attrs['persist'] = [{'name': esd['lbaas_persist']}] + vip['persist'] = [{'name': esd['lbaas_persist']}] if 'lbaas_fallback_persist' in esd: - update_attrs['fallbackPersistence'] = esd['lbaas_fallback_persist'] + vip['fallbackPersistence'] = esd['lbaas_fallback_persist'] # iRules if 'lbaas_irule' in esd: @@ -955,12 +953,10 @@ def apply_esds(self, service): profiles.append(compression_profile) if profiles: - update_attrs['profiles'] = profiles - - update_attrs['rules'] = 
update_attrs.get('rules',[])+irules + vip['profiles'] = profiles - update_attrs['policies'] = update_attrs.get('policies',[])+policies + vip['rules'] = vip.get('rules',[])+irules - LOG.info("APPLY_ESD: Listener after ESDs got applied: %s", update_attrs) + vip['policies'] = vip.get('policies',[])+policies - return update_attrs + LOG.info("APPLY_ESD: Listener after ESDs got applied: %s", vip) \ No newline at end of file diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index 69aa0e1d6..26039f682 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -217,7 +217,9 @@ def create_member(self, service, bigips): m.create(**member) LOG.info("Member created: %s", member['address']) except HTTPError as err: - LOG.info("Member creation FAILED: %s", member['address']) + # ccloud: Do not log failure because create is always called and updates are made in case of failure + # create member method logs it's own method in case of failure + #LOG.info("Member creation FAILED: %s", member['address']) ex = err if ex: raise ex diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index edaeb16cb..dc3a10850 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -365,9 +365,8 @@ def _map_virtual(self, loadbalancer, listener, pool=None): if pool: p = self.init_pool_name(loadbalancer, pool) vip["pool"] = p["name"] - - - + else: + vip["pool"] = None vip["description"] = self.get_resource_description(listener) @@ -404,11 +403,6 @@ def _map_virtual(self, loadbalancer, listener, pool=None): else: vip["disabled"] = True - if "pool" in listener: - vip["pool"] = listener["pool"] - else: - vip["pool"] = None - return vip def get_vlan(self, vip, bigip, network_id): From 
eca2d47d9e8bcb03fdb278cb6c89399a1f10ec3e Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 25 Jun 2019 09:27:40 +0200 Subject: [PATCH 101/109] Better resilience against F5 connection issues during startup phase --- .../lbaasv2/drivers/bigip/icontrol_driver.py | 162 ++++++++---------- 1 file changed, 73 insertions(+), 89 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 2a149bd48..5b068e073 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -575,113 +575,96 @@ def _init_bigips(self): self.__last_connect_attempt = datetime.datetime.now() + min_one_initialized = False for hostname in self.hostnames: - # connect to each BIG-IP and set it status - bigip = self._open_bigip(hostname) - if bigip.status == 'connected': - # set the status down until we assure initialized - bigip.status = 'initializing' - bigip.status_message = 'initializing HA viability' - LOG.debug('initializing HA viability %s' % hostname) - device_group_name = None - if not self.ha_validated: - device_group_name = self._validate_ha(bigip) - LOG.debug('HA validated from %s with DSG %s' % - (hostname, device_group_name)) - self.ha_validated = True - if not self.tg_initialized: - self._init_traffic_groups(bigip) - LOG.debug('learned traffic groups from %s as %s' % - (hostname, self.__traffic_groups)) - self.tg_initialized = True - LOG.debug('initializing bigip %s' % hostname) - self._init_bigip(bigip, hostname, device_group_name) - LOG.debug('initializing agent configurations %s' - % hostname) - self._init_agent_config(bigip) - # Assure basic BIG-IP HA is operational - LOG.debug('validating HA state for %s' % hostname) - bigip.status = 'validating_HA' - bigip.status_message = 'validating the current HA state' - if self._validate_ha_operational(bigip): - LOG.debug('setting status to active for %s' % hostname) 
- bigip.status = 'active' - bigip.status_message = 'BIG-IP ready for provisioning' - self._post_init() - else: - LOG.debug('setting status to error for %s' % hostname) - bigip.status = 'error' - bigip.status_message = 'BIG-IP is not operational' - self._set_agent_status(False) - else: - LOG.error('error opening BIG-IP %s - %s:%s' - % (hostname, bigip.status, bigip.status_message)) - self._set_agent_status(False) + if self._init_bigip_from_hostname(hostname): + min_one_initialized = True + + if min_one_initialized: + self._set_agent_status(force_resync=True) + else: + self._set_agent_status(force_resync=False) + except Exception as exc: LOG.error('Invalid agent configuration: %s' % exc.message) + self._set_agent_status(force_resync=False) raise - self._set_agent_status(force_resync=True) def _init_errored_bigips(self): + min_one_initialized = False try: errored_bigips = self.get_errored_bigips_hostnames() - recovered = False if errored_bigips: LOG.debug('attempting to recover %s BIG-IPs' % len(errored_bigips)) for hostname in errored_bigips: # try to connect and set status - bigip = self._open_bigip(hostname) - if bigip.status == 'connected': - # set the status down until we assure initialized - bigip.status = 'initializing' - bigip.status_message = 'initializing HA viability' - LOG.debug('initializing HA viability %s' % hostname) - LOG.debug('proceeding to initialize %s' % hostname) - device_group_name = None - if not self.ha_validated: - device_group_name = self._validate_ha(bigip) - LOG.debug('HA validated from %s with DSG %s' % - (hostname, device_group_name)) - self.ha_validated = True - if not self.tg_initialized: - self._init_traffic_groups(bigip) - LOG.debug('known traffic groups initialized', - ' from %s as %s' % - (hostname, self.__traffic_groups)) - self.tg_initialized = True - LOG.debug('initializing bigip %s' % hostname) - self._init_bigip(bigip, hostname, device_group_name) - LOG.debug('initializing agent configurations %s' - % hostname) - 
self._init_agent_config(bigip) - - # Assure basic BIG-IP HA is operational - LOG.debug('validating HA state for %s' % hostname) - bigip.status = 'validating_HA' - bigip.status_message = \ - 'validating the current HA state' - if self._validate_ha_operational(bigip): - LOG.debug('setting status to active for %s' - % hostname) - bigip.status = 'active' - bigip.status_message = \ - 'BIG-IP ready for provisioning' - self._post_init() - self._set_agent_status(True) - else: - LOG.debug('setting status to error for %s' - % hostname) - bigip.status = 'error' - bigip.status_message = 'BIG-IP is not operational' - self._set_agent_status(False) - recovered = True + if self._init_bigip_from_hostname(hostname): + min_one_initialized = True + if min_one_initialized: + self._set_agent_status(force_resync=True) + else: + self._set_agent_status(force_resync=False) else: LOG.debug('there are no disconnected BIG-IPs to recover') except Exception as exc: LOG.error('Invalid agent configuration: %s' % exc.message) raise - return recovered + return min_one_initialized + + def _init_bigip_from_hostname(self, hostname): + initialized = True + LOG.debug('ccloud: _init_bigip_from_hostname: %s' % hostname) + try: + # connect to each BIG-IP and set it status + bigip = self._open_bigip(hostname) + if bigip.status == 'connected': + # set the status down until we assure initialized + bigip.status = 'initializing' + bigip.status_message = 'initializing HA viability' + LOG.debug('initializing HA viability %s' % hostname) + device_group_name = None + if not self.ha_validated: + device_group_name = self._validate_ha(bigip) + LOG.debug('HA validated from %s with DSG %s' % + (hostname, device_group_name)) + self.ha_validated = True + if not self.tg_initialized: + self._init_traffic_groups(bigip) + LOG.debug('learned traffic groups from %s as %s' % + (hostname, self.__traffic_groups)) + self.tg_initialized = True + LOG.debug('initializing bigip %s' % hostname) + self._init_bigip(bigip, hostname, 
device_group_name) + LOG.debug('initializing agent configurations %s' + % hostname) + self._init_agent_config(bigip) + # Assure basic BIG-IP HA is operational + LOG.debug('validating HA state for %s' % hostname) + bigip.status = 'validating_HA' + bigip.status_message = 'validating the current HA state' + if self._validate_ha_operational(bigip): + LOG.debug('setting status to active for %s' % hostname) + bigip.status = 'active' + bigip.status_message = 'BIG-IP ready for provisioning' + self._post_init() + else: + LOG.debug('setting status to error for %s' % hostname) + bigip.status = 'error' + bigip.status_message = 'BIG-IP is not operational' + initialized = False + else: + LOG.error('error opening BIG-IP %s - %s:%s' + % (hostname, bigip.status, bigip.status_message)) + initialized = False + except Exception as e: + bigip.status = 'error' + bigip.status_message = 'BIG-IP is not operational' + LOG.error('ccloud: Invalid agent configuration: BIG-IP %s - %s:%s - %s' + % (hostname, bigip.status, bigip.status_message, e.message)) + initialized = False + finally: + return initialized def _open_bigip(self, hostname): # Open bigip connection """ @@ -967,6 +950,7 @@ def _set_agent_status(self, force_resync=False): self.agent_report_state(force_resync=force_resync) def get_failover_state(self, bigip): + # wtn ging schief try: if hasattr(bigip, 'tm'): fs = bigip.tm.sys.dbs.db.load(name='failover.state') From 1c8d7c56b165acc74ff8666c9563eeedecb405ec Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 25 Jun 2019 12:15:43 +0200 Subject: [PATCH 102/109] Switch TCP listeners from fastL4 to new standard profile cc_fastL4. 
New profile enables TCP Keep alive for 180 seconds --- f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py | 1 - f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py | 5 ++--- f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 5b068e073..11913f06c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -950,7 +950,6 @@ def _set_agent_status(self, force_resync=False): self.agent_report_state(force_resync=force_resync) def get_failover_state(self, bigip): - # wtn ging schief try: if hasattr(bigip, 'tm'): fs = bigip.tm.sys.dbs.db.load(name='failover.state') diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 6542569db..ae51d2418 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -543,7 +543,7 @@ def remove_session_persistence(self, service, bigips): # Revert VS back to fastL4. Must do an update to replace # profiles instead of using add/remove profile. Leave http # profiles in place for non-TCP listeners. - vip['profiles'] = ['/Common/fastL4'] + vip['profiles'] = ['/Common/cc_fastL4'] for bigip in bigips: # Check for custom app_cookie profile. 
@@ -802,8 +802,7 @@ def apply_esds(self, service, vip): if l7policies is None: return - - fastl4 = {'partition':'Common','name':'fastL4','context':'all'} + fastl4 = {'partition':'Common','name':'cc_fastL4','context':'all'} stcp_profiles = [] ctcp_profiles = [] cssl_profiles = [] diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py index dc3a10850..f18af5ac7 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/service_adapter.py @@ -441,7 +441,7 @@ def _add_bigip_items(self, listener, vip): vip['persist'] = [] if virtual_type == 'fastl4': - vip['profiles'] = ['/Common/fastL4'] + vip['profiles'] = ['/Common/cc_fastL4'] else: # add profiles for HTTP, HTTPS, TERMINATED_HTTPS protocols From 253812ce6daafce7f39a82fa738dbc3c8c6a88f4 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 9 Jul 2019 10:28:46 +0200 Subject: [PATCH 103/109] Fix Loadbalancer Error state during creation in case that network isn't created/available at that moment --- .../lbaasv2/drivers/bigip/network_service.py | 40 +++++++++++-------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 12fb72761..667ac25e2 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -135,19 +135,18 @@ def is_service_connected(self, service): segmentation_id = \ network.get('provider:segmentation_id', None) if not segmentation_id: - if network_type in supported_net_types and \ - self.conf.f5_network_segment_physical_network: + if network_type in supported_net_types and self.conf.f5_network_segment_physical_network: return False - - LOG.error("Misconfiguration: Segmentation ID is " - "missing from the service definition. 
" - "Please check the setting for " - "f5_network_segment_physical_network in " - "f5-openstack-agent.ini in case neutron " - "is operating in Hierarchical Port Binding " - "mode.") - raise f5_ex.InvalidNetworkDefinition( - "Network segment ID %s not defined" % network_id) + else: + LOG.error("Misconfiguration: Segmentation ID is " + "missing from the service definition. " + "Please check the setting for " + "f5_network_segment_physical_network in " + "f5-openstack-agent.ini in case neutron " + "is operating in Hierarchical Port Binding " + "mode.") + raise f5_ex.InvalidNetworkDefinition( + "Network segment ID %s not defined" % network_id) return True @@ -157,9 +156,13 @@ def prep_service_networking(self, service, traffic_group): if self.conf.f5_global_routed_mode: return - if not self.is_service_connected(service): + try: + if not self.is_service_connected(service): + raise f5_ex.NetworkNotReady( + "Network segment(s) definition incomplete") + except f5_ex.InvalidNetworkDefinition as exc: raise f5_ex.NetworkNotReady( - "Network segment(s) definition incomplete") + "Network segment(s) definition invalid %s", exc.message) if self.conf.use_namespaces: try: @@ -582,7 +585,11 @@ def get_neutron_net_short_name(network): net_type = network.get('provider:network_type', None) net_seg_key = network.get('provider:segmentation_id', None) if not net_type or not net_seg_key: - raise f5_ex.InvalidNetworkType + raise f5_ex.InvalidNetworkType( + 'Provider network attributes not complete:' + 'provider: network_type - {0} ' + 'and provider:segmentation_id - {1}' + .format(net_type, net_seg_key)) return net_type + '-' + str(net_seg_key) @@ -862,7 +869,7 @@ def _assure_delete_nets_shared(self, bigip, service, subnet_hints): my_deleted_names, my_in_use_subnets = \ self.bigip_snat_manager.delete_bigip_snats( - bigip, subnetinfo, tenant_id,lb_id) + bigip, subnetinfo, tenant_id, lb_id) deleted_names = deleted_names.union(my_deleted_names) for in_use_subnetid in my_in_use_subnets: 
subnet_hints['check_for_delete_subnets'].pop( @@ -1069,7 +1076,6 @@ def _get_subnets_to_assure(self, service): networks = dict() loadbalancer = service['loadbalancer'] service_adapter = self.service_adapter - lb_status = loadbalancer['provisioning_status'] if lb_status != plugin_const.PENDING_DELETE: if 'network_id' in loadbalancer: From 9ea9eb741866532f110399f4d49f531038ae48c1 Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Mon, 22 Jul 2019 09:36:37 +0200 Subject: [PATCH 104/109] Rework of assure_service logic to support terraform, ... better. (#4) Switched back to a modified origin serialized method to avoid conflicts on network, ... cleanup Delete snat pools on virtual address deletion Split assure into delete and create/update path to avoid object creations during delete actions --- .../lbaasv2/drivers/bigip/agent_manager.py | 1 + .../lbaasv2/drivers/bigip/icontrol_driver.py | 15 +- .../lbaasv2/drivers/bigip/lbaas_builder.py | 133 ++++++++++----- .../lbaasv2/drivers/bigip/listener_service.py | 39 +++-- .../lbaasv2/drivers/bigip/network_service.py | 99 ++++++------ .../lbaasv2/drivers/bigip/pool_service.py | 6 +- .../lbaasv2/drivers/bigip/tenants.py | 4 +- .../lbaasv2/drivers/bigip/utils.py | 151 ++++++++++-------- 8 files changed, 266 insertions(+), 182 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py index d0516cbfb..f5f71f880 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/agent_manager.py @@ -857,6 +857,7 @@ def validate_service(self, lb_id): except Exception as exc: LOG.exception("Service validation error: %s" % exc.message) + @staticmethod def has_provisioning_status_of_error(service): """Determine if a service is in an ERROR/DEGRADED status. 
diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py index 11913f06c..cea23d2e2 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py @@ -1654,7 +1654,7 @@ def update_listener(self, old_listener, listener, service): def delete_listener(self, listener, service): """Delete virtual server""" LOG.debug("Deleting listener") - return self._common_service_handler(service) + return self._common_service_handler(service, delete_event=True) @serialized('create_pool') @is_operational @@ -1675,7 +1675,7 @@ def update_pool(self, old_pool, pool, service): def delete_pool(self, pool, service): """Delete lb pool""" LOG.debug("Deleting pool") - return self._common_service_handler(service) + return self._common_service_handler(service, delete_event=True) @serialized('create_member') @is_operational @@ -1717,7 +1717,7 @@ def update_health_monitor(self, old_health_monitor, health_monitor, service): def delete_health_monitor(self, health_monitor, service): """Delete pool health monitor""" LOG.debug("Deleting health monitor") - return self._common_service_handler(service) + return self._common_service_handler(service, delete_event=True) # sapcc: get all snat pools @serialized('get_all_snat_pools') @@ -2141,14 +2141,17 @@ def _common_service_handler(self, service, LOG.debug("ccloud: Pre assure service ***********************************************") self.lbaas_builder.assure_service(service, traffic_group, - all_subnet_hints) + all_subnet_hints, + delete_event) LOG.debug("ccloud: Post assure service **********************************************") if self.network_builder: start_time = time() try: + LOG.debug("ccloud: Pre post_service_networking ***********************************************") self.network_builder.post_service_networking( service, all_subnet_hints) + LOG.debug("ccloud: Post post_service_networking 
**********************************************") except Exception as error: LOG.error("Post-network exception: icontrol_driver: %s", error.message) @@ -2579,7 +2582,7 @@ def update_l7policy(self, old_l7policy, l7policy, service): def delete_l7policy(self, l7policy, service): """Delete lb l7policy""" LOG.debug("Deleting l7policy") - self._common_service_handler(service) + self._common_service_handler(service, delete_event=True) @serialized('create_l7rule') @is_operational @@ -2600,7 +2603,7 @@ def update_l7rule(self, old_l7rule, l7rule, service): def delete_l7rule(self, l7rule, service): """Delete lb l7rule""" LOG.debug("Deleting l7rule") - self._common_service_handler(service) + self._common_service_handler(service, delete_event=True) def trace_service_requests(self, service): with open(self.file_name, 'r+') as fp: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py index 59e11100c..afb690b0d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/lbaas_builder.py @@ -56,40 +56,47 @@ def __init__(self, conf, driver, l2_service=None): self.esd = None @utils.instrument_execution_time - def assure_service(self, service, traffic_group, all_subnet_hints): + def assure_service(self, service, traffic_group, all_subnet_hints, delete_event=False): """Assure that a service is configured on the BIGIP.""" start_time = time() LOG.debug("Starting assure_service") + # Needed also for delete events because of subnet hints self._assure_loadbalancer_created(service, all_subnet_hints) + # Create and update + if not delete_event: + self._assure_pools_created(service) - self._assure_pools_created(service) + self._assure_listeners_created(service) - self._assure_listeners_created(service) + self._assure_monitors_created(service) - self._assure_monitors(service) + self._assure_members_created(service, all_subnet_hints) - 
self._assure_members(service, all_subnet_hints) + self._assure_pools_configured(service) - self._assure_pools_configured(service) + self._assure_l7policies_created(service) - self._assure_pools_deleted(service) + self._assure_l7rules_created(service) + else: # delete + self._assure_monitors_deleted(service) - self._assure_l7policies_created(service) + self._assure_members_deleted(service, all_subnet_hints) - self._assure_l7rules_created(service) + self._assure_l7rules_deleted(service) - self._assure_l7rules_deleted(service) + self._assure_l7policies_deleted(service) - self._assure_l7policies_deleted(service) + self._assure_pools_deleted(service) - self._assure_listeners_deleted(service) + self._assure_listeners_deleted(service) - self._assure_loadbalancer_deleted(service) + self._assure_loadbalancer_deleted(service) LOG.debug(" _assure_service took %.5f secs" % (time() - start_time)) return all_subnet_hints + @utils.instrument_execution_time def _assure_loadbalancer_created(self, service, all_subnet_hints): if 'loadbalancer' not in service: @@ -231,18 +238,20 @@ def _assure_pools_configured(self, service): # get associated listeners for pool for listener in pool['listeners']: - svc['listener'] = \ - self.get_listener_by_id(service, listener['id']) - self.listener_builder.update_listener_pool( - svc, pool_name["name"], bigips) + listener = self.get_listener_by_id(service, listener['id']) - # update virtual sever pool name, session persistence - self.listener_builder.update_session_persistence( - svc, bigips) + if listener: + svc['listener'] = listener + self.listener_builder.update_listener_pool( + svc, pool_name["name"], bigips) + # update virtual sever pool name, session persistence + self.listener_builder.update_session_persistence( + svc, bigips) # ccloud: update pool to set lb_method right self.pool_builder.update_pool(svc, bigips) + pool['provisioning_status'] = plugin_const.ACTIVE except HTTPError as err: if err.response.status_code != 409: @@ -250,14 +259,14 
@@ def _assure_pools_configured(self, service): loadbalancer['provisioning_status'] = ( plugin_const.ERROR) LOG.exception(err) - raise f5_ex.PoolCreationException(err.message) + raise f5_ex.PoolCreationException("ccloud: Error #1" + err.message) except Exception as err: pool['provisioning_status'] = plugin_const.ERROR loadbalancer['provisioning_status'] = plugin_const.ERROR - raise f5_ex.PoolCreationException(err.message) + LOG.exception(err) + raise f5_ex.PoolCreationException("ccloud: Error #2" + err.message) - pool['provisioning_status'] = plugin_const.ACTIVE @utils.instrument_execution_time def _get_pool_members(self, service, pool_id): '''Return a list of members associated with given pool.''' @@ -283,7 +292,8 @@ def _update_listener_pool(self, service, listener_id, pool_name, bigips): raise f5_ex.VirtualServerUpdateException(err.message) @utils.instrument_execution_time - def _assure_monitors(self, service): + def _assure_monitors_deleted(self, service): + if not (("pools" in service) and ("healthmonitors" in service)): return @@ -301,7 +311,22 @@ def _assure_monitors(self, service): except Exception as err: monitor['provisioning_status'] = plugin_const.ERROR raise f5_ex.MonitorDeleteException(err.message) - else: + + @utils.instrument_execution_time + def _assure_monitors_created(self, service): + + if not (("pools" in service) and ("healthmonitors" in service)): + return + + monitors = service["healthmonitors"] + loadbalancer = service["loadbalancer"] + bigips = self.driver.get_config_bigips() + + for monitor in monitors: + svc = {"loadbalancer": loadbalancer, + "healthmonitor": monitor, + "pool": self.get_pool_by_id(service, monitor["pool_id"])} + if monitor['provisioning_status'] != plugin_const.PENDING_DELETE: try: self.pool_builder.create_healthmonitor(svc, bigips) except HTTPError as err: @@ -318,8 +343,9 @@ def _assure_monitors(self, service): raise f5_ex.MonitorCreationException(err.message) monitor['provisioning_status'] = plugin_const.ACTIVE + 
@utils.instrument_execution_time - def _assure_members(self, service, all_subnet_hints): + def _assure_members_created(self, service, all_subnet_hints): if not (("pools" in service) and ("members" in service)): return @@ -338,14 +364,8 @@ def _assure_members(self, service, all_subnet_hints): LOG.warning("Member definition does not include Neutron port") # delete member if pool is being deleted - if member['provisioning_status'] == plugin_const.PENDING_DELETE or \ - pool['provisioning_status'] == plugin_const.PENDING_DELETE: - try: - self.pool_builder.delete_member(svc, bigips) - except Exception as err: - member['provisioning_status'] = plugin_const.ERROR - raise f5_ex.MemberDeleteException(err.message) - else: + if not (member['provisioning_status'] == plugin_const.PENDING_DELETE or \ + pool['provisioning_status'] == plugin_const.PENDING_DELETE): try: self.pool_builder.create_member(svc, bigips) member['provisioning_status'] = plugin_const.ACTIVE @@ -369,11 +389,45 @@ def _assure_members(self, service, all_subnet_hints): member['provisioning_status'] = plugin_const.ERROR raise f5_ex.MemberCreationException(err.message) - self._update_subnet_hints(member["provisioning_status"], - member["subnet_id"], - member["network_id"], - all_subnet_hints, - True) + self._update_subnet_hints(member["provisioning_status"], + member["subnet_id"], + member["network_id"], + all_subnet_hints, + True) + + @utils.instrument_execution_time + def _assure_members_deleted(self, service, all_subnet_hints): + if not (("pools" in service) and ("members" in service)): + return + + members = service["members"] + loadbalancer = service["loadbalancer"] + bigips = self.driver.get_config_bigips() + + for member in members: + pool = self.get_pool_by_id(service, member["pool_id"]) + svc = {"loadbalancer": loadbalancer, + "member": member, + "pool": pool} + + if 'port' not in member and \ + member['provisioning_status'] != plugin_const.PENDING_DELETE: + LOG.warning("Member definition does not include 
Neutron port") + + # delete member if pool is being deleted + if member['provisioning_status'] == plugin_const.PENDING_DELETE or \ + pool['provisioning_status'] == plugin_const.PENDING_DELETE: + try: + self.pool_builder.delete_member(svc, bigips) + except Exception as err: + member['provisioning_status'] = plugin_const.ERROR + raise f5_ex.MemberDeleteException(err.message) + + self._update_subnet_hints(member["provisioning_status"], + member["subnet_id"], + member["network_id"], + all_subnet_hints, + True) @utils.instrument_execution_time @@ -413,7 +467,6 @@ def _assure_pools_deleted(self, service): "pool": pool} try: - # update listeners for pool for listener in pool['listeners']: svc['listener'] = \ diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index ae51d2418..55d5b1f35 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -178,7 +178,11 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t container = self.cert_manager.get_container(container_ref) caClientTrust = bool(container.name and container.name.startswith('CATrust')) - + ex = None + error_default = False + error_not_default = False + # Add 2 client ssl profiles. 
Try to add both even if first one has errors + # try: # upload cert/key and create SSL profile ssl_profile.SSLProfileHelper.create_client_ssl_profile( @@ -191,7 +195,15 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t parent_profile=self.parent_ssl_profile, caClientTrust=caClientTrust ) + except HTTPError as err: + if err.response.status_code != 409: + ex = err + error_default = True + except Exception as e: + ex = e + error_default = True + try: # upload cert/key and create SSL profile ssl_profile.SSLProfileHelper.create_client_ssl_profile( bigip, @@ -203,7 +215,13 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t parent_profile=self.parent_ssl_profile, caClientTrust=caClientTrust ) - + except HTTPError as err: + if err.response.status_code != 409: + ex = err + error_not_default = True + except Exception as e: + ex = e + error_not_default = True finally: del cert del key @@ -212,9 +230,13 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t # add ssl profile to virtual server if add_to_vip: f5name = name - if not sni_default: + if sni_default and not error_default: + self._add_profile(vip, f5name, bigip, context='clientside') + elif not sni_default and not error_not_default: f5name += '_NotDefault' - self._add_profile(vip, f5name, bigip, context='clientside') + self._add_profile(vip, f5name, bigip, context='clientside') + if ex: + raise ex def update_listener(self, service, bigips): u"""Update Listener from a single BIG-IP system. @@ -366,7 +388,6 @@ def update_session_persistence(self, service, bigips): persistence = pool['session_persistence'] persistence_type = persistence['type'] vip_persist = self.service_adapter.get_session_persistence(service) - listener = service['listener'] for bigip in bigips: # For TCP listeners, must remove fastL4 profile before adding # adding http/oneconnect profiles. 
@@ -593,14 +614,14 @@ def _remove_ssl_profile(self, name, bigip): ssl_client_profile = bigip.tm.ltm.profile.client_ssls.client_ssl if ssl_client_profile.exists(name=name, partition='Common'): obj = ssl_client_profile.load(name=name, partition='Common') - obj.delete() + if obj: + obj.delete() + LOG.info("ccloud: SSL Profile deleted: %s" % name) except Exception as err: # Not necessarily an error -- profile might be referenced # by another virtual server. - LOG.warn( - "Unable to delete profile %s. " - "Response message: %s." % (name, err.message)) + LOG.warn("Unable to delete profile %s . Response message: %s ." % (name, err.message)) def _remove_profile(self, vip, profile_name, bigip): """Delete profile. diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py index 667ac25e2..b0dcc5664 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_service.py @@ -645,7 +645,7 @@ def _assure_lb_snats(self, assure_bigips, service, subnetinfo): if match is not None: ip_address = ip_address[:-len(match.group(0))] - snat_addrs = [ip_address] + snat_addrs = [ip_address] for assure_bigip in assure_bigips: self.bigip_snat_manager.assure_bigip_snats( assure_bigip, subnetinfo, snat_addrs, tenant_id, lb_id) @@ -704,34 +704,26 @@ def post_service_networking(self, service, all_subnet_hints): # Delete shared config objects deleted_names = set() + lb_is_last_on_network = self._is_last_on_network(service) + for bigip in self.driver.get_config_bigips(): LOG.debug('post_service_networking: calling ' - '_assure_delete_networks del nets sh for bigip %s %s' + '_assure_delete_networks del nets shared for bigip %s %s' % (bigip.device_name, all_subnet_hints)) subnet_hints = all_subnet_hints[bigip.device_name] - deleted_names = deleted_names.union( - self._assure_delete_nets_shared(bigip, service, - subnet_hints)) + deleted_names = 
deleted_names.union(self._assure_delete_nets_shared(bigip, service, subnet_hints, lb_is_last_on_network)) # Delete non shared config objects for bigip in self.driver.get_all_bigips(): LOG.debug(' post_service_networking: calling ' - ' _assure_delete_networks del nets ns for bigip %s' + ' _assure_delete_networks del nets NONshared for bigip %s' % bigip.device_name) - subnet_hints = all_subnet_hints[bigip.device_name] - - deleted_names = deleted_names.union( - self._assure_delete_nets_nonshared( - bigip, service, subnet_hints) - ) + deleted_names = deleted_names.union(self._assure_delete_nets_nonshared(bigip, service, subnet_hints, lb_is_last_on_network)) for port_name in deleted_names: - LOG.debug(' post_service_networking: calling ' - ' del port %s' - % port_name) - self.driver.plugin_rpc.delete_port_by_name( - port_name=port_name) + LOG.debug(' post_service_networking: calling del port %s' % port_name) + self.driver.plugin_rpc.delete_port_by_name(port_name=port_name) @utils.instrument_execution_time def update_bigip_l2(self, service): @@ -848,60 +840,67 @@ def delete_bigip_vip_l2(self, bigip, loadbalancer): self.l2_service.delete_bigip_fdbs( bigip, net_folder, fdb_info, loadbalancer) - def _assure_delete_nets_shared(self, bigip, service, subnet_hints): + def _assure_delete_nets_shared(self, bigip, service, subnet_hints, lb_is_last_on_network): # Assure shared configuration (which syncs) is deleted deleted_names = set() tenant_id = service['loadbalancer']['tenant_id'] lb_id = service['loadbalancer']['id'] + # delete all snats for a subnet id subnet doesn't hold any ip's anymore delete_gateway = self.bigip_selfip_manager.delete_gateway_on_subnet - for subnetinfo in self._get_subnets_to_delete(bigip, - service, - subnet_hints): + subnet_to_delete, subnet_with_deletion = self._get_subnets_to_delete(bigip, service, subnet_hints) + for subnetinfo in subnet_to_delete: try: + my_deleted_names, my_in_use_subnets = self.bigip_snat_manager.delete_bigip_snats(bigip, 
subnetinfo, tenant_id, lb_id) + deleted_names = deleted_names.union(my_deleted_names) + for in_use_subnetid in my_in_use_subnets: + subnet_hints['check_for_delete_subnets'].pop(in_use_subnetid, None) + if not self.conf.f5_snat_mode: gw_name = delete_gateway(bigip, subnetinfo) deleted_names.add(gw_name) - else: - if self._is_last_on_network(service): - self.network_helper.delete_route(bigip, const.DEFAULT_PARTITION,subnetinfo['subnet_id']) + elif lb_is_last_on_network: + self.network_helper.delete_route(bigip, const.DEFAULT_PARTITION,subnetinfo['subnet_id']) + except NeutronException as exc: + LOG.error("assure_delete_nets_shared: exception #1: %s" + % str(exc.msg)) + except Exception as exc: + LOG.error("assure_delete_nets_shared: exception #2: %s" + % str(exc.message)) - my_deleted_names, my_in_use_subnets = \ - self.bigip_snat_manager.delete_bigip_snats( - bigip, subnetinfo, tenant_id, lb_id) + # delete one snat for a loadbalancer if a load balancer deletion happend + for subnetinfo in subnet_with_deletion: + try: + my_deleted_names, my_in_use_subnets = self.bigip_snat_manager.delete_bigip_snats(bigip, subnetinfo, tenant_id, lb_id) deleted_names = deleted_names.union(my_deleted_names) - for in_use_subnetid in my_in_use_subnets: - subnet_hints['check_for_delete_subnets'].pop( - in_use_subnetid, None) + except NeutronException as exc: - LOG.error("assure_delete_nets_shared: exception: %s" + LOG.error("assure_delete_nets_shared: exception #3: %s" % str(exc.msg)) except Exception as exc: - LOG.error("assure_delete_nets_shared: exception: %s" + LOG.error("assure_delete_nets_shared: exception #4: %s" % str(exc.message)) return deleted_names @utils.instrument_execution_time - def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): + def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints, lb_is_last_on_network): # Delete non shared base objects for networks deleted_names = set() - if not self._is_last_on_network(service): + if not 
lb_is_last_on_network: return deleted_names - for subnetinfo in self._get_subnets_to_delete(bigip, - service, - subnet_hints): + subnet_to_delete, subnet_with_deletion = self._get_subnets_to_delete(bigip, service, subnet_hints) + for subnetinfo in subnet_to_delete: try: network = subnetinfo['network'] if self.l2_service.is_common_network(network): network_folder = 'Common' else: - network_folder = self.service_adapter.get_folder_name( - service['loadbalancer']['tenant_id']) + network_folder = self.service_adapter.get_folder_name(service['loadbalancer']['tenant_id']) subnet = subnetinfo['subnet'] if self.conf.f5_populate_static_arp: @@ -913,12 +912,10 @@ def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): ) - if self._is_last_on_network(service): + if lb_is_last_on_network: self.network_helper.delete_route(bigip, const.DEFAULT_PARTITION,subnetinfo['subnet_id']) - - local_selfip_name = "local-" + bigip.device_name + \ - "-" + subnet['id'] + local_selfip_name = "local-" + bigip.device_name + "-" + subnet['id'] selfip_address = self.bigip_selfip_manager.get_selfip_addr( bigip, @@ -927,8 +924,7 @@ def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): ) if not selfip_address: - LOG.error("Failed to get self IP address %s in cleanup.", - local_selfip_name) + LOG.error("Failed to get self IP address %s in cleanup.", local_selfip_name) self.bigip_selfip_manager.delete_selfip( bigip, @@ -937,8 +933,7 @@ def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): ) if self.l3_binding and selfip_address: - self.l3_binding.unbind_address(subnet_id=subnet['id'], - ip_address=selfip_address) + self.l3_binding.unbind_address(subnet_id=subnet['id'], ip_address=selfip_address) deleted_names.add(local_selfip_name) @@ -950,8 +945,7 @@ def _assure_delete_nets_nonshared(self, bigip, service, subnet_hints): self.remove_from_rds_cache(network, subnet) tenant_id = service['loadbalancer']['tenant_id'] if tenant_id in 
bigip.assured_tenant_snat_subnets: - tenant_snat_subnets = \ - bigip.assured_tenant_snat_subnets[tenant_id] + tenant_snat_subnets = bigip.assured_tenant_snat_subnets[tenant_id] if subnet['id'] in tenant_snat_subnets: tenant_snat_subnets.remove(subnet['id']) except NeutronException as exc: @@ -977,10 +971,11 @@ def _is_last_on_network(self, service): return True @utils.instrument_execution_time - def _get_subnets_to_delete(self, bigip, service, subnet_hints): + def _get_subnets_to_delete(self, bigip, service, subnet_hints, whole_subnet=True): # Clean up any Self IP, SNATs, networks, and folder for # services items that we deleted. subnets_to_delete = [] + subnets_with_deletion = [] for subnetinfo in subnet_hints['check_for_delete_subnets'].values(): subnet = self.service_adapter.get_subnet_from_service( service, subnetinfo['subnet_id']) @@ -997,8 +992,10 @@ def _get_subnets_to_delete(self, bigip, service, subnet_hints): subnet, route_domain): subnets_to_delete.append(subnetinfo) + else: + subnets_with_deletion.append(subnetinfo) - return subnets_to_delete + return subnets_to_delete, subnets_with_deletion @utils.instrument_execution_time def _ips_exist_on_subnet(self, bigip, service, subnet, route_domain): diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py index 26039f682..a1f4d375e 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py @@ -114,9 +114,9 @@ def update_pool(self, service, bigips): for bigip in bigips: try: self.pool_helper.update(bigip, pool) - LOG.info("Pool updated FAILED: %s", pool['name']) + LOG.info("Pool updated DONE: %s", pool['name']) except HTTPError as err: - LOG.info("Pool update FAILED: %s", pool['name']) + LOG.debug("Pool update FAILED: %s", pool['name']) ex = err if ex: raise ex @@ -257,7 +257,7 @@ def delete_member(self, service, bigips): # Possilbe error if node is shared with 
another member. # If so, ignore the error. if err.response.status_code == 400: - LOG.debug(err.message) + LOG.debug("ccloud: Node %s not deleted because it's referenced as member somewhere else" % node['name']) else: LOG.info("Member or Node deletion FAILED: %s", member['address']) ex = err diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py index 130292d3b..45cd29b1c 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py @@ -80,7 +80,7 @@ def assure_tenant_created(self, service): route_domain_id = None bigiprds = [] for bigip in self.driver.get_all_bigips(): - bigip_route_domain = self.network_helper.route_domain_exists(bigip, const.DEFAULT_PARTITION,network_id) + bigip_route_domain = self.network_helper.route_domain_exists(bigip, const.DEFAULT_PARTITION, network_id) bigip_route_domain_id = bigip_route_domain.id if bigip_route_domain else None # rd already created but not different between bigips (maybe not created on all of them) if bigip_route_domain_id and route_domain_id is None: @@ -117,7 +117,7 @@ def assure_tenant_created(self, service): # error within rd creation procedure except Exception as err: LOG.exception(err.message) - raise f5ex.RouteDomainCreationException("Failed to create route domain for network %s in tenant in %s" % (network_id, const.DEFAULT_PARTITION)) + raise f5ex.RouteDomainCreationException("Failed to create route domain for network %s in tenant %s" % (network_id, const.DEFAULT_PARTITION)) LOG.debug("Allocated route domain for network %s for tenant %s" % (network_id, tenant_id)) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py index 42a5eaa9a..5f26f5109 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/utils.py @@ -88,69 +88,7 @@ def strip_domain_address(ip_address): else: return 
ip_address.split('%')[0] -def serialized (method_name): - """Outer wrapper in order to specify method name.""" - - def real_serialized(method): - """Decorator to serialize calls to configure via iControl.""" - def wrapper(*args, **kwargs): - # args[0] must be an instance of iControlDriver - my_request_id = uuid.uuid4() - - service = None - if len(args) > 0: - last_arg = args[-1] - if isinstance(last_arg, dict) and ('loadbalancer' in last_arg): - service = last_arg - if 'service' in kwargs: - service = kwargs['service'] - - lb_id = 'generic' - - if service is not None: - lb = service.get('loadbalancer') - if lb is not None: - lb_id = lb.get('id') - - queue = args[0].queues.get(lb_id) - if queue is None: - queue = eventlet.queue.Queue(1) - args[0].queues[lb_id] = queue - - wait_start = time() - queue.put(my_request_id) - LOG.debug('Waited %.2f secs to put request %s on to queue %s %s' - % (time() - wait_start, my_request_id, lb_id, queue)) - - - try: - - start_time = time() - - result = method(*args, **kwargs) - LOG.debug('%s request %s took %.5f secs' - % (str(method_name), my_request_id, - time() - start_time)) - - except Exception: - LOG.error('%s request %s FAILED' - % (str(method_name), my_request_id)) - raise - finally: - wait_start = time() - wait_request = queue.get() - LOG.debug('Waited %.2f secs to get request %s' - % (time() - wait_start,wait_request)) - - - return result - - return wrapper - - return real_serialized - - -def serialized_old(method_name): +def serialized(method_name): """Outer wrapper in order to specify method name.""" def real_serialized(method): """Decorator to serialize calls to configure via iControl.""" @@ -160,13 +98,13 @@ def wrapper(*args, **kwargs): service_queue = args[0].service_queue my_request_id = uuid.uuid4() - service = None - if len(args) > 0: - last_arg = args[-1] - if isinstance(last_arg, dict) and ('loadbalancer' in last_arg): - service = last_arg - if 'service' in kwargs: - service = kwargs['service'] + # service = 
None + # if len(args) > 0: + # last_arg = args[-1] + # if isinstance(last_arg, dict) and ('loadbalancer' in last_arg): + # service = last_arg + # if 'service' in kwargs: + # service = kwargs['service'] # Consolidate create_member requests for the same pool. # @@ -178,7 +116,8 @@ def wrapper(*args, **kwargs): # To avoid race conditions, DO NOT add logging to this code # block. - req = (my_request_id, method_name, service) + #req = (my_request_id, method_name, service) + req = my_request_id service_queue.append(req) reqs_ahead_of_us = request_index(service_queue, my_request_id) while reqs_ahead_of_us != 0: @@ -216,7 +155,15 @@ def wrapper(*args, **kwargs): def request_index(request_queue, request_id): """Get index of request in request queue. + If we are not in the queue return the length of the list. + """ + try: + return request_queue.index(request_id) + except Exception: + return len(request_queue) +def request_index_xxx(request_queue, request_id): + """Get index of request in request queue. If we are not in the queue return the length of the list. 
""" for request in request_queue: @@ -225,6 +172,68 @@ def request_index(request_queue, request_id): return len(request_queue) +def serialized_ccloud (method_name): + """Outer wrapper in order to specify method name.""" + + def real_serialized(method): + """Decorator to serialize calls to configure via iControl.""" + def wrapper(*args, **kwargs): + # args[0] must be an instance of iControlDriver + my_request_id = uuid.uuid4() + + service = None + if len(args) > 0: + last_arg = args[-1] + if isinstance(last_arg, dict) and ('loadbalancer' in last_arg): + service = last_arg + if 'service' in kwargs: + service = kwargs['service'] + + lb_id = 'generic' + + if service is not None: + lb = service.get('loadbalancer') + if lb is not None: + lb_id = lb.get('id') + + queue = args[0].queues.get(lb_id) + if queue is None: + queue = eventlet.queue.Queue(1) + args[0].queues[lb_id] = queue + + wait_start = time() + queue.put(my_request_id) + LOG.debug('Waited %.2f secs to put request %s on to queue %s %s' + % (time() - wait_start, my_request_id, lb_id, queue)) + + + try: + + start_time = time() + + result = method(*args, **kwargs) + LOG.debug('%s request %s took %.5f secs' + % (str(method_name), my_request_id, + time() - start_time)) + + except Exception: + LOG.error('%s request %s FAILED' + % (str(method_name), my_request_id)) + raise + finally: + wait_start = time() + wait_request = queue.get() + LOG.debug('Waited %.2f secs to get request %s' + % (time() - wait_start,wait_request)) + + + return result + + return wrapper + + return real_serialized + + def get_filter(bigip, key, op, value): if LooseVersion(bigip.tmos_version) < LooseVersion('11.6.0'): return '$filter=%s+%s+%s' % (key, op, value) From 348d4fcdf40a1d78599375d7fbc8e40b6714ff2f Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Wed, 23 Oct 2019 08:06:45 +0200 Subject: [PATCH 105/109] Security fix --- requirements.functest.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.functest.txt 
b/requirements.functest.txt index 83f12d919..8ab33f95c 100644 --- a/requirements.functest.txt +++ b/requirements.functest.txt @@ -16,4 +16,4 @@ pytest-cov>=2.4.0,<3 responses==0.5.1 coverage==4.2 python-coveralls==2.8.0 -requests<=2.12.5 +requests>=2.20.0 From 5c42b78eb7c1f1c93d1b6c754e3c163e4a8b1e5a Mon Sep 17 00:00:00 2001 From: Torsten Lesmann Date: Tue, 5 Nov 2019 16:52:14 +0100 Subject: [PATCH 106/109] Set hardwareSyncookie on VLAN to enable SYN Flood protection --- f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py index 5e1d5c596..ee0e7164d 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/network_helper.py @@ -407,9 +407,11 @@ def create_vlan(self, bigip, model): if v.exists(name=name, partition=partition): obj = v.load(name=name, partition=partition) else: + # ccloud: Enable SYN Flood protection payload = {'name': name, 'partition': partition, - 'tag': tag} + 'tag': tag, + 'hardwareSyncookie': 'enabled'} if description: payload['description'] = description From d83a2b2700bd9a39cffbaf4f914376e29390b9d5 Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Tue, 11 Feb 2020 15:10:43 +0100 Subject: [PATCH 107/109] support for ssl key passphrases --- .../lbaasv2/drivers/bigip/listener_service.py | 7 +++++-- f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py | 5 ++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py index 55d5b1f35..9e103adf4 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/listener_service.py @@ -172,6 +172,7 @@ def add_ssl_profile(self, tls, bigip, add_to_vip=True): def 
create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_to_vip=True): cert = self.cert_manager.get_certificate(container_ref) key = self.cert_manager.get_private_key(container_ref) + key_passphrase = self.cert_manager.get_private_key_passphrase(container_ref) intermediate = self.cert_manager.get_intermediates(container_ref) name = self.cert_manager.get_name(container_ref, self.service_adapter.prefix) @@ -193,7 +194,8 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t intermediate, sni_default=True, parent_profile=self.parent_ssl_profile, - caClientTrust=caClientTrust + caClientTrust=caClientTrust, + key_passphrase=key_passphrase ) except HTTPError as err: if err.response.status_code != 409: @@ -213,7 +215,8 @@ def create_ssl_profile(self, container_ref, bigip, vip, sni_default=False, add_t intermediate, sni_default=False, parent_profile=self.parent_ssl_profile, - caClientTrust=caClientTrust + caClientTrust=caClientTrust, + key_passphrase=key_passphrase ) except HTTPError as err: if err.response.status_code != 409: diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py index 052a8d17c..fc3220ee1 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py @@ -28,7 +28,8 @@ class SSLProfileHelper(object): @staticmethod def create_client_ssl_profile( - bigip, name, cert, key, intermediate=None, sni_default=False, parent_profile=None, caClientTrust=False): + bigip, name, cert, key, intermediate=None, sni_default=False, parent_profile=None, caClientTrust=False, + key_passphrase=None): uploader = bigip.shared.file_transfer.uploads cert_registrar = bigip.tm.sys.crypto.certs intermediate_registrar = bigip.tm.sys.crypto.certs @@ -95,6 +96,8 @@ def create_client_ssl_profile( 'cert': '/Common/' + certfilename, 'key': '/Common/' + keyfilename}] + if key_password: + 
chain[0]['passphrase'] = key_passphrase if caClientTrust and intermediate: ssl_client_profile.create(name=profilename, From 4cf3e9ab94631cbcb81b3dfdf707897fd87c55fa Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Wed, 12 Feb 2020 16:55:20 +0100 Subject: [PATCH 108/109] added missing get_private_key_passphrase --- .../lbaasv2/drivers/bigip/barbican_cert.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py b/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py index 343aba524..a4644e030 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/barbican_cert.py @@ -147,6 +147,17 @@ def get_private_key(self, container_ref): container = self.barbican.containers.get(container_ref) return container.private_key.payload + def get_private_key_passphrase(self, container_ref): + """Retrieves key passphrase from certificate manager. + + :param string container_ref: Reference to key stored in a + certificate manager. + :returns string: passphrase. + This method MUST be implemented, in agent-compliant cert managers. + """ + container = self.barbican.containers.get(container_ref) + return container.private_key_passphrase.payload + def get_name(self, container_ref, prefix): """Returns a name that uniquely identifies cert/key pair. 
From 61e19c85dcca202db8c594aaa09dee1c1389fc79 Mon Sep 17 00:00:00 2001 From: Andrew Karpow Date: Wed, 12 Feb 2020 17:42:16 +0100 Subject: [PATCH 109/109] fix rename of key_passphrase --- f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py index fc3220ee1..ccabc11a9 100644 --- a/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py +++ b/f5_openstack_agent/lbaasv2/drivers/bigip/ssl_profile.py @@ -96,7 +96,7 @@ def create_client_ssl_profile( 'cert': '/Common/' + certfilename, 'key': '/Common/' + keyfilename}] - if key_password: + if key_passphrase: chain[0]['passphrase'] = key_passphrase if caClientTrust and intermediate: