diff --git a/include/envoy/upstream/upstream.h b/include/envoy/upstream/upstream.h index 0eca9224fdea6..1eb310f1df0a5 100644 --- a/include/envoy/upstream/upstream.h +++ b/include/envoy/upstream/upstream.h @@ -354,6 +354,11 @@ class PrioritySet { */ virtual const std::vector& hostSetsPerPriority() const PURE; + /** + * @return true if the priority set does not have any hosts in any priority. + */ + virtual bool empty() const PURE; + /** * Parameter class for updateHosts. */ @@ -771,6 +776,11 @@ class Cluster { * @return the const PrioritySet for the cluster. */ virtual const PrioritySet& prioritySet() const PURE; + + /** + * @return true if this cluster was initialized by an empty config update. + */ + virtual bool initializedByEmptyConfig() const PURE; }; typedef std::shared_ptr ClusterSharedPtr; diff --git a/source/common/upstream/cluster_manager_impl.cc b/source/common/upstream/cluster_manager_impl.cc index d8cf188c8a41e..a4883ddb746f1 100644 --- a/source/common/upstream/cluster_manager_impl.cc +++ b/source/common/upstream/cluster_manager_impl.cc @@ -473,18 +473,55 @@ bool ClusterManagerImpl::addOrUpdateCluster(const envoy::api::v2::Cluster& clust cluster_warming_cb(cluster_name, ClusterWarmingState::Starting); cluster_entry->cluster_->initialize([this, cluster_name, cluster_warming_cb] { auto warming_it = warming_clusters_.find(cluster_name); - auto& cluster_entry = *warming_it->second; + auto& warming_cluster_entry = *warming_it->second; // If the cluster is being updated, we need to cancel any pending merged updates. // Otherwise, applyUpdates() will fire with a dangling cluster reference. updates_map_.erase(cluster_name); + // If the management server sends an EDS response for any other cluster, grpc_mux_impl calls + // onConfigUpdate on this cluster with empty resources. + // See + // https://github.com/envoyproxy/envoy/blob/master/source/common/config/grpc_mux_impl.cc#L161 + // for more details.
If the cluster is in the fully initialized state, that would just increment + // the update_empty stat. However, if the cluster is in the warming state the initialization callback + // would be triggered and the warming cluster would not have any hosts. So if onConfigUpdate was + // triggered by an EDS update that had no references to this cluster and the active cluster has + // some hosts, copy the active cluster priority set to the warming cluster to prevent the + // hosts from being cleared after warming. + // See https://github.com/envoyproxy/envoy/issues/5168 for more context. + // This also ensures that we adhere to the clause "When a requested resource is missing in a + // RDS or EDS update, Envoy will retain the last known value for this resource." as documented + // in https://github.com/envoyproxy/data-plane-api/blob/master/XDS_PROTOCOL.md. + const auto active_it = active_clusters_.find(cluster_name); + if (active_it != active_clusters_.end()) { + const auto& active_cluster_entry = *active_it->second; + if (warming_cluster_entry.cluster_->initializedByEmptyConfig() && + !active_cluster_entry.cluster_->prioritySet().empty()) { + ENVOY_LOG(debug, "copying host set from active cluster {} to warming cluster", + cluster_name); + const auto& active_host_sets = + active_cluster_entry.cluster_->prioritySet().hostSetsPerPriority(); + for (size_t priority = 0; priority < active_host_sets.size(); ++priority) { + const auto& active_host_set = active_host_sets[priority]; + // TODO(ramaraochavali): Can we skip these copies by exporting out const shared_ptr from + // HostSet?
+ HostVectorConstSharedPtr hosts_copy(new HostVector(active_host_set->hosts())); + HostsPerLocalityConstSharedPtr hosts_per_locality_copy = + active_host_set->hostsPerLocality().clone(); + warming_cluster_entry.cluster_->prioritySet().updateHosts( + priority, HostSetImpl::partitionHosts(hosts_copy, hosts_per_locality_copy), + active_host_set->localityWeights(), {}, {}, + active_host_set->overprovisioningFactor()); + } + } + } active_clusters_[cluster_name] = std::move(warming_it->second); warming_clusters_.erase(warming_it); ENVOY_LOG(info, "warming cluster {} complete", cluster_name); - createOrUpdateThreadLocalCluster(cluster_entry); - onClusterInit(*cluster_entry.cluster_); + createOrUpdateThreadLocalCluster(warming_cluster_entry); + onClusterInit(*warming_cluster_entry.cluster_); cluster_warming_cb(cluster_name, ClusterWarmingState::Finished); updateGauges(); }); diff --git a/source/common/upstream/eds.cc b/source/common/upstream/eds.cc index f1895f37a88ad..e647a7a7f2886 100644 --- a/source/common/upstream/eds.cc +++ b/source/common/upstream/eds.cc @@ -47,7 +47,7 @@ void EdsClusterImpl::onConfigUpdate(const ResourceVector& resources, const std:: if (resources.empty()) { ENVOY_LOG(debug, "Missing ClusterLoadAssignment for {} in onConfigUpdate()", cluster_name_); info_->stats().update_empty_.inc(); - onPreInitComplete(); + onPreInitComplete(true); return; } if (resources.size() != 1) { @@ -120,7 +120,7 @@ void EdsClusterImpl::onConfigUpdate(const ResourceVector& resources, const std:: // If we didn't setup to initialize when our first round of health checking is complete, just // do it now. - onPreInitComplete(); + onPreInitComplete(false); } bool EdsClusterImpl::updateHostsPerLocality( @@ -163,7 +163,7 @@ bool EdsClusterImpl::updateHostsPerLocality( void EdsClusterImpl::onConfigUpdateFailed(const EnvoyException* e) { UNREFERENCED_PARAMETER(e); // We need to allow server startup to continue, even if we have a bad config. 
- onPreInitComplete(); + onPreInitComplete(false); } } // namespace Upstream diff --git a/source/common/upstream/health_discovery_service.h b/source/common/upstream/health_discovery_service.h index cd1b5f412b34f..3a7a9b505084f 100644 --- a/source/common/upstream/health_discovery_service.h +++ b/source/common/upstream/health_discovery_service.h @@ -55,6 +55,7 @@ class HdsCluster : public Cluster, Logger::Loggable { Outlier::Detector* outlierDetector() override { return outlier_detector_.get(); } const Outlier::Detector* outlierDetector() const override { return outlier_detector_.get(); } void initialize(std::function callback) override; + bool initializedByEmptyConfig() const override { return false; } // Creates and starts healthcheckers to its endpoints void startHealthchecks(AccessLog::AccessLogManager& access_log_manager, Runtime::Loader& runtime, diff --git a/source/common/upstream/logical_dns_cluster.cc b/source/common/upstream/logical_dns_cluster.cc index c5bcc8bbb67df..c8ade9c0f8708 100644 --- a/source/common/upstream/logical_dns_cluster.cc +++ b/source/common/upstream/logical_dns_cluster.cc @@ -134,7 +134,7 @@ void LogicalDnsCluster::startResolve() { } } - onPreInitComplete(); + onPreInitComplete(false); resolve_timer_->enableTimer(dns_refresh_rate_ms_); }); } diff --git a/source/common/upstream/original_dst_cluster.h b/source/common/upstream/original_dst_cluster.h index b88f18d57be29..3ed3774f45347 100644 --- a/source/common/upstream/original_dst_cluster.h +++ b/source/common/upstream/original_dst_cluster.h @@ -109,7 +109,7 @@ class OriginalDstCluster : public ClusterImplBase { void cleanup(); // ClusterImplBase - void startPreInit() override { onPreInitComplete(); } + void startPreInit() override { onPreInitComplete(false); } Event::Dispatcher& dispatcher_; const std::chrono::milliseconds cleanup_interval_ms_; diff --git a/source/common/upstream/subset_lb.h b/source/common/upstream/subset_lb.h index edc88780e4461..9b3a6503680dc 100644 --- 
a/source/common/upstream/subset_lb.h +++ b/source/common/upstream/subset_lb.h @@ -68,7 +68,7 @@ class SubsetLoadBalancer : public LoadBalancer, Logger::Loggable(&getOrCreateHostSet(priority)); diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 25bb107e13707..1e3a540041456 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -741,13 +741,13 @@ void ClusterImplBase::initialize(std::function callback) { startPreInit(); } -void ClusterImplBase::onPreInitComplete() { +void ClusterImplBase::onPreInitComplete(const bool empty_update) { // Protect against multiple calls. if (initialization_started_) { return; } initialization_started_ = true; - + empty_update_ = empty_update; ENVOY_LOG(debug, "initializing secondary cluster {} completed", info()->name()); init_manager_.initialize([this]() { onInitDone(); }); } @@ -1059,7 +1059,7 @@ void StaticClusterImpl::startPreInit() { } priority_state_manager_.reset(); - onPreInitComplete(); + onPreInitComplete(false); } bool BaseDynamicClusterImpl::updateDynamicHostList(const HostVector& new_hosts, @@ -1372,7 +1372,7 @@ void StrictDnsClusterImpl::ResolveTarget::startResolve() { // multiple DNS names, this will return initialized after a single DNS resolution // completes. This is not perfect but is easier to code and unclear if the extra // complexity is needed so will start with this. 
- parent_.onPreInitComplete(); + parent_.onPreInitComplete(false); resolve_timer_->enableTimer(parent_.dns_refresh_rate_ms_); }); } diff --git a/source/common/upstream/upstream_impl.h b/source/common/upstream/upstream_impl.h index 54e41f9a71750..f11bc0bfb31c9 100644 --- a/source/common/upstream/upstream_impl.h +++ b/source/common/upstream/upstream_impl.h @@ -413,6 +413,15 @@ class PrioritySetImpl : public PrioritySet { const HostVector& hosts_removed, absl::optional overprovisioning_factor = absl::nullopt) override; + bool empty() const override { + for (auto const& host_set : host_sets_) { + if (!host_set->hosts().empty()) { + return false; + } + } + return true; + } + protected: // Allows subclasses of PrioritySetImpl to create their own type of HostSetImpl. virtual HostSetImplPtr createHostSet(uint32_t priority, @@ -578,6 +587,7 @@ class ClusterImplBase : public Cluster, protected Logger::Loggable initialization_complete_callback_; uint64_t pending_initialize_health_checks_{}; + bool empty_update_{}; }; /** diff --git a/test/common/upstream/cluster_manager_impl_test.cc b/test/common/upstream/cluster_manager_impl_test.cc index 9c6a57c7c7cd2..9bdc83dfb2593 100644 --- a/test/common/upstream/cluster_manager_impl_test.cc +++ b/test/common/upstream/cluster_manager_impl_test.cc @@ -1209,6 +1209,230 @@ TEST_F(ClusterManagerImplTest, DynamicAddRemove) { EXPECT_TRUE(Mock::VerifyAndClearExpectations(callbacks.get())); } +// This test validates that if a warming cluster's initialization is triggered via an empty config +// update, it does not clear the active cluster hosts. Regression test to validate the behaviour +// observed in https://github.com/envoyproxy/envoy/issues/5168.
+TEST_F(ClusterManagerImplTest, WarmingClusterWithEmptyConfigUpdate) { + const std::string json = R"EOF( + { + "clusters": [] + } + )EOF"; + + create(parseBootstrapFromJson(json)); + + InSequence s; + ReadyWatcher initialized; + EXPECT_CALL(initialized, ready()); + cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); + + std::shared_ptr cluster1(new NiceMock()); + + // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy for active cluster. + HostSharedPtr host1 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + host1->healthFlagSet(HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); + HostSharedPtr host2 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + host2->healthFlagSet(HostImpl::HealthFlag::FAILED_ACTIVE_HC); + HostSharedPtr host3 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + + HostVector hosts{host1, host2, host3}; + auto hosts_ptr = std::make_shared(hosts); + + cluster1->priority_set_.updateHosts( + 0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, {}, + 200); + + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)).WillOnce(Return(cluster1)); + EXPECT_CALL(*cluster1, initializePhase()).Times(0); + EXPECT_CALL(*cluster1, initialize(_)); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "", + dummyWarmingCb)); + checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); + EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); + cluster1->initialize_callback_(); + + EXPECT_EQ(cluster1->info_, cluster_manager_->get("fake_cluster")->info()); + checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/); + + // Now trigger warming of this cluster with empty config update. 
+ auto update_cluster = defaultStaticCluster("fake_cluster"); + update_cluster.mutable_per_connection_buffer_limit_bytes()->set_value(12345); + + std::shared_ptr cluster2(new NiceMock()); + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)).WillOnce(Return(cluster2)); + EXPECT_CALL(*cluster2, initializePhase()).Times(0); + EXPECT_CALL(*cluster2, initializedByEmptyConfig()).WillOnce(Return(true)); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(update_cluster, "", dummyWarmingCb)); + checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 1 /*active*/, 1 /*warming*/); + cluster2->initialize_callback_(); + + checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/); + + EXPECT_EQ(cluster2->info_, cluster_manager_->get("fake_cluster")->info()); + EXPECT_EQ(1UL, cluster_manager_->clusters().size()); + + // Validate that the host updates are pushed to tls clusters. + auto* tls_cluster = cluster_manager_->get(cluster2->info_->name()); + + EXPECT_EQ(1, tls_cluster->prioritySet().hostSetsPerPriority().size()); + EXPECT_EQ(1, tls_cluster->prioritySet().hostSetsPerPriority()[0]->degradedHosts().size()); + EXPECT_EQ(host1, tls_cluster->prioritySet().hostSetsPerPriority()[0]->degradedHosts()[0]); + EXPECT_EQ(1, tls_cluster->prioritySet().hostSetsPerPriority()[0]->healthyHosts().size()); + EXPECT_EQ(host3, tls_cluster->prioritySet().hostSetsPerPriority()[0]->healthyHosts()[0]); + EXPECT_EQ(3, tls_cluster->prioritySet().hostSetsPerPriority()[0]->hosts().size()); + + factory_.tls_.shutdownThread(); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster2.get())); +} + +// Validates the behaviour when warming cluster's initialization is triggered with empty hosts EDS +// update. 
+TEST_F(ClusterManagerImplTest, WarmingClusterWithEmptyHosts) { + const std::string json = R"EOF( + { + "clusters": [] + } + )EOF"; + + create(parseBootstrapFromJson(json)); + + InSequence s; + ReadyWatcher initialized; + EXPECT_CALL(initialized, ready()); + cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); + + std::shared_ptr cluster1(new NiceMock()); + + // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy for active cluster. + HostSharedPtr host1 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + host1->healthFlagSet(HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); + HostSharedPtr host2 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + host2->healthFlagSet(HostImpl::HealthFlag::FAILED_ACTIVE_HC); + HostSharedPtr host3 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + + HostVector hosts{host1, host2, host3}; + auto hosts_ptr = std::make_shared(hosts); + + cluster1->priority_set_.updateHosts( + 0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, {}, + 200); + + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)).WillOnce(Return(cluster1)); + EXPECT_CALL(*cluster1, initializePhase()).Times(0); + EXPECT_CALL(*cluster1, initialize(_)); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "", + dummyWarmingCb)); + checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 0 /*active*/, 1 /*warming*/); + EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); + cluster1->initialize_callback_(); + + EXPECT_EQ(cluster1->info_, cluster_manager_->get("fake_cluster")->info()); + checkStats(1 /*added*/, 0 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/); + + // Now trigger warming of the cluster with empty hosts. This validates the intentional empty + // hosts update sent by management server is processed correctly. 
+ auto update_cluster = defaultStaticCluster("fake_cluster"); + update_cluster.mutable_per_connection_buffer_limit_bytes()->set_value(12345); + + std::shared_ptr cluster2(new NiceMock()); + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)).WillOnce(Return(cluster2)); + EXPECT_CALL(*cluster2, initializePhase()).Times(0); + EXPECT_CALL(*cluster2, initializedByEmptyConfig()).WillOnce(Return(false)); + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(update_cluster, "", dummyWarmingCb)); + checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 1 /*active*/, 1 /*warming*/); + cluster2->initialize_callback_(); + + checkStats(1 /*added*/, 1 /*modified*/, 0 /*removed*/, 1 /*active*/, 0 /*warming*/); + + EXPECT_EQ(cluster2->info_, cluster_manager_->get("fake_cluster")->info()); + EXPECT_EQ(1UL, cluster_manager_->clusters().size()); + + // Validate that TLS cluster has empty priority set. + auto* tls_cluster = cluster_manager_->get(cluster2->info_->name()); + + EXPECT_EQ(true, tls_cluster->prioritySet().empty()); + + factory_.tls_.shutdownThread(); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster2.get())); +} + +// Validates that TLS updates are triggered correctly when warming cluster is initialized with empty +// config update. +TEST_F(ClusterManagerImplTest, WarmingClusterWithEmptyConfigUpdateTriggersTlsUpdatesCorrectly) { + createWithLocalClusterUpdate(); + + InSequence s; + ReadyWatcher initialized; + EXPECT_CALL(initialized, ready()); + cluster_manager_->setInitializedCb([&]() -> void { initialized.ready(); }); + + std::shared_ptr cluster1(new NiceMock()); + + // Set up the HostSet with 1 healthy, 1 degraded and 1 unhealthy for active cluster. 
+ HostSharedPtr host1 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + host1->healthFlagSet(HostImpl::HealthFlag::DEGRADED_ACTIVE_HC); + HostSharedPtr host2 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + host2->healthFlagSet(HostImpl::HealthFlag::FAILED_ACTIVE_HC); + HostSharedPtr host3 = makeTestHost(cluster1->info_, "tcp://127.0.0.1:80"); + + HostVector hosts{host1, host2, host3}; + auto hosts_ptr = std::make_shared(hosts); + + cluster1->priority_set_.updateHosts( + 0, HostSetImpl::partitionHosts(hosts_ptr, HostsPerLocalityImpl::empty()), nullptr, hosts, {}, + 200); + + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)).WillOnce(Return(cluster1)); + EXPECT_CALL(*cluster1, initializePhase()).Times(0); + EXPECT_CALL(*cluster1, initialize(_)); + + // Validate that TLS updates are triggered correctly. + EXPECT_CALL(local_cluster_update_, post(_, _, _)) + .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed) -> void { + EXPECT_EQ(0, priority); + EXPECT_EQ(3, hosts_added.size()); + EXPECT_EQ(0, hosts_removed.size()); + })); + + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(defaultStaticCluster("fake_cluster"), "", + dummyWarmingCb)); + EXPECT_EQ(nullptr, cluster_manager_->get("fake_cluster")); + cluster1->initialize_callback_(); + + EXPECT_EQ(cluster1->info_, cluster_manager_->get("fake_cluster")->info()); + + // Now trigger warming of this cluster with no hosts. + auto update_cluster = defaultStaticCluster("fake_cluster"); + update_cluster.mutable_per_connection_buffer_limit_bytes()->set_value(12345); + + std::shared_ptr cluster2(new NiceMock()); + EXPECT_CALL(factory_, clusterFromProto_(_, _, _, _)).WillOnce(Return(cluster2)); + EXPECT_CALL(*cluster2, initializePhase()).Times(0); + EXPECT_CALL(*cluster2, initializedByEmptyConfig()).WillOnce(Return(true)); + + // Validate that TLS updates are triggered correctly after warming. 
+ EXPECT_CALL(local_cluster_update_, post(_, _, _)) + .WillOnce(Invoke([](uint32_t priority, const HostVector& hosts_added, + const HostVector& hosts_removed) -> void { + EXPECT_EQ(0, priority); + EXPECT_EQ(3, hosts_added.size()); + EXPECT_EQ(0, hosts_removed.size()); + })); + + EXPECT_TRUE(cluster_manager_->addOrUpdateCluster(update_cluster, "", dummyWarmingCb)); + cluster2->initialize_callback_(); + + EXPECT_EQ(cluster2->info_, cluster_manager_->get("fake_cluster")->info()); + + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster1.get())); + EXPECT_TRUE(Mock::VerifyAndClearExpectations(cluster2.get())); +} + TEST_F(ClusterManagerImplTest, addOrUpdateClusterStaticExists) { const std::string json = fmt::sprintf("{%s}", clustersJson({defaultStaticClusterJson("some_cluster")})); diff --git a/test/mocks/upstream/mocks.h b/test/mocks/upstream/mocks.h index 8e4cfd71589db..e9a67886840f3 100644 --- a/test/mocks/upstream/mocks.h +++ b/test/mocks/upstream/mocks.h @@ -88,6 +88,7 @@ class MockPrioritySet : public PrioritySet { MOCK_CONST_METHOD1(addMemberUpdateCb, Common::CallbackHandle*(MemberUpdateCb callback)); MOCK_CONST_METHOD1(addPriorityUpdateCb, Common::CallbackHandle*(PriorityUpdateCb callback)); MOCK_CONST_METHOD0(hostSetsPerPriority, const std::vector&()); + MOCK_CONST_METHOD0(empty, bool()); MOCK_METHOD0(hostSetsPerPriority, std::vector&()); MOCK_METHOD6(updateHosts, void(uint32_t priority, UpdateHostsParams&& update_hosts_params, LocalityWeightsConstSharedPtr locality_weights, @@ -159,6 +160,7 @@ class MockCluster : public Cluster { MOCK_METHOD1(initialize, void(std::function callback)); MOCK_CONST_METHOD0(initializePhase, InitializePhase()); MOCK_CONST_METHOD0(sourceAddress, const Network::Address::InstanceConstSharedPtr&()); + MOCK_CONST_METHOD0(initializedByEmptyConfig, bool()); std::shared_ptr info_{new NiceMock()}; std::function initialize_callback_;