Merged
28 commits
17ba151
Get rid of redundant Builder class on abstract CloudSolrClient
jdyer1 Oct 13, 2025
b5c6c99
Make CloudHttp2SolrClient generic; update references to remove compil…
jdyer1 Oct 13, 2025
f55e6e4
remove generics; test with jdk client
jdyer1 Oct 13, 2025
4f31577
Enhance JDK client to track if it has issued a HEAD request by unique…
jdyer1 Oct 14, 2025
6923675
fix test randomization
jdyer1 Oct 14, 2025
dc9f389
make it final
jdyer1 Oct 14, 2025
d4bea59
use jdk client sometimes in retry test
jdyer1 Oct 14, 2025
1fe02cb
tidy
jdyer1 Oct 14, 2025
dd98e51
remove system.out.println
jdyer1 Oct 15, 2025
06b72c9
Require that sCNSP users pass in CSC with Http2SolrClient
jdyer1 Oct 15, 2025
f4e8d04
javadoc update
jdyer1 Oct 15, 2025
35831c5
tidy
jdyer1 Oct 15, 2025
0e50350
if not client or builder passed, decide between H2SC or HJSc based on…
jdyer1 Oct 15, 2025
474fe5e
tidy
jdyer1 Oct 15, 2025
9835ec0
make precommit happy
jdyer1 Oct 15, 2025
3577dfa
Merge branch 'main' into feature/SOLR-17771
jdyer1 Oct 15, 2025
398a9b6
fix two more classes that require http2solrclient delegate w/csc
jdyer1 Oct 15, 2025
d2fbe96
flub the metrics assert a little!
jdyer1 Oct 15, 2025
1bf8223
tidy
jdyer1 Oct 15, 2025
da66549
fix randomization bug - suggested by copilot
jdyer1 Oct 15, 2025
26ad6a9
really strip off query params - suggested by copilot
jdyer1 Oct 15, 2025
10de799
null the variable on teardown - suggested by copilot
jdyer1 Oct 15, 2025
042eae6
tidy
jdyer1 Oct 15, 2025
6ca39b4
Update solr/solrj/src/java/org/apache/solr/client/solrj/impl/HttpJdkS…
jdyer1 Oct 16, 2025
fc91fbe
- make doc comments generic (suggested by copilot)
jdyer1 Oct 16, 2025
d47d922
tidy
jdyer1 Oct 16, 2025
2c83145
Update solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudHtt…
jdyer1 Oct 17, 2025
7c5717f
fix awkward wording
jdyer1 Oct 17, 2025
2 changes: 1 addition & 1 deletion gradle/libs.versions.toml
@@ -487,9 +487,9 @@ opentelemetry-exporter-sender-okhttp = { module = "io.opentelemetry:opentelemetr
opentelemetry-runtime-telemetry = { module = "io.opentelemetry.instrumentation:opentelemetry-runtime-telemetry-java17", version.ref = "opentelemetry-runtime-telemetry" }
opentelemetry-sdk = { module = "io.opentelemetry:opentelemetry-sdk", version.ref = "opentelemetry" }
opentelemetry-sdk-extension-autoconfigure = { module = "io.opentelemetry:opentelemetry-sdk-extension-autoconfigure", version.ref = "opentelemetry" }
opentelemetry-sdk-metrics = { module = "io.opentelemetry:opentelemetry-sdk-metrics", version.ref = "opentelemetry" }
opentelemetry-sdk-testing = { module = "io.opentelemetry:opentelemetry-sdk-testing", version.ref = "opentelemetry" }
opentelemetry-sdk-trace = { module = "io.opentelemetry:opentelemetry-sdk-trace", version.ref = "opentelemetry" }
opentelemetry-sdk-metrics = { module = "io.opentelemetry:opentelemetry-sdk-metrics", version.ref = "opentelemetry" }
osgi-annotation = { module = "org.osgi:osgi.annotation", version.ref = "osgi-annotation" }
oshai-logging = { module = "io.github.oshai:kotlin-logging", version.ref = "oshai-logging" }
# @keep transitive dependency for version alignment
4 changes: 3 additions & 1 deletion solr/core/src/java/org/apache/solr/cli/RunExampleTool.java
@@ -44,6 +44,7 @@
import org.apache.commons.exec.environment.EnvironmentUtils;
import org.apache.commons.io.file.PathUtils;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.util.EnvUtils;
@@ -629,7 +630,8 @@ protected void runCloudExample(CommandLine cli) throws Exception {
/** wait until the number of live nodes == numNodes. */
protected void waitToSeeLiveNodes(String zkHost, int numNodes) {
try (CloudSolrClient cloudClient =
new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).build()) {
new CloudHttp2SolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
.build()) {
cloudClient.connect();
Set<String> liveNodes = cloudClient.getClusterState().getLiveNodes();
int numLiveNodes = (liveNodes != null) ? liveNodes.size() : 0;
@@ -43,6 +43,7 @@
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;
@@ -121,7 +122,7 @@ public void close() {

protected CloudSolrClient createSolrClient() {
log.debug("Creating new SolrClient...");
return new CloudSolrClient.Builder(
return new CloudHttp2SolrClient.Builder(
Collections.singletonList(zkConnectString), Optional.empty())
.build();
}
@@ -31,7 +31,7 @@
import org.apache.solr.client.solrj.SolrQuery.ORDER;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.beans.Field;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
@@ -254,7 +254,7 @@ private SolrClient getBaseURLCloudSolrClient() {
final List<String> solrUrls = new ArrayList<>();
solrUrls.add("http://solr1:8983/solr");
solrUrls.add("http://solr2:8983/solr");
return new CloudSolrClient.Builder(solrUrls).build();
return new CloudHttp2SolrClient.Builder(solrUrls).build();
// end::solrj-cloudsolrclient-baseurl[]
}

@@ -264,7 +264,7 @@ private SolrClient getZookeeperNoRootCloudSolrClient() {
zkServers.add("zookeeper1:2181");
zkServers.add("zookeeper2:2181");
zkServers.add("zookeeper3:2181");
return new CloudSolrClient.Builder(zkServers, Optional.empty()).build();
return new CloudHttp2SolrClient.Builder(zkServers, Optional.empty()).build();
// end::solrj-cloudsolrclient-zookeepernoroot[]
}

@@ -274,7 +274,7 @@ private SolrClient getZookeeperRootCloudSolrClient() {
zkServers.add("zookeeper1:2181");
zkServers.add("zookeeper2:2181");
zkServers.add("zookeeper3:2181");
return new CloudSolrClient.Builder(zkServers, Optional.of("/solr")).build();
return new CloudHttp2SolrClient.Builder(zkServers, Optional.of("/solr")).build();
// end::solrj-cloudsolrclient-zookeeperroot[]
}

@@ -180,8 +180,7 @@ private void getRemoteMetricsFromTags(

String baseUrl =
ctx.zkClientClusterStateProvider.getZkStateReader().getBaseUrlForNodeName(ctx.getNode());
SimpleSolrResponse rsp =
ctx.cloudSolrClient.getHttpClient().requestWithBaseUrl(baseUrl, req::process);
SimpleSolrResponse rsp = ctx.http2SolrClient().requestWithBaseUrl(baseUrl, req::process);

// TODO come up with a better solution to stream this response instead of loading in memory
try (InputStream prometheusStream = (InputStream) rsp.getResponse().get(STREAM_KEY)) {
solr/solrj/src/java/org/apache/solr/client/solrj/impl/SolrClientNodeStateProvider.java
@@ -59,6 +59,10 @@ public class SolrClientNodeStateProvider implements NodeStateProvider, MapWriter
private Map<String, Map> nodeVsTags = new HashMap<>();

public SolrClientNodeStateProvider(CloudHttp2SolrClient solrClient) {
if (!(solrClient.getHttpClient() instanceof Http2SolrClient)) {
Contributor: Why bother insisting on this?

Contributor Author (jdyer1): These call the very special "requestWithBaseUrl" method on the Jetty client. I was unable to quickly create a common API for this with both clients, so I decided for now we just need to enforce the Jetty client. In practice, I would think these are only used internally, so nothing is going to pass the JDK client here.

throw new IllegalArgumentException(
"The passed-in Cloud Solr Client must delegate to " + Http2SolrClient.class);
}
this.solrClient = solrClient;
try {
readReplicaDetails();
@@ -216,8 +220,7 @@ static void processMetricStream(

try (InputStream in =
(InputStream)
ctx.cloudSolrClient
.getHttpClient()
ctx.http2SolrClient()
.requestWithBaseUrl(baseUrl, req::process)
.getResponse()
.get(STREAM_KEY)) {
@@ -257,12 +260,20 @@ public boolean isNodeAlive(String node) {
}

public RemoteCallCtx(String node, CloudHttp2SolrClient cloudSolrClient) {
if (!(cloudSolrClient.getHttpClient() instanceof Http2SolrClient)) {
throw new IllegalArgumentException(
"The passed-in Cloud Solr Client must delegate to " + Http2SolrClient.class);
}
this.node = node;
this.cloudSolrClient = cloudSolrClient;
this.zkClientClusterStateProvider =
(ZkClientClusterStateProvider) cloudSolrClient.getClusterStateProvider();
}

protected Http2SolrClient http2SolrClient() {
return (Http2SolrClient) cloudSolrClient.getHttpClient();
}

/**
* Will attempt to call {@link #invoke(String, String, SolrParams)} up to five times, retrying
* on any IO Exceptions
@@ -311,7 +322,8 @@ public SimpleSolrResponse invoke(String solrNode, String path, SolrParams params
request.setResponseParser(new JavaBinResponseParser());

try {
return cloudSolrClient.getHttpClient().requestWithBaseUrl(url, request::process);
return ((Http2SolrClient) cloudSolrClient.getHttpClient())
.requestWithBaseUrl(url, request::process);
} catch (SolrServerException | IOException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Fetching replica metrics failed", e);
}
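To make the new precondition concrete, here is a hedged caller-side sketch (not part of the PR; the package path for SolrClientNodeStateProvider and the ZooKeeper address are assumptions): the CloudHttp2SolrClient handed to SolrClientNodeStateProvider must delegate to the Jetty-based Http2SolrClient, since the requestWithBaseUrl calls above only exist on that client.

```java
import java.util.Collections;
import java.util.Optional;
import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
import org.apache.solr.client.solrj.impl.Http2SolrClient;
import org.apache.solr.client.solrj.impl.SolrClientNodeStateProvider; // package assumed

public class NodeStateProviderSketch {
  public static void main(String[] args) throws Exception {
    // Build a cloud client whose delegate is explicitly the Jetty-based Http2SolrClient.
    try (CloudHttp2SolrClient cloudClient =
        new CloudHttp2SolrClient.Builder(
                Collections.singletonList("zookeeper1:2181"), Optional.empty()) // placeholder ZK host
            .withHttpClientBuilder(new Http2SolrClient.Builder())
            .build()) {
      // Accepted: getHttpClient() returns an Http2SolrClient, so the instanceof check passes.
      SolrClientNodeStateProvider stateProvider = new SolrClientNodeStateProvider(cloudClient);
      // A client delegating to HttpJdkSolrClient would instead hit the IllegalArgumentException above.
    }
  }
}
```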
solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudHttp2SolrClient.java
@@ -18,6 +18,7 @@
package org.apache.solr.client.solrj.impl;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@@ -28,29 +29,45 @@
import org.apache.solr.client.solrj.request.RequestWriter;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* SolrJ client class to communicate with SolrCloud using Http2SolrClient. Instances of this class
* communicate with Zookeeper to discover Solr endpoints for SolrCloud collections, and then use the
* {@link LBHttp2SolrClient} to issue requests.
* SolrJ client class to communicate with SolrCloud using an Http/2-capable Solr Client. Instances
* of this class communicate with Zookeeper to discover Solr endpoints for SolrCloud collections,
* and then use the {@link LBHttp2SolrClient} to issue requests.
*
* @since solr 8.0
*/
@SuppressWarnings("serial")
public class CloudHttp2SolrClient extends CloudSolrClient {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

private final ClusterStateProvider stateProvider;
private final LBHttp2SolrClient<Http2SolrClient> lbClient;
private final Http2SolrClient myClient;
private final LBHttp2SolrClient<HttpSolrClientBase> lbClient;
private final HttpSolrClientBase myClient;
private final boolean clientIsInternal;

private static final boolean JETTY_CLIENT_AVAILABLE;

static {
boolean jettyClientAvailable = true;
try {
Class.forName("org.eclipse.jetty.client.HttpClient");
} catch (ClassNotFoundException e) {
jettyClientAvailable = false;
}
JETTY_CLIENT_AVAILABLE = jettyClientAvailable;
}

/**
* Create a new client object that connects to Zookeeper and is always aware of the SolrCloud
* state. If there is a fully redundant Zookeeper quorum and SolrCloud has enough replicas for
* every shard in a collection, there is no single point of failure. Updates will be sent to shard
* leaders by default.
*
* @param builder a {@link Http2SolrClient.Builder} with the options used to create the client.
* @param builder a {@link CloudHttp2SolrClient.Builder} with the options used to create the
* client.
*/
protected CloudHttp2SolrClient(Builder builder) {
super(builder.shardLeadersOnly, builder.parallelUpdates, builder.directUpdatesToLeadersOnly);
@@ -73,16 +90,20 @@ protected CloudHttp2SolrClient(Builder builder) {
// locks.
this.locks = objectList(builder.parallelCacheRefreshesLocks);

this.lbClient = new LBHttp2SolrClient.Builder<Http2SolrClient>(myClient).build();
this.lbClient = new LBHttp2SolrClient.Builder<>(myClient).build();
}

private Http2SolrClient createOrGetHttpClientFromBuilder(Builder builder) {
private HttpSolrClientBase createOrGetHttpClientFromBuilder(Builder builder) {
if (builder.httpClient != null) {
return builder.httpClient;
} else if (builder.internalClientBuilder != null) {
return builder.internalClientBuilder.build();
} else {
} else if (JETTY_CLIENT_AVAILABLE) {
log.debug("Using {} as the delegate http client", Http2SolrClient.class);
return new Http2SolrClient.Builder().build();
} else {
log.debug("Using {} as the delegate http client", HttpJdkSolrClient.class);
return new HttpJdkSolrClient.Builder().build();
}
}

@@ -113,7 +134,7 @@ private ClusterStateProvider createZkClusterStateProvider(Builder builder) {
}

private ClusterStateProvider createHttp2ClusterStateProvider(
List<String> solrUrls, Http2SolrClient httpClient) {
List<String> solrUrls, HttpSolrClientBase httpClient) {
try {
return new Http2ClusterStateProvider<>(solrUrls, httpClient);
} catch (Exception e) {
@@ -148,7 +169,7 @@ public void close() throws IOException {
}

@Override
public LBHttp2SolrClient<Http2SolrClient> getLbClient() {
public LBHttp2SolrClient<?> getLbClient() {
return lbClient;
}

@@ -157,7 +178,7 @@ public ClusterStateProvider getClusterStateProvider() {
return stateProvider;
}

public Http2SolrClient getHttpClient() {
public HttpSolrClientBase getHttpClient() {
return myClient;
}

@@ -171,12 +192,12 @@ public static class Builder {
protected Collection<String> zkHosts = new ArrayList<>();
protected List<String> solrUrls = new ArrayList<>();
protected String zkChroot;
protected Http2SolrClient httpClient;
protected HttpSolrClientBase httpClient;
protected boolean shardLeadersOnly = true;
protected boolean directUpdatesToLeadersOnly = false;
protected boolean parallelUpdates = true;
protected ClusterStateProvider stateProvider;
protected Http2SolrClient.Builder internalClientBuilder;
protected HttpSolrClientBuilderBase<?, ?> internalClientBuilder;
private RequestWriter requestWriter;
private ResponseParser responseParser;
private long retryExpiryTimeNano =
@@ -368,13 +389,13 @@ public Builder withCollectionCacheTtl(long timeToLive, TimeUnit unit) {
}

/**
* Set the internal {@link Http2SolrClient}.
* Set the internal Solr HTTP client.
*
* <p>Note: closing the client instance is the responsibility of the caller.
*
* @return this
*/
public Builder withHttpClient(Http2SolrClient httpSolrClient) {
public Builder withHttpClient(HttpSolrClientBase httpSolrClient) {
if (this.internalClientBuilder != null) {
throw new IllegalStateException(
"The builder can't accept an httpClient AND an internalClientBuilder, only one of those can be provided");
@@ -384,14 +405,14 @@ public Builder withHttpClient(Http2SolrClient httpSolrClient) {
}

/**
* If provided, the CloudHttp2SolrClient will build it's internal Http2SolrClient using this
* builder (instead of the empty default one). Providing this builder allows users to configure
* the internal clients (authentication, timeouts, etc.).
* If provided, the CloudHttp2SolrClient will build it's internal client using this builder
* (instead of the empty default one). Providing this builder allows users to configure the
* internal clients (authentication, timeouts, etc.).
*
* @param internalClientBuilder the builder to use for creating the internal http client.
* @return this
*/
public Builder withHttpClientBuilder(Http2SolrClient.Builder internalClientBuilder) {
public Builder withHttpClientBuilder(HttpSolrClientBuilderBase<?, ?> internalClientBuilder) {
if (this.httpClient != null) {
throw new IllegalStateException(
"The builder can't accept an httpClient AND an internalClientBuilder, only one of those can be provided");
@@ -401,7 +422,8 @@ public Builder withHttpClientBuilder(Http2SolrClient.Builder internalClientBuild
}

@Deprecated(since = "9.10")
public Builder withInternalClientBuilder(Http2SolrClient.Builder internalClientBuilder) {
public Builder withInternalClientBuilder(
HttpSolrClientBuilderBase<?, ?> internalClientBuilder) {
return withHttpClientBuilder(internalClientBuilder);
}

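Pulling the builder changes together, this is a hedged sketch (URLs are placeholders; class and method names are taken from the diff above) of the three ways the delegate HTTP client can now be supplied to CloudHttp2SolrClient:

```java
import java.util.List;
import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
import org.apache.solr.client.solrj.impl.Http2SolrClient;
import org.apache.solr.client.solrj.impl.HttpJdkSolrClient;

public class DelegateClientWiringSketch {
  private static final List<String> SOLR_URLS = List.of("http://solr1:8983/solr"); // placeholder

  // 1. Hand over an already-built delegate; closing it stays the caller's responsibility.
  static CloudHttp2SolrClient explicitClient(HttpJdkSolrClient jdkClient) {
    return new CloudHttp2SolrClient.Builder(SOLR_URLS).withHttpClient(jdkClient).build();
  }

  // 2. Hand over a delegate builder; CloudHttp2SolrClient builds the client internally.
  static CloudHttp2SolrClient clientFromBuilder() {
    return new CloudHttp2SolrClient.Builder(SOLR_URLS)
        .withHttpClientBuilder(new Http2SolrClient.Builder())
        .build();
  }

  // 3. Provide neither: per createOrGetHttpClientFromBuilder above, the Jetty-based
  //    Http2SolrClient is chosen when Jetty's HttpClient is on the classpath,
  //    otherwise the JDK-based HttpJdkSolrClient.
  static CloudHttp2SolrClient defaults() {
    return new CloudHttp2SolrClient.Builder(SOLR_URLS).build();
  }
}
```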
solr/solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrClient.java
@@ -33,7 +33,6 @@
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -120,68 +119,6 @@ public abstract class CloudSolrClient extends SolrClient {

protected volatile Object[] locks = objectList(3);

/** Constructs {@link CloudSolrClient} instances from provided configuration. */
public static class Builder extends CloudHttp2SolrClient.Builder {

/**
* Provide a series of Solr URLs to be used when configuring {@link CloudSolrClient} instances.
* The solr client will use these urls to understand the cluster topology, which solr nodes are
* active etc.
*
* <p>Provided Solr URLs are expected to point to the root Solr path
* ("http://hostname:8983/solr"); they should not include any collections, cores, or other path
* components.
*
* <p>Usage example:
*
* <pre>
* final List&lt;String&gt; solrBaseUrls = new ArrayList&lt;String&gt;();
* solrBaseUrls.add("http://solr1:8983/solr"); solrBaseUrls.add("http://solr2:8983/solr"); solrBaseUrls.add("http://solr3:8983/solr");
* final SolrClient client = new CloudSolrClient.Builder(solrBaseUrls).build();
* </pre>
*/
public Builder(List<String> solrUrls) {
super(solrUrls);
}

/**
* Provide a series of ZK hosts which will be used when configuring {@link CloudSolrClient}
* instances. This requires a dependency on {@code solr-solrj-zookeeper} which transitively
* depends on more JARs. The ZooKeeper based connection is the most reliable and performant
* means for CloudSolrClient to work. On the other hand, it means exposing ZooKeeper more
* broadly than to Solr nodes, which is a security risk.
*
* <p>Usage example when Solr stores data at the ZooKeeper root ('/'):
*
* <pre>
* final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;();
* zkServers.add("zookeeper1:2181"); zkServers.add("zookeeper2:2181"); zkServers.add("zookeeper3:2181");
* final SolrClient client = new CloudSolrClient.Builder(zkServers, Optional.empty()).build();
* </pre>
*
* Usage example when Solr data is stored in a ZooKeeper chroot:
*
* <pre>
* final List&lt;String&gt; zkServers = new ArrayList&lt;String&gt;();
* zkServers.add("zookeeper1:2181"); zkServers.add("zookeeper2:2181"); zkServers.add("zookeeper3:2181");
* final SolrClient client = new CloudSolrClient.Builder(zkServers, Optional.of("/solr")).build();
* </pre>
*
* @param zkHosts a List of at least one ZooKeeper host and port (e.g. "zookeeper1:2181")
* @param zkChroot the path to the root ZooKeeper node containing Solr data. Provide {@code
* java.util.Optional.empty()} if no ZK chroot is used.
*/
@Deprecated(since = "10.0")
public Builder(List<String> zkHosts, Optional<String> zkChroot) {
super(zkHosts, zkChroot);
}

/** for an expert use-case */
public Builder(ClusterStateProvider stateProvider) {
super(stateProvider);
}
}

static class StateCache extends ConcurrentHashMap<String, ExpiringCachedDocCollection> {
final AtomicLong puts = new AtomicLong();
final AtomicLong hits = new AtomicLong();
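Finally, for code that used the nested CloudSolrClient.Builder removed above, a hedged migration sketch (hosts are placeholders) mirroring the changes made to RunExampleTool and the other callers in this PR:

```java
import java.util.List;
import java.util.Optional;
import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
import org.apache.solr.client.solrj.impl.CloudSolrClient;

public class BuilderMigrationSketch {
  public static void main(String[] args) throws Exception {
    List<String> zkHosts = List.of("zookeeper1:2181"); // placeholder; ZK mode needs solr-solrj-zookeeper

    // Previously: new CloudSolrClient.Builder(zkHosts, Optional.empty()).build()
    // (the nested Builder removed by this PR). The equivalent now:
    try (CloudSolrClient client =
        new CloudHttp2SolrClient.Builder(zkHosts, Optional.empty()).build()) {
      client.connect();
      System.out.println(client.getClusterState().getLiveNodes());
    }
  }
}
```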