diff --git a/.gitignore b/.gitignore
index 4ab57f89..32f9c2ea 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,3 +3,5 @@ target
*.diff
Fortify*
logging.properties
+.idea
+.oca
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 15f68fa4..692c88a1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,10 @@
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/).
+## [6.0.0] [Unreleased]
+
+### TODO
+
## [5.4.18] 2025-10-01
### Added
diff --git a/README.md b/README.md
index cff205cb..cb24ece5 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ project. The version changes with each release.
<dependency>
  <groupId>com.oracle.nosql.sdk</groupId>
  <artifactId>nosqldriver</artifactId>
- <version>5.4.18</version>
+ <version>6.0.0</version>
</dependency>
```
diff --git a/driver/pom.xml b/driver/pom.xml
index 5ca2a03d..7c92425e 100644
--- a/driver/pom.xml
+++ b/driver/pom.xml
@@ -29,7 +29,7 @@
<groupId>com.oracle.nosql.sdk</groupId>
<artifactId>nosqldriver</artifactId>
- <version>5.4.18</version>
+ <version>6.0.0</version>
<packaging>jar</packaging>
@@ -39,8 +39,8 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- <maven.compiler.source>1.8</maven.compiler.source>
- <maven.compiler.target>1.8</maven.compiler.target>
+ <maven.compiler.source>11</maven.compiler.source>
+ <maven.compiler.target>11</maven.compiler.target>
${maven.build.timestamp}
d-MMMM-yyyy
Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
@@ -274,8 +274,8 @@
<version>3.11.0</version>
true
- <source>1.8</source>
- <target>1.8</target>
+ <source>${maven.compiler.source}</source>
+ <target>${maven.compiler.target}</target>
true
true
-Xlint:all
diff --git a/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java b/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java
index a6c491af..d01730a1 100644
--- a/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java
+++ b/driver/src/main/java/oracle/nosql/driver/AuthorizationProvider.java
@@ -12,6 +12,8 @@
import io.netty.handler.codec.http.HttpHeaders;
import oracle.nosql.driver.ops.Request;
+import java.util.concurrent.CompletableFuture;
+
/**
* A callback interface used by the driver to obtain an authorization string
* for a request. {@link NoSQLHandle} calls this interface when and
@@ -34,6 +36,21 @@ public interface AuthorizationProvider {
*/
public String getAuthorizationString(Request request);
+ /**
+ * Returns an authorization string for the specified request. This is sent to
+ * the server in the request for authorization. Authorization information
+ * can be request-dependent.
+ *
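+ * <p>
+ * A minimal sketch of an implementation, assuming the provider's synchronous
+ * {@link #getAuthorizationString} can safely be offloaded to an
+ * application-supplied executor (the {@code authExecutor} field is
+ * hypothetical):
+ * <pre>{@code
+ * @Override
+ * public CompletableFuture<String> getAuthorizationStringAsync(Request request) {
+ *     return CompletableFuture.supplyAsync(
+ *         () -> getAuthorizationString(request), authExecutor);
+ * }
+ * }</pre>
+ *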
+ * @param request the request being processed
+ *
+ * @return a CompletableFuture of a string indicating that the application
+ * is authorized to perform the request
+ */
+ public default CompletableFuture<String>
+ getAuthorizationStringAsync(Request request) {
+ return CompletableFuture.completedFuture(null);
+ }
+
/**
* Release resources provider is using.
*/
@@ -75,6 +92,27 @@ public default void setRequiredHeaders(String authString,
}
}
+ /**
+ * Set HTTP headers required by the provider asynchronously.
+ *
+ * @param authString the authorization string for the request
+ *
+ * @param request the request being processed
+ *
+ * @param headers the HTTP headers
+ *
+ * @param content the request content bytes
+ *
+ * @return a CompletableFuture that completes when the required headers have
+ * been set
+ */
+ default CompletableFuture<Void> setRequiredHeadersAsync(String authString,
+ Request request,
+ HttpHeaders headers,
+ byte[] content) {
+ if (authString != null) {
+ headers.set(AUTHORIZATION, authString);
+ }
+ return CompletableFuture.completedFuture(null);
+ }
+
/**
* Invalidate any cached authorization strings.
*/
diff --git a/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java b/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java
index b7e4be88..684cc681 100644
--- a/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java
+++ b/driver/src/main/java/oracle/nosql/driver/DefaultRetryHandler.java
@@ -85,6 +85,13 @@ public void delay(Request request,
request.addRetryDelayMs(delayMs);
}
+ @Override
+ public int delayTime(Request request,
+ int numRetries,
+ RetryableException re) {
+ return Math.max(0, computeBackoffDelay(request, fixedDelayMs));
+ }
+
/**
* Compute an incremental backoff delay in milliseconds.
* This method also checks the request's timeout and ensures the
diff --git a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleAsync.java b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleAsync.java
new file mode 100644
index 00000000..8a27bc6b
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleAsync.java
@@ -0,0 +1,1105 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.Flow;
+
+import oracle.nosql.driver.ops.AddReplicaRequest;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.DropReplicaRequest;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetIndexesResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.ReplicaStatsRequest;
+import oracle.nosql.driver.ops.ReplicaStatsResult;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.ops.SystemRequest;
+import oracle.nosql.driver.ops.SystemResult;
+import oracle.nosql.driver.ops.SystemStatusRequest;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.TableUsageResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.values.MapValue;
+
+/**
+ * NoSQLHandleAsync is an asynchronous handle that can be used to access Oracle
+ * NoSQL tables. To create a connection represented by NoSQLHandleAsync,
+ * request an instance using {@link NoSQLHandleFactory#createNoSQLHandleAsync}
+ * and {@link NoSQLHandleConfig}, which allows an application to specify
+ * default values and other configuration information to be used by the handle.
+ *
+ * The same interface is available to both users of the Oracle NoSQL Database
+ * Cloud Service and the on-premises Oracle NoSQL Database; however, some
+ * methods and/or parameters are specific to each environment. The
+ * documentation has notes about whether a class, method, or parameter is
+ * environment-specific. Unless otherwise noted they are applicable to both
+ * environments.
+ *
+ * A handle has memory and network resources associated with it. Consequently,
+ * the {@link NoSQLHandleAsync#close} method must be invoked to free up the
+ * resources when the application is done using the handle.
+ *
+ * To minimize network activity as well as resource allocation and
+ * deallocation overheads, it's best to avoid repeated creation and closing of
+ * handles. For example, creating and closing a handle around each operation
+ * would incur large resource allocation overheads, resulting in poor
+ * application performance.
+ *
+ *
+ * A handle permits concurrent operations, so a single handle is sufficient to
+ * access tables in a multi-threaded application. The creation of multiple
+ * handles incurs additional resource overheads without providing any
+ * performance benefit.
+ *
+ *
+ * With the exception of {@link #close}, the operations on this interface follow
+ * a similar pattern. They accept a {@link Request} object containing
+ * parameters, both required and optional. They return a {@link CompletableFuture}
+ * that completes with a {@link Result} object containing results. Operation
+ * failures are reported as exceptions. Unique subclasses of {@link Request} and {@link Result}
+ * exist for most operations, containing information specific to the operation.
+ * All of these operations result in remote calls across a network.
+ *
+ *
+ * All {@link Request} instances support specification of parameters for the
+ * operation as well as the ability to override default parameters which may
+ * have been specified in {@link NoSQLHandleConfig}, such as request timeouts,
+ * {@link Consistency}, etc.
+ *
+ *
+ * {@link Request} objects
+ * are not copied and must not be modified by the application while a method
+ * on this interface is using them.
+ *
+ *
+ * <p><b>Error and Exception Handling</b></p>
+ *
+ * On success, all methods in this interface return a {@link CompletableFuture}
+ * which completes with a {@link Result} object.
+ * On error, the returned {@link CompletableFuture} completes exceptionally with
+ * a {@link java.util.concurrent.CompletionException} that wraps the original
+ * exception as its cause.
+ * Some Java exceptions, such as {@link IllegalArgumentException} and
+ * {@link NullPointerException} are thrown directly. All other exceptions are
+ * instances of {@link NoSQLException}, which serves as a base class for NoSQL
+ * Database exceptions.
+ *
+ *
+ * {@link NoSQLException} instances are split into two broad categories:
+ * <ul>
+ * <li>Exceptions that may be retried with the expectation that they
+ * may succeed on retry. These are instances of {@link RetryableException}</li>
+ * <li>Exceptions that may not be retried and if retried, will fail again</li>
+ * </ul>
+ *
+ * Exceptions that may be retried return true for
+ * {@link NoSQLException#okToRetry} while those that may not will return false.
+ * Examples of retryable exceptions are those which indicate resource
+ * consumption violations such as {@link ThrottlingException}.
+ * Examples of exceptions that should not be
+ * retried are {@link IllegalArgumentException},
+ * {@link TableNotFoundException}, and any other exception indicating a
+ * syntactic or semantic error.
+ *
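+ * <p>
+ * A minimal sketch of the pattern described above, assuming a previously
+ * created {@code NoSQLHandleAsync handle}; the table and field names are
+ * hypothetical:
+ * <pre>{@code
+ * GetRequest getReq = new GetRequest()
+ *     .setTableName("users")
+ *     .setKey(new MapValue().put("id", 1));
+ * handle.get(getReq)
+ *     .thenAccept(res -> System.out.println("row: " + res.getValue()))
+ *     .exceptionally(t -> {
+ *         Throwable cause =
+ *             (t instanceof CompletionException) ? t.getCause() : t;
+ *         System.err.println("get failed: " + cause);
+ *         return null;
+ *     });
+ * }</pre>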
+ *
+ * Instances of NoSQLHandleAsync are thread-safe and expected to be shared among
+ * threads.
+ *
+ * The async APIs are non-blocking in that they return without waiting for any
+ * events such as network read and write, or security handshake. The actual
+ * handling of such events happens inside an internal thread pool which has a
+ * fixed number of threads. These async methods return widely accepted
+ * asynchronous flow-control or computation classes, namely, the
+ * {@link CompletableFuture} and {@link Flow.Publisher}, from which
+ * user-supplied actions are triggered after the execution results are
+ * available. We implement these interfaces in a way such that user-supplied
+ * actions will be performed by a thread in the internal thread pool.
+ * Therefore, these actions must be non-blocking to avoid interfering with
+ * internal event processing.
+ * This requirement corresponds to those defined in the async classes (see the
+ * policies for implementing {@code CompletionStage} in
+ * {@link CompletableFuture}). If the triggered method needs to perform a
+ * blocking action or heavy CPU-bound task, use a separate executor to perform
+ * the action. For example:
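+ * <pre>{@code
+ * // A hypothetical sketch: offload blocking or CPU-heavy work to an
+ * // application-owned executor instead of the driver's internal threads.
+ * // transformRow and store are application-supplied methods.
+ * ExecutorService appExecutor = Executors.newFixedThreadPool(4);
+ * handle.get(getRequest)
+ *     .thenApplyAsync(res -> transformRow(res.getValue()), appExecutor)
+ *     .thenAccept(row -> store(row));
+ * }</pre>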
+ *
+ * @since 6.0.0
+ */
+public interface NoSQLHandleAsync extends AutoCloseable {
+
+ /**
+ * Deletes a row from a table asynchronously. The row is identified using a
+ * primary key value supplied in {@link DeleteRequest#setKey}
+ *
+ * By default, a delete operation is unconditional and will succeed if the
+ * specified row exists. Delete operations can be made conditional based
+ * on whether the {@link Version} of an existing row matches that supplied
+ * by {@link DeleteRequest#setMatchVersion}.
+ *
+ * It is also possible to return information about the existing
+ * row. The row, including its {@link Version} and modification time
+ * can be optionally returned.
+ * The existing row information will only be returned if
+ * {@link DeleteRequest#setReturnRow} is true and one of the following
+ * occurs:
+ * <ul>
+ * <li>The {@link DeleteRequest#setMatchVersion} is used and the operation
+ * fails because the row exists and its version does not match.</li>
+ * <li>The {@link DeleteRequest#setMatchVersion} is not used and the
+ * operation succeeds provided that the server supports providing the
+ * existing row.</li>
+ * </ul>
+ *
+ * Use of {@link DeleteRequest#setReturnRow} may result in additional
+ * consumed read capacity.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<DeleteResult> delete(DeleteRequest request);
+
+ /**
+ * Gets the row associated with a primary key asynchronously. On success the
+ * value of the row is available using the {@link GetResult#getValue}
+ * operation. If there are no matching rows that method will return null.
+ *
+ * The default {@link Consistency} used for the operation is
+ * {@link Consistency#EVENTUAL} unless an explicit value has been set
+ * using {@link NoSQLHandleConfig#setConsistency} or
+ * {@link GetRequest#setConsistency}. Use of {@link Consistency#ABSOLUTE}
+ * may affect latency of the operation and may result in additional cost
+ * for the operation.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<GetResult> get(GetRequest request);
+
+ /**
+ * Puts a row into a table asynchronously. This method creates a new row or
+ * overwrites an existing row entirely. The value used for the put is in
+ * the {@link PutRequest} object and must contain a complete primary key and
+ * all required fields.
+ *
+ * It is not possible to put part of a row. Any fields that are not
+ * provided will be defaulted, overwriting any existing value. Fields that
+ * are not nullable or defaulted must be provided or an exception will be
+ * thrown.
+ *
+ * By default a put operation is unconditional, but put operations can be
+ * conditional based on existence, or not, of a previous value as well as
+ * conditional on the {@link Version} of the existing value.
+ *
+ * <ul>
+ * <li>Use {@link PutRequest.Option#IfAbsent} to do a put only if there is
+ * no existing row that matches the primary key</li>
+ * <li>Use {@link PutRequest.Option#IfPresent} to do a put only if there
+ * is an existing row that matches the primary key</li>
+ * <li>Use {@link PutRequest.Option#IfVersion} to do a put only if there is
+ * an existing row that matches the primary key and its
+ * {@link Version} matches that provided</li>
+ * </ul>
+ *
+ * It is also possible to return information about the existing
+ * row. The existing row, including its {@link Version} and modification
+ * time can be optionally returned.
+ * The existing row information will only be returned if
+ * {@link PutRequest#setReturnRow} is true and one of the following occurs:
+ * <ul>
+ * <li>The {@link PutRequest.Option#IfAbsent} is used and the operation
+ * fails because the row already exists.</li>
+ * <li>The {@link PutRequest.Option#IfVersion} is used and the operation
+ * fails because the row exists and its version does not match.</li>
+ * <li>The {@link PutRequest.Option#IfPresent} is used and the operation
+ * succeeds provided that the server supports providing the existing row.</li>
+ * <li>The {@link PutRequest.Option} is not used and put operation replaces
+ * the existing row provided that the server supports providing the existing
+ * row.</li>
+ * </ul>
+ *
+ * Use of {@link PutRequest#setReturnRow} may result in additional consumed
+ * read capacity.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<PutResult> put(PutRequest request);
+
+ /**
+ * Executes a sequence of operations associated with a table that share the
+ * same shard key portion of their primary keys; all the specified
+ * operations are executed within the scope of a single transaction.
+ * The operations are specified in a {@link WriteMultipleRequest}.
+ *
+ * There are some size-based limitations on this operation:
+ * <ul>
+ * <li>The max number of individual operations (put, delete) in a single
+ * WriteMultiple request is 50.</li>
+ * <li>The total request size is limited to 25MB.</li>
+ * </ul>
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link RowSizeLimitException} if the size of the request exceeds
+ * the maximum limit.
+ *
+ *
+ * {@link BatchOperationNumberLimitException} if the number of operations in
+ * the request exceeds the maximum limit.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<WriteMultipleResult> writeMultiple(WriteMultipleRequest request);
+
+ /**
+ * Deletes multiple rows from a table in an atomic operation asynchronously.
+ * The key used may be partial but must contain all of the fields that are
+ * in the shard key. A range may be specified to delete a range of keys.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<MultiDeleteResult> multiDelete(MultiDeleteRequest request);
+
+ /**
+ * Queries a table based on the query statement specified in the
+ * {@link QueryRequest} asynchronously.
+ *
+ * Queries that include a full shard key will execute much more efficiently
+ * than more distributed queries that must go to multiple shards.
+ *
+ * Table- and system-style queries such as "CREATE TABLE ..." or "DROP TABLE .."
+ * are not supported by this interface. Those operations must be performed
+ * using {@link #tableRequest} or {@link #systemRequest} as appropriate.
+ *
+ * The amount of data read by a single query request is limited by a system
+ * default and can be further limited using
+ * {@link QueryRequest#setMaxReadKB}. This limits the amount of data
+ * read and not the amount of data returned, which means
+ * that a query can return zero results but still have more data to read.
+ * This situation is detected by checking if the {@link QueryResult} has a
+ * continuation key, using {@link QueryResult#getContinuationKey}. For this
+ * reason queries should always operate in a loop, acquiring more results,
+ * until the continuation key is null, indicating that the query is done.
+ * Inside the loop the continuation key is applied to the
+ * {@link QueryRequest} using {@link QueryRequest#setContinuationKey}.
+ *
+ * Note: since a query might hold resources until it reaches the end of its
+ * results, it is necessary to close the {@link QueryRequest} or use a
+ * try-with-resources statement:
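+ * <pre>{@code
+ * // A hypothetical sketch of the continuation-key loop; the statement and
+ * // table name are illustrative, and join() is used only for brevity
+ * // (it blocks the calling thread).
+ * try (QueryRequest qreq = new QueryRequest()
+ *          .setStatement("SELECT * FROM users")) {
+ *     QueryResult qres;
+ *     do {
+ *         qres = handle.query(qreq).join();
+ *         for (MapValue row : qres.getResults()) {
+ *             System.out.println(row);
+ *         }
+ *         qreq.setContinuationKey(qres.getContinuationKey());
+ *     } while (qres.getContinuationKey() != null);
+ * }
+ * }</pre>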
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<QueryResult> query(QueryRequest request);
+
+ /**
+ * Queries a table based on the query statement specified in the
+ * {@link QueryRequest} while returning an {@link java.util.concurrent.Flow.Publisher}.
+ * The returned {@link Flow.Publisher} can be subscribed to only once.
+ *
+ * Queries that include a full shard key will execute much more efficiently
+ * than more distributed queries that must go to multiple shards.
+ *
+ * Remote calls, including preparation of a query statement, will not
+ * occur until the subscription happens.
+ *
+ * Table- and system-style queries such as "CREATE TABLE ..." or "DROP TABLE .."
+ * are not supported by this interface. Those operations must be performed using
+ * {@link #tableRequest} or {@link #systemRequest} as appropriate.
+ *
+ * Note: the Publisher will close the {@link QueryRequest}.
+ *
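+ * <p>
+ * A minimal subscription sketch (the statement is illustrative and the
+ * unbounded demand is for brevity only):
+ * <pre>{@code
+ * handle.queryPaginator(new QueryRequest().setStatement("SELECT * FROM users"))
+ *     .subscribe(new Flow.Subscriber<MapValue>() {
+ *         public void onSubscribe(Flow.Subscription s) { s.request(Long.MAX_VALUE); }
+ *         public void onNext(MapValue row) { System.out.println(row); }
+ *         public void onError(Throwable t) { t.printStackTrace(); }
+ *         public void onComplete() { System.out.println("query done"); }
+ *     });
+ * }</pre>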
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return The {@link Flow.Publisher} of {@link MapValue}
+ *
+ * @throws IllegalArgumentException if any of the parameters are invalid or
+ * required parameters are missing
+ *
+ * @throws NoSQLException if the operation cannot be performed for any other
+ * reason
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ Flow.Publisher<MapValue> queryPaginator(QueryRequest request);
+ /**
+ * Prepares a query for execution and reuse asynchronously. See
+ * {@link #query} for general information and restrictions. It is
+ * recommended that prepared queries are used when the same query will run
+ * multiple times as execution is much more efficient than starting with a
+ * query string every time. The query language and API support query
+ * variables to assist with re-use.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<PrepareResult> prepare(PrepareRequest request);
+
+ /**
+ * Performs an operation on a table asynchronously. This method is used for
+ * creating and dropping tables and indexes as well as altering tables.
+ * Only one operation is allowed on a table at any one time.
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link TableResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<TableResult> tableRequest(TableRequest request);
+
+ /**
+ * A convenience method that performs a TableRequest and waits for
+ * completion of the operation. This is the same as calling
+ * {@link #tableRequest} then calling {@link TableResult#waitForCompletion}.
+ * If the operation fails an exception is thrown. All parameters are
+ * required.
+ *
+ * @param request the {@link TableRequest} to perform.
+ *
+ * @param timeoutMs the amount of time to wait for completion, in
+ * milliseconds.
+ *
+ * @param pollIntervalMs the polling interval for the wait operation.
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link RequestTimeoutException} if the operation times out.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<TableResult> doTableRequest(TableRequest request,
+ int timeoutMs,
+ int pollIntervalMs);
+
+ /**
+ * On-premises only.
+ *
+ * Performs a system operation on the system asynchronously, such as
+ * administrative operations that don't affect a specific table. For
+ * table-specific operations use {@link #tableRequest} or
+ * {@link #doTableRequest}.
+ *
+ * Examples of statements in the {@link SystemRequest} passed to this
+ * method include:
+ * <ul>
+ * <li>CREATE NAMESPACE mynamespace</li>
+ * <li>CREATE USER some_user IDENTIFIED BY password</li>
+ * <li>CREATE ROLE some_role</li>
+ * <li>GRANT ROLE some_role TO USER some_user</li>
+ * </ul>
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link SystemResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<SystemResult> systemRequest(SystemRequest request);
+
+ /**
+ * Checks the status of an operation previously performed using
+ * {@link #systemRequest} asynchronously.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<SystemResult> systemStatus(SystemStatusRequest request);
+
+ /**
+ * Gets static information about the specified table asynchronously
+ * including its state, provisioned throughput, capacity and schema.
+ * Dynamic information such as usage is obtained using {@link #getTableUsage}.
+ * Throughput, capacity and usage information is only available when using
+ * the Cloud Service and will be null or not defined on-premises.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link TableNotFoundException} if the specified table does not exist.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<TableResult> getTable(GetTableRequest request);
+
+ /**
+ * Gets dynamic information about the specified table asynchronously such as
+ * the current throughput usage. Usage information is collected in time
+ * slices and returned in individual usage records. It is possible to
+ * specify a time-based range of usage records using input parameters.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link TableNotFoundException} if the specified table does not exist.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<TableUsageResult> getTableUsage(TableUsageRequest request);
+
+ /**
+ * Lists tables asynchronously, returning table names. If further information
+ * about a specific table is desired the {@link #getTable} interface may be
+ * used. If a given identity has access to a large number of tables the
+ * list may be paged using input parameters.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<ListTablesResult> listTables(ListTablesRequest request);
+
+ /**
+ * Returns information about an index, or indexes on a table asynchronously.
+ * If no index name is specified in the {@link GetIndexesRequest}, then
+ * information on all indexes is returned.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<GetIndexesResult> getIndexes(GetIndexesRequest request);
+
+ /**
+ * Returns the namespaces in a store as an array of String.
+ *
+ * @return A {@link CompletableFuture} which completes with the namespaces
+ * or null if none are found.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<String[]> listNamespaces();
+
+ /**
+ * Returns the roles in a store as an array of String.
+ *
+ * @return A {@link CompletableFuture} which completes with the list of
+ * roles or null if none are found.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<String[]> listRoles();
+
+ /**
+ * Returns the users in a store as an array of {@link UserInfo}.
+ *
+ * @return A {@link CompletableFuture} which completes with the users
+ * or null if none are found.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<UserInfo[]> listUsers();
+
+ /**
+ * A convenience method that performs a SystemRequest and waits for
+ * completion of the operation. This is the same as calling {@link
+ * #systemRequest} then calling {@link SystemResult#waitForCompletion}. If
+ * the operation fails an exception is thrown. All parameters are required.
+ *
+ * System requests are those related to namespaces and security and are
+ * generally independent of specific tables. Examples of statements include:
+ * <ul>
+ * <li>CREATE NAMESPACE mynamespace</li>
+ * <li>CREATE USER some_user IDENTIFIED BY password</li>
+ * <li>CREATE ROLE some_role</li>
+ * <li>GRANT ROLE some_role TO USER some_user</li>
+ * </ul>
+ *
+ * @param statement the system statement for the operation.
+ *
+ * @param timeoutMs the amount of time to wait for completion, in
+ * milliseconds.
+ *
+ * @param pollIntervalMs the polling interval for the wait operation.
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link RequestTimeoutException} if the operation times out.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<SystemResult> doSystemRequest(String statement,
+ int timeoutMs,
+ int pollIntervalMs);
+
+ /**
+ * Cloud service only.
+ *
+ * Add replica to a table asynchronously.
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link TableResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<TableResult> addReplica(AddReplicaRequest request);
+
+ /**
+ * Cloud service only.
+ *
+ * Drop replica from a table asynchronously.
+ *
+ * This operation is implicitly asynchronous. The caller must poll using
+ * methods on {@link TableResult} to determine when it has completed.
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ * @since 6.0.0
+ */
+ CompletableFuture<TableResult> dropReplica(DropReplicaRequest request);
+
+ /**
+ * Cloud service only.
+ *
+ * Gets replica statistics information asynchronously
+ *
+ * @param request the input parameters for the operation
+ *
+ * @return A {@link CompletableFuture} which completes with the result of
+ * the operation.
+ *
+ * The returned {@link CompletableFuture} may complete exceptionally with
+ * the following exceptions. The underlying exception is wrapped in a
+ * {@link CompletionException}; use {@link Throwable#getCause()} to
+ * retrieve it.
+ *
+ *
+ * {@link IllegalArgumentException} if any of the parameters are invalid or
+ * required parameters are missing.
+ *
+ *
+ * {@link TableNotFoundException} if the specified table does not exist.
+ *
+ *
+ * {@link NoSQLException} if the operation cannot be performed for
+ * any other reason.
+ *
+ *
+ *
+ * @since 6.0.0
+ * @see
+ * Thread model for asynchronous execution
+ */
+ CompletableFuture<ReplicaStatsResult> getReplicaStats(ReplicaStatsRequest request);
+
+ /**
+ * Returns an object that allows control over how SDK statistics
+ * are collected.
+ *
+ * @return the StatsControl object
+ *
+ * @since 6.0.0
+ */
+ StatsControl getStatsControl();
+
+ /**
+ * Closes the handle, releasing its memory and network resources. Once
+ * this method is called the handle is no longer usable. Any attempt to
+ * use a closed handle will throw {@link IllegalArgumentException}.
+ */
+ void close();
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java
index 28ea4769..4eefa1bc 100644
--- a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java
+++ b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleConfig.java
@@ -84,6 +84,18 @@ public class NoSQLHandleConfig implements Cloneable {
public static final String STATS_ENABLE_LOG_PROPERTY =
"com.oracle.nosql.sdk.nosqldriver.stats.enable-log";
+ /**
+ * Java property for connection pool size
+ */
+ public static final String CONNECTION_SIZE_PROPERTY =
+ "com.oracle.nosql.sdk.nosqldriver.connection.size";
+
+ /**
+ * Java property for the maximum number of pending connection pool acquires
+ */
+ public static final String CONNECTION_PENDING_PROPERTY =
+ "com.oracle.nosql.sdk.nosqldriver.connection.pending";
+
/**
* Statistics logging interval in seconds. Default 600 sec, ie. 10 min.
*/
@@ -102,6 +114,8 @@ public class NoSQLHandleConfig implements Cloneable {
*/
public static final boolean DEFAULT_ENABLE_LOG = true;
+ static final int DEFAULT_CONNECTION_POOL_SIZE = 100;
+ static final int DEFAULT_CONNECTION_PENDING_SIZE = 10_000;
/*
* The url used to contact an HTTP proxy
@@ -277,6 +291,20 @@ public class NoSQLHandleConfig implements Cloneable {
*/
private String extensionUserAgent;
+ /**
+ * Maximum size of the connection pool
+ */
+ private int connectionPoolSize =
+ getAndVerifyPropertyPositive(CONNECTION_SIZE_PROPERTY,
+ DEFAULT_CONNECTION_POOL_SIZE);
+
+ /**
+ * The maximum number of pending acquires for the pool
+ */
+ private int poolMaxPending =
+ getAndVerifyPropertyPositive(CONNECTION_PENDING_PROPERTY,
+ DEFAULT_CONNECTION_PENDING_SIZE);
+
/**
* Specifies an endpoint or region id to use to connect to the Oracle
* NoSQL Database Cloud Service or, if on-premise, the Oracle NoSQL
@@ -724,19 +752,23 @@ public NoSQLHandleConfig setNumThreads(int numThreads) {
* Sets the maximum number of individual connections to use to connect
* to the service. Each request/response pair uses a connection. The
* pool exists to allow concurrent requests and will bound the number of
- * concurrent requests. Additional requests will wait for a connection to
- * become available. If requests need to wait for a significant time
- * additional connections may be created regardless of the pool size.
- * The default value if not set is number of available CPUs * 2.
+ * concurrent requests. Additional requests, up to
+ * {@link NoSQLHandleConfig#poolMaxPending}, will wait for a connection
+ * to become available.
+ * The default value is {@value DEFAULT_CONNECTION_POOL_SIZE}.
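+ * <p>
+ * For example (illustrative values; {@code endpoint} is an
+ * application-supplied service URL):
+ * <pre>{@code
+ * NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint)
+ *     .setConnectionPoolSize(64)
+ *     .setPoolMaxPending(1024);
+ * }</pre>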
*
* @param poolSize the pool size
*
* @return this
- * @deprecated The connection pool no longer supports a size setting.
- * It will expand as needed based on concurrent demand.
+ *
+ * @since 6.0.0
*/
- @Deprecated
public NoSQLHandleConfig setConnectionPoolSize(int poolSize) {
+ if (poolSize <= 0) {
+ throw new IllegalArgumentException(
+ "Connection pool size must be positive");
+ }
+ this.connectionPoolSize = poolSize;
return this;
}
@@ -789,16 +821,20 @@ public NoSQLHandleConfig setConnectionPoolInactivityPeriod(
/**
* Sets the maximum number of pending acquire operations allowed on the
* connection pool. This number is used if the degree of concurrency
- * desired exceeds the size of the connection pool temporarily. The
- * default value is 3.
+ * desired exceeds the size of the connection pool temporarily.
+ * Default value is {@value DEFAULT_CONNECTION_PENDING_SIZE}.
*
* @param poolMaxPending the maximum number allowed
*
* @return this
- * @deprecated The connection pool no longer supports pending requests.
+ * @since 6.0.0
*/
- @Deprecated
public NoSQLHandleConfig setPoolMaxPending(int poolMaxPending) {
+ if (poolMaxPending <= 0) {
+ throw new IllegalArgumentException("pool max pending value must " +
+ "be positive");
+ }
+ this.poolMaxPending = poolMaxPending;
return this;
}
@@ -869,13 +905,12 @@ public int getMaxChunkSize() {
* concurrent requests. Additional requests will wait for a connection to
* become available.
*
- * @return 0
- * @deprecated The connection pool no longer supports a size setting.
- * It will expand as needed based on concurrent demand.
+ * @return the pool size
+ *
+ * @since 6.0.0
*/
- @Deprecated
public int getConnectionPoolSize() {
- return 0;
+ return connectionPoolSize;
}
/**
@@ -908,12 +943,11 @@ public int getConnectionPoolInactivityPeriod() {
* Returns the maximum number of pending acquire operations allowed on
* the connection pool.
*
- * @return 0
- * @deprecated The connection pool no longer supports pending requests.
+ * @return the max pool pending
+ * @since 6.0.0
*/
- @Deprecated
public int getPoolMaxPending() {
- return 0;
+ return poolMaxPending;
}
/**
@@ -1687,4 +1721,15 @@ public void setExtensionUserAgent(String extensionUserAgent) {
}
this.extensionUserAgent = extensionUserAgent;
}
+
+ static int getAndVerifyPropertyPositive(String property,
+ int defaultVal) {
+ final int val = Integer.getInteger(property, defaultVal);
+ if (val <= 0) {
+ final String msg =
+ String.format("Property %s must be larger than zero", property);
+ throw new IllegalArgumentException(msg);
+ }
+ return val;
+ }
}
diff --git a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java
index a6bf2033..c3edfd8b 100644
--- a/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java
+++ b/driver/src/main/java/oracle/nosql/driver/NoSQLHandleFactory.java
@@ -9,6 +9,7 @@
import static oracle.nosql.driver.util.CheckNull.requireNonNull;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
import oracle.nosql.driver.http.NoSQLHandleImpl;
/**
@@ -43,4 +44,34 @@ public static NoSQLHandle createNoSQLHandle(NoSQLHandleConfig config) {
}
return new NoSQLHandleImpl(configCopy);
}
+
+ /**
+ * Creates an asynchronous handle that can be used to access tables.
+ * The application must invoke {@link NoSQLHandleAsync#close}
+ * when it is done accessing the system, to
+ * free up the resources associated with the handle.
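+ *
+ * <p>
+ * A minimal usage sketch for a non-secure, on-premises endpoint (the URL and
+ * table name are hypothetical):
+ * <pre>{@code
+ * NoSQLHandleConfig config =
+ *     new NoSQLHandleConfig("http://localhost:8080")
+ *         .setAuthorizationProvider(new StoreAccessTokenProvider());
+ * try (NoSQLHandleAsync handle =
+ *          NoSQLHandleFactory.createNoSQLHandleAsync(config)) {
+ *     GetRequest req = new GetRequest()
+ *         .setTableName("users")
+ *         .setKey(new MapValue().put("id", 1));
+ *     GetResult res = handle.get(req).join();
+ * }
+ * }</pre>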
+ *
+ * @param config the NoSQLHandle configuration parameters
+ *
+ * @return a valid {@link NoSQLHandleAsync} instance, ready for use
+ *
+ * @throws IllegalArgumentException if an illegal configuration parameter
+ * is specified.
+ *
+ * @see NoSQLHandleAsync#close
+ */
+ public static NoSQLHandleAsync createNoSQLHandleAsync(
+ NoSQLHandleConfig config) {
+ requireNonNull(
+ config,
+ "NoSQLHandleFactory.createNoSQLHandleAsync: config cannot be null");
+ NoSQLHandleConfig configCopy = config.clone();
+ if (configCopy.getRetryHandler() == null) {
+ /*
+ * Default retry handler: 10 retries, default backoff
+ */
+ configCopy.configureDefaultRetryHandler(10, 0);
+ }
+ return new NoSQLHandleAsyncImpl(configCopy);
+ }
}
diff --git a/driver/src/main/java/oracle/nosql/driver/RetryHandler.java b/driver/src/main/java/oracle/nosql/driver/RetryHandler.java
index 3d625c27..11842169 100644
--- a/driver/src/main/java/oracle/nosql/driver/RetryHandler.java
+++ b/driver/src/main/java/oracle/nosql/driver/RetryHandler.java
@@ -69,4 +69,21 @@ public interface RetryHandler {
* @param re the exception that was thrown
*/
void delay(Request request, int numRetries, RetryableException re);
+
+ /**
+ * This method is called when a {@link RetryableException} is thrown and it
+ * is determined that the request will be retried based on the return value
+ * of {@link #doRetry}. It returns the number of milliseconds to delay
+ * before retrying the request.
+ *
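+ * <p>
+ * A hypothetical implementation using a simple exponential backoff (the
+ * values are illustrative, not the driver's defaults):
+ * <pre>{@code
+ * @Override
+ * public int delayTime(Request request, int numRetries, RetryableException re) {
+ *     return Math.min(10_000, 100 * (1 << Math.min(numRetries, 6)));
+ * }
+ * }</pre>
+ *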
+ * @param request the Request that has triggered the exception
+ *
+ * @param numRetries the number of retries that have occurred for the
+ * operation
+ *
+ * @param re the exception that was thrown
+ *
+ * @return Retry delay time in milliseconds
+ */
+ int delayTime(Request request, int numRetries, RetryableException re);
}
diff --git a/driver/src/main/java/oracle/nosql/driver/SDKVersion.java b/driver/src/main/java/oracle/nosql/driver/SDKVersion.java
index 5fad9116..77d294d5 100644
--- a/driver/src/main/java/oracle/nosql/driver/SDKVersion.java
+++ b/driver/src/main/java/oracle/nosql/driver/SDKVersion.java
@@ -12,5 +12,5 @@ public class SDKVersion {
/**
* The full X.Y.Z version of the current SDK
*/
- public static final String VERSION = "5.4.18";
+ public static final String VERSION = "6.0.0";
}
diff --git a/driver/src/main/java/oracle/nosql/driver/http/Client.java b/driver/src/main/java/oracle/nosql/driver/http/Client.java
index 3a0f2ce9..989329ce 100644
--- a/driver/src/main/java/oracle/nosql/driver/http/Client.java
+++ b/driver/src/main/java/oracle/nosql/driver/http/Client.java
@@ -40,23 +40,34 @@
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URL;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Function;
+import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
+import io.netty.buffer.Unpooled;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpRequest;
import oracle.nosql.driver.AuthorizationProvider;
import oracle.nosql.driver.DefaultRetryHandler;
import oracle.nosql.driver.InvalidAuthorizationException;
@@ -76,7 +87,6 @@
import oracle.nosql.driver.UnsupportedQueryVersionException;
import oracle.nosql.driver.WriteThrottlingException;
import oracle.nosql.driver.httpclient.HttpClient;
-import oracle.nosql.driver.httpclient.ResponseHandler;
import oracle.nosql.driver.kv.AuthenticationException;
import oracle.nosql.driver.kv.StoreAccessTokenProvider;
import oracle.nosql.driver.ops.AddReplicaRequest;
@@ -106,15 +116,16 @@
import oracle.nosql.driver.query.QueryDriver;
import oracle.nosql.driver.query.TopologyInfo;
import oracle.nosql.driver.util.ByteInputStream;
+import oracle.nosql.driver.util.ConcurrentUtil;
import oracle.nosql.driver.util.HttpConstants;
import oracle.nosql.driver.util.NettyByteInputStream;
import oracle.nosql.driver.util.NettyByteOutputStream;
import oracle.nosql.driver.util.RateLimiterMap;
import oracle.nosql.driver.util.SerializationUtil;
+import oracle.nosql.driver.util.SimpleRateLimiter;
import oracle.nosql.driver.values.MapValue;
import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaderNames;
@@ -153,7 +164,7 @@ public class Client {
/**
* Tracks the unique client scoped request id.
*/
- private final AtomicInteger maxRequestId = new AtomicInteger(1);
+ private final AtomicLong maxRequestId = new AtomicLong(1);
private final HttpClient httpClient;
@@ -200,7 +211,7 @@ public class Client {
/**
* config for statistics
*/
- private StatsControlImpl statsControl;
+ private final StatsControlImpl statsControl;
/**
* list of Request instances to refresh when auth changes. This will only
@@ -219,15 +230,86 @@ public class Client {
private final String SESSION_COOKIE_FIELD = "session=";
/* for keeping track of SDKs usage */
- private String userAgent;
+ private final String userAgent;
private volatile TopologyInfo topology;
/* for internal testing */
private final String prepareFilename;
+ /* thread-pool for scheduling tasks */
+ private final ScheduledExecutorService taskExecutor;
+
+ /* Lock to access data structures */
+ private final ReentrantLock lock = new ReentrantLock();
+
+ /*
+ * Centralized error handling for request execution.
+ * This class maps specific {@link Throwable} types to error-handling
+ * strategies (retry, fail, or protocol downgrade).
+ * It uses a HashMap of {@link ErrorHandler} functions
+ * to keep {@link #handleError(RequestContext, Throwable)} short and
+ * maintainable.
+ */
+ private final Map<Class<? extends Throwable>, ErrorHandler>
+ errorHandlers = new HashMap<>();
+
+ /*
+ * Functional interface for all error handlers.
+ * Each handler inspects the exception and decides whether
+ * to retry the request or fail with an exception.
+ */
+ @FunctionalInterface
+ private interface ErrorHandler {
+ CompletableFuture<Result> handle(RequestContext ctx, Throwable error);
+ }
+
+ /**
+ * RequestContext class to encapsulate request-specific data.
+ * This helps in passing context through asynchronous chains.
+ * It now includes requestId and a Supplier to generate new IDs for retries.
+ */
+ private static class RequestContext {
+ private final Request kvRequest;
+ private final String requestClass;
+ private volatile String requestId;
+ private final long startNanos;
+ private final int timeoutMs;
+ private final Supplier<Long> nextIdSupplier;
+ private volatile Throwable exception;
+ private final AtomicInteger rateDelayedMs = new AtomicInteger(0);
+ private volatile RateLimiter readLimiter;
+ private volatile RateLimiter writeLimiter;
+ private volatile boolean checkReadUnits;
+ private volatile boolean checkWriteUnits;
+ private volatile int reqSize;
+ private volatile int resSize;
+ private volatile short serialVersionUsed;
+ private volatile short queryVersionUsed;
+ private volatile long latencyNanos;
+ private volatile long networkLatency;
+
+ RequestContext(Request kvRequest, long startNanos, int timeoutMs,
+ Supplier<Long> nextIdSupplier, RateLimiter readLimiter,
+ RateLimiter writeLimiter, boolean checkReadUnits,
+ boolean checkWriteUnits) {
+ this.kvRequest = kvRequest;
+ this.startNanos = startNanos;
+ this.timeoutMs = timeoutMs;
+ this.nextIdSupplier = nextIdSupplier;
+ this.readLimiter = readLimiter;
+ this.writeLimiter = writeLimiter;
+ this.checkReadUnits = checkReadUnits;
+ this.checkWriteUnits = checkWriteUnits;
+
+ this.requestId = Long.toString(nextIdSupplier.get());
+ this.requestClass = kvRequest.getClass().getSimpleName();
+ }
+ }
+
public Client(Logger logger,
- NoSQLHandleConfig httpConfig) {
+ NoSQLHandleConfig httpConfig,
+ ScheduledExecutorService taskExecutor) {
this.logger = logger;
this.config = httpConfig;
@@ -266,7 +348,7 @@ public Client(Logger logger,
httpClient.configureProxy(httpConfig);
}
- authProvider= config.getAuthorizationProvider();
+ authProvider = config.getAuthorizationProvider();
if (authProvider == null) {
throw new IllegalArgumentException(
"Must configure AuthorizationProvider to use HttpClient");
@@ -302,6 +384,8 @@ public Client(Logger logger,
/* for internal testing */
prepareFilename = System.getProperty("test.preparefilename");
+ this.taskExecutor = taskExecutor;
+ initErrorHandlers();
}
/**
@@ -326,7 +410,9 @@ protected HttpClient createHttpClient(URL url,
sslCtx,
httpConfig.getSSLHandshakeTimeout(),
"NoSQL Driver",
- logger);
+ logger,
+ httpConfig.getConnectionPoolSize(),
+ httpConfig.getPoolMaxPending());
}
/**
@@ -347,6 +433,9 @@ public void shutdown() {
if (threadPool != null) {
threadPool.shutdown();
}
+ if (taskExecutor != null) {
+ taskExecutor.shutdown();
+ }
}
public int getAcquiredChannelCount() {
@@ -365,13 +454,13 @@ public int getFreeChannelCount() {
* Get the next client-scoped request id. It needs to be combined with the
* client id to obtain a globally unique scope.
*/
- private int nextRequestId() {
+ private long nextRequestId() {
return maxRequestId.addAndGet(1);
}
/**
- * Execute the KV request and return the response. This is the top-level
- * method for request execution.
+ * Execute the KV request and return the future response. This is the
+ * top-level method for request execution.
*
* This method handles exceptions to distinguish between what can be retried
* and what cannot, making sure that root cause exceptions are
@@ -387,9 +476,9 @@ private int nextRequestId() {
*
* @param kvRequest the KV request to be executed by the server
*
- * @return the Result of the request
+ * @return the future representing the result of the request
*/
- public Result execute(Request kvRequest) {
+ public CompletableFuture<Result> execute(Request kvRequest) {
requireNonNull(kvRequest, "NoSQLHandle: request must be non-null");
@@ -406,7 +495,11 @@ public Result execute(Request kvRequest) {
* fails for a given Request instance it will throw
* IllegalArgumentException.
*/
- kvRequest.validate();
+ try {
+ kvRequest.validate();
+ } catch (Throwable t) {
+ return CompletableFuture.failedFuture(t);
+ }
/* clear any retry stats that may exist on this request object */
kvRequest.setRetryStats(null);
@@ -434,7 +527,8 @@ public Result execute(Request kvRequest) {
*/
if (qreq.hasDriver()) {
trace("QueryRequest has QueryDriver", 2);
- return new QueryResult(qreq, false);
+ return CompletableFuture.completedFuture(
+ new QueryResult(qreq, false));
}
/*
@@ -449,7 +543,8 @@ public Result execute(Request kvRequest) {
trace("QueryRequest has no QueryDriver, but is prepared", 2);
QueryDriver driver = new QueryDriver(qreq);
driver.setClient(this);
- return new QueryResult(qreq, false);
+ return CompletableFuture.completedFuture(
+ new QueryResult(qreq, false));
}
/*
@@ -467,10 +562,6 @@ public Result execute(Request kvRequest) {
qreq.incBatchCounter();
}
- int timeoutMs = kvRequest.getTimeoutInternal();
-
- Throwable exception = null;
-
/*
* If the request doesn't set an explicit compartment, use
* the config default if provided.
@@ -480,7 +571,6 @@ public Result execute(Request kvRequest) {
config.getDefaultCompartment());
}
- int rateDelayedMs = 0;
boolean checkReadUnits = false;
boolean checkWriteUnits = false;
@@ -515,527 +605,679 @@ public Result execute(Request kvRequest) {
}
}
- final long startNanos = System.nanoTime();
- kvRequest.setStartNanos(startNanos);
- final String requestClass = kvRequest.getClass().getSimpleName();
+ kvRequest.setStartNanos(System.nanoTime());
+ RequestContext ctx = new RequestContext(kvRequest,
+ kvRequest.getStartNanos(), kvRequest.getTimeoutInternal(),
+ this::nextRequestId, readLimiter, writeLimiter,
+ checkReadUnits, checkWriteUnits);
- /*
- * boolean that indicates whether content must be signed. Cross
- * region operations must include content when signing. See comment
- * on the method
- */
- final boolean signContent = requireContentSigned(kvRequest);
- String requestId = "";
- int thisIterationTimeoutMs = 0;
+ return executeWithRetry(ctx);
+ }
- do {
- thisIterationTimeoutMs =
- getIterationTimeoutMs(timeoutMs, startNanos);
- /*
- * Check rate limiters before executing the request.
- * Wait for read and/or write limiters to be below their limits
- * before continuing. Be aware of the timeout given.
- */
- if (readLimiter != null && checkReadUnits == true) {
- try {
- /*
- * this may sleep for a while, up to thisIterationTimeoutMs
- * and may throw TimeoutException
- */
- rateDelayedMs += readLimiter.consumeUnitsWithTimeout(
- 0, thisIterationTimeoutMs, false);
- } catch (Exception e) {
- exception = e;
- break;
- }
+ /*
+ * Core method that creates the request and sends it to the server.
+ * If the request fails, it performs a retry.
+ */
+ private CompletableFuture executeWithRetry(RequestContext ctx) {
+
+ final Request kvRequest = ctx.kvRequest;
+ final int timeoutMs = ctx.timeoutMs;
+ final long startNanos = ctx.startNanos;
+ final int thisIterationTimeoutMs =
+ getIterationTimeoutMs(timeoutMs, startNanos);
+
+ /* Check for overall request timeout first */
+ if (thisIterationTimeoutMs <= 0) {
+ RequestTimeoutException rte = new RequestTimeoutException(timeoutMs,
+ ctx.requestClass + " timed out:" +
+ (ctx.requestId.isEmpty() ? "" : " requestId=" + ctx.requestId) +
+ " nextRequestId=" + nextRequestId() +
+ " iterationTimeout=" + thisIterationTimeoutMs + "ms " +
+ (kvRequest.getRetryStats() != null ?
+ kvRequest.getRetryStats() : ""), ctx.exception);
+ return CompletableFuture.failedFuture(rte);
+ }
+
+ /* Log retry */
+ if (kvRequest.getNumRetries() > 0) {
+ logRetries(kvRequest.getNumRetries(), ctx.exception);
+ }
+
+ if (serialVersion < 3 && kvRequest instanceof DurableRequest) {
+ if (((DurableRequest)kvRequest).getDurability() != null) {
+ oneTimeMessage("The requested feature is not supported " +
+ "by the connected server: Durability");
}
- if (writeLimiter != null && checkWriteUnits == true) {
- try {
- /*
- * this may sleep for a while, up to thisIterationTimeoutMs
- * and may throw TimeoutException
- */
- rateDelayedMs += writeLimiter.consumeUnitsWithTimeout(
- 0, thisIterationTimeoutMs, false);
- } catch (Exception e) {
- exception = e;
- break;
- }
+ }
+ if (serialVersion < 3 && kvRequest instanceof TableRequest) {
+ TableLimits limits = ((TableRequest)kvRequest).getTableLimits();
+ if (limits != null &&
+ limits.getMode() == CapacityMode.ON_DEMAND) {
+ oneTimeMessage("The requested feature is not supported " +
+ "by the connected server: on demand " +
+ "capacity table");
}
+ }
- /* update iteration timeout in case limiters slept for some time */
- thisIterationTimeoutMs =
- getIterationTimeoutMs(timeoutMs, startNanos);
+ return handlePreRateLimit(ctx)
+ .thenCompose((Integer delay) -> getAuthString(ctx, authProvider))
+ .thenCompose((String authString) -> createRequest(ctx, authString))
+ .thenCompose((FullHttpRequest request) -> submitRequest(ctx, request))
+ .thenApply((FullHttpResponse response) -> handleResponse(ctx, response))
+ .thenApply((Result result) -> handleResult(ctx, result))
+ .thenCompose((Result result) -> handlePostRateLimit(ctx, result))
+ .handle((Result result, Throwable err) -> {
+ /* Handle error and retry */
+ if (err != null) {
+ return handleError(ctx, err);
+ } else {
+ return CompletableFuture.completedFuture(result);
+ }
+ })
+ .thenCompose(Function.identity());
+ }
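
The chain above replaces the old do/while loop: each stage returns a CompletableFuture, failures are funneled into handleError, and a retry re-enters executeWithRetry via the scheduler, with thenCompose(Function.identity()) flattening the nested future produced by handle. Below is a minimal, self-contained sketch of that same compose-then-handle-then-flatten pattern; the class and method names are illustrative only and are not part of the driver.

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.CompletionException;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.function.Function;

    public class RetryPipelineSketch {
        private static final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();

        /* one attempt; on failure, schedule another attempt without blocking */
        static CompletableFuture<String> attempt(int remaining) {
            return CompletableFuture
                .supplyAsync(() -> {
                    if (remaining > 1) {
                        throw new CompletionException(new IOException("transient"));
                    }
                    return "ok";
                })
                .handle((result, err) -> {
                    if (err == null) {
                        return CompletableFuture.completedFuture(result);
                    }
                    CompletableFuture<String> retry = new CompletableFuture<>();
                    scheduler.schedule(() ->
                        attempt(remaining - 1).whenComplete((r, e) -> {
                            if (e != null) {
                                retry.completeExceptionally(e);
                            } else {
                                retry.complete(r);
                            }
                        }), 10, TimeUnit.MILLISECONDS);
                    return retry;
                })
                .thenCompose(Function.identity());
        }

        public static void main(String[] args) {
            System.out.println(attempt(2).join()); /* prints "ok" after one retry */
            scheduler.shutdown();
        }
    }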
- /* ensure limiting didn't throw us over the timeout */
- if (thisIterationTimeoutMs <= 0) {
- break;
- }
+ private CompletableFuture handlePreRateLimit(RequestContext ctx) {
+ /*
+ * Check rate limiters before executing the request.
+ * Wait for read and/or write limiters to be below their limits
+ * before continuing. Be aware of the timeout given.
+ */
+ int preRateLimitDelayMs = 0;
+ if (ctx.readLimiter != null && ctx.checkReadUnits) {
+ preRateLimitDelayMs += ((SimpleRateLimiter) ctx.readLimiter)
+ .consumeExternally(0);
+ }
+ if (ctx.writeLimiter != null && ctx.checkWriteUnits) {
+ preRateLimitDelayMs += ((SimpleRateLimiter) ctx.writeLimiter)
+ .consumeExternally(0);
+ }
+
+ int thisIterationTimeoutMs =
+ getIterationTimeoutMs(ctx.timeoutMs, ctx.startNanos);
+
+ /* If rate limiting would exceed the timeout, complete with exception. */
+ if (thisIterationTimeoutMs <= preRateLimitDelayMs) {
+ final TimeoutException ex = new TimeoutException(
+ "timed out waiting "
+ + thisIterationTimeoutMs
+ + "ms due to rate limiting");
+ return createDelayFuture(thisIterationTimeoutMs)
+ .thenCompose(d -> CompletableFuture.failedFuture(ex));
+ }
+ /* wait asynchronously for the delay */
+ return createDelayFuture(preRateLimitDelayMs)
+ .whenComplete((delay, err) -> ctx.rateDelayedMs.addAndGet(delay));
+ }
- final String authString =
- authProvider.getAuthorizationString(kvRequest);
+ /**
+ * Get auth token from auth provider.
+ * This may contact the server to get the token.
+ */
+ private CompletableFuture getAuthString(RequestContext ctx,
+ AuthorizationProvider authProvider) {
+ final Request kvRequest = ctx.kvRequest;
+ return authProvider.getAuthorizationStringAsync(kvRequest)
+ .thenApply(authString -> {
+ /* Check whether timed out while acquiring the auth token */
+ if (timeoutRequest(kvRequest.getStartNanos(),
+ kvRequest.getTimeoutInternal(),
+ null /* exception */)) {
+ TimeoutException ex = new TimeoutException(
+ "timed out during auth token acquisition");
+ throw new CompletionException(ex);
+ }
+ /* validate the auth string */
authProvider.validateAuthString(authString);
+ return authString;
+ });
+ }
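
Since getAuthString relies on the provider's getAuthorizationStringAsync variant, an application-supplied AuthorizationProvider can deliver its token without blocking the driver's pipeline. One possible shape for such a provider is sketched below; the token source and executor are placeholders, not anything defined by the SDK.

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import oracle.nosql.driver.AuthorizationProvider;
    import oracle.nosql.driver.ops.Request;

    public class AsyncTokenProviderSketch implements AuthorizationProvider {
        private final ExecutorService authExecutor =
            Executors.newSingleThreadExecutor();

        /* stand-in for a real, possibly blocking, token service call */
        private String fetchToken() {
            return "Bearer example-token";
        }

        @Override
        public String getAuthorizationString(Request request) {
            return fetchToken();
        }

        @Override
        public CompletableFuture<String> getAuthorizationStringAsync(
            Request request) {
            /* offload the blocking lookup so the driver thread is not held */
            return CompletableFuture.supplyAsync(this::fetchToken, authExecutor);
        }

        @Override
        public void close() {
            authExecutor.shutdown();
        }
    }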
+ /**
+ * Create the Netty HTTP request.
+ * This serializes the request content and fills in the HTTP headers and
+ * body.
+ * It may contact the server to sign the request body.
+ */
+ private CompletableFuture createRequest(RequestContext ctx,
+ String authString) {
+ ByteBuf buffer = null;
+ try {
+ buffer = Unpooled.buffer();
+ final Request kvRequest = ctx.kvRequest;
+ /*
+ * we expressly check size limit below based on onprem versus
+ * cloud. Set the request to not check size limit inside
+ * writeContent().
+ */
+ kvRequest.setCheckRequestSize(false);
- if (kvRequest.getNumRetries() > 0) {
- logRetries(kvRequest.getNumRetries(), exception);
+ /* Set the topo seq num in the request, if it has not been set
+ * already */
+ if (!(kvRequest instanceof QueryRequest) ||
+ kvRequest.isQueryRequest()) {
+ kvRequest.setTopoSeqNum(getTopoSeqNum());
}
- if (serialVersion < 3 && kvRequest instanceof DurableRequest) {
- if (((DurableRequest)kvRequest).getDurability() != null) {
- oneTimeMessage("The requested feature is not supported " +
- "by the connected server: Durability");
- }
- }
+ /*
+ * Temporarily change the timeout in the request object so
+ * the serialized timeout sent to the server is correct for
+ * this iteration. After serializing the request, set the
+ * timeout back to the overall request timeout so that other
+ * processing (retry delays, etc) work correctly.
+ */
+ kvRequest.setTimeoutInternal(
+ getIterationTimeoutMs(ctx.timeoutMs, ctx.startNanos));
+ writeContent(buffer, ctx);
+ kvRequest.setTimeoutInternal(ctx.timeoutMs);
- if (serialVersion < 3 && kvRequest instanceof TableRequest) {
- TableLimits limits = ((TableRequest)kvRequest).getTableLimits();
- if (limits != null &&
- limits.getMode() == CapacityMode.ON_DEMAND) {
- oneTimeMessage("The requested feature is not supported " +
- "by the connected server: on demand " +
- "capacity table");
+ /*
+ * If on-premises the authProvider will always be a
+ * StoreAccessTokenProvider. If so, check against
+ * configurable limit. Otherwise check against internal
+ * hardcoded cloud limit.
+ */
+ if (authProvider instanceof StoreAccessTokenProvider) {
+ if (buffer.readableBytes() >
+ httpClient.getMaxContentLength()) {
+ throw new RequestSizeLimitException("The request " +
+ "size of " + buffer.readableBytes() +
+ " exceeded the limit of " +
+ httpClient.getMaxContentLength());
}
+ } else {
+ kvRequest.setCheckRequestSize(true);
+ BinaryProtocol.checkRequestSizeLimit(
+ kvRequest, buffer.readableBytes());
+ }
+ final FullHttpRequest request =
+ new DefaultFullHttpRequest(
+ HTTP_1_1, POST, kvRequestURI,
+ buffer,
+ headersFactory().withValidation(false),
+ trailersFactory().withValidation(false));
+ HttpHeaders headers = request.headers();
+ addCommonHeaders(headers);
+ int contentLength = buffer.readableBytes();
+ ctx.reqSize = contentLength;
+ headers.add(HttpHeaderNames.HOST, host)
+ .add(REQUEST_ID_HEADER, ctx.requestId)
+ .setInt(CONTENT_LENGTH, contentLength);
+ if (sessionCookie != null) {
+ headers.add(COOKIE, sessionCookie);
+ }
+ String serdeVersion = getSerdeVersion(kvRequest);
+ if (serdeVersion != null) {
+ headers.add("x-nosql-serde-version", serdeVersion);
}
- ResponseHandler responseHandler = null;
- short serialVersionUsed = serialVersion;
- short queryVersionUsed = queryVersion;
- ByteBuf buffer = null;
- try {
- /*
- * NOTE: the ResponseHandler will release the Channel
- * in its close() method, which is always called in the
- * finally clause. This handles both successful and retried
- * operations in the loop.
- */
- Channel channel = httpClient.getChannel(thisIterationTimeoutMs);
- /* update iteration timeout in case channel took some time */
- thisIterationTimeoutMs =
- getIterationTimeoutMs(timeoutMs, startNanos);
- /* ensure limiting didn't throw us over the timeout */
- if (thisIterationTimeoutMs <= 0) {
- break;
- }
-
- requestId = Long.toString(nextRequestId());
- responseHandler =
- new ResponseHandler(httpClient, logger, channel,
- requestId, kvRequest.shouldRetry());
- buffer = channel.alloc().directBuffer();
- buffer.retain();
-
- /*
- * we expressly check size limit below based on onprem versus
- * cloud. Set the request to not check size limit inside
- * writeContent().
- */
- kvRequest.setCheckRequestSize(false);
-
- /* Set the topo seq num in the request, if it has not been set
- * already */
- if (!(kvRequest instanceof QueryRequest) ||
- kvRequest.isQueryRequest()) {
- kvRequest.setTopoSeqNum(getTopoSeqNum());
- }
+ /*
+ * boolean that indicates whether content must be signed. Cross
+ * region operations must include content when signing. See comment
+ * on the method
+ */
+ final boolean signContent = requireContentSigned(kvRequest);
- /*
- * Temporarily change the timeout in the request object so
- * the serialized timeout sent to the server is correct for
- * this iteration. After serializing the request, set the
- * timeout back to the overall request timeout so that other
- * processing (retry delays, etc) work correctly.
- */
- kvRequest.setTimeoutInternal(thisIterationTimeoutMs);
- serialVersionUsed = writeContent(buffer, kvRequest,
- queryVersionUsed);
- kvRequest.setTimeoutInternal(timeoutMs);
-
- /*
- * If on-premises the authProvider will always be a
- * StoreAccessTokenProvider. If so, check against
- * configurable limit. Otherwise check against internal
- * hardcoded cloud limit.
- */
- if (authProvider instanceof StoreAccessTokenProvider) {
- if (buffer.readableBytes() >
- httpClient.getMaxContentLength()) {
- throw new RequestSizeLimitException("The request " +
- "size of " + buffer.readableBytes() +
- " exceeded the limit of " +
- httpClient.getMaxContentLength());
+ /*
+ * Get request body bytes if the request needed to be signed
+ * with content
+ */
+ byte[] content = signContent ? getBodyBytes(buffer) : null;
+ return authProvider.setRequiredHeadersAsync(authString, kvRequest,
+ headers, content)
+ .thenApply(n -> {
+ String namespace = kvRequest.getNamespace();
+ if (namespace == null) {
+ namespace = config.getDefaultNamespace();
}
- } else {
- kvRequest.setCheckRequestSize(true);
- BinaryProtocol.checkRequestSizeLimit(
- kvRequest, buffer.readableBytes());
- }
+ if (namespace != null) {
+ headers.add(REQUEST_NAMESPACE_HEADER, namespace);
+ }
+ return request;
+ });
+ } catch (Throwable e) {
+ /* Release the buffer on error */
+ if (buffer != null) {
+ buffer.release();
+ }
+ return CompletableFuture.failedFuture(e);
+ }
+ }
- final FullHttpRequest request =
- new DefaultFullHttpRequest(
- HTTP_1_1, POST, kvRequestURI,
- buffer,
- headersFactory().withValidation(false),
- trailersFactory().withValidation(false));
- HttpHeaders headers = request.headers();
- addCommonHeaders(headers);
- int contentLength = buffer.readableBytes();
- headers.add(HttpHeaderNames.HOST, host)
- .add(REQUEST_ID_HEADER, requestId)
- .setInt(CONTENT_LENGTH, contentLength);
- if (sessionCookie != null) {
- headers.add(COOKIE, sessionCookie);
- }
+ /**
+ * Send the HTTP request to server and get the response back.
+ */
+ private CompletableFuture submitRequest(
+ RequestContext ctx, HttpRequest request) {
- String serdeVersion = getSerdeVersion(kvRequest);
- if (serdeVersion != null) {
- headers.add("x-nosql-serde-version", serdeVersion);
- }
+ final Request kvRequest = ctx.kvRequest;
+ if (isLoggable(logger, Level.FINE) && !kvRequest.getIsRefresh()) {
+ logTrace(logger, "Request: " + ctx.requestClass +
+ ", requestId=" + ctx.requestId);
+ }
+ ctx.latencyNanos = System.nanoTime();
+ int timeoutMs = getIterationTimeoutMs(ctx.timeoutMs, ctx.startNanos);
- /*
- * If the request doesn't set an explicit compartment, use
- * the config default if provided.
- */
- if (kvRequest.getCompartment() == null) {
- kvRequest.setCompartmentInternal(
- config.getDefaultCompartment());
- }
+ return httpClient.runRequest(request, timeoutMs)
+ .whenComplete((res, err) -> {
+ ctx.networkLatency =
+ (System.nanoTime() - ctx.latencyNanos) / 1_000_000;
+ });
+ }
- /*
- * Get request body bytes if the request needed to be signed
- * with content
- */
- byte[] content = signContent ? getBodyBytes(buffer) : null;
- authProvider.setRequiredHeaders(authString, kvRequest, headers,
- content);
-
- String namespace = kvRequest.getNamespace();
- if (namespace == null) {
- namespace = config.getDefaultNamespace();
- }
- if (namespace != null) {
- headers.add(REQUEST_NAMESPACE_HEADER, namespace);
- }
+ /**
+ * Deserialize HTTP response into NoSQL Result.
+ */
+ private Result handleResponse(RequestContext ctx, FullHttpResponse fhr) {
+ final Request kvRequest = ctx.kvRequest;
+ if (isLoggable(logger, Level.FINE) && !kvRequest.getIsRefresh()) {
+ logTrace(logger, "Response: " + ctx.requestClass +
+ ", status=" + fhr.status() +
+ ", requestId=" + ctx.requestId );
+ }
+ try {
+ Result result = processResponse(
+ fhr.status(), fhr.headers(), fhr.content(), ctx);
+ ctx.rateDelayedMs.addAndGet(
+ getRateDelayedFromHeader(fhr.headers()));
+ ctx.resSize = fhr.content().readerIndex();
+ return result;
+ } finally {
+ fhr.release(); //release response
+ }
+ }
- if (isLoggable(logger, Level.FINE) &&
- !kvRequest.getIsRefresh()) {
- logTrace(logger, "Request: " + requestClass +
- ", requestId=" + requestId);
- }
- long latencyNanos = System.nanoTime();
- httpClient.runRequest(request, responseHandler, channel);
-
- boolean isTimeout =
- responseHandler.await(thisIterationTimeoutMs);
- if (isTimeout) {
- throw new TimeoutException("Request timed out after " +
- timeoutMs + " milliseconds: requestId=" + requestId);
- }
+ /**
+ * Post-process the result: record topology and query traces, and update
+ * rate limiters.
+ */
+ private Result handleResult(RequestContext ctx, Result result) {
+ final Request kvRequest = ctx.kvRequest;
+ setTopology(result.getTopology());
+ if (ctx.serialVersionUsed < 3) {
+ /* so we can emit a one-time message if the app */
+ /* tries to access modificationTime */
+ if (result instanceof GetResult) {
+ ((GetResult)result).setClient(this);
+ } else if (result instanceof WriteResult) {
+ ((WriteResult)result).setClient(this);
+ }
+ }
+ if (result instanceof QueryResult && kvRequest.isQueryRequest()) {
+ QueryRequest qreq = (QueryRequest)kvRequest;
+ qreq.addQueryTraces(((QueryResult)result).getQueryTraces());
+ }
+ if (result instanceof TableResult && rateLimiterMap != null) {
+ /* update rate limiter settings for table */
+ TableLimits tl = ((TableResult)result).getTableLimits();
+ updateRateLimiters(((TableResult)result).getTableName(), tl);
+ }
+ /*
+ * We may not have rate limiters yet because queries may
+ * not have a tablename until after the first request.
+ * So try to get rate limiters if we don't have them yet and
+ * this is a QueryRequest.
+ */
+ if (rateLimiterMap != null && ctx.readLimiter == null) {
+ ctx.readLimiter = getQueryRateLimiter(kvRequest, true);
+ }
+ if (rateLimiterMap != null && ctx.writeLimiter == null) {
+ ctx.writeLimiter = getQueryRateLimiter(kvRequest, false);
+ }
+ return result;
+ }
- if (isLoggable(logger, Level.FINE) &&
- !kvRequest.getIsRefresh()) {
- logTrace(logger, "Response: " + requestClass +
- ", status=" +
- responseHandler.getStatus() +
- ", requestId=" + requestId );
- }
+ /**
+ * Handle rate limiting based on the Result.
+ * This consumes the actual units used by the request and applies a
+ * non-blocking delay if needed.
+ */
+ private CompletableFuture handlePostRateLimit(RequestContext ctx,
+ Result result) {
+ final Request kvRequest = ctx.kvRequest;
+ int postRateLimitDelayMs = consumeLimiterUnits(ctx.readLimiter,
+ result.getReadUnitsInternal());
+ postRateLimitDelayMs += consumeLimiterUnits(ctx.writeLimiter,
+ result.getWriteUnitsInternal());
+
+ return createDelayFuture(postRateLimitDelayMs)
+ .thenApply(rateDelay -> {
+ ctx.rateDelayedMs.addAndGet(rateDelay);
+ result.setRateLimitDelayedMs(ctx.rateDelayedMs.get());
- ByteBuf wireContent = responseHandler.getContent();
- Result res = processResponse(responseHandler.getStatus(),
- responseHandler.getHeaders(),
- wireContent,
- kvRequest,
- serialVersionUsed,
- queryVersionUsed);
- rateDelayedMs += getRateDelayedFromHeader(
- responseHandler.getHeaders());
- int resSize = wireContent.readerIndex();
- long networkLatency =
- (System.nanoTime() - latencyNanos) / 1_000_000;
-
- setTopology(res.getTopology());
-
- if (serialVersionUsed < 3) {
- /* so we can emit a one-time message if the app */
- /* tries to access modificationTime */
- if (res instanceof GetResult) {
- ((GetResult)res).setClient(this);
- } else if (res instanceof WriteResult) {
- ((WriteResult)res).setClient(this);
- }
- }
+ /* copy retry stats to Result on successful operation */
+ result.setRetryStats(kvRequest.getRetryStats());
+ kvRequest.setRateLimitDelayedMs(ctx.rateDelayedMs.get());
- if (res instanceof QueryResult && kvRequest.isQueryRequest()) {
- QueryRequest qreq = (QueryRequest)kvRequest;
- qreq.addQueryTraces(((QueryResult)res).getQueryTraces());
- }
+ statsControl.observe(kvRequest,
+ Math.toIntExact(ctx.networkLatency),
+ ctx.reqSize, ctx.resSize);
+ checkAuthRefreshList(kvRequest);
+ return result;
+ });
+ }
- if (res instanceof TableResult && rateLimiterMap != null) {
- /* update rate limiter settings for table */
- TableLimits tl = ((TableResult)res).getTableLimits();
- updateRateLimiters(((TableResult)res).getTableName(), tl);
- }
+ /*
+ * Main error handling entry point.
+ */
+ private CompletableFuture handleError(RequestContext ctx,
+ Throwable err) {
+ final Throwable actualCause =
+ (err instanceof CompletionException && err.getCause() != null) ?
+ err.getCause() : err;
+
+ /* set exception on context */
+ ctx.exception = actualCause;
+
+ /* Get the appropriate error handler and delegate */
+ ErrorHandler handler = findErrorHandler(actualCause.getClass());
+ if (handler != null) {
+ return handler.handle(ctx, actualCause);
+ }
+
+ /* Default throwable: retry with small delay */
+ final String name = actualCause.getClass().getName();
+ logInfo(logger, "Client execute Throwable, name: " +
+ name + "message: " + actualCause.getMessage());
+ return retryRequest(ctx, 10, actualCause);
+ }
- /*
- * We may not have rate limiters yet because queries may
- * not have a tablename until after the first request.
- * So try to get rate limiters if we don't have them yet and
- * this is a QueryRequest.
- */
- if (rateLimiterMap != null && readLimiter == null) {
- readLimiter = getQueryRateLimiter(kvRequest, true);
- }
- if (rateLimiterMap != null && writeLimiter == null) {
- writeLimiter = getQueryRateLimiter(kvRequest, false);
- }
+ /*
+ * Initializes the error handlers map with specific exception types
+ * and their corresponding handling strategies.
+ * This method sets up a mapping between various exception classes
+ * and the methods responsible for handling them, facilitating appropriate
+ * error management and retry logic during request execution.
+ */
+ private void initErrorHandlers() {
+ errorHandlers.put(AuthenticationException.class,
+ this::handleAuthException);
+ errorHandlers.put(InvalidAuthorizationException.class,
+ this::handleInvalidAuthError);
+ errorHandlers.put(SecurityInfoNotReadyException.class,
+ this::handleSecurityNotReadyError);
+ errorHandlers.put(RetryableException.class,
+ this::handleRetryableError);
+ errorHandlers.put(UnsupportedQueryVersionException.class,
+ this::handleQueryVerError);
+ errorHandlers.put(UnsupportedProtocolException.class,
+ this::handleProtocolVerError);
+ errorHandlers.put(RequestTimeoutException.class, this::failRequest);
+ errorHandlers.put(NoSQLException.class, this::failRequest);
+ errorHandlers.put(RuntimeException.class, this::failRequest);
+ errorHandlers.put(IOException.class, this::handleIOError);
+ errorHandlers.put(InterruptedException.class,
+ this::handleInterruptedError);
+ errorHandlers.put(ExecutionException.class, this::handleExecutionError);
+ errorHandlers.put(TimeoutException.class, this::handleTimeoutError);
+ /* Add any new error handlers here */
+ }
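
ErrorHandler is used here as a small strategy type keyed by exception class, and findErrorHandler (below) walks up the class hierarchy so a handler registered for, say, RetryableException also covers its subclasses. A stand-alone sketch of that dispatch pattern follows; the handler and context types are placeholders, not the driver's ErrorHandler.

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.TimeoutException;

    public class ErrorDispatchSketch {
        @FunctionalInterface
        interface Handler {
            CompletableFuture<String> handle(String ctx, Throwable err);
        }

        private static final Map<Class<?>, Handler> handlers = new HashMap<>();
        static {
            handlers.put(IOException.class,
                (ctx, err) -> CompletableFuture.completedFuture("retry " + ctx));
            handlers.put(TimeoutException.class,
                (ctx, err) -> CompletableFuture.failedFuture(err));
        }

        /* walk the class hierarchy until a registered handler is found */
        static Handler find(Class<?> clazz) {
            while (clazz != null) {
                Handler h = handlers.get(clazz);
                if (h != null) {
                    return h;
                }
                clazz = clazz.getSuperclass();
            }
            return null;
        }

        public static void main(String[] args) {
            /* FileNotFoundException resolves to the IOException handler */
            Handler h = find(FileNotFoundException.class);
            System.out.println(
                h.handle("req-1", new IOException("lost channel")).join());
        }
    }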
- /* consume rate limiter units based on actual usage */
- rateDelayedMs += consumeLimiterUnits(readLimiter,
- res.getReadUnitsInternal(),
- thisIterationTimeoutMs);
- rateDelayedMs += consumeLimiterUnits(writeLimiter,
- res.getWriteUnitsInternal(),
- thisIterationTimeoutMs);
- res.setRateLimitDelayedMs(rateDelayedMs);
+ /*
+ * Marks the request as failed and returns a failed {@link CompletableFuture}.
+ */
+ private CompletableFuture failRequest(RequestContext ctx,
+ Throwable ex) {
+ final String name = ex.getClass().getName();
+ final String message = String.format("Client execute %s: %s",
+ name, ex.getMessage());
+ logFine(logger, message);
+ ctx.kvRequest.setRateLimitDelayedMs(ctx.rateDelayedMs.get());
+ statsControl.observeError(ctx.kvRequest);
+ return CompletableFuture.failedFuture(ex);
+ }
- /* copy retry stats to Result on successful operation */
- res.setRetryStats(kvRequest.getRetryStats());
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
+ /*
+ * Schedules a retry for the request with the given delay.
+ * Updates retry counters and statistics.
+ */
+ private CompletableFuture retryRequest(RequestContext ctx,
+ int delayMs, Throwable ex) {
+ Request kvRequest = ctx.kvRequest;
+ /* query and protocol exceptions are not errors, do not add them to
+ * retry stats.
+ */
+ if (!(ex instanceof UnsupportedProtocolException
+ || ex instanceof UnsupportedQueryVersionException)) {
+ kvRequest.addRetryException(ex.getClass());
+ kvRequest.incrementRetries();
+ kvRequest.addRetryDelayMs(delayMs);
+ }
+ return scheduleRetry(ctx, delayMs);
+ }
- statsControl.observe(kvRequest, Math.toIntExact(networkLatency),
- contentLength, resSize);
+ /**
+ * Looks up an error handler for the given class by traversing the
+ * class hierarchy until a registered handler is found.
+ *
+ * @param clazz Exception class to resolve
+ * @return A matching {@link ErrorHandler} or {@code null} if none found
+ */
+ private ErrorHandler findErrorHandler(Class<?> clazz) {
+ while (clazz != null) {
+ if (errorHandlers.containsKey(clazz)) {
+ return errorHandlers.get(clazz);
+ }
+ clazz = clazz.getSuperclass();
+ }
+ return null;
+ }
- checkAuthRefreshList(kvRequest);
+ /*
+ * Error handler for {@link AuthenticationException}
+ */
+ private CompletableFuture handleAuthException(RequestContext ctx,
+ Throwable ex) {
- return res;
+ if (authProvider instanceof StoreAccessTokenProvider) {
+ authProvider.flushCache();
+ return retryRequest(ctx, 0, ex);
+ } else {
+ logInfo(logger, "Unexpected authentication exception: " + ex);
+ return failRequest(ctx, new NoSQLException(
+ "Unexpected exception: " + ex.getMessage(), ex));
+ }
+ }
- } catch (AuthenticationException rae) {
- if (authProvider instanceof StoreAccessTokenProvider) {
- final StoreAccessTokenProvider satp =
- (StoreAccessTokenProvider) authProvider;
- satp.bootstrapLogin(kvRequest);
- kvRequest.addRetryException(rae.getClass());
- kvRequest.incrementRetries();
- exception = rae;
- continue;
- }
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logInfo(logger, "Unexpected authentication exception: " +
- rae);
- throw new NoSQLException("Unexpected exception: " +
- rae.getMessage(), rae);
- } catch (InvalidAuthorizationException iae) {
- /*
- * Allow a single retry for invalid/expired auth
- *
- * This includes "clock skew" errors or signature refresh
- * failures. This does not include permissions-related errors,
- * which would be a UnauthorizedException.
- */
- if (retriedInvalidAuthorizationException(kvRequest)) {
- /* same as NoSQLException below */
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logFine(logger, "Client execute NoSQLException: " +
- iae.getMessage());
- throw iae;
- }
- /* flush auth cache and do one retry */
- authProvider.flushCache();
- kvRequest.addRetryException(iae.getClass());
- kvRequest.incrementRetries();
- exception = iae;
- logFine(logger,
- "Client retrying on InvalidAuthorizationException: " +
- iae.getMessage());
- continue;
- } catch (SecurityInfoNotReadyException sinre) {
- kvRequest.addRetryException(sinre.getClass());
- exception = sinre;
- int delayMs = SEC_ERROR_DELAY_MS;
- if (kvRequest.getNumRetries() > 10) {
- delayMs =
- DefaultRetryHandler.computeBackoffDelay(kvRequest, 0);
- if (delayMs <= 0) {
- break;
- }
- }
- try {
- Thread.sleep(delayMs);
- } catch (InterruptedException ie) {}
- kvRequest.incrementRetries();
- kvRequest.addRetryDelayMs(delayMs);
- continue;
- } catch (RetryableException re) {
-
- if (re instanceof WriteThrottlingException &&
- writeLimiter != null) {
- /* ensure we check write limits next loop */
- checkWriteUnits = true;
- /* set limiter to its limit, if not over already */
- if (writeLimiter.getCurrentRate() < 100.0) {
- writeLimiter.setCurrentRate(100.0);
- }
- /* call retry handler to manage sleep/delay */
- }
- if (re instanceof ReadThrottlingException &&
- readLimiter != null) {
- /* ensure we check read limits next loop */
- checkReadUnits = true;
- /* set limiter to its limit, if not over already */
- if (readLimiter.getCurrentRate() < 100.0) {
- readLimiter.setCurrentRate(100.0);
- }
- /* call retry handler to manage sleep/delay */
- }
+ /*
+ * Error handler for {@link InvalidAuthorizationException}
+ */
+ private CompletableFuture handleInvalidAuthError(RequestContext ctx,
+ Throwable ex) {
+ /*
+ * Allow a single retry for invalid/expired auth
+ *
+ * This includes "clock skew" errors or signature refresh
+ * failures. This does not include permissions-related errors,
+ * which would be an UnauthorizedException.
+ */
+ Request kvRequest = ctx.kvRequest;
+ if (retriedInvalidAuthorizationException(kvRequest)) {
+ return failRequest(ctx, ex);
+ }
+ authProvider.flushCache();
+ logFine(logger,
+ "Client retrying on InvalidAuthorizationException: "
+ + ex.getMessage());
+ return retryRequest(ctx, 0, ex);
+ }
- logFine(logger, "Retryable exception: " +
- re.getMessage());
- /*
- * Handle automatic retries. If this does not throw an error,
- * then the delay (if any) will have been performed and the
- * request should be retried.
- *
- * If there have been too many retries this method will
- * throw the original exception.
- */
-
- kvRequest.addRetryException(re.getClass());
- handleRetry(re, kvRequest);
- kvRequest.incrementRetries();
- exception = re;
- continue;
- } catch (UnsupportedQueryVersionException uqve) {
- /* decrement query version and try again */
- if (decrementQueryVersion(queryVersionUsed) == true) {
- logFine(logger, "Got unsupported query version error " +
- "from server: decrementing query version to " +
- queryVersion + " and trying again.");
- continue;
- }
- throw uqve;
- } catch (UnsupportedProtocolException upe) {
- /* decrement protocol version and try again */
- if (decrementSerialVersion(serialVersionUsed) == true) {
- /* Don't set this exception: it's misleading */
- /* exception = upe; */
- logFine(logger, "Got unsupported protocol error " +
- "from server: decrementing serial version to " +
- serialVersion + " and trying again.");
- continue;
- }
- throw upe;
- } catch (NoSQLException nse) {
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logFine(logger, "Client execute NoSQLException: " +
- nse.getMessage());
- throw nse; /* pass through */
- } catch (RuntimeException e) {
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- if (!kvRequest.getIsRefresh()) {
- /* don't log expected failures from refresh */
- logFine(logger, "Client execute runtime exception: " +
- e.getMessage());
- }
- throw e;
- } catch (IOException ioe) {
- String name = ioe.getClass().getName();
- logFine(logger, "Client execution IOException, name: " +
- name + ", message: " + ioe.getMessage());
- /*
- * An exception in the channel, e.g. the server may have
- * disconnected. Retry.
- */
- kvRequest.addRetryException(ioe.getClass());
- kvRequest.incrementRetries();
- exception = ioe;
+ /*
+ * Error handler for {@link SecurityInfoNotReadyException}
+ */
+ private CompletableFuture handleSecurityNotReadyError(
+ RequestContext ctx, Throwable ex) {
+
+ Request kvRequest = ctx.kvRequest;
+ int delayMs = SEC_ERROR_DELAY_MS;
+ if (kvRequest.getNumRetries() > 10) {
+ delayMs = DefaultRetryHandler.computeBackoffDelay(kvRequest, 0);
+ if (delayMs <= 0) {
+ return failRequest(ctx,
+ new RequestTimeoutException(ctx.timeoutMs,
+ ctx.requestClass + " timed out:" +
+ (ctx.requestId.isEmpty() ? "" :
+ " requestId=" + ctx.requestId) +
+ " nextRequestId=" + nextRequestId() +
+ (kvRequest.getRetryStats() != null ?
+ kvRequest.getRetryStats() : ""), ctx.exception));
+ }
+ }
+ return retryRequest(ctx, delayMs, ex);
+ }
- try {
- Thread.sleep(10);
- } catch (InterruptedException ie) {}
-
- continue;
- } catch (InterruptedException ie) {
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
- logInfo(logger, "Client interrupted exception: " +
- ie.getMessage());
- /* this exception shouldn't retry -- direct throw */
- throw new NoSQLException("Request interrupted: " +
- ie.getMessage());
- } catch (ExecutionException ee) {
- /*
- * This can happen if a channel is bad in HttpClient.getChannel.
- * This happens if the channel is shut down by the server side
- * or the server (proxy) is restarted, etc. Treat it like
- * IOException above, but retry without waiting
- */
- String name = ee.getCause().getClass().getName();
- logFine(logger, "Client ExecutionException, name: " +
- name + ", message: " + ee.getMessage() + ", retrying");
-
- kvRequest.addRetryException(ee.getCause().getClass());
- kvRequest.incrementRetries();
- exception = ee.getCause();
- continue;
- } catch (TimeoutException te) {
- exception = te;
- logInfo(logger, "Timeout exception: " + te);
- break; /* fall through to exception below */
- } catch (Throwable t) {
- /*
- * this is likely an exception from Netty, perhaps a bad
- * connection. Retry.
- */
- /* Maybe make this logFine */
- String name = t.getClass().getName();
- logInfo(logger, "Client execute Throwable, name: " +
- name + "message: " + t.getMessage());
-
- kvRequest.addRetryException(t.getClass());
- kvRequest.incrementRetries();
- exception = t;
- continue;
- } finally {
- /*
- * Because the buffer.retain() is called after initialized, so
- * the reference count of buffer should be always > 0 here, just
- * call buffer.release(refCnt) to release it.
- */
- if (buffer != null) {
- buffer.release(buffer.refCnt());
- }
- if (responseHandler != null) {
- responseHandler.close();
- }
+
+ /*
+ * Error handler for {@link RetryableException}
+ */
+ private CompletableFuture handleRetryableError(RequestContext ctx,
+ Throwable ex) {
+ Request kvRequest = ctx.kvRequest;
+
+ if (ex instanceof WriteThrottlingException && ctx.writeLimiter != null) {
+ /* ensure we check write limits next retry */
+ ctx.checkWriteUnits = true;
+ /* set limiter to its limit, if not over already */
+ if (ctx.writeLimiter.getCurrentRate() < 100.0) {
+ ctx.writeLimiter.setCurrentRate(100.0);
+ }
+ }
+ if (ex instanceof ReadThrottlingException && ctx.readLimiter != null) {
+ /* ensure we check read limits next retry */
+ ctx.checkReadUnits = true;
+ /* set limiter to its limit, if not over already */
+ if (ctx.readLimiter.getCurrentRate() < 100.0) {
+ ctx.readLimiter.setCurrentRate(100.0);
}
- } while (! timeoutRequest(startNanos, timeoutMs, exception));
+ }
+ logFine(logger, "Retryable exception: " + ex.getMessage());
+ /*
+ * Handle automatic retries. If this does not throw an error,
+ * then the delay (if any) will have been performed and the
+ * request should be retried.
+ *
+ * If there have been too many retries this method will
+ * throw the original exception.
+ */
+ int delayMs = handleRetry((RetryableException) ex, kvRequest);
+ return retryRequest(ctx, delayMs, ex);
+ }
+
+ /*
+ * Error handler for {@link UnsupportedQueryVersionException}
+ */
+ private CompletableFuture handleQueryVerError(RequestContext ctx,
+ Throwable ex) {
+ if (decrementQueryVersion(ctx.queryVersionUsed)) {
+ logFine(logger, "Got unsupported query version error " +
+ "from server: decrementing query version to " +
+ queryVersion + " and trying again.");
+ return retryRequest(ctx, 0, ex);
+ }
+ return failRequest(ctx, ex);
+ }
+
+ /*
+ * Error handler for {@link UnsupportedProtocolException}
+ */
+ private CompletableFuture handleProtocolVerError(RequestContext ctx,
+ Throwable ex) {
+ if (decrementSerialVersion(ctx.serialVersionUsed)) {
+ logFine(logger, "Got unsupported protocol error " +
+ "from server: decrementing serial version to " +
+ serialVersion + " and trying again.");
+ return retryRequest(ctx, 0, ex);
+ }
+ return failRequest(ctx, ex);
+ }
- kvRequest.setRateLimitDelayedMs(rateDelayedMs);
- statsControl.observeError(kvRequest);
+ /*
+ * Error handler for {@link IOException}
+ */
+ private CompletableFuture handleIOError(RequestContext ctx,
+ Throwable ex) {
+ Request kvRequest = ctx.kvRequest;
+ String name = ex.getClass().getName();
+ logFine(logger, "Client execution IOException, name: " +
+ name + ", message: " + ex.getMessage());
+ /* Retry only 10 times. We shouldn't keep retrying until the timeout
+ * occurs, as that can consume a lot of async resources.
+ */
+ if (kvRequest.getNumRetries() > 10) {
+ return failRequest(ctx, ex);
+ }
+ return retryRequest(ctx, 10, ex);
+ }
+
+ private CompletableFuture handleInterruptedError(RequestContext ctx,
+ Throwable ex) {
+ logInfo(logger, "Interrupted: " + ex.getMessage());
+ return failRequest(ctx,
+ new NoSQLException("Request interrupted: " + ex.getMessage()));
+ }
+
+ private CompletableFuture handleExecutionError(RequestContext ctx,
+ Throwable ex) {
/*
- * If the request timed out in a single iteration, and the
- * timeout was fairly long, and there was no delay due to
- * rate limiting, reset the session cookie so the next request
- * may use a different server.
+ * This can happen if a channel is bad in HttpClient.getChannel.
+ * This happens if the channel is shut down by the server side
+ * or the server (proxy) is restarted, etc. Treat it like
+ * IOException above, but retry without waiting
*/
- if (timeoutMs == thisIterationTimeoutMs &&
- timeoutMs >= 2000 &&
- rateDelayedMs == 0) {
- setSessionCookieValue(null);
- }
- throw new RequestTimeoutException(timeoutMs,
- requestClass + " timed out:" +
- (requestId.isEmpty() ? "" : " requestId=" + requestId) +
+ String name = ex.getCause().getClass().getName();
+ logFine(logger, "Client ExecutionException, name: " +
+ name + ", message: " + ex.getMessage() + ", retrying");
+ return retryRequest(ctx, 10, ex);
+ }
+
+ private CompletableFuture handleTimeoutError(RequestContext ctx,
+ Throwable ex) {
+ logInfo(logger, "Timeout exception: " + ex);
+ return failRequest(ctx,
+ new RequestTimeoutException(
+ ctx.timeoutMs,
+ ctx.requestClass + " timed out:" +
+ (ctx.requestId.isEmpty() ? "" : " requestId=" + ctx.requestId) +
" nextRequestId=" + nextRequestId() +
- " iterationTimeout=" + thisIterationTimeoutMs + "ms " +
- (kvRequest.getRetryStats() != null ?
- kvRequest.getRetryStats() : ""), exception);
+ (ctx.kvRequest.getRetryStats() != null ?
+ ctx.kvRequest.getRetryStats() : ""),
+ ctx.exception));
}
+ /**
+ * Helper method to create a CompletableFuture that completes after a delay.
+ * This is used for non-blocking asynchronous delays for rate limiting.
+ *
+ * @param delayMs The delay in milliseconds.
+ * @return A CompletableFuture that completes after the specified delay.
+ */
+ private CompletableFuture createDelayFuture(int delayMs) {
+ CompletableFuture delayFuture = new CompletableFuture<>();
+ if (delayMs > 0) {
+ taskExecutor.schedule(() -> delayFuture.complete(delayMs), delayMs,
+ TimeUnit.MILLISECONDS);
+ } else {
+ delayFuture.complete(delayMs); // Complete immediately if no delay
+ }
+ return delayFuture;
+ }
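
The idea is that rate-limit and retry waits are expressed as a future completed by the scheduler rather than a Thread.sleep, so no caller thread is tied up. As an aside (not the driver's approach, which uses its injected taskExecutor so delays run on driver-managed threads), Java 9+ offers a similar standard-library idiom via CompletableFuture.delayedExecutor:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.Executor;
    import java.util.concurrent.TimeUnit;

    public class DelayedExecutorSketch {
        public static void main(String[] args) {
            Executor delayed =
                CompletableFuture.delayedExecutor(50, TimeUnit.MILLISECONDS);
            CompletableFuture
                .supplyAsync(() -> 50, delayed)   /* completes after ~50ms */
                .thenAccept(ms -> System.out.println("waited about " + ms + "ms"))
                .join();
        }
    }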
+
+ private CompletableFuture scheduleRetry(RequestContext ctx,
+ int delayMs) {
+ //TODO check for overall timeout before schedule
+ CompletableFuture retryFuture = new CompletableFuture<>();
+ taskExecutor.schedule(() -> {
+ /* Increment request-id for retry */
+ ctx.requestId = String.valueOf(ctx.nextIdSupplier.get());
+ executeWithRetry(ctx)
+ .whenComplete((res, e) -> {
+ if (e != null) {
+ retryFuture.completeExceptionally(e);
+ } else {
+ retryFuture.complete(res);
+ }
+ });
+ }, delayMs, TimeUnit.MILLISECONDS);
+ return retryFuture;
+ }
/**
* Calculate the timeout for the next iteration.
* This is basically the given timeout minus the time
@@ -1096,7 +1338,7 @@ private RateLimiter getQueryRateLimiter(Request request, boolean read) {
* @return the number of milliseconds delayed due to rate limiting
*/
private int consumeLimiterUnits(RateLimiter rl,
- long units, int timeoutMs) {
+ long units) {
if (rl == null || units <= 0) {
return 0;
@@ -1115,13 +1357,7 @@ private int consumeLimiterUnits(RateLimiter rl,
* better to avoid spikes in throughput and oscillation that
* can result from it.
*/
-
- try {
- return rl.consumeUnitsWithTimeout(units, timeoutMs, false);
- } catch (TimeoutException e) {
- /* Don't throw - operation succeeded. Just return timeoutMs. */
- return timeoutMs;
- }
+ return ((SimpleRateLimiter) rl).consumeExternally(units);
}
@@ -1205,26 +1441,27 @@ boolean timeoutRequest(long startNanos,
*
* @throws IOException
*/
- private short writeContent(ByteBuf content, Request kvRequest,
- short queryVersion)
+ private void writeContent(ByteBuf content, RequestContext ctx)
throws IOException {
+ final Request kvRequest = ctx.kvRequest;
final NettyByteOutputStream bos = new NettyByteOutputStream(content);
- final short versionUsed = serialVersion;
+ ctx.serialVersionUsed = serialVersion;
+ ctx.queryVersionUsed = queryVersion;
+
SerializerFactory factory = chooseFactory(kvRequest);
- factory.writeSerialVersion(versionUsed, bos);
+ factory.writeSerialVersion(ctx.serialVersionUsed, bos);
if (kvRequest instanceof QueryRequest ||
kvRequest instanceof PrepareRequest) {
kvRequest.createSerializer(factory).serialize(kvRequest,
- versionUsed,
- queryVersion,
+ ctx.serialVersionUsed,
+ ctx.queryVersionUsed,
bos);
} else {
kvRequest.createSerializer(factory).serialize(kvRequest,
- versionUsed,
+ ctx.serialVersionUsed,
bos);
}
- return versionUsed;
}
/**
@@ -1238,9 +1475,7 @@ private short writeContent(ByteBuf content, Request kvRequest,
final Result processResponse(HttpResponseStatus status,
HttpHeaders headers,
ByteBuf content,
- Request kvRequest,
- short serialVersionUsed,
- short queryVersionUsed) {
+ RequestContext ctx) {
if (!HttpResponseStatus.OK.equals(status)) {
processNotOKResponse(status, content);
@@ -1254,8 +1489,8 @@ final Result processResponse(HttpResponseStatus status,
Result res = null;
try (ByteInputStream bis = new NettyByteInputStream(content)) {
- res = processOKResponse(bis, kvRequest, serialVersionUsed,
- queryVersionUsed);
+ res = processOKResponse(bis, ctx.kvRequest, ctx.serialVersionUsed,
+ ctx.queryVersionUsed);
}
String sv = headers.get(SERVER_SERIAL_VERSION);
if (sv != null) {
@@ -1384,8 +1619,10 @@ private void setSessionCookie(HttpHeaders headers) {
}
}
- private synchronized void setSessionCookieValue(String pVal) {
- sessionCookie = pVal;
+ private void setSessionCookieValue(String pVal) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ sessionCookie = pVal;
+ });
}
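
Several formerly synchronized methods now funnel their critical sections through ConcurrentUtil.synchronizedCall with an explicit lock object. That utility is not part of this hunk, so the following is only a plausible sketch of its two overloads (void and value-returning), included to make the call sites easier to read; the real oracle.nosql.driver.util.ConcurrentUtil may differ.

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;
    import java.util.function.Supplier;

    final class ConcurrentUtilSketch {
        private ConcurrentUtilSketch() {}

        /* run an action while holding the given lock */
        static void synchronizedCall(Lock lock, Runnable action) {
            lock.lock();
            try {
                action.run();
            } finally {
                lock.unlock();
            }
        }

        /* compute a value while holding the given lock */
        static <T> T synchronizedCall(Lock lock, Supplier<T> action) {
            lock.lock();
            try {
                return action.get();
            } finally {
                lock.unlock();
            }
        }

        public static void main(String[] args) {
            Lock lock = new ReentrantLock();
            int value = synchronizedCall(lock, () -> 42);
            System.out.println(value);
        }
    }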
/**
@@ -1435,20 +1672,22 @@ private void setTableNeedsRefresh(String tableName, boolean needsRefresh) {
* Query table limits and create rate limiters for a table in a
* short-lived background thread.
*/
- private synchronized void backgroundUpdateLimiters(String tableName,
- String compartmentId) {
- if (tableNeedsRefresh(tableName) == false) {
- return;
- }
- setTableNeedsRefresh(tableName, false);
+ private void backgroundUpdateLimiters(String tableName,
+ String compartmentId) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (tableNeedsRefresh(tableName) == false) {
+ return;
+ }
+ setTableNeedsRefresh(tableName, false);
- try {
- threadPool.execute(() -> {
- updateTableLimiters(tableName, compartmentId);
- });
- } catch (RejectedExecutionException e) {
- setTableNeedsRefresh(tableName, true);
- }
+ try {
+ threadPool.execute(() -> {
+ updateTableLimiters(tableName, compartmentId);
+ });
+ } catch (RejectedExecutionException e) {
+ setTableNeedsRefresh(tableName, true);
+ }
+ });
}
/*
@@ -1464,7 +1703,7 @@ private void updateTableLimiters(String tableName, String compartmentId) {
try {
logFine(logger, "Starting GetTableRequest for table '" +
tableName + "'");
- res = (TableResult) this.execute(gtr);
+ res = (TableResult) ConcurrentUtil.awaitFuture(this.execute(gtr));
} catch (Exception e) {
logFine(logger, "GetTableRequest for table '" +
tableName + "' returned exception: " + e.getMessage());
@@ -1508,7 +1747,7 @@ private boolean retriedInvalidAuthorizationException(Request request) {
return rs.getNumExceptions(InvalidAuthorizationException.class) > 0;
}
- private void handleRetry(RetryableException re,
+ private int handleRetry(RetryableException re,
Request kvRequest) {
int numRetries = kvRequest.getNumRetries();
String msg = "Retry for request " +
@@ -1520,7 +1759,7 @@ private void handleRetry(RetryableException re,
logFine(logger, "Too many retries");
throw re;
}
- handler.delay(kvRequest, numRetries, re);
+ return handler.delayTime(kvRequest, numRetries, re);
}
private void logRetries(int numRetries, Throwable exception) {
@@ -1638,19 +1877,21 @@ StatsControl getStatsControl() {
* @return true: version was decremented
* false: already at lowest version number.
*/
- private synchronized boolean decrementSerialVersion(short versionUsed) {
- if (serialVersion != versionUsed) {
- return true;
- }
- if (serialVersion == V4) {
- serialVersion = V3;
- return true;
- }
- if (serialVersion == V3) {
- serialVersion = V2;
- return true;
- }
- return false;
+ private boolean decrementSerialVersion(short versionUsed) {
+ return ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (serialVersion != versionUsed) {
+ return true;
+ }
+ if (serialVersion == V4) {
+ serialVersion = V3;
+ return true;
+ }
+ if (serialVersion == V3) {
+ serialVersion = V2;
+ return true;
+ }
+ return false;
+ });
}
/**
@@ -1660,18 +1901,19 @@ private synchronized boolean decrementSerialVersion(short versionUsed) {
* @return true: version was decremented
* false: already at lowest version number.
*/
- private synchronized boolean decrementQueryVersion(short versionUsed) {
-
- if (queryVersion != versionUsed) {
- return true;
- }
+ private boolean decrementQueryVersion(short versionUsed) {
+ return ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (queryVersion != versionUsed) {
+ return true;
+ }
- if (queryVersion == QueryDriver.QUERY_V3) {
- return false;
- }
+ if (queryVersion == QueryDriver.QUERY_V3) {
+ return false;
+ }
- --queryVersion;
- return true;
+ --queryVersion;
+ return true;
+ });
}
/**
@@ -1805,38 +2047,42 @@ private boolean stringsEqualOrNull(String s1, String s2) {
* Add get, put, delete to cover all auth types
* This is synchronized to avoid 2 requests adding the same table
*/
- private synchronized void addRequestToRefreshList(Request request) {
- logFine(logger, "Adding table to request list: " +
- request.getCompartment() + ":" + request.getTableName());
- PutRequest pr =
- new PutRequest().setTableName(request.getTableName());
- pr.setCompartmentInternal(request.getCompartment());
- pr.setValue(badValue);
- pr.setIsRefresh(true);
- authRefreshRequests.add(pr);
- GetRequest gr =
- new GetRequest().setTableName(request.getTableName());
- gr.setCompartmentInternal(request.getCompartment());
- gr.setKey(badValue);
- gr.setIsRefresh(true);
- authRefreshRequests.add(gr);
- DeleteRequest dr =
- new DeleteRequest().setTableName(request.getTableName());
- dr.setCompartmentInternal(request.getCompartment());
- dr.setKey(badValue);
- dr.setIsRefresh(true);
- authRefreshRequests.add(dr);
+ private void addRequestToRefreshList(Request request) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ logFine(logger, "Adding table to request list: " +
+ request.getCompartment() + ":" + request.getTableName());
+ PutRequest pr =
+ new PutRequest().setTableName(request.getTableName());
+ pr.setCompartmentInternal(request.getCompartment());
+ pr.setValue(badValue);
+ pr.setIsRefresh(true);
+ authRefreshRequests.add(pr);
+ GetRequest gr =
+ new GetRequest().setTableName(request.getTableName());
+ gr.setCompartmentInternal(request.getCompartment());
+ gr.setKey(badValue);
+ gr.setIsRefresh(true);
+ authRefreshRequests.add(gr);
+ DeleteRequest dr =
+ new DeleteRequest().setTableName(request.getTableName());
+ dr.setCompartmentInternal(request.getCompartment());
+ dr.setKey(badValue);
+ dr.setIsRefresh(true);
+ authRefreshRequests.add(dr);
+ });
}
/**
* @hidden
* for internal use
*/
- public synchronized void oneTimeMessage(String msg) {
- if (oneTimeMessages.add(msg) == false) {
- return;
- }
- logWarning(logger, msg);
+ public void oneTimeMessage(String msg) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (oneTimeMessages.add(msg) == false) {
+ return;
+ }
+ logWarning(logger, msg);
+ });
}
private SerializerFactory chooseFactory(Request rq) {
@@ -1940,20 +2186,22 @@ public TopologyInfo getTopology() {
return topology;
}
- private synchronized int getTopoSeqNum() {
- return (topology == null ? -1 : topology.getSeqNum());
+ private int getTopoSeqNum() {
+ return ConcurrentUtil.synchronizedCall(this.lock, () ->
+ (topology == null ? -1 : topology.getSeqNum()));
}
- private synchronized void setTopology(TopologyInfo topo) {
-
- if (topo == null) {
- return;
- }
+ private void setTopology(TopologyInfo topo) {
+ ConcurrentUtil.synchronizedCall(this.lock, () -> {
+ if (topo == null) {
+ return;
+ }
- if (topology == null || topology.getSeqNum() < topo.getSeqNum()) {
- topology = topo;
- trace("New topology: " + topo, 1);
- }
+ if (topology == null || topology.getSeqNum() < topo.getSeqNum()) {
+ topology = topo;
+ trace("New topology: " + topo, 1);
+ }
+ });
}
/*
diff --git a/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleAsyncImpl.java b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleAsyncImpl.java
new file mode 100644
index 00000000..d792027f
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleAsyncImpl.java
@@ -0,0 +1,520 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.http;
+
+import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.util.internal.logging.InternalLoggerFactory;
+import io.netty.util.internal.logging.JdkLoggerFactory;
+import oracle.nosql.driver.AuthorizationProvider;
+import oracle.nosql.driver.NoSQLHandleAsync;
+import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.StatsControl;
+import oracle.nosql.driver.UserInfo;
+import oracle.nosql.driver.iam.SignatureProvider;
+import oracle.nosql.driver.kv.StoreAccessTokenProvider;
+import oracle.nosql.driver.ops.AddReplicaRequest;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.DropReplicaRequest;
+import oracle.nosql.driver.ops.GetIndexesRequest;
+import oracle.nosql.driver.ops.GetIndexesResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.GetTableRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.ListTablesResult;
+import oracle.nosql.driver.ops.MultiDeleteRequest;
+import oracle.nosql.driver.ops.MultiDeleteResult;
+import oracle.nosql.driver.ops.PrepareRequest;
+import oracle.nosql.driver.ops.PrepareResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryPublisher;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.ReplicaStatsRequest;
+import oracle.nosql.driver.ops.ReplicaStatsResult;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.ops.SystemRequest;
+import oracle.nosql.driver.ops.SystemResult;
+import oracle.nosql.driver.ops.SystemStatusRequest;
+import oracle.nosql.driver.ops.TableRequest;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.ops.TableUsageResult;
+import oracle.nosql.driver.ops.WriteMultipleRequest;
+import oracle.nosql.driver.ops.WriteMultipleResult;
+import oracle.nosql.driver.util.ConcurrentUtil;
+import oracle.nosql.driver.util.LogUtil;
+import oracle.nosql.driver.values.FieldValue;
+import oracle.nosql.driver.values.JsonUtils;
+import oracle.nosql.driver.values.MapValue;
+
+import javax.net.ssl.SSLException;
+import java.util.ArrayList;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Flow;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Logger;
+
+public class NoSQLHandleAsyncImpl implements NoSQLHandleAsync {
+ private static final int cores = Runtime.getRuntime().availableProcessors();
+
+ /*
+ * The HTTP client. This is not final so that it can be nulled upon
+ * close.
+ */
+ private final Client client;
+ private final AtomicBoolean isClosed = new AtomicBoolean(false);
+
+ /* thread-pool for scheduling tasks */
+ private final ScheduledExecutorService taskExecutor;
+
+ public NoSQLHandleAsyncImpl(NoSQLHandleConfig config) {
+ configNettyLogging();
+ final Logger logger = getLogger(config);
+ /*
+ * config SslContext first, on-prem authorization provider
+ * will reuse the context in NoSQLHandleConfig
+ */
+ configSslContext(config);
+ taskExecutor = new ScheduledThreadPoolExecutor(cores /* core threads */,
+ new ThreadFactory() {
+ private final AtomicInteger threadNumber = new AtomicInteger(1);
+ @Override
+ public Thread newThread(Runnable r) {
+ final Thread t = Executors.defaultThreadFactory()
+ .newThread(r);
+ t.setName(String.format("nosql-task-executor-%s",
+ threadNumber.getAndIncrement()));
+ t.setDaemon(true);
+ t.setUncaughtExceptionHandler((thread, error) -> {
+ if (ConcurrentUtil.unwrapCompletionException(error)
+ instanceof RejectedExecutionException) {
+ /*
+ * Ignore uncaught error for rejected exception
+ * since that is expected to happen during
+ * executor shut down.
+ */
+ return;
+ }
+ logger.warning(() -> String.format(
+ "Uncaught exception from %s: %s",
+ error, LogUtil.getStackTrace(error)));
+ });
+ return t;
+ }
+ });
+ client = new Client(logger, config, taskExecutor);
+ try {
+ /* configAuthProvider may use client */
+ configAuthProvider(logger, config);
+ } catch (RuntimeException re) {
+ /* cleanup client */
+ client.shutdown();
+ taskExecutor.shutdown();
+ throw re;
+ }
+ }
+
+ /**
+ * Returns the logger used for the driver. If no logger is specified
+ * create one based on this class name.
+ */
+ private Logger getLogger(NoSQLHandleConfig config) {
+ if (config.getLogger() != null) {
+ return config.getLogger();
+ }
+
+ /*
+ * The default logger logs at INFO. If this is too verbose users
+ * must create a logger and pass it in.
+ */
+ Logger logger = Logger.getLogger(getClass().getName());
+ return logger;
+ }
+
+ /**
+ * Configures the logging of Netty library.
+ */
+ private void configNettyLogging() {
+ /*
+ * Configure default Netty logging using Jdk Logger.
+ */
+ InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE);
+ }
+
+ private void configSslContext(NoSQLHandleConfig config) {
+ if (config.getSslContext() != null) {
+ return;
+ }
+ if (config.getServiceURL().getProtocol().equalsIgnoreCase("HTTPS")) {
+ try {
+ SslContextBuilder builder = SslContextBuilder.forClient();
+ if (config.getSSLCipherSuites() != null) {
+ builder.ciphers(config.getSSLCipherSuites());
+ }
+ if (config.getSSLProtocols() != null) {
+ builder.protocols(config.getSSLProtocols());
+ }
+ builder.sessionTimeout(config.getSSLSessionTimeout());
+ builder.sessionCacheSize(config.getSSLSessionCacheSize());
+ config.setSslContext(builder.build());
+ } catch (SSLException se) {
+ throw new IllegalStateException(
+ "Unable to start handle with SSL", se);
+ }
+ }
+ }
+
+ private void configAuthProvider(Logger logger, NoSQLHandleConfig config) {
+ final AuthorizationProvider ap = config.getAuthorizationProvider();
+ if (ap instanceof StoreAccessTokenProvider) {
+ final StoreAccessTokenProvider stProvider =
+ (StoreAccessTokenProvider) ap;
+ if (stProvider.getLogger() == null) {
+ stProvider.setLogger(logger);
+ }
+ if (stProvider.isSecure() &&
+ stProvider.getEndpoint() == null) {
+ String endpoint = config.getServiceURL().toString();
+ if (endpoint.endsWith("/")) {
+ endpoint = endpoint.substring(0, endpoint.length() - 1);
+ }
+ stProvider.setEndpoint(endpoint)
+ .setSslContext(config.getSslContext())
+ .setSslHandshakeTimeout(
+ config.getSSLHandshakeTimeout());
+ }
+
+ } else if (ap instanceof SignatureProvider) {
+ SignatureProvider sigProvider = (SignatureProvider) ap;
+ if (sigProvider.getLogger() == null) {
+ sigProvider.setLogger(logger);
+ }
+ sigProvider.prepare(config);
+ if (config.getAuthRefresh()) {
+ sigProvider.setOnSignatureRefresh(new SigRefresh());
+ client.createAuthRefreshList();
+ }
+ }
+ }
+
+ @Override
+ public CompletableFuture delete(DeleteRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture get(GetRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<PutResult> put(PutRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<WriteMultipleResult> writeMultiple(
+ WriteMultipleRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<MultiDeleteResult> multiDelete(
+ MultiDeleteRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<QueryResult> query(QueryRequest request) {
+ return queryAsync(request);
+ }
+
+ CompletableFuture<QueryResult> queryAsync(QueryRequest request) {
+ return executeASync(request)
+ .thenCompose(result -> {
+ /* Complex queries need RCB, run asynchronously */
+ if (!request.isSimpleQuery()) {
+ // TODO supplyAsync runs in fork-join pool.
+ // Change to dedicated pool
+ return CompletableFuture.supplyAsync(() -> result);
+ }
+ return CompletableFuture.completedFuture(result);
+ })
+ .thenApply(result -> ((QueryResult) result));
+ }
+
+ @Override
+ public Flow.Publisher queryPaginator(QueryRequest request) {
+ return new QueryPublisher(this, request);
+ }
+
+ @Override
+ public CompletableFuture<PrepareResult> prepare(PrepareRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<TableResult> tableRequest(TableRequest request) {
+ return executeASync(request).thenApply(tres -> {
+ TableResult res = (TableResult) tres;
+ /* update rate limiters, if table has limits */
+ client.updateRateLimiters(res.getTableName(), res.getTableLimits());
+ return res;
+ });
+ }
+
+ @Override
+ public CompletableFuture<TableResult> getTable(GetTableRequest request) {
+ return executeASync(request).thenApply(tres -> {
+ TableResult res = (TableResult) tres;
+ /* update rate limiters, if table has limits */
+ client.updateRateLimiters(res.getTableName(), res.getTableLimits());
+ return res;
+ });
+ }
+
+ @Override
+ public CompletableFuture<SystemResult> systemRequest(
+ SystemRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<SystemResult> systemStatus(
+ SystemStatusRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<TableUsageResult> getTableUsage(
+ TableUsageRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<ListTablesResult> listTables(
+ ListTablesRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<GetIndexesResult> getIndexes(
+ GetIndexesRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<TableResult> addReplica(
+ AddReplicaRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<TableResult> dropReplica(
+ DropReplicaRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public CompletableFuture<ReplicaStatsResult> getReplicaStats(
+ ReplicaStatsRequest request) {
+ return executeASync(request);
+ }
+
+ @Override
+ public void close() {
+ if (isClosed.compareAndSet(false, true)) {
+ client.shutdown();
+ taskExecutor.shutdown();
+ }
+ }
+
+ @Override
+ public CompletableFuture<String[]> listNamespaces() {
+ return doSystemRequest("show as json namespaces")
+ .thenApply((SystemResult dres) -> {
+ String jsonResult = dres.getResultString();
+ if (jsonResult == null) {
+ return null;
+ }
+ MapValue root = JsonUtils.createValueFromJson(jsonResult, null)
+ .asMap();
+
+ FieldValue namespaces = root.get("namespaces");
+ if (namespaces == null) {
+ return null;
+ }
+
+ ArrayList<String> results = new ArrayList<String>(
+ namespaces.asArray().size());
+ for (FieldValue val : namespaces.asArray()) {
+ results.add(val.getString());
+ }
+ return results.toArray(new String[0]);
+ });
+ }
+
+ @Override
+ public CompletableFuture<UserInfo[]> listUsers() {
+ return doSystemRequest("show as json users")
+ .thenApply((SystemResult dres) -> {
+ String jsonResult = dres.getResultString();
+ if (jsonResult == null) {
+ return null;
+ }
+
+ MapValue root = JsonUtils.createValueFromJson(
+ jsonResult, null).asMap();
+
+ FieldValue users = root.get("users");
+ if (users == null) {
+ return null;
+ }
+
+ ArrayList<UserInfo> results = new ArrayList<UserInfo>(
+ users.asArray().size());
+
+ for (FieldValue val : users.asArray()) {
+ String id = val.asMap().getString("id");
+ String name = val.asMap().getString("name");
+ results.add(new UserInfo(id, name));
+ }
+ return results.toArray(new UserInfo[0]);
+ });
+ }
+
+ @Override
+ public CompletableFuture<String[]> listRoles() {
+ return doSystemRequest("show as json roles")
+ .thenApply((SystemResult dres) -> {
+ String jsonResult = dres.getResultString();
+ if (jsonResult == null) {
+ return null;
+ }
+ MapValue root = JsonUtils.createValueFromJson(
+ jsonResult, null).asMap();
+
+ FieldValue roles = root.get("roles");
+ if (roles == null) {
+ return null;
+ }
+
+ ArrayList<String> results = new ArrayList<String>(
+ roles.asArray().size());
+ for (FieldValue val : roles.asArray()) {
+ String role = val.asMap().getString("name");
+ results.add(role);
+ }
+ return results.toArray(new String[0]);
+ });
+ }
+
+ /**
+ * Internal method used by the list* methods; it applies default timeouts.
+ */
+ private CompletableFuture<SystemResult> doSystemRequest(String statement) {
+ return doSystemRequest(statement, 30000, 1000);
+ }
+
+ @Override
+ public CompletableFuture<TableResult> doTableRequest(TableRequest request,
+ int timeoutMs,
+ int pollIntervalMs) {
+
+ return tableRequest(request).thenCompose((TableResult res) ->
+ res.waitForCompletionAsync(this, timeoutMs, pollIntervalMs)
+ .thenApply(v -> res));
+ }
+
+ @Override
+ public CompletableFuture<SystemResult> doSystemRequest(String statement,
+ int timeoutMs,
+ int pollIntervalMs) {
+ checkClient();
+ SystemRequest dreq =
+ new SystemRequest().setStatement(statement.toCharArray());
+ return systemRequest(dreq).thenCompose((SystemResult dres) ->
+ dres.waitForCompletionAsync(this, timeoutMs, pollIntervalMs)
+ .thenApply(v -> dres));
+ }
+
+ @Override
+ public StatsControl getStatsControl() {
+ return client.getStatsControl();
+ }
+
+ void checkClient() {
+ if (isClosed.get()) {
+ throw new IllegalStateException("NoSQLHandle has been closed");
+ }
+ }
+
+ /**
+ * @hidden
+ * For testing use
+ */
+ public Client getClient() {
+ return client;
+ }
+
+ /**
+ * @hidden
+ * For testing use
+ */
+ public short getSerialVersion() {
+ return client.getSerialVersion();
+ }
+
+ /**
+ * @hidden
+ *
+ * Testing use only.
+ */
+ public void setDefaultNamespace(String ns) {
+ client.setDefaultNamespace(ns);
+ }
+
+ @SuppressWarnings("unchecked")
+ <T extends Result> CompletableFuture<T> executeASync(Request request) {
+ checkClient();
+ return client.execute(request).thenApply(result -> (T) result);
+ }
+
+ public ScheduledExecutorService getTaskExecutor() {
+ return taskExecutor;
+ }
+
+ /**
+ * Cloud service only.
+ * The refresh method of this class is called when a Signature is refreshed
+ * in SignatureProvider. This happens every 4 minutes or so. This mechanism
+ * allows the authentication and authorization information cached by the
+ * server to be refreshed out of band with the normal request path.
+ */
+ private class SigRefresh implements SignatureProvider.OnSignatureRefresh {
+
+ /*
+ * Attempt to refresh the server's authentication and authorization
+ * information for a new signature.
+ */
+ @Override
+ public void refresh(long refreshMs) {
+ client.doRefresh(refreshMs);
+ }
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java
index b29b039f..edc4a706 100644
--- a/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java
+++ b/driver/src/main/java/oracle/nosql/driver/http/NoSQLHandleImpl.java
@@ -7,18 +7,10 @@
package oracle.nosql.driver.http;
-import java.util.ArrayList;
-import java.util.logging.Logger;
-
-import javax.net.ssl.SSLException;
-
-import oracle.nosql.driver.AuthorizationProvider;
import oracle.nosql.driver.NoSQLHandle;
import oracle.nosql.driver.NoSQLHandleConfig;
import oracle.nosql.driver.StatsControl;
import oracle.nosql.driver.UserInfo;
-import oracle.nosql.driver.iam.SignatureProvider;
-import oracle.nosql.driver.kv.StoreAccessTokenProvider;
import oracle.nosql.driver.ops.AddReplicaRequest;
import oracle.nosql.driver.ops.DeleteRequest;
import oracle.nosql.driver.ops.DeleteResult;
@@ -41,6 +33,8 @@
import oracle.nosql.driver.ops.QueryResult;
import oracle.nosql.driver.ops.ReplicaStatsRequest;
import oracle.nosql.driver.ops.ReplicaStatsResult;
+import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.ops.Result;
import oracle.nosql.driver.ops.SystemRequest;
import oracle.nosql.driver.ops.SystemResult;
import oracle.nosql.driver.ops.SystemStatusRequest;
@@ -50,13 +44,7 @@
import oracle.nosql.driver.ops.TableUsageResult;
import oracle.nosql.driver.ops.WriteMultipleRequest;
import oracle.nosql.driver.ops.WriteMultipleResult;
-import oracle.nosql.driver.values.FieldValue;
-import oracle.nosql.driver.values.JsonUtils;
-import oracle.nosql.driver.values.MapValue;
-
-import io.netty.handler.ssl.SslContextBuilder;
-import io.netty.util.internal.logging.InternalLoggerFactory;
-import io.netty.util.internal.logging.JdkLoggerFactory;
+import oracle.nosql.driver.util.ConcurrentUtil;
/**
* The methods in this class require non-null arguments. Because they all
@@ -64,235 +52,110 @@
* single place.
*/
public class NoSQLHandleImpl implements NoSQLHandle {
-
/*
* The HTTP client. This is not final so that it can be nulled upon
* close.
*/
- private Client client;
+ private final NoSQLHandleAsyncImpl asyncHandle;
public NoSQLHandleImpl(NoSQLHandleConfig config) {
-
- configNettyLogging();
- final Logger logger = getLogger(config);
-
- /*
- * config SslContext first, on-prem authorization provider
- * will reuse the context in NoSQLHandleConfig
- */
- configSslContext(config);
- client = new Client(logger, config);
- try {
- /* configAuthProvider may use client */
- configAuthProvider(logger, config);
- } catch (RuntimeException re) {
- /* cleanup client */
- client.shutdown();
- throw re;
- }
- }
-
- /**
- * Returns the logger used for the driver. If no logger is specified
- * create one based on this class name.
- */
- private Logger getLogger(NoSQLHandleConfig config) {
- if (config.getLogger() != null) {
- return config.getLogger();
- }
-
- /*
- * The default logger logs at INFO. If this is too verbose users
- * must create a logger and pass it in.
- */
- Logger logger = Logger.getLogger(getClass().getName());
- return logger;
- }
-
- /**
- * Configures the logging of Netty library.
- */
- private void configNettyLogging() {
- /*
- * Configure default Netty logging using Jdk Logger.
- */
- InternalLoggerFactory.setDefaultFactory(JdkLoggerFactory.INSTANCE);
- }
-
- private void configSslContext(NoSQLHandleConfig config) {
- if (config.getSslContext() != null) {
- return;
- }
- if (config.getServiceURL().getProtocol().equalsIgnoreCase("HTTPS")) {
- try {
- SslContextBuilder builder = SslContextBuilder.forClient();
- if (config.getSSLCipherSuites() != null) {
- builder.ciphers(config.getSSLCipherSuites());
- }
- if (config.getSSLProtocols() != null) {
- builder.protocols(config.getSSLProtocols());
- }
- builder.sessionTimeout(config.getSSLSessionTimeout());
- builder.sessionCacheSize(config.getSSLSessionCacheSize());
- config.setSslContext(builder.build());
- } catch (SSLException se) {
- throw new IllegalStateException(
- "Unable to start handle with SSL", se);
- }
- }
- }
-
- private void configAuthProvider(Logger logger, NoSQLHandleConfig config) {
- final AuthorizationProvider ap = config.getAuthorizationProvider();
- if (ap instanceof StoreAccessTokenProvider) {
- final StoreAccessTokenProvider stProvider =
- (StoreAccessTokenProvider) ap;
- if (stProvider.getLogger() == null) {
- stProvider.setLogger(logger);
- }
- if (stProvider.isSecure() &&
- stProvider.getEndpoint() == null) {
- String endpoint = config.getServiceURL().toString();
- if (endpoint.endsWith("/")) {
- endpoint = endpoint.substring(0, endpoint.length() - 1);
- }
- stProvider.setEndpoint(endpoint)
- .setSslContext(config.getSslContext())
- .setSslHandshakeTimeout(
- config.getSSLHandshakeTimeout());
- }
- } else if (ap instanceof SignatureProvider) {
- SignatureProvider sigProvider = (SignatureProvider) ap;
- if (sigProvider.getLogger() == null) {
- sigProvider.setLogger(logger);
- }
- sigProvider.prepare(config);
- if (config.getAuthRefresh()) {
- sigProvider.setOnSignatureRefresh(new SigRefresh());
- client.createAuthRefreshList();
- }
- }
+ asyncHandle = new NoSQLHandleAsyncImpl(config);
}
@Override
public DeleteResult delete(DeleteRequest request) {
- checkClient();
- return (DeleteResult) client.execute(request);
+ return executeSync(request);
}
@Override
public GetResult get(GetRequest request) {
- checkClient();
- return (GetResult) client.execute(request);
+ return executeSync(request);
}
@Override
public PutResult put(PutRequest request) {
- checkClient();
- return (PutResult) client.execute(request);
+ return executeSync(request);
}
@Override
public WriteMultipleResult writeMultiple(WriteMultipleRequest request) {
- checkClient();
- return (WriteMultipleResult) client.execute(request);
+ return executeSync(request);
}
@Override
public MultiDeleteResult multiDelete(MultiDeleteRequest request) {
- checkClient();
- return (MultiDeleteResult) client.execute(request);
+ return executeSync(request);
}
@Override
public QueryResult query(QueryRequest request) {
- checkClient();
- return (QueryResult) client.execute(request);
+ return ConcurrentUtil.awaitFuture(asyncHandle.queryAsync(request));
}
@Override
public QueryIterableResult queryIterable(QueryRequest request) {
- checkClient();
+ asyncHandle.checkClient();
return new QueryIterableResult(request, this);
}
@Override
public PrepareResult prepare(PrepareRequest request) {
- checkClient();
- return (PrepareResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableResult tableRequest(TableRequest request) {
- checkClient();
- TableResult res = (TableResult) client.execute(request);
- /* update rate limiters, if table has limits */
- client.updateRateLimiters(res.getTableName(), res.getTableLimits());
- return res;
+ return executeSync(request);
}
@Override
public TableResult getTable(GetTableRequest request) {
- checkClient();
- TableResult res = (TableResult) client.execute(request);
- /* update rate limiters, if table has limits */
- client.updateRateLimiters(res.getTableName(), res.getTableLimits());
- return res;
+ return executeSync(request);
}
@Override
public SystemResult systemRequest(SystemRequest request) {
- checkClient();
- return (SystemResult) client.execute(request);
+ return executeSync(request);
}
@Override
public SystemResult systemStatus(SystemStatusRequest request) {
- checkClient();
- return (SystemResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableUsageResult getTableUsage(TableUsageRequest request) {
- checkClient();
- return (TableUsageResult) client.execute(request);
+ return executeSync(request);
}
@Override
public ListTablesResult listTables(ListTablesRequest request) {
- checkClient();
- return (ListTablesResult) client.execute(request);
+ return executeSync(request);
}
@Override
public GetIndexesResult getIndexes(GetIndexesRequest request) {
- checkClient();
- return (GetIndexesResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableResult addReplica(AddReplicaRequest request) {
- checkClient();
- return (TableResult) client.execute(request);
+ return executeSync(request);
}
@Override
public TableResult dropReplica(DropReplicaRequest request) {
- checkClient();
- return (TableResult) client.execute(request);
+ return executeSync(request);
}
@Override
public ReplicaStatsResult getReplicaStats(ReplicaStatsRequest request) {
- checkClient();
- return (ReplicaStatsResult) client.execute(request);
+ return executeSync(request);
}
@Override
- synchronized public void close() {
- checkClient();
- client.shutdown();
- client = null;
+ public void close() {
+ asyncHandle.close();
}
/**
@@ -302,25 +165,7 @@ synchronized public void close() {
*/
@Override
public String[] listNamespaces() {
- SystemResult dres = doSystemRequest("show as json namespaces");
-
- String jsonResult = dres.getResultString();
- if (jsonResult == null) {
- return null;
- }
- MapValue root = JsonUtils.createValueFromJson(jsonResult, null).asMap();
-
- FieldValue namespaces = root.get("namespaces");
- if (namespaces == null) {
- return null;
- }
-
- ArrayList results = new ArrayList(
- namespaces.asArray().size());
- for (FieldValue val : namespaces.asArray()) {
- results.add(val.getString());
- }
- return results.toArray(new String[0]);
+ return ConcurrentUtil.awaitFuture(asyncHandle.listNamespaces());
}
/**
@@ -330,29 +175,7 @@ public String[] listNamespaces() {
*/
@Override
public UserInfo[] listUsers() {
- SystemResult dres = doSystemRequest("show as json users");
-
- String jsonResult = dres.getResultString();
- if (jsonResult == null) {
- return null;
- }
-
- MapValue root = JsonUtils.createValueFromJson(jsonResult, null).asMap();
-
- FieldValue users = root.get("users");
- if (users == null) {
- return null;
- }
-
- ArrayList results = new ArrayList(
- users.asArray().size());
-
- for (FieldValue val : users.asArray()) {
- String id = val.asMap().getString("id");
- String name = val.asMap().getString("name");
- results.add(new UserInfo(id, name));
- }
- return results.toArray(new UserInfo[0]);
+ return ConcurrentUtil.awaitFuture(asyncHandle.listUsers());
}
/**
@@ -362,26 +185,7 @@ public UserInfo[] listUsers() {
*/
@Override
public String[] listRoles() {
- SystemResult dres = doSystemRequest("show as json roles");
-
- String jsonResult = dres.getResultString();
- if (jsonResult == null) {
- return null;
- }
- MapValue root = JsonUtils.createValueFromJson(jsonResult, null).asMap();
-
- FieldValue roles = root.get("roles");
- if (roles == null) {
- return null;
- }
-
- ArrayList results = new ArrayList(
- roles.asArray().size());
- for (FieldValue val : roles.asArray()) {
- String role = val.asMap().getString("name");
- results.add(role);
- }
- return results.toArray(new String[0]);
+ return ConcurrentUtil.awaitFuture(asyncHandle.listRoles());
}
@@ -415,16 +219,14 @@ public SystemResult doSystemRequest(String statement,
@Override
public StatsControl getStatsControl() {
- return client.getStatsControl();
+ return asyncHandle.getStatsControl();
}
/**
* Ensure that the client exists and hasn't been closed;
*/
private void checkClient() {
- if (client == null) {
- throw new IllegalStateException("NoSQLHandle has been closed");
- }
+ asyncHandle.checkClient();
}
/**
@@ -432,7 +234,7 @@ private void checkClient() {
* For testing use
*/
public Client getClient() {
- return client;
+ return asyncHandle.getClient();
}
/**
@@ -440,7 +242,7 @@ public Client getClient() {
* For testing use
*/
public short getSerialVersion() {
- return client.getSerialVersion();
+ return asyncHandle.getSerialVersion();
}
/**
@@ -449,25 +251,11 @@ public short getSerialVersion() {
* Testing use only.
*/
public void setDefaultNamespace(String ns) {
- client.setDefaultNamespace(ns);
+ asyncHandle.setDefaultNamespace(ns);
}
- /**
- * Cloud service only.
- * The refresh method of this class is called when a Signature is refreshed
- * in SignatureProvider. This happens every 4 minutes or so. This mechanism
- * allows the authentication and authorization information cached by the
- * server to be refreshed out of band with the normal request path.
- */
- private class SigRefresh implements SignatureProvider.OnSignatureRefresh {
-
- /*
- * Attempt to refresh the server's authentication and authorization
- * information for a new signature.
- */
- @Override
- public void refresh(long refreshMs) {
- client.doRefresh(refreshMs);
- }
+ @SuppressWarnings("unchecked")
+ private <T extends Result> T executeSync(Request request) {
+ return (T) ConcurrentUtil.awaitFuture(asyncHandle.executeASync(request));
}
}
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java b/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java
index e4f7f800..c480cd17 100644
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java
+++ b/driver/src/main/java/oracle/nosql/driver/httpclient/ConnectionPool.java
@@ -8,13 +8,16 @@
package oracle.nosql.driver.httpclient;
import static oracle.nosql.driver.util.LogUtil.logFine;
-import static oracle.nosql.driver.util.LogUtil.logInfo;
-import java.io.IOException;
import java.util.Map;
+import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;
import io.netty.bootstrap.Bootstrap;
@@ -33,11 +36,16 @@
* and tracking of Channels.
*
* Configuration:
- * minSize - actively keep this many alive, even after inactivity, by default
- * this is the number of cores
+ * minSize - actively keep this many alive, even after inactivity; by default
+ * this is set to 2
* inactivityPeriod - remove inactive channels after this many seconds.
* If negative, don't ever remove them
* Logger
+ * maxPoolSize - Maximum number of connections to create. Once that many
+ * channels are acquired, further channel acquires are put into the pending
+ * queue
+ * maxPending - Maximum number of pending acquires. Once the pending queue is
+ * full, further acquires fail until channels are released back to the pool
+ *
*
* Usage
* o acquire()
@@ -52,6 +60,9 @@
* release
* o if no Channels are in the queue for acquire a new one is created and
* placed in the queue on release
+ * o During release, if there are pending acquire requests in the pending
+ * queue, the released channel is used to serve a pending request instead of
+ * being put back on the queue (see the usage sketch below)
*
* Keep-alive and minimum size
* o if a pool is not a minimal pool a refresh task is created on construction.
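+ *
+ * A minimal acquire/release sketch (illustrative only, with error handling
+ * simplified):
+ *
+ *   Future<Channel> f = pool.acquire();
+ *   f.addListener((FutureListener<Channel>) done -> {
+ *       if (done.isSuccess()) {
+ *           Channel ch = done.getNow();
+ *           try {
+ *               // use ch to send a request
+ *           } finally {
+ *               pool.release(ch);
+ *           }
+ *       } else {
+ *           // acquire failed, e.g. the pending queue reached maxPending
+ *       }
+ *   });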
@@ -99,7 +110,22 @@ class ConnectionPool {
* closed.
*/
private final Map stats;
- private int acquiredChannelCount;
+ private final AtomicInteger acquiredChannelCount = new AtomicInteger();
+
+ /* Executor to run keep-alive task periodically */
+ private final ScheduledExecutorService keepAlivescheduler;
+
+ private final int maxPoolSize;
+ private final int maxPending;
+
+ /* State to enforce maxConnections */
+ private final AtomicInteger currentConnectionCount;
+
+ /* State to enforce maxPending */
+ private final AtomicInteger pendingAcquireCount;
+
+ /* Queue to track pending acquires */
+ private final Queue<Promise<Channel>> pendingAcquires;
/**
* Keepalive callback interface
@@ -118,7 +144,7 @@ interface KeepAlive {
*
* @param bootstrap (netty)
* @param handler the handler, mostly used for event callbacks
- * @param logger
+ * @param logger logger
* @param isMinimalPool set to true if this is a one-time, or minimal time
* use. In this case no refresh task is created
* @param poolMin the minimum size at which the pool should be maintained.
@@ -129,13 +155,17 @@ interface KeepAlive {
* to the minimum (if set). This allows bursty behavior to automatically
* clean up when channels are no longer required. This is more for on-prem
* than the cloud service but applies to both.
+ * @param maxPoolSize maximum number of connections in the pool
+ * @param maxPending maximum number of pending acquires
*/
ConnectionPool(Bootstrap bootstrap,
ChannelPoolHandler handler,
Logger logger,
boolean isMinimalPool,
int poolMin,
- int inactivityPeriodSeconds) {
+ int inactivityPeriodSeconds,
+ int maxPoolSize,
+ int maxPending) {
/* clone bootstrap to set handler */
this.bootstrap = bootstrap.clone();
@@ -162,6 +192,12 @@ protected void initChannel(Channel ch) throws Exception {
queue = new ConcurrentLinkedDeque();
stats = new ConcurrentHashMap();
+ this.maxPoolSize = maxPoolSize;
+ this.maxPending = maxPending;
+ this.currentConnectionCount = new AtomicInteger(0);
+ this.pendingAcquireCount = new AtomicInteger(0);
+ this.pendingAcquires = new ConcurrentLinkedDeque<>();
+
/*
* If not creating a minimal pool run RefreshTask every 30s. A
* minimal pool is short-lived so don't create the overhead.
@@ -177,10 +213,17 @@ protected void initChannel(Channel ch) throws Exception {
DEFAULT_REFRESH_PERIOD_SECS :
Math.min(DEFAULT_REFRESH_PERIOD_SECS,
this.inactivityPeriodSeconds);
- this.bootstrap.config().group().next()
- .scheduleAtFixedRate(new RefreshTask(),
- refreshPeriod, refreshPeriod,
- TimeUnit.SECONDS);
+ this.keepAlivescheduler =
+ Executors.newSingleThreadScheduledExecutor(r -> {
+ Thread t = new Thread(r, "nosql-keep-alive");
+ t.setDaemon(true);
+ return t;
+ });
+ keepAlivescheduler.scheduleAtFixedRate(new RefreshTask(),
+ refreshPeriod, refreshPeriod,
+ TimeUnit.SECONDS);
+ } else {
+ this.keepAlivescheduler = null;
}
}
@@ -204,49 +247,34 @@ final Future acquire() {
* significant time sink in terms of affecting overall latency of this call
*
* Acquired channels are removed from the queue and are "owned" by the
- * caller until released, at which time they are put back on the queue.
+ * caller until released, at which time they are put back on the queue or
+ * used to serve pending acquires.
*/
final Future acquire(final Promise promise) {
try {
+ /* 1. Try to get a free channel from the idle pool (LIFO) */
+ Channel channel = queue.pollFirst();
+ if (channel != null) {
+ activateChannel(channel, promise);
+ return promise;
+ }
+
+ /* 2. Pool is empty.
+ * Try to create a new connection respecting maxPoolSize.
+ */
while (true) {
- /* this *removes* the channel from the queue */
- final Channel channel = queue.pollFirst();
- if (channel == null) {
- /* need a new Channel */
- Bootstrap bs = bootstrap.clone();
- ChannelFuture fut = bs.connect();
- if (fut.isDone()) {
- notifyOnConnect(fut, promise);
- } else {
- fut.addListener(new ChannelFutureListener() {
- @Override
- public void operationComplete(
- ChannelFuture future) throws Exception {
- notifyOnConnect(future, promise);
- }
- });
- }
+ int current = currentConnectionCount.get();
+ if (current >= maxPoolSize) {
+ /* Pool is full. Enqueue the request and return */
+ enqueueRequest(promise);
return promise;
}
- /*
- * This logic must happen in the event loop
- */
- EventLoop loop = channel.eventLoop();
- if (loop.inEventLoop()) {
- if (checkChannel(channel, promise)) {
- /* bad channel, try again */
- continue;
- }
- } else {
- /*
- * Note: run() may be executed some time after this method
- * returns a promise. So the caller may have to wait a
- * few milliseconds for the promise to be completed
- * (successfully or not).
- */
- loop.execute(() -> checkChannel(channel, promise));
+ /* CAS (Compare-And-Swap) to reserve a slot */
+ if (currentConnectionCount.compareAndSet(current, current + 1)) {
+ createConnection(promise);
+ return promise;
}
- break;
+ /* If CAS failed, loop retry */
}
} catch (Throwable t) {
promise.tryFailure(t);
@@ -255,21 +283,77 @@ public void operationComplete(
}
/**
- * Release a channel. This is not async. The channel is added to the
- * front of the queue. This class implements a LIFO algorithm to ensure
- * that the first, or first few channels on the queue remain active and
- * are not subject to inactivity timeouts from the server side.
- * Note that inactive released channels will be closed and not
- * re-added to the queue.
+ * Helper to safely enqueue pending requests.
+ */
+ private void enqueueRequest(Promise<Channel> promise) {
+ /* Atomic check-then-act */
+ if (pendingAcquireCount.incrementAndGet() > maxPending) {
+ /* Rollback and fail */
+ pendingAcquireCount.decrementAndGet();
+ promise.tryFailure(new IllegalStateException(
+ "Pending acquire queue has reached its maximum size of "
+ + maxPending));
+ } else {
+ pendingAcquires.add(promise);
+ }
+ }
+
+ /**
+ * Helper to create a new connection.
+ */
+ private void createConnection(Promise<Channel> promise) {
+ Bootstrap bs = bootstrap.clone();
+ ChannelFuture fut = bs.connect();
+ if (fut.isDone()) {
+ notifyOnConnect(fut, promise);
+ } else {
+ fut.addListener((ChannelFutureListener) future ->
+ notifyOnConnect(future, promise));
+ }
+ }
+
+ /**
+ * Release a channel. This is not async.
+ *
+ * If the released channel is inactive, it is closed and not added
+ * back to the pool. Also, if there is a pending acquire, a new channel is
+ * created to replace the closed one.
+ *
+ * If there is a pending acquire, the released channel is assigned to the
+ * pending acquire rather than being released back to the pool.
+ *
+ * Otherwise, the channel is added to the front of the queue.
+ * This class implements a LIFO algorithm to ensure that the first,
+ * or first few channels on the queue remain active and are not subject to
+ * inactivity timeouts from the server side.
+ *
*/
void release(Channel channel) {
if (!channel.isActive()) {
logFine(logger,
"Inactive channel on release, closing: " + channel);
removeChannel(channel);
- } else {
- queue.addFirst(channel);
+ return;
+ }
+
+ /* Check for pending waiters */
+ Promise<Channel> waitingPromise = pendingAcquires.poll();
+ if (waitingPromise != null) {
+ /* Decrement pending count as we pulled one out */
+ int pending = pendingAcquireCount.decrementAndGet();
+ assert pending >= 0;
+ updateStats(channel, false);
+ /* Handoff directly to the waiter and skip the queue */
+ activateChannel(channel, waitingPromise);
+ return;
}
+
+ /* No waiters, put back in idle queue (LIFO) */
+ queue.addFirst(channel);
updateStats(channel, false);
try { handler.channelReleased(channel); } catch (Exception e) {}
}
@@ -287,6 +371,23 @@ public void removeChannel(Channel channel) {
queue.remove(channel);
stats.remove(channel);
channel.close();
+
+ /* Free up the slot */
+ int cur = currentConnectionCount.decrementAndGet();
+ assert cur >= 0;
+
+ /* If there are waiters, use this newly freed slot to create a
+ * connection for them
+ */
+ Promise<Channel> waiter = pendingAcquires.poll();
+ if (waiter != null) {
+ /* We removed a waiter */
+ int pending = pendingAcquireCount.decrementAndGet();
+ assert pending >= 0;
+ /* We are reserving the slot again */
+ currentConnectionCount.incrementAndGet();
+ createConnection(waiter);
+ }
}
/**
@@ -297,6 +398,18 @@ public void removeChannel(Channel channel) {
*/
void close() {
logFine(logger, "Closing pool, stats " + getStats());
+ if (keepAlivescheduler != null) {
+ keepAlivescheduler.shutdown();
+ }
+
+ // Reject pending queue
+ Promise<Channel> pending;
+ while ((pending = pendingAcquires.poll()) != null) {
+ pending.tryFailure(new RejectedExecutionException(
+ "Connection pool is closed"));
+ pendingAcquireCount.decrementAndGet();
+ }
+
/* TODO: do this cleanly */
validatePool("close1");
Channel ch = queue.pollFirst();
@@ -311,51 +424,89 @@ void close() {
* How many channels have been acquired since this pool was created
*/
int getAcquiredChannelCount() {
- return acquiredChannelCount;
+ return acquiredChannelCount.get();
}
private void notifyOnConnect(ChannelFuture future,
- Promise promise) throws Exception {
- if (future.isSuccess()) {
- Channel channel = future.channel();
- updateStats(channel, true);
- handler.channelAcquired(channel);
- if (!promise.trySuccess(channel)) {
- /* Promise was completed (like cancelled), release channel */
- release(channel);
+ Promise<Channel> promise) {
+ try {
+ if (future.isSuccess()) {
+ Channel channel = future.channel();
+ updateStats(channel, true);
+ handler.channelAcquired(channel);
+ if (!promise.trySuccess(channel)) {
+ /* Promise was completed (like cancelled), release channel */
+ release(channel);
+ }
+ } else {
+ /* Connect failed, we must free the slot we reserved */
+ int count = currentConnectionCount.decrementAndGet();
+ assert count >= 0;
+ promise.tryFailure(future.cause());
+
+ /* Retry for next pending if any (since this attempt failed) */
+ Promise<Channel> waiter = pendingAcquires.poll();
+ if (waiter != null) {
+ int pending = pendingAcquireCount.decrementAndGet();
+ assert pending >= 0;
+ currentConnectionCount.incrementAndGet();
+ createConnection(waiter);
+ }
}
- } else {
- promise.tryFailure(future.cause());
+ } catch (Exception e) {
+ promise.tryFailure(e);
}
}
- private boolean checkChannel(final Channel channel,
- final Promise promise) {
-
- /*
- * If channel isn't healthy close it. It's been removed from
- * the queue
- */
- if (!channel.isActive()) {
- logFine(logger,
- "Inactive channel found, closing: " + channel);
- removeChannel(channel);
- promise.tryFailure(new IOException("inactive channel"));
- return true;
+ /**
+ * Helper to verify channel health on the EventLoop
+ */
+ private void activateChannel(final Channel channel, final Promise<Channel> promise) {
+ EventLoop loop = channel.eventLoop();
+ if (loop.inEventLoop()) {
+ checkChannel(channel, promise);
+ } else {
+ loop.execute(() -> checkChannel(channel, promise));
}
+ }
+
+ private void checkChannel(final Channel channel,
+ final Promise<Channel> promise) {
try {
- updateStats(channel, true);
- handler.channelAcquired(channel);
- } catch (Exception e) {} /* ignore */
- promise.setSuccess(channel);
- return false;
+ /*
+ * If channel isn't healthy close it. It's been removed from
+ * the queue
+ */
+ if (!channel.isActive()) {
+ logFine(logger,
+ "Inactive channel found, closing: " + channel);
+ removeChannel(channel);
+ /* Retry the channel acquire; this may queue the request if the
+ * pool filled up in the background
+ */
+ acquire(promise);
+ } else {
+ try {
+ updateStats(channel, true);
+ handler.channelAcquired(channel);
+ } catch (Exception e) {} /* ignore */
+ if (!promise.trySuccess(channel)) {
+ release(channel);
+ }
+ }
+ } catch (Throwable cause) {
+ if (channel != null) {
+ removeChannel(channel); // Ensure slot is freed
+ }
+ promise.tryFailure(cause);
+ }
}
/**
* Returns the total number of channels, acquired and not, in the pool
*/
int getTotalChannels() {
- return queue.size() + acquiredChannelCount;
+ return queue.size() + acquiredChannelCount.get();
}
/**
@@ -365,6 +516,10 @@ int getFreeChannels() {
return queue.size();
}
+ int getPendingAcquires() {
+ return pendingAcquireCount.get();
+ }
+
/**
* Prune channels
* 1. remove any inactive channels (closed by other side)
@@ -388,7 +543,7 @@ int pruneChannels() {
}
}
- /**
+ /*
* If inactivityPeriodSeconds is negative there is nothing to
* prune
*/
@@ -400,7 +555,10 @@ int pruneChannels() {
* period, remove it
*/
ChannelStats cs = stats.get(ch);
- assert cs != null;
+ /* stats race condition check */
+ if (cs == null) {
+ continue;
+ }
long inactive = (now - cs.getLastAcquired())/1000;
if (inactive > inactivityPeriodSeconds) {
logFine(logger,
@@ -444,7 +602,7 @@ int doKeepAlive(int keepAlivePeriod) {
* This works for poolMin of 0 as well. If HttpClient is null
* there is no way to do this either.
*/
- int numToSend = poolMin - acquiredChannelCount;
+ int numToSend = poolMin - acquiredChannelCount.get();
if (numToSend <= 0) {
return 0;
}
@@ -504,12 +662,15 @@ private void validatePool(final String caller) {
* Some sanity checking. Stats size should include all channels in the
* pool -- acquired plus not-acquired
*/
- if ((queue.size() + acquiredChannelCount) != stats.size()) {
+
+ // The check below is not valid under concurrent access, so it is disabled
+
+ /*if ((queue.size() + acquiredChannelCount.get()) != stats.size()) {
logInfo(logger,
"Pool count discrepancy, called from " + caller +
" : Queue size, acquired count, stats size :" + queue.size() + ", " +
acquiredChannelCount + ", " + stats.size());
- }
+ }*/
}
/**
@@ -517,16 +678,16 @@ private void validatePool(final String caller) {
*/
private void updateStats(Channel channel, boolean isAcquire) {
ChannelStats cstats = stats.get(channel);
- if (cstats == null) {
+ if (cstats == null && isAcquire) {
cstats = new ChannelStats();
stats.put(channel, cstats);
}
synchronized(this) {
if (isAcquire) {
- acquiredChannelCount++;
+ acquiredChannelCount.incrementAndGet();
cstats.acquired();
} else {
- acquiredChannelCount--;
+ acquiredChannelCount.decrementAndGet();
}
}
}
@@ -541,12 +702,13 @@ void logStats() {
*/
String getStats() {
StringBuilder sb = new StringBuilder();
- sb.append("acquiredCount=" + acquiredChannelCount +
- ", freeChannelCount=" + queue.size() +
- ", totalChannelCount=" + stats.size());
+ sb.append("acquiredCount=").append(acquiredChannelCount)
+ .append(", freeChannelCount=").append(queue.size())
+ .append(", totalChannelCount=").append(stats.size())
+ .append(", pendingRequests=").append(pendingAcquireCount.get());
sb.append(", [");
for (Map.Entry entry : stats.entrySet()) {
- sb.append("channel=" + entry.getKey().id() + "[");
+ sb.append("channel=").append(entry.getKey().id()).append("[");
entry.getValue().toStringBuilder(sb);
sb.append("]");
}
@@ -576,7 +738,7 @@ int getUseCount(Channel ch) {
* An internal class that maintains stats on Channels. Consider exposing
* it beyond tests.
*/
- class ChannelStats {
+ static class ChannelStats {
/* when the channel was last acquired -- timestamp */
private long lastAcquired;
/* how many times the channel has been used */
@@ -596,8 +758,8 @@ int getUseCount() {
}
void toStringBuilder(StringBuilder sb) {
- sb.append("useCount=" + useCount +
- ", lastAcquired=" + java.time.Instant.ofEpochMilli(lastAcquired));
+ sb.append("useCount=").append(useCount).append(", lastAcquired=");
+ sb.append(java.time.Instant.ofEpochMilli(lastAcquired));
}
@Override
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java
index 492e62b7..6591b8c0 100644
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java
+++ b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClient.java
@@ -11,13 +11,10 @@
import static io.netty.handler.codec.http.HttpMethod.HEAD;
import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
import static oracle.nosql.driver.util.HttpConstants.CONNECTION;
-import static oracle.nosql.driver.util.LogUtil.isFineEnabled;
import static oracle.nosql.driver.util.LogUtil.logFine;
-import static oracle.nosql.driver.util.LogUtil.logInfo;
-import static oracle.nosql.driver.util.LogUtil.logWarning;
-import java.io.IOException;
-import java.util.concurrent.ExecutionException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.logging.Logger;
@@ -29,15 +26,19 @@
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
+import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.ssl.SslContext;
+import io.netty.handler.timeout.ReadTimeoutHandler;
import io.netty.util.AttributeKey;
-import io.netty.util.concurrent.Future;
/*
* If this code is ever made generic, the proxy information obtained
* from this config needs to be abstracted to a generic class.
*/
+import io.netty.util.ReferenceCountUtil;
+import io.netty.util.concurrent.FutureListener;
import oracle.nosql.driver.NoSQLHandleConfig;
+import oracle.nosql.driver.util.ConcurrentUtil;
/**
* Netty HTTP client. Initialization process:
@@ -56,26 +57,32 @@
* use by requests.
*
*
- * Using the client to send request and get a synchronous response. The
+ * Using the client to send a request. The
* request must be an instance of HttpRequest:
+ *
+ * For synchronous calls, wait for a response:
+ *
+ * response.join() or response.get();
+ *
+ *
+ *
+ * For asynchronous calls, consume the response future.
+ *
+ *
+ * If there was a problem with the send or receive, the returned future
+ * completes exceptionally.
+ *
+ *
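+ *
+ * A minimal sketch (illustrative only, assuming an existing HttpClient
+ * instance named client and an illustrative 5000 ms timeout):
+ *
+ *   HttpRequest req = new DefaultFullHttpRequest(HTTP_1_1, HEAD, "/");
+ *   CompletableFuture<FullHttpResponse> future = client.runRequest(req, 5000);
+ *   FullHttpResponse resp = future.join();
+ *   try {
+ *       // use resp.status(), resp.headers(), resp.content()
+ *   } finally {
+ *       ReferenceCountUtil.release(resp);
+ *   }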
*
- * 1. Get a Channel.
- * Channel channel = client.getChannel(timeoutMs);
- * 2. Create a ResponseHandler to handle a response.
- * ResponseHandler rhandler = new ResponseHandler(client, logger, channel);
- * Note that the ResponseHandler will release the Channel.
- * 3. Call runRequest to send the request.
- * client.runRequest(request, rhandler, channel);
- * 4. For synchronous calls, wait for a response:
- * rhandler.await(timeoutMs);
- * If there was a problem with the send or receive this call will throw a
- * Throwable with the relevant information. If successful the response
- * information can be extracted from the ResponseHandler.
- * ResponseHandler instances must be closed using the close() method. This
- * releases resources associated with the request/response dialog such as the
- * channel and the HttpResponse itself.
- *
- * TODO: asynchronous handler
*/
public class HttpClient {
@@ -84,8 +91,11 @@ public class HttpClient {
static final int DEFAULT_HANDSHAKE_TIMEOUT_MS = 3000;
static final int DEFAULT_MIN_POOL_SIZE = 2; // min pool size
- static final AttributeKey STATE_KEY =
- AttributeKey.valueOf("rqstate");
+ /* AttributeKey to attach a CompletableFuture to the Channel,
+ * allowing the HttpResponseHandler to signal completion.
+ */
+ public static final AttributeKey<CompletableFuture<FullHttpResponse>>
+ STATE_KEY = AttributeKey.valueOf("rqstate");
//private final FixedChannelPool pool;
private final ConnectionPool pool;
@@ -98,12 +108,6 @@ public class HttpClient {
private final int port;
private final String name;
- /*
- * Amount of time to wait for acquiring a channel before timing
- * out and possibly retrying
- */
- private final int acquireRetryIntervalMs;
-
/*
* Non-null if using SSL
*/
@@ -153,7 +157,9 @@ public static HttpClient createMinimalClient(String host,
true, /* minimal client */
DEFAULT_MAX_CONTENT_LENGTH,
DEFAULT_MAX_CHUNK_SIZE,
- sslCtx, handshakeTimeoutMs, name, logger);
+ sslCtx, handshakeTimeoutMs, name, logger,
+ 1, /* max connections */
+ 1 /* max pending connections */);
}
/**
@@ -197,7 +203,60 @@ public HttpClient(String host,
this(host, port, numThreads, connectionPoolMinSize,
inactivityPeriodSeconds, false /* not minimal */,
- maxContentLength, maxChunkSize, sslCtx, handshakeTimeoutMs, name, logger);
+ maxContentLength, maxChunkSize, sslCtx, handshakeTimeoutMs, name,
+ logger,
+ 100 /* max connections */,
+ 10_000 /* max pending connections */);
+ }
+
+ /**
+ * Creates a new HttpClient class capable of sending Netty HttpRequest
+ * instances and receiving replies. This is a concurrent, asynchronous
+ * interface capable of sending and receiving on multiple HTTP channels
+ * at the same time.
+ *
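+ * For illustration only (hypothetical values), a client limited to 10
+ * connections with up to 100 queued acquires:
+ *
+ *   HttpClient hc = new HttpClient("localhost", 8080, 0, 2, 0,
+ *                                  0, 0, null, 0, "example", logger,
+ *                                  10, 100);
+ *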
+ * @param host the hostname for the HTTP server
+ * @param port the port for the HTTP server
+ * @param numThreads the number of async threads to use for Netty
+ * notifications. If 0, a default value is used based on the number of
+ * cores
+ * @param connectionPoolMinSize the number of connections to keep in the
+ * pool and keep alive using a minimal HTTP request. If 0, none are kept
+ * alive
+ * @param inactivityPeriodSeconds the number of seconds to keep an
+ * inactive channel/connection before removing it. 0 means use the default,
+ * a negative number means there is no timeout and channels are not
+ * removed
+ * @param maxContentLength maximum size in bytes of requests/responses.
+ * If 0, a default value is used (32MB).
+ * @param maxChunkSize maximum size in bytes of chunked response messages.
+ * If 0, a default value is used (64KB).
+ * @param sslCtx if non-null, SSL context to use for connections.
+ * @param handshakeTimeoutMs if not zero, timeout to use for SSL handshake
+ * @param name A name to use in logging messages for this client.
+ * @param logger A logger to use for logging messages.
+ * @param maxConnections Maximum size of the connection pool
+ * @param maxPendingConnections The maximum number of pending acquires
+ * for the pool
+ */
+ public HttpClient(String host,
+ int port,
+ int numThreads,
+ int connectionPoolMinSize,
+ int inactivityPeriodSeconds,
+ int maxContentLength,
+ int maxChunkSize,
+ SslContext sslCtx,
+ int handshakeTimeoutMs,
+ String name,
+ Logger logger,
+ int maxConnections,
+ int maxPendingConnections) {
+
+ this(host, port, numThreads, connectionPoolMinSize,
+ inactivityPeriodSeconds, false /* not minimal */,
+ maxContentLength, maxChunkSize, sslCtx, handshakeTimeoutMs, name,
+ logger, maxConnections, maxPendingConnections);
}
/*
@@ -214,7 +273,9 @@ private HttpClient(String host,
SslContext sslCtx,
int handshakeTimeoutMs,
String name,
- Logger logger) {
+ Logger logger,
+ int maxConnections,
+ int maxPendingConnections) {
this.logger = logger;
this.sslCtx = sslCtx;
@@ -257,7 +318,9 @@ private HttpClient(String host,
pool = new ConnectionPool(b, poolHandler, logger,
isMinimalClient,
connectionPoolMinSize,
- inactivityPeriodSeconds);
+ inactivityPeriodSeconds,
+ maxConnections,
+ maxPendingConnections);
/*
* Don't do keepalive if min size is not set. That configuration
@@ -273,11 +336,6 @@ public boolean keepAlive(Channel ch) {
}
});
}
-
- /* TODO: eventually add this to Config? */
- acquireRetryIntervalMs = Integer.getInteger(
- "oracle.nosql.driver.acquire.retryinterval",
- 1000);
}
SslContext getSslContext() {
@@ -366,6 +424,10 @@ public int getFreeChannelCount() {
return pool.getFreeChannels();
}
+ public int getPendingChannelsCount() {
+ return pool.getPendingAcquires();
+ }
+
/* available for testing */
ConnectionPool getConnectionPool() {
return pool;
@@ -388,70 +450,20 @@ public void shutdown() {
syncUninterruptibly();
}
- public Channel getChannel(int timeoutMs)
- throws InterruptedException, ExecutionException, TimeoutException {
-
- long startMs = System.currentTimeMillis();
- long now = startMs;
- int retries = 0;
-
- while (true) {
- long msDiff = now - startMs;
-
- /* retry loop with at most (retryInterval) ms timeouts */
- long thisTimeoutMs = (timeoutMs - msDiff);
- if (thisTimeoutMs <= 0) {
- String msg = "Timed out trying to acquire channel";
- logInfo(logger, "HttpClient " + name + " " + msg);
- throw new TimeoutException(msg);
- }
- if (thisTimeoutMs > acquireRetryIntervalMs) {
- thisTimeoutMs = acquireRetryIntervalMs;
- }
- Future fut = pool.acquire();
- Channel retChan = null;
- try {
- retChan = fut.get(thisTimeoutMs, TimeUnit.MILLISECONDS);
- } catch (TimeoutException e) {
- if (retries == 0) {
- logFine(logger, "Timed out after " +
- (System.currentTimeMillis() - startMs) +
- "ms trying to acquire channel: retrying");
+ private CompletableFuture<Channel> getChannel() {
+ CompletableFuture<Channel> acquireFuture = new CompletableFuture<>();
+ pool.acquire().addListener((FutureListener<Channel>) channelFuture -> {
+ if (channelFuture.isSuccess()) {
+ Channel channel = channelFuture.getNow();
+ if (!acquireFuture.complete(channel)) {
+ /* future already completed release channel back to pool */
+ pool.release(channel);
}
- /* fall through */
+ } else {
+ acquireFuture.completeExceptionally(channelFuture.cause());
}
-
- /*
- * Ensure that the channel is in good shape. retChan is null
- * on a timeout exception from above; that path will retry.
- */
- if (retChan != null) {
- if (fut.isSuccess() && retChan.isActive()) {
- /*
- * Clear out any previous state. The channel should not
- * have any state associated with it, but this code is here
- * just in case it does.
- */
- if (retChan.attr(STATE_KEY).get() != null) {
- if (isFineEnabled(logger)) {
- logFine(logger,
- "HttpClient acquired a channel with " +
- "a still-active state: clearing.");
- }
- retChan.attr(STATE_KEY).set(null);
- }
- return retChan;
- }
- logFine(logger,
- "HttpClient " + name + ", acquired an inactive " +
- "channel, releasing it and retrying, reason: " +
- fut.cause());
- releaseChannel(retChan);
- }
- /* reset "now" and increment retries */
- now = System.currentTimeMillis();
- retries++;
- }
+ });
+ return acquireFuture;
}
public void releaseChannel(Channel channel) {
@@ -473,51 +485,113 @@ public void removeChannel(Channel channel) {
pool.removeChannel(channel);
}
-
/**
- * Sends an HttpRequest, setting up the ResponseHandler as the handler to
- * use for the (asynchronous) response.
+ * Sends an HttpRequest to the server.
*
- * @param request the request
- * @param handler the response handler
- * @param channel the Channel to use for the request/response
- *
- * @throws IOException if there is a network problem (bad channel). Such
- * exceptions can be retried.
+ * @param request HttpRequest
+ * @param timeoutMs Time to wait for the response from the server.
+ * Returned future completes with {@link TimeoutException}
+ * in case of timeout
+ * @return {@link CompletableFuture} holding the response from the server.
+ * @apiNote The caller must release the response by calling
+ * {@link FullHttpResponse#release()} or
+ * {@link ReferenceCountUtil#release(Object)}
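+ * For example (a sketch only, assuming an already built HttpRequest req
+ * and an illustrative 5000 ms timeout), consuming the future asynchronously
+ * and releasing the response when done:
+ *
+ *   runRequest(req, 5000).whenComplete((resp, err) -> {
+ *       try {
+ *           // handle resp when err is null, otherwise handle err
+ *       } finally {
+ *           ReferenceCountUtil.release(resp);
+ *       }
+ *   });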
*/
- public void runRequest(HttpRequest request,
- ResponseHandler handler,
- Channel channel)
-
- throws IOException {
+ public CompletableFuture<FullHttpResponse> runRequest(HttpRequest request,
+ int timeoutMs) {
+ CompletableFuture<FullHttpResponse> responseFuture =
+ new CompletableFuture<>();
+ long deadlineNs = System.nanoTime() +
+ TimeUnit.MILLISECONDS.toNanos(timeoutMs);
- /*
- * If the channel goes bad throw IOE to allow the caller to retry
- */
- if (!channel.isActive()) {
- String msg = "HttpClient " + name + ", runRequest, channel " +
- channel + " is not active: ";
- logWarning(logger, msg);
- throw new IOException(msg);
- }
+ /* Acquire a channel from the pool */
+ CompletableFuture<Channel> acquireFuture = getChannel();
- RequestState state = new RequestState(handler);
- channel.attr(STATE_KEY).set(state);
+ /* Set up a timeout on channel acquisition */
+ acquireFuture.orTimeout(timeoutMs, TimeUnit.MILLISECONDS);
- /*
- * Send the request. If the operation fails set the exception
- * on the ResponseHandler where it will be thrown synchronously to
- * users of that object. operationComplete will likely be called in
- * another thread.
+ /* When the acquire future completes exceptionally, release the request
+ * ByteBuf and complete the response future
*/
- channel.writeAndFlush(request).
- addListener((ChannelFutureListener) future -> {
- if (!future.isSuccess()) {
- /* handleException logs this exception */
- handler.handleException("HttpClient: send failed",
- future.cause());
+ acquireFuture.whenComplete((ch, err) -> {
+ if (err != null) {
+ ReferenceCountUtil.release(request);
+ /* Unwrap to check the real cause */
+ Throwable cause = err instanceof CompletionException ?
+ err.getCause() : err;
+ if (cause instanceof TimeoutException) {
+ final String msg = "Timed out trying to acquire channel";
+ responseFuture.completeExceptionally(
+ new CompletionException(new TimeoutException(msg)));
+ return;
+ }
+ /* Not a timeout: complete with the original cause */
+ responseFuture.completeExceptionally(cause);
+ }
+ });
+
+ /* send request on acquired channel */
+ acquireFuture.thenAccept(channel -> {
+ long remainingTimeoutNs = deadlineNs - System.nanoTime();
+ long remainingTimeoutMs = Math.max(1,
+ TimeUnit.NANOSECONDS.toMillis(remainingTimeoutNs));
+
+ /* Execute the request on the acquired channel */
+ CompletableFuture<FullHttpResponse> requestExecutionFuture =
+ runRequest(request, channel, remainingTimeoutMs);
+
+ /* When the request execution future completes (either
+ * successfully or exceptionally),
+ * complete the public responseFuture and ensure the channel
+ * is released back to the pool.
+ */
+ requestExecutionFuture.whenComplete((response, throwable) -> {
+ /* Always release the channel */
+ releaseChannel(channel);
+ if (throwable != null) {
+ responseFuture.completeExceptionally(throwable);
+ } else {
+ responseFuture.complete(response);
}
});
+ });
+ return responseFuture;
+ }
+
+ /**
+ * Sends an HttpRequest to the server on a given netty channel.
+ *
+ * @param request HttpRequest
+ * @param channel Netty channel
+ * @param timeoutMs Time to wait for the response from the server.
+ * Returned future completes with {@link TimeoutException}
+ * in case of timeout
+ * @return {@link CompletableFuture} holding the response from the server.
+ * @apiNote The caller must release the response by calling
+ * {@link FullHttpResponse#release()} or
+ * {@link ReferenceCountUtil#release(Object)}
+ */
+ public CompletableFuture<FullHttpResponse> runRequest(HttpRequest request,
+ Channel channel,
+ long timeoutMs) {
+ CompletableFuture<FullHttpResponse>
+ responseFuture = new CompletableFuture<>();
+ /* Attach the CompletableFuture to the channel's attributes */
+ channel.attr(STATE_KEY).set(responseFuture);
+
+ /* Add timeout handler to the pipeline */
+ channel.pipeline().addFirst(
+ new ReadTimeoutHandler(timeoutMs, TimeUnit.MILLISECONDS));
+
+ /* Write the request to the channel and flush it */
+ channel.writeAndFlush(request)
+ .addListener((ChannelFutureListener) writeFuture -> {
+ if (!writeFuture.isSuccess()) {
+ /* If write fails, complete the future exceptionally */
+ channel.attr(STATE_KEY).set(null);
+ responseFuture.completeExceptionally(writeFuture.cause());
+ }
+ });
+ return responseFuture;
}
/**
@@ -525,8 +599,7 @@ public void runRequest(HttpRequest request,
*/
boolean doKeepAlive(Channel ch) {
final int keepAliveTimeout = 3000; /* ms */
- ResponseHandler responseHandler =
- new ResponseHandler(this, logger, ch);
+ FullHttpResponse response = null;
try {
final HttpRequest request =
new DefaultFullHttpRequest(HTTP_1_1, HEAD, "/");
@@ -536,19 +609,14 @@ boolean doKeepAlive(Channel ch) {
* other server may reject them and close the connection
*/
request.headers().add(HOST, host);
- runRequest(request, responseHandler, ch);
- boolean isTimeout = responseHandler.await(keepAliveTimeout);
- if (isTimeout) {
- logFine(logger,
- "Timeout on keepalive HEAD request on channel " + ch);
- return false;
- }
+ response = ConcurrentUtil.awaitFuture(
+ runRequest(request, ch, keepAliveTimeout));
/*
* LBaaS will return a non-200 status but that is expected as the
* path "/" does not map to the service. This is ok because all that
* matters is that the connection remain alive.
*/
- String conn = responseHandler.getHeaders().get(CONNECTION);
+ String conn = response.headers().get(CONNECTION);
if (conn == null || !"keep-alive".equalsIgnoreCase(conn)) {
logFine(logger,
"Keepalive HEAD request did not return keep-alive " +
@@ -556,10 +624,14 @@ boolean doKeepAlive(Channel ch) {
}
return true;
- } catch (Throwable t) {
- logFine(logger, "Exception sending HTTP HEAD: " + t);
+ } catch (Throwable t) {
+ String msg = String.format(
+ "Exception sending keepalive on [channel:%s] error:%s",
+ ch.id(), t.getMessage());
+ logFine(logger, msg);
} finally {
- responseHandler.releaseResponse();
+ /* Release response */
+ ReferenceCountUtil.release(response);
}
/* something went wrong, caller is responsible for disposition */
return false;
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java
index 649d851a..a41c8087 100644
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java
+++ b/driver/src/main/java/oracle/nosql/driver/httpclient/HttpClientHandler.java
@@ -7,18 +7,23 @@
package oracle.nosql.driver.httpclient;
+import static oracle.nosql.driver.httpclient.HttpClient.STATE_KEY;
import static oracle.nosql.driver.util.HttpConstants.REQUEST_ID_HEADER;
import static oracle.nosql.driver.util.LogUtil.isFineEnabled;
import static oracle.nosql.driver.util.LogUtil.logFine;
import static oracle.nosql.driver.util.LogUtil.logWarning;
import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeoutException;
import java.util.logging.Logger;
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.timeout.ReadTimeoutException;
+import io.netty.handler.timeout.ReadTimeoutHandler;
/**
*
@@ -34,8 +39,8 @@ public class HttpClientHandler extends ChannelInboundHandlerAdapter {
@Override
public void channelRead(ChannelHandlerContext ctx, Object msg) {
- final RequestState state =
- ctx.channel().attr(HttpClient.STATE_KEY).get();
+ final CompletableFuture<FullHttpResponse> responseFuture =
+ ctx.channel().attr(STATE_KEY).getAndSet(null);
/*
* TODO/think about:
@@ -44,10 +49,15 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) {
* o redirects
*/
+ /* Remove timeout handler upon response arrival */
+ if (ctx.pipeline().get(ReadTimeoutHandler.class) != null) {
+ ctx.pipeline().remove(ReadTimeoutHandler.class);
+ }
+
if (msg instanceof FullHttpResponse) {
FullHttpResponse fhr = (FullHttpResponse) msg;
- if (state == null) {
+ if (responseFuture == null) {
/*
* This message came in after the client was done processing
* a request in a different thread.
@@ -65,14 +75,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) {
fhr.release();
return;
}
-
- state.setResponse(fhr);
-
- /*
- * Notify the response handler
- */
- state.getHandler().receive(state);
-
+ responseFuture.complete(fhr);
return;
}
logWarning(logger,
@@ -82,24 +85,31 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) {
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
- final RequestState state =
- ctx.channel().attr(HttpClient.STATE_KEY).get();
- if (state != null) {
+        final CompletableFuture<FullHttpResponse> responseFuture =
+            ctx.channel().attr(STATE_KEY).getAndSet(null);
+ if (responseFuture != null) {
/* handleException logs */
- state.getHandler().handleException("HttpClientHandler read failed",
- cause);
+ logFine(logger, "HttpClientHandler read failed, cause: " + cause);
+ Throwable err = cause;
+ if (err instanceof ReadTimeoutException) {
+ err = new TimeoutException("Request timed out while waiting "
+ + "for the response from the server");
+ }
+ responseFuture.completeExceptionally(err);
}
ctx.close();
}
@Override
- public void channelInactive(ChannelHandlerContext ctx) throws Exception {
- final RequestState state =
- ctx.channel().attr(HttpClient.STATE_KEY).get();
+ public void channelInactive(ChannelHandlerContext ctx) {
+        final CompletableFuture<FullHttpResponse> responseFuture =
+            ctx.channel().attr(STATE_KEY).getAndSet(null);
/* handleException logs */
- if (state != null) {
+ if (responseFuture != null && !responseFuture.isDone()) {
String msg = "Channel is inactive: " + ctx.channel();
- state.getHandler().handleException(msg, new IOException(msg));
+ Throwable cause = new IOException(msg);
+ logFine(logger, msg + ", cause: " + cause);
+ responseFuture.completeExceptionally(cause);
}
/* should the context be closed? */
ctx.close();
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/RequestState.java b/driver/src/main/java/oracle/nosql/driver/httpclient/RequestState.java
deleted file mode 100644
index 185399d5..00000000
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/RequestState.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*-
- * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
- *
- * Licensed under the Universal Permissive License v 1.0 as shown at
- * https://oss.oracle.com/licenses/upl/
- */
-
-package oracle.nosql.driver.httpclient;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.handler.codec.http.FullHttpResponse;
-import io.netty.handler.codec.http.HttpHeaders;
-import io.netty.handler.codec.http.HttpResponseStatus;
-
-/**
- * An instance of this class is created when a request is sent and is used to
- * collect response state. The instance is attached to a Channel's attribute
- * map, which means that this will work for HTTP/1.1 where channels are not
- * multiplexed, but will need to change for HTTP/2.
- *
- * This class is not thread-safe but is used in a safe, single-threaded manner
- * mapped 1:1 with a channel associated with a single HTTP request/response
- * cycle.
- *
- * At this time this object does not aggregate chunks of content into a single
- * buffer. It is expected that this is done using an HttpContentAggregator in
- * the pipeline and is only called with a FullHttpResponse. If aggregation is
- * desired here it can be added using a CompositeByteBuf and calls to add
- * content incrementally.
- */
-class RequestState {
-
- private final ResponseHandler handler;
- private FullHttpResponse response;
-
- RequestState(ResponseHandler handler) {
- this.handler = handler;
- }
-
- ResponseHandler getHandler() {
- return handler;
- }
-
- HttpResponseStatus getStatus() {
- if (response != null) {
- return response.status();
- }
- return null;
- }
-
- HttpHeaders getHeaders() {
- if (response != null) {
- return response.headers();
- }
- return null;
- }
-
- int getContentSize() {
- ByteBuf buf = getBuf();
- if (buf != null) {
- return buf.readableBytes();
- }
- return -1;
- }
-
- ByteBuf getBuf() {
- if (response != null) {
- return response.content();
- }
- return null;
- }
-
- void setResponse(FullHttpResponse response) {
- this.response = response;
- }
-
- FullHttpResponse getResponse() {
- return response;
- }
-}
diff --git a/driver/src/main/java/oracle/nosql/driver/httpclient/ResponseHandler.java b/driver/src/main/java/oracle/nosql/driver/httpclient/ResponseHandler.java
deleted file mode 100644
index 12b38216..00000000
--- a/driver/src/main/java/oracle/nosql/driver/httpclient/ResponseHandler.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*-
- * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
- *
- * Licensed under the Universal Permissive License v 1.0 as shown at
- * https://oss.oracle.com/licenses/upl/
- */
-
-package oracle.nosql.driver.httpclient;
-
-import static oracle.nosql.driver.util.LogUtil.logFine;
-import static oracle.nosql.driver.util.HttpConstants.REQUEST_ID_HEADER;
-
-import java.io.Closeable;
-import java.net.ProtocolException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.logging.Logger;
-import javax.net.ssl.SSLException;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
-import io.netty.handler.codec.http.HttpHeaders;
-import io.netty.handler.codec.http.HttpResponseStatus;
-import io.netty.util.ReferenceCountUtil;
-
-/**
- * This class allows for asynchronous or synchronous request operation.
- * An instance is passed when sending a request. The caller can handle the
- * response asynchronously by overriding the responseReceived() method, or
- * synchronously by using the default implementation and waiting for the
- * response.
- *
- * Instances of this class must be closed using close().
- *
- * TODO: examples of both sync and async usage
- */
-public class ResponseHandler implements Closeable {
-
- private HttpResponseStatus status;
- private HttpHeaders headers;
- private ByteBuf content;
- private RequestState state;
- private final HttpClient httpClient;
- private final Channel channel;
- private final String requestId;
-
- /* logger may be null */
- private final Logger logger;
-
- /* this is set if there is an exception in send or receive */
- private Throwable cause;
-
- /* OK to retry: affects logic when there are specific protocol errors */
- private final boolean allowRetry;
-
- /*
- * the latch counts down when the response is received. It's only needed
- * in synchronous mode
- */
- private final CountDownLatch latch;
-
- public ResponseHandler(final HttpClient httpClient,
- final Logger logger,
- final Channel channel) {
- this(httpClient, logger, channel, null, false);
- }
-
- public ResponseHandler(final HttpClient httpClient,
- final Logger logger,
- final Channel channel,
- final String requestId,
- boolean allowRetry) {
- this.httpClient = httpClient;
- this.logger = logger;
- this.channel = channel;
- this.requestId = requestId;
- this.allowRetry = allowRetry;
-
- /*
- * TODO: this won't be needed for an async client
- */
- latch = new CountDownLatch(1);
- }
-
- /**
- * An exception occurred. Set cause and count down the latch to wake
- * up any waiters. This is synchronized because the call may come from
- * a different thread.
- */
- public void handleException(String msg, Throwable th) {
-
- synchronized(this) {
- this.cause = th;
- if (th instanceof SSLException) {
- /* disconnect channel to re-create channel and engine */
- channel.disconnect();
- }
- latch.countDown();
- }
- logFine(logger, msg + ", cause: " + th);
- }
-
- /**
- * The full response has been received. Users can override this method
- * to do full async operation. Synchronous users will wait for the latch
- * and get the response objects from this class.
- */
- public void responseReceived(HttpResponseStatus rStatus,
- HttpHeaders rHeaders,
- ByteBuf rContent) {
- status = rStatus;
- headers = rHeaders;
- content = rContent;
- }
-
- /**
- * Wait for the latch to count down. This can happen on a successful
- * receive operation or an exception that occurs during send or receive.
- */
- public boolean await(int milliSeconds) throws Throwable {
-
- boolean ret = !latch.await(milliSeconds, TimeUnit.MILLISECONDS);
-
- synchronized(this) {
- if (cause != null) {
- throw cause;
- }
- }
- return ret;
- }
-
- /**
- * Gets the status, or null if the operation has not yet completed
- */
- public HttpResponseStatus getStatus() {
- return status;
- }
-
- /**
- * Gets the headers, or null if the operation has not yet completed
- */
- public HttpHeaders getHeaders() {
- return headers;
- }
-
- /**
- * Gets the content, or null if the operation has not yet completed
- */
- public ByteBuf getContent() {
- return content;
- }
-
- /**
- * Gets the Throwable if an exception has occurred during send or
- * receive
- */
- public Throwable getCause() {
- return cause;
- }
-
- /**
- * Internal close that does not release the channel. This is used
- * by keepalive HEAD requests
- */
- void releaseResponse() {
- if (state != null) {
- if (state.getResponse() != null) {
- ReferenceCountUtil.release(state.getResponse());
- }
- }
- }
-
- @Override
- public void close() {
- if (channel != null) {
- httpClient.releaseChannel(channel);
- }
-
- /*
- * Release the response
- */
- releaseResponse();
- }
-
- /*
- * TODO: error responses with and without status
- */
-
- /*
- * Internal receive that calls the public method and counts down the latch.
- * Use try/finally in case there is a throw in the receive.
- */
- void receive(RequestState requestState) {
- /*
- * Check the request id in response's header, discards this response
- * if it is not for the request.
- */
- if (requestId != null) {
- String resReqId = requestState.getHeaders().get(REQUEST_ID_HEADER);
- if (resReqId == null || !resReqId.equals(requestId)) {
- logFine(logger,
- "Expected response for request " + requestId +
- ", but got response for request " + resReqId +
- ": discarding response");
- if (resReqId == null) {
- logFine(logger, "Headers for discarded response: " +
- requestState.getHeaders());
- if (this.allowRetry) {
- this.cause = new ProtocolException(
- "Received invalid response with no requestId");
- latch.countDown();
- }
- }
- if (requestState.getResponse() != null) {
- ReferenceCountUtil.release(requestState.getResponse());
- }
- return;
- }
- }
-
- /*
- * We got a valid message: don't accept any more for this handler.
- * This logic may change if we enable full async and allow multiple
- * messages to be processed by the same channel for the same client.
- * This clears the response handler from this channel so that any
- * additional messages on this channel will be properly discarded.
- */
- channel.attr(HttpClient.STATE_KEY).set(null);
-
- state = requestState;
- try {
- responseReceived(state.getStatus(),
- state.getHeaders(),
- state.getBuf());
- } finally {
- latch.countDown();
- }
- }
-}
diff --git a/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java b/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java
index bc2f76ec..81817d36 100644
--- a/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java
+++ b/driver/src/main/java/oracle/nosql/driver/iam/SignatureProvider.java
@@ -26,6 +26,8 @@
import java.util.Date;
import java.util.Timer;
import java.util.TimerTask;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -38,6 +40,7 @@
import oracle.nosql.driver.ops.Request;
import io.netty.handler.codec.http.HttpHeaders;
+import oracle.nosql.driver.util.ConcurrentUtil;
/**
* Cloud service only.
@@ -159,6 +162,7 @@ public class SignatureProvider
private String serviceHost;
private Region region;
private Logger logger;
+ private final ReentrantLock lock = new ReentrantLock();
/**
* A callback interface called when the signature is refreshed. This
@@ -858,16 +862,24 @@ public SignatureProvider(AuthenticationProfileProvider profileProvider,
@Override
public String getAuthorizationString(Request request) {
+ return ConcurrentUtil.awaitFuture(getAuthorizationStringAsync(request));
+ }
+
+ @Override
+    public CompletableFuture<String> getAuthorizationStringAsync(
+ Request request) {
if (serviceHost == null) {
- throw new IllegalArgumentException(
- "Unable to find service host, use setServiceHost " +
- "to load from NoSQLHandleConfig");
- }
- SignatureDetails sigDetails = getSignatureDetails(request);
- if (sigDetails != null) {
- return sigDetails.getSignatureHeader();
+            return CompletableFuture.failedFuture(new IllegalArgumentException(
+ "Unable to find service host, use setServiceHost " +
+ "to load from NoSQLHandleConfig"));
}
- return null;
+
+ return getSignatureDetails(request).thenApply(sigDetails -> {
+ if (sigDetails != null) {
+ return sigDetails.getSignatureHeader();
+ }
+ return null;
+ });
}
@Override
@@ -876,44 +888,62 @@ public void setRequiredHeaders(String authString,
HttpHeaders headers,
byte[] content) {
- SignatureDetails sigDetails = (content != null) ?
- getSignatureWithContent(request, headers, content):
- getSignatureDetails(request);
- if (sigDetails == null) {
- return;
- }
- headers.add(AUTHORIZATION, sigDetails.getSignatureHeader());
- headers.add(DATE, sigDetails.getDate());
+ ConcurrentUtil.awaitFuture(
+ setRequiredHeadersAsync(authString, request, headers, content));
+ }
- final String token = getDelegationToken(request);
- if (token != null) {
- headers.add(OBO_TOKEN_HEADER, token);
- }
- String compartment = request.getCompartment();
- if (compartment == null) {
- /*
- * If request doesn't has compartment id, set the tenant id as the
- * default compartment, which is the root compartment in IAM if
- * using user principal. If using an instance principal this
- * value is null.
- */
- compartment = getTenantOCID();
- }
+ @Override
+    public CompletableFuture<Void> setRequiredHeadersAsync(String authString,
+ Request request,
+ HttpHeaders headers,
+ byte[] content) {
- if (compartment != null) {
- headers.add(REQUEST_COMPARTMENT_ID, compartment);
+        CompletableFuture<SignatureDetails> sigDetailsFuture;
+ if (content != null) {
+ sigDetailsFuture = getSignatureWithContent(
+ request, headers, content);
} else {
- throw new IllegalArgumentException(
- "Compartment is null. When authenticating using an " +
- "Instance Principal the compartment for the operation " +
- "must be specified.");
+ sigDetailsFuture = getSignatureDetails(request);
}
+
+ return sigDetailsFuture.thenAccept(sigDetails -> {
+ if (sigDetails != null) {
+ headers.add(AUTHORIZATION, sigDetails.getSignatureHeader());
+ headers.add(DATE, sigDetails.getDate());
+
+ final String token = getDelegationToken(request);
+ if (token != null) {
+ headers.add(OBO_TOKEN_HEADER, token);
+ }
+ String compartment = request.getCompartment();
+ if (compartment == null) {
+ /*
+                 * If the request doesn't have a compartment id, set the
+                 * tenant id as the default compartment, which is the root
+                 * compartment in IAM if using a user principal. If using an
+                 * instance principal this value is null.
+ */
+ compartment = getTenantOCID();
+ }
+
+ if (compartment != null) {
+ headers.add(REQUEST_COMPARTMENT_ID, compartment);
+ } else {
+ throw new IllegalArgumentException(
+ "Compartment is null. When authenticating using an " +
+ "Instance Principal the compartment for the operation " +
+ "must be specified.");
+ }
+ }
+ });
}
@Override
- public synchronized void flushCache() {
- currentSigDetails = null;
- refreshSigDetails = null;
+ public void flushCache() {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
+ currentSigDetails = null;
+ refreshSigDetails = null;
+ });
}
/**
@@ -978,7 +1008,10 @@ public SignatureProvider prepare(NoSQLHandleConfig config) {
}
/* creates and caches a signature as warm-up */
- getSignatureDetailsForCache(false);
+ getSignatureDetailsInternal(false, /* isRefresh */
+ null, /* request */
+ null, /* headers */
+ null /* content */);
return this;
}
@@ -1040,115 +1073,129 @@ private void logMessage(Level level, String msg) {
}
}
- private SignatureDetails getSignatureDetails(Request request) {
+    private CompletableFuture<SignatureDetails>
+ getSignatureDetails(Request request) {
SignatureDetails sigDetails =
(request.getIsRefresh() ? refreshSigDetails : currentSigDetails);
if (sigDetails != null) {
- return sigDetails;
+ return CompletableFuture.completedFuture(sigDetails);
}
if (request.getIsRefresh()) {
/* try current details before failing */
sigDetails = currentSigDetails;
if (sigDetails != null) {
- return sigDetails;
+ return CompletableFuture.completedFuture(sigDetails);
}
}
return getSignatureDetailsForCache(false);
}
- private SignatureDetails getSignatureWithContent(Request request,
- HttpHeaders headers,
- byte[] content) {
- return getSignatureDetailsInternal(false, request, headers, content);
+    private CompletableFuture<SignatureDetails>
+ getSignatureWithContent(Request request,
+ HttpHeaders headers,
+ byte[] content) {
+ /* TODO: supplyAsync runs in JVM common fork-join pool.
+ * Do we need a separate executor?
+ */
+ return CompletableFuture.supplyAsync(() ->
+ getSignatureDetailsInternal(false, request, headers, content));
}
- synchronized SignatureDetails
- getSignatureDetailsForCache(boolean isRefresh) {
- return getSignatureDetailsInternal(isRefresh,
- null /* request */,
- null /* headers */,
- null /* content */);
+    private CompletableFuture<SignatureDetails>
+ getSignatureDetailsForCache(boolean isRefresh) {
+ /* TODO: supplyAsync runs in JVM common fork-join pool.
+ * Do we need a separate executor?
+ */
+ return CompletableFuture.supplyAsync(() ->
+ getSignatureDetailsInternal(isRefresh,
+ null /* request */,
+ null /* headers */,
+ null /* content */)
+ );
}
/* visible for testing */
- synchronized SignatureDetails
+ SignatureDetails
getSignatureDetailsInternal(boolean isRefresh,
Request request,
HttpHeaders headers,
byte[] content) {
- /*
- * add one minute to the current time, so that any caching is
- * effective over a more valid time period.
- */
- long nowPlus = System.currentTimeMillis() + 60_000L;
- String date = createFormatter().format(new Date(nowPlus));
- String keyId = provider.getKeyId();
-
- /*
- * Security token based providers may refresh the security token
- * and associated private key in above getKeyId() method, reload
- * private key to PrivateKeyProvider to avoid a mismatch, which
- * will create an invalid signature, cause authentication error.
- */
- if (provider instanceof SecurityTokenBasedProvider) {
- privateKeyProvider.reload(provider.getPrivateKey(),
- provider.getPassphraseCharacters());
- }
- String signature;
- try {
- signature = sign(signingContent(date, request, headers, content),
- privateKeyProvider.getKey());
- } catch (Exception e) {
- logMessage(Level.SEVERE, "Error signing request " + e.getMessage());
- return null;
- }
+ return ConcurrentUtil.synchronizedCall(lock, () -> {
+ /*
+ * add one minute to the current time, so that any caching is
+ * effective over a more valid time period.
+ */
+ long nowPlus = System.currentTimeMillis() + 60_000L;
+ String date = createFormatter().format(new Date(nowPlus));
+ String keyId = provider.getKeyId();
- String token = getDelegationToken(request);
- String signingHeader;
- if (content != null) {
- signingHeader = (token == null)
- ? SIGNING_HEADERS_WITH_CONTENT :
- SIGNING_HEADERS_WITH_CONTENT_OBO;
- } else {
- signingHeader = (token == null)
- ? SIGNING_HEADERS : SIGNING_HEADERS_WITH_OBO;
- }
+ /*
+             * Security token based providers may refresh the security token
+             * and the associated private key in the getKeyId() call above.
+             * Reload the private key into PrivateKeyProvider to avoid a
+             * mismatch, which would create an invalid signature and cause an
+             * authentication error.
+ */
+ if (provider instanceof SecurityTokenBasedProvider) {
+ privateKeyProvider.reload(provider.getPrivateKey(),
+ provider.getPassphraseCharacters());
+ }
+ String signature;
+ try {
+ signature = sign(signingContent(date, request, headers, content),
+ privateKeyProvider.getKey());
+ } catch (Exception e) {
+ logMessage(Level.SEVERE, "Error signing request " +
+ e.getMessage());
+ return null;
+ }
- String sigHeader = String.format(SIGNATURE_HEADER_FORMAT,
- signingHeader,
- keyId,
- RSA,
- signature,
- SINGATURE_VERSION);
- SignatureDetails sigDetails = new SignatureDetails(sigHeader, date);
+ String token = getDelegationToken(request);
+ String signingHeader;
+ if (content != null) {
+ signingHeader = (token == null)
+ ? SIGNING_HEADERS_WITH_CONTENT :
+ SIGNING_HEADERS_WITH_CONTENT_OBO;
+ } else {
+ signingHeader = (token == null)
+ ? SIGNING_HEADERS : SIGNING_HEADERS_WITH_OBO;
+ }
- /*
- * Don't cache the signature generated with content, which
- * needs to be associated with its request
- */
- if (content != null) {
- return sigDetails;
- }
+ String sigHeader = String.format(SIGNATURE_HEADER_FORMAT,
+ signingHeader,
+ keyId,
+ RSA,
+ signature,
+ SINGATURE_VERSION);
+ SignatureDetails sigDetails = new SignatureDetails(sigHeader, date);
- if (!isRefresh) {
- /*
- * if this is not a refresh, use the normal key and schedule a
- * refresh
- */
- currentSigDetails = sigDetails;
- scheduleRefresh();
- } else {
/*
- * If this is a refresh put the object in a temporary key.
- * The caller (the refresh task) will:
- * 1. perform callbacks if needed and when done,
- * 2. move the object to the normal key and schedule a refresh
+ * Don't cache the signature generated with content, which
+ * needs to be associated with its request
*/
- refreshSigDetails = sigDetails;
- }
- return sigDetails;
+ if (content != null) {
+ return sigDetails;
+ }
+
+ if (!isRefresh) {
+ /*
+ * if this is not a refresh, use the normal key and schedule a
+ * refresh
+ */
+ currentSigDetails = sigDetails;
+ scheduleRefresh();
+ } else {
+ /*
+ * If this is a refresh put the object in a temporary key.
+ * The caller (the refresh task) will:
+ * 1. perform callbacks if needed and when done,
+ * 2. move the object to the normal key and schedule a refresh
+ */
+ refreshSigDetails = sigDetails;
+ }
+ return sigDetails;
+ });
}
/*
@@ -1164,11 +1211,13 @@ private String getDelegationToken(Request req) {
req.getOboToken() : delegationToken;
}
- private synchronized void setRefreshKey() {
- if (refreshSigDetails != null) {
- currentSigDetails = refreshSigDetails;
- refreshSigDetails = null;
- }
+ private void setRefreshKey() {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
+ if (refreshSigDetails != null) {
+ currentSigDetails = refreshSigDetails;
+ refreshSigDetails = null;
+ }
+ });
}
private String signingContent(String date,
@@ -1264,7 +1313,10 @@ public void run() {
Exception lastException;
do {
try {
- getSignatureDetailsForCache(true);
+ getSignatureDetailsInternal(true, /* isRefresh */
+ null /* request */,
+ null /* headers */,
+ null /* content */);
handleRefreshCallback(refreshAheadMs);
return;
} catch (SecurityInfoNotReadyException se) {
diff --git a/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java b/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java
index e3f6d0f3..596098eb 100644
--- a/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java
+++ b/driver/src/main/java/oracle/nosql/driver/kv/StoreAccessTokenProvider.java
@@ -15,7 +15,9 @@
import java.util.Base64;
import java.util.Timer;
import java.util.TimerTask;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
import java.util.logging.Logger;
import oracle.nosql.driver.AuthorizationProvider;
@@ -24,6 +26,7 @@
import oracle.nosql.driver.NoSQLHandleConfig;
import oracle.nosql.driver.httpclient.HttpClient;
import oracle.nosql.driver.ops.Request;
+import oracle.nosql.driver.util.ConcurrentUtil;
import oracle.nosql.driver.util.HttpRequestUtil;
import oracle.nosql.driver.util.HttpRequestUtil.HttpResponse;
import oracle.nosql.driver.values.JsonUtils;
@@ -98,7 +101,7 @@ public class StoreAccessTokenProvider implements AuthorizationProvider {
/*
* Login token expiration time.
*/
- private long expirationTime;
+ private volatile long expirationTime;
/*
* A timer task used to periodically renew the login token.
@@ -153,7 +156,7 @@ public class StoreAccessTokenProvider implements AuthorizationProvider {
/*
* Whether this provider is closed
*/
- private boolean isClosed = false;
+ private volatile boolean isClosed = false;
/*
* SslContext used by http client
@@ -170,6 +173,7 @@ public class StoreAccessTokenProvider implements AuthorizationProvider {
*/
public static boolean disableSSLHook;
+ private final ReentrantLock lock = new ReentrantLock();
/**
* This method is used for access to a store without security enabled.
@@ -223,8 +227,9 @@ public StoreAccessTokenProvider(String userName,
*
* Bootstrap login using the provided credentials
*/
- public synchronized void bootstrapLogin(Request request) {
+ public void bootstrapLogin(Request request) {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
/* re-check the authString in case of a race */
if (!isSecure || isClosed || authString.get() != null) {
return;
@@ -277,7 +282,7 @@ public synchronized void bootstrapLogin(Request request) {
throw iae;
} catch (Exception e) {
throw new NoSQLException("Bootstrap login fail", e);
- }
+ }});
}
/**
@@ -285,26 +290,33 @@ public synchronized void bootstrapLogin(Request request) {
*/
@Override
public String getAuthorizationString(Request request) {
+ return ConcurrentUtil.awaitFuture(getAuthorizationStringAsync(request));
+ }
+
+ /**
+ * @hidden
+ */
+ @Override
+    public CompletableFuture<String>
+ getAuthorizationStringAsync(Request request) {
- if (!isSecure) {
- return null;
+ if (!isSecure || isClosed) {
+ return CompletableFuture.completedFuture(null);
}
- /*
- * Already close
- */
- if (isClosed) {
- return null;
+ String token = authString.get();
+ if (token != null) {
+ return CompletableFuture.completedFuture(token);
}
- /*
- * If there is no cached auth string, re-authentication to retrieve
- * the login token and generate the auth string.
+ /* Run bootstrap login asynchronously, reusing existing sync logic. */
+ /* TODO: supplyAsync runs in JVM common fork-join pool.
+ * Do we need a separate executor?
*/
- if (authString.get() == null) {
+ return CompletableFuture.supplyAsync(() -> {
bootstrapLogin(request);
- }
- return authString.get();
+ return authString.get();
+ });
}
/**
@@ -319,13 +331,29 @@ public void validateAuthString(String input) {
}
}
+ @Override
+ public void flushCache() {
+ ConcurrentUtil.synchronizedCall(lock,
+ () -> {
+ if (!isSecure || isClosed) {
+ return;
+ }
+ authString.set(null);
+ expirationTime = 0;
+ if (timer != null) {
+ timer.cancel();
+ timer = null;
+ }
+ });
+ }
+
/**
* Closes the provider, releasing resources such as a stored login
* token.
*/
@Override
- public synchronized void close() {
-
+ public void close() {
+ ConcurrentUtil.synchronizedCall(lock, () -> {
/*
* Don't do anything for non-secure case
*/
@@ -363,6 +391,7 @@ public synchronized void close() {
timer.cancel();
timer = null;
}
+ });
}
/**
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/QueryPublisher.java b/driver/src/main/java/oracle/nosql/driver/ops/QueryPublisher.java
new file mode 100644
index 00000000..f3fe752a
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/ops/QueryPublisher.java
@@ -0,0 +1,113 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.ops;
+
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
+import oracle.nosql.driver.values.MapValue;
+
+import java.util.List;
+import java.util.concurrent.Flow;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * A single-subscriber {@link Flow.Publisher} that streams query results as
+ * {@link MapValue} rows, fetching additional result batches on demand.
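+ *
+ * <p>A minimal usage sketch (illustrative only; assumes an async handle
+ * {@code asyncHandle} and a prepared {@link QueryRequest} {@code qreq}):
+ * <pre>{@code
+ * QueryPublisher pub = new QueryPublisher(asyncHandle, qreq);
+ * pub.subscribe(new Flow.Subscriber<MapValue>() {
+ *     private Flow.Subscription sub;
+ *     public void onSubscribe(Flow.Subscription s) {
+ *         sub = s; sub.request(10);
+ *     }
+ *     public void onNext(MapValue row) { sub.request(1); }
+ *     public void onError(Throwable t) { t.printStackTrace(); }
+ *     public void onComplete() { }
+ * });
+ * }</pre>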
+ */
+public class QueryPublisher implements Flow.Publisher<MapValue> {
+
+ private final NoSQLHandleAsyncImpl handle;
+ private final QueryRequest request;
+ private final AtomicBoolean subscribed = new AtomicBoolean(false);
+
+ public QueryPublisher(NoSQLHandleAsyncImpl handle, QueryRequest request) {
+ this.handle = handle;
+ this.request = request;
+ }
+
+ @Override
+    public void subscribe(Flow.Subscriber<? super MapValue> subscriber) {
+ /* only allow one subscriber */
+ if (!subscribed.compareAndSet(false, true)) {
+ subscriber.onSubscribe(new Flow.Subscription() {
+ @Override
+ public void request(long n) {
+ }
+ @Override
+ public void cancel() {
+ }
+ });
+ subscriber.onError(new IllegalStateException("already subscribed"));
+ return;
+ }
+
+ subscriber.onSubscribe(new Flow.Subscription() {
+ private final AtomicBoolean cancelled = new AtomicBoolean(false);
+ private final AtomicLong demand = new AtomicLong(0);
+ private int currentIndex = 0;
+            private List<MapValue> currentBatch = List.of();
+ /* first run triggered? */
+ private boolean started = false;
+
+ @Override
+ public void request(long n) {
+ if (n <= 0 || cancelled.get()) return;
+ demand.addAndGet(n);
+ fetchNext();
+ }
+
+ @Override
+ public void cancel() {
+ cancelled.set(true);
+ /* close the query request */
+ request.close();
+ }
+
+ private void fetchNext() {
+ if (cancelled.get()) return;
+
+ /* If batch exhausted, fetch next Result */
+ if (currentIndex >= currentBatch.size()) {
+ if (started && request.isDone()) {
+ /* close the query request */
+ request.close();
+ subscriber.onComplete();
+ return;
+ }
+ started = true;
+ handle.query(request).whenComplete((result, error) -> {
+ if (cancelled.get()) return;
+ if (error != null) {
+ request.close();
+ subscriber.onError(error);
+ } else {
+ currentBatch = result.getResults();
+ currentIndex = 0;
+ fetchNext(); /* continue with new batch */
+ }
+ });
+ return;
+ }
+
+ /* Emit items while demand > 0 and we still have rows */
+ while (demand.get() > 0
+ && currentIndex < currentBatch.size()
+ && !cancelled.get()) {
+ subscriber.onNext(currentBatch.get(currentIndex++));
+ demand.decrementAndGet();
+ }
+
+            /* If demand still positive but batch finished, fetch more */
+ if (demand.get() > 0
+ && currentIndex >= currentBatch.size()
+ && !cancelled.get()) {
+ fetchNext();
+ }
+ }
+ });
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/Request.java b/driver/src/main/java/oracle/nosql/driver/ops/Request.java
index 565036c8..e4908dd0 100644
--- a/driver/src/main/java/oracle/nosql/driver/ops/Request.java
+++ b/driver/src/main/java/oracle/nosql/driver/ops/Request.java
@@ -61,7 +61,7 @@ public abstract class Request {
/**
* @hidden
*/
- private long startNanos;
+ private volatile long startNanos;
/**
* @hidden
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java b/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java
index 45bf9361..ea332c14 100644
--- a/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java
+++ b/driver/src/main/java/oracle/nosql/driver/ops/SystemResult.java
@@ -10,6 +10,11 @@
import oracle.nosql.driver.NoSQLException;
import oracle.nosql.driver.NoSQLHandle;
import oracle.nosql.driver.RequestTimeoutException;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
/**
* On-premises only.
@@ -233,4 +238,84 @@ public void waitForCompletion(NoSQLHandle handle,
}
} while (!state.equals(State.COMPLETE));
}
+
+ /**
+ * Asynchronously waits for the operation to be complete.
+ * This is a polling style wait that delays for the specified number of
+ * milliseconds between each polling operation.
+ *
+ * This instance is modified with any changes in state.
+ *
+ * @param handle the Async NoSQLHandle to use
+ * @param waitMillis the total amount of time to wait, in milliseconds. This
+ * value must be non-zero and greater than delayMillis
+ * @param delayMillis the amount of time to wait between polling attempts,
+ * in milliseconds. If 0 it will default to 500.
+ *
+     * @return a {@link CompletableFuture} that completes successfully when
+     * the operation completes within waitMillis. It completes exceptionally
+     * with {@link IllegalArgumentException} if the parameters are not valid,
+     * with {@link RequestTimeoutException} if the operation does not
+     * complete within waitMillis, and with {@link NoSQLException} if the
+     * operation id is unknown or the operation has failed.
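+     *
+     * <p>A minimal illustrative sketch (assumes an async handle
+     * {@code asyncHandle} and a {@code SystemResult} {@code sres} returned
+     * by a previous system operation; the names are placeholders):
+     * <pre>{@code
+     * sres.waitForCompletionAsync(asyncHandle, 20_000, 500).join();
+     * }</pre>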
+ */
+    public CompletableFuture<Void> waitForCompletionAsync(
+ NoSQLHandleAsyncImpl handle, int waitMillis, int delayMillis) {
+
+ if (state.equals(State.COMPLETE)) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ final int DELAY_MS = 500;
+
+ final int delayMS = (delayMillis != 0 ? delayMillis : DELAY_MS);
+        if (waitMillis < delayMS) {
+ Throwable t = new IllegalArgumentException(
+ "Wait milliseconds must be a minimum of " +
+ DELAY_MS + " and greater than delay milliseconds");
+ return CompletableFuture.failedFuture(t);
+ }
+ final long startTime = System.currentTimeMillis();
+ SystemStatusRequest ds = new SystemStatusRequest()
+ .setOperationId(operationId);
+
+        final CompletableFuture<Void> resultFuture = new CompletableFuture<>();
+ final ScheduledExecutorService taskExecutor = handle.getTaskExecutor();
+
+ Runnable poll = new Runnable() {
+ @Override
+ public void run() {
+ final long curTime = System.currentTimeMillis();
+ if ((curTime - startTime) > waitMillis) {
+ Throwable t = new RequestTimeoutException(
+ waitMillis,
+ "Operation not completed within timeout: " +
+ statement);
+ resultFuture.completeExceptionally(t);
+ return;
+ }
+ handle.systemStatus(ds)
+ .whenComplete((res, ex) -> {
+ if (ex != null) {
+ resultFuture.completeExceptionally(ex);
+ return;
+ }
+ /* Update state */
+ resultString = res.resultString;
+ state = res.state;
+
+ if (state.equals(State.COMPLETE)) {
+ resultFuture.complete(null);
+ } else {
+ /* Schedule next poll */
+ taskExecutor.schedule(this, delayMS,
+ TimeUnit.MILLISECONDS);
+ }
+ });
+ }
+ };
+ /* Kick off the first poll immediately */
+ taskExecutor.execute(poll);
+ return resultFuture;
+ }
}
diff --git a/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java b/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java
index 428fec61..78fdf6b2 100644
--- a/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java
+++ b/driver/src/main/java/oracle/nosql/driver/ops/TableResult.java
@@ -12,8 +12,13 @@
import oracle.nosql.driver.NoSQLException;
import oracle.nosql.driver.NoSQLHandle;
import oracle.nosql.driver.RequestTimeoutException;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
import oracle.nosql.driver.ops.TableLimits.CapacityMode;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
/**
* TableResult is returned from {@link NoSQLHandle#getTable} and
* {@link NoSQLHandle#tableRequest} operations. It encapsulates the
@@ -784,6 +789,111 @@ public void waitForCompletion(NoSQLHandle handle,
}
}
+ /**
+ * Asynchronously waits for a table operation to complete. Table operations
+ * are asynchronous. This is a polling style wait that delays for
+ * the specified number of milliseconds between each polling operation.
+     * The returned future completes when the table reaches a terminal
+     * state, which is either {@link State#ACTIVE} or {@link State#DROPPED}.
+ *
+ * This instance must be the return value of a previous
+ * {@link NoSQLHandle#tableRequest} and contain a non-null operation id
+ * representing the in-progress operation unless the operation has
+ * already completed.
+ *
+ * This instance is modified with any change in table state or metadata.
+ *
+ * @param handle the Async NoSQLHandle to use
+ * @param waitMillis the total amount of time to wait, in milliseconds. This
+ * value must be non-zero and greater than delayMillis
+ * @param delayMillis the amount of time to wait between polling attempts,
+ * in milliseconds. If 0 it will default to 500.
+ *
+     * @return a {@link CompletableFuture} that completes successfully when
+     * the operation completes within waitMillis. It completes exceptionally
+     * with {@link IllegalArgumentException} if the parameters are not valid
+     * and with {@link RequestTimeoutException} if the operation does not
+     * complete within waitMillis.
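+     *
+     * <p>A minimal illustrative sketch (assumes an async handle
+     * {@code asyncHandle} that exposes {@code tableRequest} and a table DDL
+     * {@link TableRequest} {@code ddlRequest}; the names are placeholders):
+     * <pre>{@code
+     * asyncHandle.tableRequest(ddlRequest)
+     *     .thenCompose(tres ->
+     *         tres.waitForCompletionAsync(asyncHandle, 60_000, 1_000))
+     *     .join();
+     * }</pre>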
+ */
+    public CompletableFuture<Void> waitForCompletionAsync(
+        NoSQLHandleAsyncImpl handle, int waitMillis, int delayMillis) {
+
+ if (isTerminal()) {
+ return CompletableFuture.completedFuture(null);
+ }
+
+ if (operationId == null) {
+ Throwable t = new IllegalArgumentException(
+ "Operation state must not be null");
+ return CompletableFuture.failedFuture(t);
+ }
+
+ /* TODO: try to share code with waitForState? */
+ final int DELAY_MS = 500;
+
+ final int delayMS = (delayMillis != 0 ? delayMillis : DELAY_MS);
+        if (waitMillis < delayMS) {
+ Throwable t = new IllegalArgumentException(
+ "Wait milliseconds must be a minimum of " +
+ DELAY_MS + " and greater than delay milliseconds");
+ return CompletableFuture.failedFuture(t);
+ }
+
+ final long startTime = System.currentTimeMillis();
+        final CompletableFuture<Void> resultFuture = new CompletableFuture<>();
+ final ScheduledExecutorService taskExecutor = handle.getTaskExecutor();
+
+ GetTableRequest getTable =
+ new GetTableRequest().setTableName(tableName).
+ setOperationId(operationId).setCompartment(
+ compartmentOrNamespace);
+
+ Runnable poll = new Runnable() {
+ @Override
+ public void run() {
+ long curTime = System.currentTimeMillis();
+ if ((curTime - startTime) > waitMillis) {
+ Throwable t = new RequestTimeoutException(
+ waitMillis,
+ "Operation not completed in expected time");
+ resultFuture.completeExceptionally(t);
+ return;
+ }
+ handle.getTable(getTable).whenComplete((res, ex) -> {
+ if (ex != null) {
+ resultFuture.completeExceptionally(ex);
+ return;
+ }
+ /*
+ * partial "copy" of possibly modified state. Don't modify
+ * operationId as that is what we are waiting to complete
+ */
+ state = res.getTableState();
+ limits = res.getTableLimits();
+ schema = res.getSchema();
+ matchETag = res.getMatchETag();
+ ddl = res.getDdl();
+ isFrozen = res.isFrozen();
+ isLocalReplicaInitialized = res.isLocalReplicaInitialized();
+ replicas = res.getReplicas();
+
+ if (isTerminal()) {
+ resultFuture.complete(null);
+ } else {
+ /* Schedule next poll */
+ taskExecutor.schedule(this, delayMS,
+ TimeUnit.MILLISECONDS);
+ }
+ });
+ }
+ };
+ /* Kick off the first poll immediately */
+ taskExecutor.execute(poll);
+ return resultFuture;
+ }
+
private boolean isTerminal() {
return state == State.ACTIVE || state == State.DROPPED;
}
diff --git a/driver/src/main/java/oracle/nosql/driver/package-info.java b/driver/src/main/java/oracle/nosql/driver/package-info.java
index 27557cb0..a3dff30b 100644
--- a/driver/src/main/java/oracle/nosql/driver/package-info.java
+++ b/driver/src/main/java/oracle/nosql/driver/package-info.java
@@ -4,6 +4,7 @@
* Licensed under the Universal Permissive License v 1.0 as shown at
* https://oss.oracle.com/licenses/upl/
*/
+ /* TODO: need to add NoSQLHandleAsync? */
/**
* Contains the public API for using the Oracle NoSQL Database
* as well as configuration and common parameter classes used in
diff --git a/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java b/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java
index 615e0ac5..4fe29f62 100644
--- a/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java
+++ b/driver/src/main/java/oracle/nosql/driver/query/ReceiveIter.java
@@ -12,11 +12,14 @@
import java.util.HashSet;
import java.util.List;
import java.util.TreeSet;
+import java.util.concurrent.CompletableFuture;
import oracle.nosql.driver.NoSQLException;
import oracle.nosql.driver.RetryableException;
import oracle.nosql.driver.ops.QueryRequest;
import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.util.ConcurrentUtil;
import oracle.nosql.driver.values.BinaryValue;
import oracle.nosql.driver.values.FieldValue;
import oracle.nosql.driver.values.MapValue;
@@ -592,7 +595,8 @@ private QueryResult execute(RuntimeControlBlock rcb,
NoSQLException e = null;
QueryResult result = null;
try {
- result = (QueryResult)rcb.getClient().execute(reqCopy);
+            CompletableFuture<Result> fut = rcb.getClient().execute(reqCopy);
+ result = (QueryResult) ConcurrentUtil.awaitFuture(fut);
} catch (NoSQLException qe) {
e = qe;
}
diff --git a/driver/src/main/java/oracle/nosql/driver/util/ConcurrentUtil.java b/driver/src/main/java/oracle/nosql/driver/util/ConcurrentUtil.java
new file mode 100644
index 00000000..03ef15d6
--- /dev/null
+++ b/driver/src/main/java/oracle/nosql/driver/util/ConcurrentUtil.java
@@ -0,0 +1,91 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver.util;
+
+import oracle.nosql.driver.NoSQLException;
+
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Supplier;
+
+public class ConcurrentUtil {
+ /**
+     * Acquires the lock, invokes the supplier, and returns its result.
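+     *
+     * <p>Illustrative use (the lock and field names are hypothetical):
+     * <pre>{@code
+     * String v = ConcurrentUtil.synchronizedCall(lock, () -> cachedValue);
+     * }</pre>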
+ */
+    public static <T> T synchronizedCall(ReentrantLock lock,
+                                         Supplier<T> s) {
+ lock.lock();
+ try {
+ return s.get();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+     * Acquires the lock and runs the given action.
+ */
+ public static void synchronizedCall(ReentrantLock lock,
+ Runnable r) {
+ lock.lock();
+ try {
+ r.run();
+ } finally {
+ lock.unlock();
+ }
+ }
+
+ /**
+     * Blocks until the future completes and returns its value. An
+     * {@link ExecutionException} cause is unwrapped and rethrown if it is a
+     * RuntimeException, otherwise it is wrapped in a {@link NoSQLException}.
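+     *
+     * <p>Illustrative use, bridging an async call back to a synchronous API
+     * (the provider and request names are hypothetical):
+     * <pre>{@code
+     * CompletableFuture<String> f =
+     *     provider.getAuthorizationStringAsync(req);
+     * String auth = ConcurrentUtil.awaitFuture(f);
+     * }</pre>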
+ */
+    public static <T> T awaitFuture(CompletableFuture<T> future) {
+ try {
+ return future.get();
+ } catch (ExecutionException e) {
+ final Throwable cause = e.getCause();
+ appendCurrentStack(cause);
+ if (cause instanceof RuntimeException) {
+ throw ((RuntimeException) cause);
+ }
+ throw new NoSQLException("ExecutionException: "
+ + e.getMessage(), e.getCause());
+ } catch (InterruptedException ie) {
+ throw new NoSQLException("Request interrupted: "
+ + ie.getMessage(), ie);
+ }
+ }
+
+ /**
+     * Unwraps nested {@link CompletionException}s and returns the underlying
+     * cause, or the exception itself if it is not a CompletionException or
+     * has no cause.
+ */
+ public static Throwable unwrapCompletionException(Throwable t) {
+ Throwable actual = t;
+ while (true) {
+ if (!(actual instanceof CompletionException)
+ || (actual.getCause() == null)) {
+ return actual;
+ }
+ actual = actual.getCause();
+ }
+ }
+
+ private static void appendCurrentStack(Throwable exception) {
+ Objects.requireNonNull(exception, "exception");
+ final StackTraceElement[] existing = exception.getStackTrace();
+ final StackTraceElement[] current = new Throwable().getStackTrace();
+ final StackTraceElement[] updated =
+ new StackTraceElement[existing.length + current.length];
+ System.arraycopy(existing, 0, updated, 0, existing.length);
+ System.arraycopy(current, 0, updated, existing.length, current.length);
+ exception.setStackTrace(updated);
+ }
+}
diff --git a/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java b/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java
index d0ea97bb..62903523 100644
--- a/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java
+++ b/driver/src/main/java/oracle/nosql/driver/util/HttpRequestUtil.java
@@ -23,23 +23,22 @@
import java.net.URI;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
import java.util.logging.Logger;
import javax.net.ssl.SSLException;
+import io.netty.buffer.Unpooled;
import oracle.nosql.driver.RequestTimeoutException;
import oracle.nosql.driver.httpclient.HttpClient;
-import oracle.nosql.driver.httpclient.ResponseHandler;
import io.netty.buffer.ByteBuf;
-import io.netty.channel.Channel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpHeaders;
import io.netty.handler.codec.http.HttpMethod;
-import io.netty.handler.codec.http.HttpResponseStatus;
/**
* Utility to issue HTTP request using {@link HttpClient}.
@@ -209,41 +208,36 @@ private static HttpResponse doRequest(HttpClient httpClient,
final long startTime = System.currentTimeMillis();
int numRetries = 0;
Throwable exception = null;
- HttpResponse res = null;
do {
if (numRetries > 0) {
logInfo(logger, "Client, doing retry: " + numRetries +
- (exception != null ? ", exception: " + exception : ""));
+ (exception != null ? ", exception: " + exception : ""));
}
- Channel channel = null;
- ResponseHandler responseHandler = null;
try {
- channel = httpClient.getChannel(timeoutMs);
- responseHandler =
- new ResponseHandler(httpClient, logger, channel);
-
FullHttpRequest request;
if (payload == null) {
request = buildRequest(uri, method, headers);
} else {
- request = buildRequest(
- uri, headers, method, payload, channel);
+ request = buildRequest(uri, headers, method, payload);
}
addRequiredHeaders(request);
logFine(logger, request.headers().toString());
- httpClient.runRequest(request, responseHandler, channel);
- if (responseHandler.await(timeoutMs)) {
- throw new TimeoutException("Request timed out after " +
- timeoutMs + " milliseconds");
- }
-
- final HttpResponseStatus status = responseHandler.getStatus();
- if (status == null) {
- throw new IllegalStateException("Invalid null response");
- }
- res = processResponse(status.code(),
- responseHandler.getContent());
+            CompletableFuture<HttpResponse> httpResponse =
+ httpClient.runRequest(request, timeoutMs)
+ .thenApply(fhr -> {
+ if (fhr.status() == null) {
+ throw new IllegalStateException(
+ "Invalid null response");
+ }
+ try {
+ final int code = fhr.status().code();
+ return processResponse(code, fhr.content());
+ } finally {
+ fhr.release();
+ }
+ });
+ HttpResponse res = httpResponse.get();
/*
* Retry upon status code larger than 500, in general,
@@ -251,50 +245,53 @@ private static HttpResponse doRequest(HttpClient httpClient,
*/
if (res.getStatusCode() >= 500) {
logFine(logger,
- "Remote server temporarily unavailable," +
- " status code " + res.getStatusCode() +
- " , response " + res.getOutput());
+ "Remote server temporarily unavailable," +
+ " status code " + res.getStatusCode() +
+ " , response " + res.getOutput());
delay();
++numRetries;
continue;
}
return res;
- } catch (RuntimeException e) {
- logFine(logger, "Client execute runtime exception: " +
- e.getMessage());
- throw e;
- } catch (IOException ioe) {
- String name = ioe.getClass().getName();
- logFine(logger, "Client execute IOException, name: " +
- name + ", message: " + ioe.getMessage());
- /*
- * An exception in the channel, e.g. the server may have
- * disconnected. Retry.
- */
- exception = ioe;
- ++numRetries;
- if (ioe instanceof SSLException) {
- /* disconnect the channel to force a new one */
- if (channel != null) {
+ } catch (ExecutionException ee) {
+ Throwable cause = ee.getCause();
+ if (cause instanceof IOException) {
+ IOException ioe = (IOException) cause;
+ String name = ioe.getClass().getName();
+ logFine(logger, "Client execute IOException, name: " +
+ name + ", message: " + ioe.getMessage());
+ /*
+ * An exception in the channel, e.g. the server may have
+ * disconnected. Retry.
+ */
+ exception = ioe;
+ ++numRetries;
+ if (ioe instanceof SSLException) {
+ /* disconnect the channel to force a new one */
+ /*if (channel != null) {
logFine(logger,
"Client disconnecting channel due to: " + ioe);
channel.disconnect();
+ }*/
+ //TODO what to do?
+ } else {
+ delay();
}
- } else {
- delay();
+ continue;
+ } else if (cause instanceof TimeoutException) {
+ throw new RuntimeException("Timeout exception: host=" +
+ httpClient.getHost() + " port=" +
+ httpClient.getPort() + " uri=" +
+ uri, cause);
}
- continue;
+ throw new RuntimeException("Unable to execute request: ", ee);
+
+ } catch (RuntimeException e) {
+ logFine(logger, "Client execute runtime exception: " +
+ e.getMessage());
+ throw e;
} catch (InterruptedException ie) {
- throw new RuntimeException(
- "Client interrupted exception: ", ie);
- } catch (ExecutionException ee) {
- throw new RuntimeException(
- "Unable to execute request: ", ee);
- } catch (TimeoutException te) {
- throw new RuntimeException("Timeout exception: host=" +
- httpClient.getHost() + " port=" +
- httpClient.getPort() + " uri=" +
- uri, te);
+ throw new RuntimeException("Client interrupted exception: ", ie);
} catch (Throwable t) {
/*
* this is likely an exception from Netty, perhaps a bad
@@ -308,10 +305,6 @@ private static HttpResponse doRequest(HttpClient httpClient,
delay();
++numRetries;
continue;
- } finally {
- if (responseHandler != null) {
- responseHandler.close();
- }
}
} while ((System.currentTimeMillis()- startTime) < timeoutMs);
@@ -333,10 +326,8 @@ private static FullHttpRequest buildRequest(String requestURI,
private static FullHttpRequest buildRequest(String requestURI,
HttpHeaders headers,
HttpMethod method,
- byte[] payload,
- Channel channel) {
- final ByteBuf buffer = channel.alloc().directBuffer();
- buffer.writeBytes(payload);
+ byte[] payload) {
+ final ByteBuf buffer = Unpooled.wrappedBuffer(payload);
final FullHttpRequest request =
new DefaultFullHttpRequest(HTTP_1_1, method, requestURI,
diff --git a/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java b/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java
index 25710c02..5aff93ae 100644
--- a/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java
+++ b/driver/src/main/java/oracle/nosql/driver/util/LogUtil.java
@@ -7,6 +7,8 @@
package oracle.nosql.driver.util;
+import java.io.PrintWriter;
+import java.io.StringWriter;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -67,4 +69,19 @@ public static void logTrace(Logger logger, String msg) {
public static boolean isLoggable(Logger logger, Level level) {
return (logger != null && logger.isLoggable(level));
}
+
+ /**
+     * Returns the stack trace of the given throwable as a string.
+     *
+     * @param t the exception, may be null
+     *
+     * @return the formatted stack trace, or null if t is null
+ */
+ public static String getStackTrace(Throwable t) {
+ if (t == null) {
+ return null;
+ }
+ final StringWriter sw = new StringWriter();
+ final PrintWriter pw = new PrintWriter(sw);
+ t.printStackTrace(pw);
+ return sw.toString();
+ }
}
diff --git a/driver/src/test/java/oracle/nosql/driver/BasicAsyncTest.java b/driver/src/test/java/oracle/nosql/driver/BasicAsyncTest.java
new file mode 100644
index 00000000..7da08411
--- /dev/null
+++ b/driver/src/test/java/oracle/nosql/driver/BasicAsyncTest.java
@@ -0,0 +1,202 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver;
+
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.ListTablesRequest;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.ops.TableUsageRequest;
+import oracle.nosql.driver.values.MapValue;
+import org.junit.Test;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+
+import static oracle.nosql.driver.util.BinaryProtocol.V4;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class BasicAsyncTest extends ProxyTestBase {
+
+ @Test
+ public void smokeTest() {
+
+ try {
+ MapValue key = new MapValue().put("id", 10);
+ MapValue value = new MapValue().put("id", 10).put("name", "jane");
+
+ /* drop a table */
+ tableOperationAsync(asyncHandle,
+ "drop table if exists testusers",
+ null)
+ .whenComplete((tres, err) -> {
+ assertNotNull(tres.getTableName());
+ assertNull(tres.getTableLimits());
+ })
+ .thenCompose(ignored -> {
+ /* drop again without if exists -- should throw */
+ return tableOperationAsync(asyncHandle,
+ "drop table testusers",
+ null)
+ .handle((tres, err) -> {
+ assertNotNull("operation should have thrown", err);
+ assertTrue(err instanceof CompletionException);
+ assertTrue("Expecting TableNotFoundException",
+ err.getCause() instanceof TableNotFoundException);
+ return null;
+ });
+ })
+ .thenCompose(ignored -> {
+ /* Create a table */
+ return tableOperationAsync(
+ asyncHandle,
+ "create table if not exists testusers(id integer, " +
+ "name string, primary key(id))",
+ new TableLimits(500, 500, 50))
+ .thenAccept(tres -> {
+ assertNotNull(tres);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ });
+ })
+ .thenCompose(ignored -> {
+ /* Create an index */
+ return tableOperationAsync(
+ asyncHandle,
+ "create index if not exists Name on testusers(name)",
+ null)
+ .thenAccept(tres -> {
+ assertNotNull(tres);
+ assertEquals(TableResult.State.ACTIVE, tres.getTableState());
+ });
+ })
+ .thenCompose(ignored -> {
+ /* list tables */
+ ListTablesRequest listTables = new ListTablesRequest();
+ return asyncHandle.listTables(listTables)
+ .thenApply(lres -> {
+ assertNotNull(lres);
+ /*
+ * the test cases don't yet clean up so there
+ * may be additional tables present, be
+ * flexible in this assertion.
+ */
+ assertTrue(lres.getTables().length >= 1);
+ assertNotNull(lres.toString());
+ return lres;
+ });
+ })
+ .thenCompose(ignored -> {
+ /* getTableUsage. It won't return much in test mode */
+ if (!onprem) {
+ TableUsageRequest gtu = new TableUsageRequest()
+ .setTableName("testusers").setLimit(2)
+ .setEndTime(System.currentTimeMillis());
+ return asyncHandle.getTableUsage(gtu)
+ .thenAccept(gtuRes -> {
+ assertNotNull(gtuRes);
+ assertNotNull(gtuRes.getUsageRecords());
+ });
+ }
+ return CompletableFuture.completedFuture(null);
+ })
+ .thenCompose(ignored -> {
+ /* PUT */
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusers");
+ return asyncHandle.put(putRequest)
+ .thenAccept(res -> {
+ assertNotNull(res.getVersion());
+ assertWriteKB(res);
+ });
+ })
+ .thenCompose(ignored -> {
+ /* GET */
+ GetRequest getRequest = new GetRequest()
+ .setKey(key)
+ .setTableName("testusers");
+
+ return asyncHandle.get(getRequest)
+ .whenComplete((gres, err) -> {
+ assertNotNull(gres);
+ assertNotNull(gres.getJsonValue());
+ assertEquals("jane",
+ gres.getValue().getString("name"));
+ assertReadKB(gres);
+ });
+ })
+ .thenCompose(ignored -> {
+ /* DELETE */
+ DeleteRequest deleteRequest = new DeleteRequest()
+ .setKey(key)
+ .setTableName("testusers")
+ .setReturnRow(true);
+ return asyncHandle.delete(deleteRequest)
+ .whenComplete((dres, err) -> {
+ assertNotNull(dres);
+ assertTrue(dres.getSuccess());
+ assertWriteKB(dres);
+ if (proxySerialVersion <= V4) {
+ assertNull(dres.getExistingVersion());
+ } else {
+ assertEquals(value, dres.getExistingValue());
+ }
+ });
+ })
+ .thenCompose(ignored -> {
+ /* GET -- no row, it was removed above */
+ GetRequest getRequest = new GetRequest()
+ .setTableName("testusers")
+ .setKey(key);
+ return asyncHandle.get(getRequest)
+ .whenComplete((gres, err) -> {
+ assertNotNull(gres);
+ assertNull(gres.getValue());
+ });
+ }).join();
+
+ /* GET -- no table */
+ GetRequest getRequest = new GetRequest()
+ .setTableName("not_a_table")
+ .setKey(key);
+ asyncHandle.get(getRequest)
+ .handle((gres, err) -> {
+ assertTrue(err instanceof CompletionException);
+ assertTrue(
+ "Attempt to access missing table should "
+ + "have thrown",
+ err.getCause() instanceof TableNotFoundException);
+ return null;
+ }).join();
+
+ /* PUT -- invalid row -- this will throw */
+ value.remove("id");
+ value.put("not_a_field", 1);
+ PutRequest putRequest = new PutRequest()
+ .setValue(value)
+ .setTableName("testusers");
+ asyncHandle.put(putRequest)
+ .handle((pres, err) -> {
+ assertTrue(err instanceof CompletionException);
+ assertTrue(
+ "Attempt to put invalid row should have thrown",
+ err.getCause() instanceof IllegalArgumentException);
+ return null;
+ }).join();
+ } catch (Exception e) {
+ e.printStackTrace();
+ fail("Exception in test");
+ }
+ }
+}
diff --git a/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java b/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java
index 3499cb6f..2eb09df8 100644
--- a/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java
+++ b/driver/src/test/java/oracle/nosql/driver/HandleConfigTest.java
@@ -9,6 +9,7 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -107,6 +108,47 @@ public void testSdkVersion() {
arr.length > 2);
}
+ @Test
+ public void testConnectionPoolConfig() {
+ NoSQLHandleConfig cfg = new NoSQLHandleConfig("http://foo.com");
+ /* verify default connection pool values */
+ assertEquals(NoSQLHandleConfig.DEFAULT_CONNECTION_POOL_SIZE,
+ cfg.getConnectionPoolSize());
+ assertEquals(NoSQLHandleConfig.DEFAULT_CONNECTION_PENDING_SIZE,
+ cfg.getPoolMaxPending());
+ try {
+ /* set connection pool properties and verify */
+ System.setProperty(NoSQLHandleConfig.CONNECTION_SIZE_PROPERTY,
+ "50");
+ System.setProperty(NoSQLHandleConfig.CONNECTION_PENDING_PROPERTY,
+ "100");
+
+ cfg = new NoSQLHandleConfig("http://foo.com");
+ assertEquals(50, cfg.getConnectionPoolSize());
+ assertEquals(100, cfg.getPoolMaxPending());
+
+ /* manually set connection pool values and verify */
+ cfg = new NoSQLHandleConfig("http://foo.com");
+ cfg.setConnectionPoolSize(5);
+ cfg.setPoolMaxPending(2);
+ assertEquals(5, cfg.getConnectionPoolSize());
+ assertEquals(2, cfg.getPoolMaxPending());
+
+ /* set invalid value for properties and verify */
+ System.setProperty(NoSQLHandleConfig.CONNECTION_SIZE_PROPERTY, "0");
+ System.setProperty(NoSQLHandleConfig.CONNECTION_PENDING_PROPERTY,
+ "0");
+ IllegalArgumentException iae =
+ assertThrows(IllegalArgumentException.class,
+ () -> new NoSQLHandleConfig("http://foo.com"));
+ assertTrue(iae.getMessage().contains("must be larger than zero"));
+
+ } finally {
+ System.clearProperty(NoSQLHandleConfig.CONNECTION_SIZE_PROPERTY);
+ System.clearProperty(NoSQLHandleConfig.CONNECTION_PENDING_PROPERTY);
+ }
+ }
+
private void expectIllegalArg(String endpoint) {
try {
new NoSQLHandleConfig(endpoint);
diff --git a/driver/src/test/java/oracle/nosql/driver/PerformanceTest.java b/driver/src/test/java/oracle/nosql/driver/PerformanceTest.java
new file mode 100644
index 00000000..56d76031
--- /dev/null
+++ b/driver/src/test/java/oracle/nosql/driver/PerformanceTest.java
@@ -0,0 +1,241 @@
+/*-
+ * Copyright (c) 2011, 2025 Oracle and/or its affiliates. All rights reserved.
+ *
+ * Licensed under the Universal Permissive License v 1.0 as shown at
+ * https://oss.oracle.com/licenses/upl/
+ */
+
+package oracle.nosql.driver;
+
+import oracle.nosql.driver.http.Client;
+import oracle.nosql.driver.http.NoSQLHandleAsyncImpl;
+import oracle.nosql.driver.ops.DeleteRequest;
+import oracle.nosql.driver.ops.DeleteResult;
+import oracle.nosql.driver.ops.GetRequest;
+import oracle.nosql.driver.ops.GetResult;
+import oracle.nosql.driver.ops.PutRequest;
+import oracle.nosql.driver.ops.PutResult;
+import oracle.nosql.driver.ops.QueryRequest;
+import oracle.nosql.driver.ops.QueryResult;
+import oracle.nosql.driver.ops.Result;
+import oracle.nosql.driver.ops.TableLimits;
+import oracle.nosql.driver.ops.TableResult;
+import oracle.nosql.driver.values.MapValue;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Performance test for the async APIs.
+ * The test has two phases: a warm-up phase and a load phase.
+ * The warm-up phase warms up the Netty connections; the load phase
+ * randomly runs one of put, get, delete and query.
+ */
+@Ignore("Performance test is too heavy to run as a unit test")
+public class PerformanceTest extends ProxyTestBase {
+ private static final String table = "perf_test";
+ private static final String ddl = "create table if not exists " + table +
+ "(id long, name string, primary key(id))";
+ private static final String dropDdl = "drop table if exists " + table;
+ private static final int WARMUP_OPS = 100;
+ private static final int TOTAL_OPS = 100000;
+ private static final int THREADS = 100;
+ private static ExecutorService executor;
+
+ private static final int pipelineDepth = 100;
+
+ @BeforeClass
+ public static void setupTest() {
+ executor = Executors.newFixedThreadPool(THREADS);
+ }
+ @Before
+ public void setup() {
+ TableResult tres = tableOperationAsync(asyncHandle, ddl,
+ new TableLimits(1000, 1000, 1)).join();
+ assertNotNull(tres.getTableName());
+ }
+
+ @After
+ public void teardown() {
+ TableResult tres =
+ tableOperationAsync(asyncHandle, dropDdl, null).join();
+ assertNotNull(tres.getTableName());
+ }
+
+ @Test
+ public void test() throws Exception {
+ Client client = ((NoSQLHandleAsyncImpl) asyncHandle).getClient();
+ client.enableRateLimiting(true, 100);
+
+ System.out.println("Warm-up phase");
+ runOpsAsync(WARMUP_OPS, pipelineDepth);
+
+ StatsControl statsControl = asyncHandle.getStatsControl();
+ statsControl.setProfile(StatsControl.Profile.ALL).setPrettyPrint(true);
+ statsControl.start();
+
+
+ System.out.println("Load phase");
+ long start = System.nanoTime();
+ runOpsAsync(TOTAL_OPS, pipelineDepth);
+ long end = System.nanoTime();
+
+
+ Duration duration = Duration.ofNanos(end - start);
+ double throughput = TOTAL_OPS / (duration.toMillis() / 1000.0);
+
+ System.out.println("Completed " + TOTAL_OPS + " operations");
+ System.out.println("Time = " + duration);
+ System.out.println("Throughput = " + throughput + " ops/sec");
+ statsControl.stop();
+ }
+
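+ /* Blocking variant: each random operation runs on the executor and join()s on its future */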
+ private void runOps(int count) throws Exception {
+ List<CompletableFuture<Void>> futures = new ArrayList<>(count);
+ Random random = new Random();
+ AtomicInteger failures = new AtomicInteger();
+ MapValue row = new MapValue()
+ .put("id", 1)
+ .put("name", "oracle");
+ MapValue key = new MapValue().put("id", 1);
+
+ for (int i = 0; i < count; i++) {
+ futures.add(CompletableFuture.runAsync(() -> {
+ try {
+ int op = random.nextInt(4);
+ switch (op) {
+ case 0 : {
+ //put op
+ PutRequest pr = new PutRequest()
+ .setTableName(table)
+ .setValue(row);
+ Result res = asyncHandle.put(pr).join();
+ assertNotNull(res);
+ break;
+ }
+ case 1 : {
+ GetRequest gr = new GetRequest()
+ .setTableName(table)
+ .setKey(key);
+ Result res = asyncHandle.get(gr).join();
+ assertNotNull(res);
+ break;
+ }
+ case 2 : {
+ DeleteRequest dr = new DeleteRequest()
+ .setTableName(table)
+ .setKey(key);
+ Result res = asyncHandle.delete(dr).join();
+ assertNotNull(res);
+ break;
+ } default : {
+ try(QueryRequest qr =
+ new QueryRequest()
+ .setStatement(
+ "select * from " + table + " where id=1")) {
+ Result res = asyncHandle.query(qr).join();
+ assertNotNull(res);
+ }
+ }
+ }
+ } catch (Exception e) {
+ failures.incrementAndGet();
+ }
+ }, executor));
+ }
+ CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
+ System.out.println("Failures = " + failures.get());
+ }
+
+ private void runOpsAsync(int count, int pipelineDepth) throws Exception {
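+ /* the semaphore caps the number of in-flight requests at pipelineDepth */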
+ final Semaphore semaphore = new Semaphore(pipelineDepth);
+ final List<CompletableFuture<Void>> futures = new ArrayList<>(count);
+ Random random = new Random();
+ AtomicInteger failures = new AtomicInteger();
+ MapValue row = new MapValue()
+ .put("id", 1)
+ .put("name", "oracle");
+ MapValue key = new MapValue().put("id", 1);
+
+ for (int i = 0; i < count; i++) {
+ try {
+ semaphore.acquire();
+ int op = random.nextInt(4);
+ switch (op) {
+ case 0 : {
+ //put op
+ PutRequest pr = new PutRequest()
+ .setTableName(table)
+ .setValue(row);
+ CompletableFuture<PutResult> fut =
+ asyncHandle.put(pr).whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
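+ /* thenRun() normalizes each result type to CompletableFuture<Void> so all ops share one futures list */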
+ futures.add(fut.thenRun(() -> {}));
+ break;
+ }
+ case 1 : {
+ GetRequest gr = new GetRequest()
+ .setTableName(table)
+ .setKey(key);
+ CompletableFuture<GetResult> fut =
+ asyncHandle.get(gr).whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
+ futures.add(fut.thenRun(() -> {}));
+ break;
+ }
+ case 2 : {
+ DeleteRequest dr = new DeleteRequest()
+ .setTableName(table)
+ .setKey(key);
+ CompletableFuture<DeleteResult> fut =
+ asyncHandle.delete(dr).whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
+ futures.add(fut.thenRun(() -> {}));
+ break;
+ } default : {
+ try(QueryRequest qr =
+ new QueryRequest()
+ .setStatement(
+ "select * from " + table + " where id=1")) {
+ CompletableFuture<QueryResult> fut =
+ asyncHandle.query(qr)
+ .whenComplete((res, err) -> {
+ assertNotNull(res);
+ semaphore.release();
+ });
+ futures.add(fut.thenRun(() -> {}));
+ }
+ }
+ }
+ } catch (Exception e) {
+ failures.incrementAndGet();
+ }
+ }
+ CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).get();
+ System.out.println("Failures = " + failures.get());
+ }
+}
diff --git a/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java b/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java
index dda592ef..80fd72b5 100644
--- a/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java
+++ b/driver/src/test/java/oracle/nosql/driver/ProxyTestBase.java
@@ -30,6 +30,7 @@
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -79,6 +80,7 @@ public class ProxyTestBase {
protected static String TRACE = "test.trace";
protected static int DEFAULT_DDL_TIMEOUT = 15000;
protected static int DEFAULT_DML_TIMEOUT = 5000;
+ protected static int DEFAULT_REQUEST_TIMEOUT = 5000;
protected static String TEST_TABLE_NAME = "drivertest";
protected static int INACTIVITY_PERIOD_SECS = 2;
protected static String NETTY_LEAK_PROP="test.detectleaks";
@@ -122,6 +124,8 @@ public class ProxyTestBase {
protected NoSQLHandle handle;
+ protected NoSQLHandleAsync asyncHandle;
+
/* serial version used at the proxy server */
protected int proxySerialVersion;
@@ -248,6 +252,35 @@ protected static TableResult tableOperation(NoSQLHandle handle,
handle.doTableRequest(tableRequest, waitMillis, 1000);
return tres;
}
+
+ protected static CompletableFuture<TableResult>
+ tableOperationAsync(NoSQLHandleAsync handle,
+ String statement,
+ TableLimits limits) {
+ return tableOperationAsync(handle,
+ statement,
+ limits,
+ DEFAULT_DDL_TIMEOUT);
+ }
+
+ /**
+ * run the statement, assumes success, exception is thrown on error
+ */
+ protected static CompletableFuture<TableResult>
+ tableOperationAsync(NoSQLHandleAsync handle,
+ String statement,
+ TableLimits limits,
+ int waitMillis) {
+ assertTrue(waitMillis > 500);
+ TableRequest tableRequest = new TableRequest()
+ .setStatement(statement)
+ .setTableLimits(limits)
+ .setTimeout(DEFAULT_DDL_TIMEOUT);
+
+ CompletableFuture<TableResult> tres =
+ handle.doTableRequest(tableRequest, waitMillis, 1000);
+ return tres;
+ }
/**
* run the statement, assumes success, exception is thrown on error
*/
@@ -271,6 +304,7 @@ public void beforeTest() throws Exception {
* Configure and get the handle
*/
handle = getHandle(endpoint);
+ asyncHandle = getAsyncHandle(endpoint);
/* track existing tables and don't drop them */
existingTables = new HashSet();
@@ -309,6 +343,9 @@ public void afterTest() throws Exception {
}
handle.close();
}
+ if (asyncHandle != null) {
+ asyncHandle.close();
+ }
}
protected static void dropAllTables(NoSQLHandle nosqlHandle,
@@ -424,13 +461,19 @@ protected NoSQLHandle getHandle(String ep) {
return setupHandle(config);
}
+ protected NoSQLHandleAsync getAsyncHandle(String ep) {
+ NoSQLHandleConfig config = new NoSQLHandleConfig(ep);
+ serviceURL = config.getServiceURL();
+ return setupAsyncHandle(config);
+ }
+
/* Set configuration values for the handle */
protected NoSQLHandle setupHandle(NoSQLHandleConfig config) {
/*
* 5 retries, default retry algorithm
*/
config.configureDefaultRetryHandler(5, 0);
- config.setRequestTimeout(30000);
+ config.setRequestTimeout(DEFAULT_REQUEST_TIMEOUT);
/* remove idle connections after this many seconds */
config.setConnectionPoolInactivityPeriod(INACTIVITY_PERIOD_SECS);
@@ -446,6 +489,23 @@ protected NoSQLHandle setupHandle(NoSQLHandleConfig config) {
return h;
}
+ /* Set configuration values for the async handle */
+ protected NoSQLHandleAsync setupAsyncHandle(NoSQLHandleConfig config) {
+ /*
+ * 5 retries, default retry algorithm
+ */
+ config.configureDefaultRetryHandler(5, 0);
+ config.setRequestTimeout(DEFAULT_REQUEST_TIMEOUT);
+
+ /* remove idle connections after this many seconds */
+ config.setConnectionPoolInactivityPeriod(INACTIVITY_PERIOD_SECS);
+ configAuth(config);
+
+ /* allow test cases to add/modify handle config */
+ perTestHandleConfig(config);
+ return getAsyncHandle(config);
+ }
+
/**
* sub classes can override this to affect the handle config
*/
@@ -474,6 +534,27 @@ protected NoSQLHandle getHandle(NoSQLHandleConfig config) {
return NoSQLHandleFactory.createNoSQLHandle(config);
}
+ /**
+ * get an async handle based on the config
+ */
+ protected NoSQLHandleAsync getAsyncHandle(NoSQLHandleConfig config) {
+ /*
+ * Create a Logger, set to WARNING by default.
+ */
+ Logger logger = Logger.getLogger(getClass().getName());
+ String level = System.getProperty("test.loglevel");
+ if (level == null) {
+ level = "WARNING";
+ }
+ logger.setLevel(Level.parse(level));
+ config.setLogger(logger);
+
+ /*
+ * Open the handle
+ */
+ return NoSQLHandleFactory.createNoSQLHandleAsync(config);
+ }
+
void assertReadKB(Result res) {
if (onprem) {
return;
@@ -518,6 +599,13 @@ public String getAuthorizationString(Request request) {
@Override
public void close() {
}
+
+ @Override
+ public CompletableFuture<String>
+ getAuthorizationStringAsync(Request request) {
+ return CompletableFuture.completedFuture(
+ "Bearer cloudsim");
+ }
});
}
}
diff --git a/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java b/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java
index 48295fb4..b84eeede 100644
--- a/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java
+++ b/driver/src/test/java/oracle/nosql/driver/httpclient/ConnectionPoolTest.java
@@ -8,14 +8,37 @@
package oracle.nosql.driver.httpclient;
import static org.junit.Assert.assertEquals;
-
-import java.util.concurrent.Future;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.net.ssl.SSLException;
import java.net.URL;
+import io.netty.bootstrap.Bootstrap;
+import io.netty.bootstrap.ServerBootstrap;
+import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.channel.ChannelInitializer;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.local.LocalAddress;
+import io.netty.channel.local.LocalChannel;
+import io.netty.channel.local.LocalServerChannel;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.pool.ChannelPoolHandler;
+import io.netty.util.concurrent.Future;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
@@ -26,36 +49,59 @@
import oracle.nosql.driver.NoSQLHandleConfig;
/**
- * This test is excluded from the test profiles and must be run standalone.
- * This is because of the need to use a cloud endpoint for complete
- * testing. See header comment on testCloudTimeout().
- * It can be run explicitly using either the test-onprem or test-cloudsim
- * profile with a -Dtest directive, e.g.:
- * mvn -Ptest-cloudsim test \
- * -Dtest=oracle.nosql.driver.httpclient.ConnectionPoolTest \
- * -DargLine="-Dtest.endpoint=http://localhost:8080 \
- * -Dtest.cloudendpoint=some_cloud_endpoint"
+ * Tests for ConnectionPool
*/
public class ConnectionPoolTest {
private static String endpoint = System.getProperty("test.endpoint");
private static Logger logger = getLogger();
private URL serviceURL;
+ private EventLoopGroup group;
+ private Channel serverChannel;
+ private LocalAddress address;
@Before
- public void beforeTest() {
- if (endpoint == null) {
- throw new IllegalArgumentException(
- "Test requires test.endpoint system property");
- }
+ public void beforeTest() throws InterruptedException {
+ group = new NioEventLoopGroup();
+ address = new LocalAddress("test-port");
+ /* Start a fake Local Server so the pool can connect */
+ ServerBootstrap sb = new ServerBootstrap()
+ .group(group)
+ .channel(LocalServerChannel.class)
+ .childHandler(new ChannelInitializer<Channel>() {
+ @Override
+ protected void initChannel(Channel ch) {
+ ch.pipeline().addLast(new ChannelInboundHandlerAdapter());
+ }
+ });
+ serverChannel = sb.bind(address).sync().channel();
+ }
- /* serviceURL is used in the test but a handle is not required */
- NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint);
- serviceURL = config.getServiceURL();
+ @After
+ public void tearDown() {
+ serverChannel.close();
+ group.shutdownGracefully();
}
+ /**
+ * This test is excluded from the test profiles and must be run standalone.
+ * This is because of the need to use a cloud endpoint for complete
+ * testing. See header comment on testCloudTimeout().
+ * It can be run explicitly using either the test-onprem or test-cloudsim
+ * profile with a -Dtest directive, e.g.:
+ * mvn -Ptest-cloudsim test \
+ * -Dtest=oracle.nosql.driver.httpclient.ConnectionPoolTest \
+ * -DargLine="-Dtest.endpoint=http://localhost:8080 \
+ * -Dtest.cloudendpoint=some_cloud_endpoint"
+ */
@Test
public void poolTest() throws Exception {
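+ /* skip unless test.endpoint is set; see the method comment above for how to run */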
+ Assume.assumeTrue(endpoint != null);
+
+ /* serviceURL is used in the test but a handle is not required */
+ NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint);
+ serviceURL = config.getServiceURL();
+
final int poolSize = 4;
final int poolMinSize = 1;
final int poolInactivityPeriod = 1;
@@ -85,7 +131,7 @@ public boolean keepAlive(Channel ch) {
/*
* Acquire poolSize channels
*/
- Channel ch[] = new Channel[poolSize];
+ Channel[] ch = new Channel[poolSize];
for (int i = 0; i < poolSize; i++) {
ch[i] = getChannel(pool);
}
@@ -153,11 +199,11 @@ public void testCloudTimeout() throws Exception {
final int port = 443;
final int sleepTimeMs = 70000;
- if (endpoint == null) {
- throw new IllegalStateException(
- "testCloudTimeout requires setting of the system property, " +
- "\"test.cloudendpoint\"");
- }
+ Assume.assumeTrue(endpoint != null);
+
+ /* serviceURL is used in the test but a handle is not required */
+ NoSQLHandleConfig config = new NoSQLHandleConfig(endpoint);
+ serviceURL = config.getServiceURL();
HttpClient client = new HttpClient(
endpoint,
@@ -185,7 +231,7 @@ public boolean keepAlive(Channel ch) {
* Acquire poolSize channels, then release them to the pool. Do this
* 2x to bump the use count on the channels
*/
- Channel ch[] = new Channel[poolSize];
+ Channel[] ch = new Channel[poolSize];
for (int count = 0; count < 2; count++) {
for (int i = 0; i < poolSize; i++) {
ch[i] = getChannel(pool);
@@ -205,7 +251,7 @@ public boolean keepAlive(Channel ch) {
Thread.sleep(sleepTimeMs);
/* assert that 2 channels have gone inactive and been pruned */
- assertEquals(poolSize - poolMinSize, pool.pruneChannels());
+ assertEquals(poolSize - poolMinSize, pool.getTotalChannels());
/* assert that the number of channels is the min size configured */
assertEquals(poolMinSize, pool.getTotalChannels());
@@ -213,6 +259,235 @@ public boolean keepAlive(Channel ch) {
client.shutdown();
}
+ @Test
+ public void testMetricsAndReuse() throws Exception {
+ /* Create Pool */
+ Bootstrap bootstrap = new Bootstrap()
+ .group(group)
+ .channel(LocalChannel.class)
+ .remoteAddress(address);
+
+ /* A dummy user handler (noop) */
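+ /* no inactivity timeout, at most 2 channels and 2 pending acquires */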
+ ConnectionPool pool = getConnectionPool(bootstrap, 0, 2, 2);
+
+ /* CHECK 1: Initial */
+ assertStats(pool, 0, 0, 0, 0);
+
+ /* CHECK 2: Acquire */
+ Channel ch1 = pool.acquire().sync().getNow();
+ /* Total:1, Acquired:1, Idle:0 */
+ assertStats(pool, 1, 1, 0, 0);
+
+ /* CHECK 3: Release */
+ pool.release(ch1);
+ /* Total:1, Acquired:0, Idle:1 */
+ assertStats(pool, 1, 0, 1, 0);
+
+ /* CHECK 4: Reuse */
+ Channel ch2 = pool.acquire().sync().getNow();
+ /* Should be the SAME channel object (reused) */
+ assertEquals(ch1.id(), ch2.id());
+ /* Stats: Total:1, Acquired:1, Idle:0 */
+ assertStats(pool, 1, 1, 0, 0);
+
+ /* acquire another channel and check acquire count is 2 */
+ Channel ch3 = pool.acquire().sync().getNow();
+ /* Stats: Total:2, Acquired:2, Idle:0 */
+ assertStats(pool, 2, 2, 0, 0);
+
+ /* Try to acquire another channel, this should be put into pending */
+ Future<Channel> ch4 = pool.acquire();
+ /* Stats: Total:2, Acquired:2, Idle:0, Pending:1 */
+ assertStats(pool, 2, 2, 0, 1);
+
+ /* Try to acquire another channel, this should be put into pending */
+ Future<Channel> ch5 = pool.acquire();
+ /* Stats: Total:2, Acquired:2, Idle:0, Pending:2 */
+ assertStats(pool, 2, 2, 0, 2);
+
+ /* try to acquire more than max pending and check error is thrown */
+ Assert.assertThrows(IllegalStateException.class,
+ () -> pool.acquire().sync().getNow());
+ /* Stats: Total:2, Acquired:2, Idle:0, Pending:2 */
+ assertStats(pool, 2, 2, 0, 2);
+
+ /* Release back a channel and verify that pending is reduced*/
+ pool.release(ch2);
+ /* Stats: Total:2, Acquired:2, Idle:0, Pending:1 */
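+ /* brief pause so the event loop can hand the released channel to the pending acquire */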
+ Thread.sleep(10);
+ assertStats(pool, 2, 2, 0, 1);
+ assertTrue(ch4.isSuccess());
+
+ /* Release back a channel and verify that pending is reduced*/
+ pool.release(ch3);
+ /* Stats: Total:2, Acquired:2, Idle:0, Pending:0 */
+ Thread.sleep(10);
+ assertStats(pool, 2, 2, 0, 0);
+ assertTrue(ch5.isSuccess());
+
+ /* Release back a channel and verify Idle is increased */
+ pool.release(ch4.getNow());
+ /* Stats: Total:2, Acquired:1, Idle:1, Pending:0 */
+ Thread.sleep(10);
+ assertStats(pool, 2, 1, 1, 0);
+
+ /* Release back a channel and verify Idle is increased */
+ pool.release(ch5.getNow());
+ /* Stats: Total:2, Acquired:0, Idle:2, Pending:0 */
+ Thread.sleep(10);
+ assertStats(pool, 2, 0, 2, 0);
+
+ /* check pending tasks are completed when the pool is closed */
+ ch1 = pool.acquire().sync().getNow();
+ ch2 = pool.acquire().sync().getNow();
+ ch4 = pool.acquire();
+ /* Stats: Total:2, Acquired:2, Idle:0, Pending:1 */
+ Thread.sleep(10);
+ assertStats(pool, 2, 2, 0, 1);
+
+ /* close the pool */
+ pool.close();
+
+ /* check pending ch4 is completed with exception */
+ Thread.sleep(10);
+ assertFalse(ch4.isSuccess());
+ assertTrue(ch4.cause() instanceof RejectedExecutionException);
+ }
+
+ @Test
+ public void testMaxConnectionsAndPendingQueue() throws InterruptedException {
+ int numberOfRequests = 5;
+ int maxConnections = 2;
+
+ /* Thread-safe list to hold the channels we successfully acquire */
+ List<Channel> heldChannels =
+ Collections.synchronizedList(new ArrayList<>());
+
+ /* Latch to wait ONLY for the allowed connections to succeed */
+ CountDownLatch acquiredLatch = new CountDownLatch(maxConnections);
+
+ /* Create Pool */
+ Bootstrap bootstrap = new Bootstrap()
+ .group(group)
+ .channel(LocalChannel.class)
+ .remoteAddress(address);
+
+ /* A dummy user handler (noop) */
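+ /* 60s inactivity so channels are not pruned mid-test; the pending cap exceeds the request count */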
+ ConnectionPool pool = getConnectionPool(bootstrap, 60, maxConnections,
+ numberOfRequests + 1);
+ ExecutorService threadPool = Executors.newFixedThreadPool(10);
+
+ /* PHASE 1: Bombard the pool */
+ for (int i = 0; i < numberOfRequests; i++) {
+ threadPool.submit(() -> {
+ Future<Channel> future = pool.acquire();
+ future.addListener(f -> {
+ if (f.isSuccess()) {
+ heldChannels.add((Channel) f.getNow());
+ acquiredLatch.countDown();
+ }
+ });
+ });
+ }
+ /* Wait for the pool to fill up (Max maxConnections) */
+ boolean success = acquiredLatch.await(5, TimeUnit.SECONDS);
+ if (!success) {
+ throw new RuntimeException("Timeout waiting for initial connections");
+ }
+
+ /* Give a tiny buffer for metrics to settle */
+ Thread.sleep(50);
+
+ /* PHASE 2: Assert Saturation */
+ assertEquals("Total should be capped at max",
+ maxConnections, pool.getTotalChannels());
+ assertEquals("Acquired should be capped at max",
+ maxConnections, pool.getAcquiredChannelCount());
+ assertEquals("Excess requests should be pending",
+ numberOfRequests - maxConnections,
+ pool.getPendingAcquires());
+
+ /* PHASE 3: Drain the Queue
+ * Now we manually release the channels we were holding.
+ * This should trigger the Pending requests to proceed.
+ */
+
+ /* We need a new latch to verify the REMAINING 3 requests finish
+ * (But we can't easily attach listeners now, so we just check stats)
+ */
+ for (Channel ch : heldChannels) {
+ pool.release(ch);
+ }
+
+ /* Wait a moment for the pending queue to drain */
+ Thread.sleep(200);
+
+ /* Expect: Pending should be one now. */
+ assertEquals("Pending queue should have 1", 1, pool.getPendingAcquires());
+
+ threadPool.shutdown();
+ pool.close();
+ }
+
+ private static ConnectionPool getConnectionPool(Bootstrap bootstrap,
+ int inactivityPeriodSeconds,
+ int maxConnections,
+ int numberOfRequests) {
+ ChannelPoolHandler noopHandler = new ChannelPoolHandler() {
+ @Override public void channelReleased(Channel ch) {}
+
+ @Override public void channelAcquired(Channel ch) {}
+
+ @Override public void channelCreated(Channel ch) {}
+ };
+
+ ConnectionPool pool =
+ new ConnectionPool(bootstrap,
+ noopHandler,
+ logger,
+ false, /* isMinimal*/
+ 0, /* pool min*/
+ inactivityPeriodSeconds, /* Inactivity seconds */
+ maxConnections,
+ numberOfRequests);
+ return pool;
+ }
+
+ @Test
+ public void testIdleEvictionInPool() throws InterruptedException {
+ /* Create Pool */
+ Bootstrap bootstrap = new Bootstrap()
+ .group(group)
+ .channel(LocalChannel.class)
+ .remoteAddress(address);
+
+ /* A dummy user handler (noop) */
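+ /* 2-second inactivity period so the pool's refresh task runs during the test */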
+ ConnectionPool pool = getConnectionPool(bootstrap, 2, 2, 5);
+
+ /* 1. Acquire a channel */
+ Channel ch = pool.acquire().sync().getNow();
+
+ /* 2. Release it back to the pool (This starts the Idle Timer) */
+ pool.release(ch);
+
+ /* Verify it's currently Idle */
+ assertTrue(ch.isOpen());
+ assertEquals(1, pool.getFreeChannels());
+
+ /* 3. SIMULATE the channel close */
+ ch.close();
+
+ /* 4. wait for the refresh task to notice the closed channel and update the metrics */
+ Thread.sleep(3000);
+
+ /* The metrics should update (Total drops to 0) */
+ assertEquals("Total count should drop to 0",
+ 0, pool.getTotalChannels());
+ assertEquals("Idle count should drop to 0",
+ 0, pool.getFreeChannels());
+ }
+
private static Logger getLogger() {
Logger tlogger = Logger.getLogger("oracle.nosql");
String level = System.getProperty("test.loglevel");
@@ -236,7 +511,9 @@ private static Logger getLogger() {
*/
private Channel getChannel(ConnectionPool pool) throws Exception {
Future<Channel> fut = pool.acquire();
- return fut.get();
+ Channel ch = fut.get();
+ assert ch.isActive();
+ return ch;
}
private void releaseChannel(ConnectionPool pool, Channel ch) {
@@ -254,4 +531,12 @@ private SslContext buildSslContext() {
"Unable o create SSL context: " + e);
}
}
+
+ private void assertStats(ConnectionPool pool, int t,
+ int a, int i, int p) {
+ assertEquals("Total", t, pool.getTotalChannels());
+ assertEquals("Acquired", a, pool.getAcquiredChannelCount());
+ assertEquals("Idle", i, pool.getFreeChannels());
+ assertEquals("Pending", p, pool.getPendingAcquires());
+ }
}
diff --git a/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java b/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java
index 3f322987..ffb7e360 100644
--- a/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java
+++ b/driver/src/test/java/oracle/nosql/driver/iam/AuthRetryTest.java
@@ -7,8 +7,7 @@
package oracle.nosql.driver.iam;
-import io.netty.channel.Channel;
-import io.netty.channel.embedded.EmbeddedChannel;
+import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.ssl.SslContext;
import oracle.nosql.driver.AuthorizationProvider;
@@ -18,7 +17,6 @@
import oracle.nosql.driver.SecurityInfoNotReadyException;
import oracle.nosql.driver.http.Client;
import oracle.nosql.driver.httpclient.HttpClient;
-import oracle.nosql.driver.httpclient.ResponseHandler;
import oracle.nosql.driver.ops.GetRequest;
import oracle.nosql.driver.ops.Request;
import oracle.nosql.driver.values.MapValue;
@@ -26,6 +24,8 @@
import org.junit.Test;
import java.net.URL;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Logger;
@@ -79,7 +79,7 @@ public void close() {
private class TestClient extends Client {
public TestClient(Logger logger, NoSQLHandleConfig config) {
- super(logger, config);
+ super(logger, config, Executors.newSingleThreadScheduledExecutor());
}
@Override
@@ -101,9 +101,8 @@ public TestHttpClient() {
}
@Override
- public void runRequest(HttpRequest request,
- ResponseHandler handler,
- Channel channel) {
+ public CompletableFuture<FullHttpResponse>
+ runRequest(HttpRequest request, int timeoutMS) {
/*
* Simulate an authentication failure scenario where the initial
* attempt throws SecurityInfoNotReadyException, and subsequent
@@ -111,26 +110,14 @@ public void runRequest(HttpRequest request,
*/
int count = execCount.incrementAndGet();
if (count == 1) {
- throw new SecurityInfoNotReadyException("test");
+ return CompletableFuture.failedFuture(
+ new SecurityInfoNotReadyException("test"));
} else {
iaeCount.incrementAndGet();
- throw new InvalidAuthorizationException("test");
+ return CompletableFuture.failedFuture(
+ new InvalidAuthorizationException("test"));
}
- }
-
- @Override
- public Channel getChannel(int timeoutMs) {
- /*
- * Utilize Netty's EmbeddedChannel to create a mock channel that
- * remains active, enabling the request execution to proceed with
- * a valid channel for error simulation purposes.
- */
- return new EmbeddedChannel() {
- @Override
- public boolean isActive() {
- return true;
- }
- };
}
}
}
diff --git a/examples/pom.xml b/examples/pom.xml
index c7598a82..355af68d 100644
--- a/examples/pom.xml
+++ b/examples/pom.xml
@@ -3,7 +3,7 @@
4.0.0com.oracle.nosql.sdk
- 5.4.18
+ 6.0.0nosql-java-sdk-examplesOracle NoSQL Database Java ExamplesJava examples for Oracle NoSQL Database
@@ -12,8 +12,8 @@
UTF-8
- 1.8
- 1.8
+ 11
+ 11truetrue
@@ -25,7 +25,7 @@
com.oracle.nosql.sdknosqldriver
- 5.4.18
+ 6.0.0
@@ -37,8 +37,8 @@
3.11.0true
- 1.8
- 1.8
+ ${maven.compiler.source}
+ ${maven.compiler.target}truetrue
diff --git a/examples/src/main/java/Common.java b/examples/src/main/java/Common.java
index dcb800ac..cc4bcd89 100644
--- a/examples/src/main/java/Common.java
+++ b/examples/src/main/java/Common.java
@@ -8,6 +8,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.CompletableFuture;
import oracle.nosql.driver.AuthorizationProvider;
import oracle.nosql.driver.NoSQLHandle;
@@ -358,6 +359,12 @@ public String getAuthorizationString(Request request) {
return id;
}
+ @Override
+ public CompletableFuture<String>
+ getAuthorizationStringAsync(Request request) {
+ return CompletableFuture.completedFuture(id);
+ }
+
@Override
public void close() {}
}
diff --git a/pom.xml b/pom.xml
index 464ef673..3b10c327 100644
--- a/pom.xml
+++ b/pom.xml
@@ -6,7 +6,7 @@
com.oracle.nosql.sdknosql-java-sdk
- 5.4.18
+ 6.0.0pomOracle NoSQL SDK