diff --git a/end2end-test-examples/echo-client/Dockerfile b/end2end-test-examples/echo-client/Dockerfile new file mode 100644 index 00000000..4c0c517f --- /dev/null +++ b/end2end-test-examples/echo-client/Dockerfile @@ -0,0 +1,7 @@ +FROM openjdk:11-jre + +ADD target/echo-client-2.3-SNAPSHOT.jar /app/ + +WORKDIR /app + +ENTRYPOINT ["java", "-jar", "echo-client-2.3-SNAPSHOT.jar"] diff --git a/end2end-test-examples/echo-client/README.md b/end2end-test-examples/echo-client/README.md index 288386d7..a0bb741c 100644 --- a/end2end-test-examples/echo-client/README.md +++ b/end2end-test-examples/echo-client/README.md @@ -8,25 +8,25 @@ Client sends out `numRpcs` number of unary requests to `host` sequentially with request payload size of `reqSize`, and expected response payload size of `rspSize`: ```sh -./gradlew run --args="--numRpcs=100 --reqSize=100 --resSize=100 --host=grpc-cloudapi1.googleapis.com" +./gradlew run --args="--numRpcs=100 --reqSize=100 --resSize=100 --host=grpc-cloudapi.googleapis.com" ``` Enable gRPC compression for both request and response with gzip: ```sh -./gradlew run --args="--numRpcs=100 --reqSize=100 --resSize=100 --reqComp=gzip --resComp=gzip --host=grpc-cloudapi1.googleapis.com" +./gradlew run --args="--numRpcs=100 --reqSize=100 --resSize=100 --reqComp=gzip --resComp=gzip --host=grpc-cloudapi.googleapis.com" ``` Sending requests infinitely with 10 seconds interval between requests. ```sh -./gradlew run --args="--numRpcs=0 --interval=10000 --reqSize=100 --resSize=100 --host=grpc-cloudapi1.googleapis.com" +./gradlew run --args="--numRpcs=0 --interval=10000 --reqSize=100 --resSize=100 --host=grpc-cloudapi.googleapis.com" ``` Receive server-streaming responses with 10 seconds interval. Re-create the stream after each 10 responses. 
```sh -./gradlew run --args="--stream=true --numRpcs=10 --interval=10000 --host=grpc-cloudapi1.googleapis.com" +./gradlew run --args="--stream=true --numRpcs=10 --interval=10000 --host=grpc-cloudapi.googleapis.com" ``` Example results: @@ -84,3 +84,35 @@ Per sec Payload = 0.07 MB (exact amount of KB = 10000) `--logMaxFiles`: If log file size is limited rotate log files and keep this number of files. Default: unlimited. `--disableConsoleLog`: If logging to a file do not log to console. Default: false. + +`--metricName`: Ship metrics to Google Cloud Monitoring with this prefix for metric names. + +`--metricTaskPrefix`: Prefix for the process label for metrics. + +`--metricProbeName`: Additional label for metrics. + +## Deployment + +Build the jar using maven (`mvn clean package`) or docker: + +```shell +docker run -it --rm -v "$(pwd)":/usr/src/mymaven -w /usr/src/mymaven maven mvn clean package +``` + +Deploy the `target/echo-client-2.3-SNAPSHOT.jar` or build a docker image with the jar: + +```shell +docker build -t echo-client:2.3 . 
+``` + +Run the jar: + +```shell +java -jar echo-client-2.3-SNAPSHOT.jar --numRpcs=1 --reqSize=100 --resSize=100 --host=grpc-cloudapi.googleapis.com +``` + +Or run a container: + +```shell +docker run -it --rm echo-client:2.3 --numRpcs=1 --reqSize=100 --resSize=100 --host=grpc-cloudapi.googleapis.com +``` diff --git a/end2end-test-examples/echo-client/build.gradle b/end2end-test-examples/echo-client/build.gradle index 0dbcd791..cd6b5475 100644 --- a/end2end-test-examples/echo-client/build.gradle +++ b/end2end-test-examples/echo-client/build.gradle @@ -5,13 +5,15 @@ plugins { } group 'io.grpc' -version '2.2-SNAPSHOT' +version '2.3-SNAPSHOT' sourceCompatibility = 1.8 +targetCompatibility = 1.8 def grpcVersion = '1.34.1' def protobufVersion = '3.9.0' def protocVersion = protobufVersion +def opencensusVersion = '0.31.0' repositories { mavenCentral() @@ -23,9 +25,13 @@ dependencies { implementation "io.grpc:grpc-testing:${grpcVersion}" implementation "io.grpc:grpc-netty-shaded:${grpcVersion}" implementation "io.grpc:grpc-census:${grpcVersion}" - implementation "net.sourceforge.argparse4j:argparse4j:0.8.1" + implementation "net.sourceforge.argparse4j:argparse4j:0.9.0" implementation "com.google.protobuf:protobuf-java-util:${protobufVersion}" - implementation "org.hdrhistogram:HdrHistogram:2.1.11" + implementation "org.hdrhistogram:HdrHistogram:2.1.12" + implementation "io.opencensus:opencensus-api:${opencensusVersion}" + implementation "io.opencensus:opencensus-impl:${opencensusVersion}" + implementation "io.opencensus:opencensus-exporter-stats-stackdriver:${opencensusVersion}" + implementation "io.opencensus:opencensus-contrib-grpc-metrics:${opencensusVersion}" implementation "org.apache.commons:commons-math3:3.6.1" diff --git a/end2end-test-examples/echo-client/pom.xml b/end2end-test-examples/echo-client/pom.xml index 22a579a2..3e921199 100644 --- a/end2end-test-examples/echo-client/pom.xml +++ b/end2end-test-examples/echo-client/pom.xml @@ -6,7 +6,7 @@ 4.0.0 io.grpc 
echo-client - 2.2-SNAPSHOT + 2.3-SNAPSHOT 2019 @@ -17,11 +17,12 @@ - 11 - 11 + 8 + 8 1.34.1 3.9.0 ${protobuf.version} + 0.31.0 @@ -88,6 +89,26 @@ commons-math3 3.6.1 + + io.opencensus + opencensus-api + ${opencensus.version} + + + io.opencensus + opencensus-impl + ${opencensus.version} + + + io.opencensus + opencensus-exporter-stats-stackdriver + ${opencensus.version} + + + io.opencensus + opencensus-contrib-grpc-metrics + ${opencensus.version} + junit junit diff --git a/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/Args.java b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/Args.java index 623d4fc1..a57bfe5c 100644 --- a/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/Args.java +++ b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/Args.java @@ -41,6 +41,11 @@ public class Args { final int logMaxSize; final int logMaxFiles; final boolean disableConsoleLog; + final String metricName; + final String metricTaskPrefix; + final String metricProbeName; + final int numMsgs; + final int msgsInterval; Args(String[] args) throws ArgumentParserException { ArgumentParser parser = @@ -79,6 +84,11 @@ public class Args { parser.addArgument("--logMaxSize").type(Integer.class).setDefault(0); parser.addArgument("--logMaxFiles").type(Integer.class).setDefault(0); parser.addArgument("--disableConsoleLog").type(Boolean.class).setDefault(false); + parser.addArgument("--metricName").type(String.class).setDefault(""); + parser.addArgument("--metricTaskPrefix").type(String.class).setDefault(""); + parser.addArgument("--metricProbeName").type(String.class).setDefault(""); + parser.addArgument("--numMsgs").type(Integer.class).setDefault(1); + parser.addArgument("--msgsInterval").type(Integer.class).setDefault(0); Namespace ns = parser.parseArgs(args); @@ -113,6 +123,12 @@ public class Args { logMaxSize = ns.getInt("logMaxSize"); logMaxFiles = ns.getInt("logMaxFiles"); disableConsoleLog = ns.getBoolean("disableConsoleLog"); - distrib = 
(qps > 0) ? new PoissonDistribution(1000/qps) : null; + metricName = ns.getString("metricName"); + metricTaskPrefix = ns.getString("metricTaskPrefix"); + metricProbeName = ns.getString("metricProbeName"); + numMsgs = ns.getInt("numMsgs"); + msgsInterval = ns.getInt("msgsInterval"); + + distrib = (qps > 0) ? new PoissonDistribution(1000 / qps) : null; } } diff --git a/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/EchoClient.java b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/EchoClient.java index e5429e27..25f13f99 100644 --- a/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/EchoClient.java +++ b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/EchoClient.java @@ -1,15 +1,10 @@ package io.grpc.echo; -import io.grpc.Channel; -import io.grpc.ClientInterceptor; -import io.grpc.ClientInterceptors; -import io.grpc.ConnectivityState; -import io.grpc.ManagedChannel; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.grpc.echo.Echo.EchoResponse; +import com.google.api.MonitoredResource; +import io.grpc.*; import io.grpc.echo.Echo.BatchEchoRequest; import io.grpc.echo.Echo.BatchEchoResponse; +import io.grpc.echo.Echo.EchoResponse; import io.grpc.echo.Echo.EchoWithResponseSizeRequest; import io.grpc.echo.Echo.StreamEchoRequest; import io.grpc.echo.GrpcCloudapiGrpc.GrpcCloudapiBlockingStub; @@ -18,13 +13,21 @@ import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; import io.grpc.netty.shaded.io.netty.handler.ssl.util.InsecureTrustManagerFactory; import io.grpc.stub.StreamObserver; +import io.opencensus.contrib.grpc.metrics.RpcViews; +import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; +import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; +import io.opencensus.metrics.*; +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; import java.sql.Timestamp; -import java.util.ArrayList; 
-import java.util.Arrays; -import java.util.Iterator; -import java.util.List; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; import java.util.logging.Logger; import javax.net.ssl.SSLException; import org.HdrHistogram.Histogram; @@ -44,9 +47,15 @@ public class EchoClient { private int rr; - public EchoClient(Args args) throws SSLException { + private MetricRegistry metricRegistry; + private Map<String, Map<Boolean, AtomicLong>> errorCounts = new ConcurrentHashMap<>(); + private final String OTHER_STATUS = "OTHER"; + + public EchoClient(Args args) throws IOException { this.args = args; + setUpMetrics(); + channels = new ManagedChannel[args.numChannels]; asyncStubs = new GrpcCloudapiStub[args.numChannels]; rr = 0; @@ -79,11 +88,108 @@ public EchoClient(Args args) { } } + private void setUpMetrics() throws IOException { + // Configure standard gRPC client metrics + RpcViews.registerClientGrpcViews(); + + if (args.metricName.isEmpty()) { + return; + } + metricRegistry = Metrics.getMetricRegistry(); + + String hostname = "unknown"; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + logger.log(Level.WARNING, "Cannot get hostname", e); + } + + final String pid = new File("/proc/self").getCanonicalFile().getName(); + + Map<LabelKey, LabelValue> labels = new HashMap<>(); + labels.put( + LabelKey.create("prober_task", "Prober task identifier"), + LabelValue.create(args.metricTaskPrefix + pid + "@" + hostname)); + if (!args.metricProbeName.isEmpty()) { + labels.put( + LabelKey.create("probe_name", "Prober name"), LabelValue.create(args.metricProbeName)); + } + + final DerivedLongGauge presenceMetric = + metricRegistry.addDerivedLongGauge( + args.metricName + "/presence", + MetricOptions.builder() + .setDescription("Number of prober instances running") + .setUnit("1") + 
.setConstantLabels(labels) + .build()); + + final List errorKeys = new ArrayList<>(); + errorKeys.add(LabelKey.create("code", "The gRPC error code")); + errorKeys.add( + LabelKey.create("sawGfe", "Whether Google load balancer response headers were present")); + + final DerivedLongCumulative errorsMetric = + metricRegistry.addDerivedLongCumulative( + args.metricName + "/error-count", + MetricOptions.builder() + .setDescription("Number of RPC errors") + .setUnit("1") + .setConstantLabels(labels) + .setLabelKeys(errorKeys) + .build()); + + final List emptyValues = new ArrayList<>(); + presenceMetric.removeTimeSeries(emptyValues); + presenceMetric.createTimeSeries(emptyValues, this, echoClient -> 1L); + + final List reportedStatuses = new ArrayList<>(); + reportedStatuses.add(Status.Code.DEADLINE_EXCEEDED.toString()); + reportedStatuses.add(Status.Code.UNAVAILABLE.toString()); + reportedStatuses.add(Status.Code.CANCELLED.toString()); + reportedStatuses.add(Status.Code.ABORTED.toString()); + reportedStatuses.add(Status.Code.INTERNAL.toString()); + reportedStatuses.add(OTHER_STATUS); + + for (String status : reportedStatuses) { + errorCounts.putIfAbsent(status, new ConcurrentHashMap<>()); + errorCounts.get(status).putIfAbsent(false, new AtomicLong()); + errorCounts.get(status).putIfAbsent(true, new AtomicLong()); + + for (boolean sawGfe : Arrays.asList(false, true)) { + final List errorValues = new ArrayList<>(); + errorValues.add(LabelValue.create(status)); + errorValues.add(LabelValue.create(String.valueOf(sawGfe))); + + errorsMetric.removeTimeSeries(errorValues); + errorsMetric.createTimeSeries( + errorValues, this, echoClient -> echoClient.reportRpcErrors(status, sawGfe)); + } + } + try { + // Enable OpenCensus exporters to export metrics to Stackdriver Monitoring. + // Exporters use Application Default Credentials to authenticate. + // See https://developers.google.com/identity/protocols/application-default-credentials + // for more details. 
+ // The minimum reporting period for Stackdriver is 1 minute. + StackdriverStatsExporter.createAndRegister( + StackdriverStatsConfiguration.builder() + .setMonitoredResource(MonitoredResource.newBuilder().setType("global").build()) + .build()); + logger.log(Level.INFO, "Stackdriver metrics enabled!"); + } catch (IOException e) { + logger.log(Level.SEVERE, "StackdriverStatsExporter.createAndRegister()", e); + throw e; + } + } + private NettyChannelBuilder getChannelBuilder() throws SSLException { - NettyChannelBuilder builder = NettyChannelBuilder.forTarget(args.host + ":" +args.port) - .sslContext(GrpcSslContexts.forClient() - .trustManager(InsecureTrustManagerFactory.INSTANCE) - .build()); + NettyChannelBuilder builder = + NettyChannelBuilder.forTarget(args.host + ":" + args.port) + .sslContext( + GrpcSslContexts.forClient() + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .build()); if (!args.overrideService.isEmpty()) { builder.overrideAuthority(args.overrideService); } @@ -95,12 +201,16 @@ private NettyChannelBuilder getChannelBuilder() throws SSLException { return builder; } - private static void watchStateChange(ManagedChannel channel, ConnectivityState currentState, int i) { - channel.notifyWhenStateChanged(currentState, () -> { - ConnectivityState newState = channel.getState(false); - logger.fine(String.format("Channel %d state changed: %s -> %s", i, currentState, newState)); - watchStateChange(channel, newState, i); - }); + private static void watchStateChange( + ManagedChannel channel, ConnectivityState currentState, int i) { + channel.notifyWhenStateChanged( + currentState, + () -> { + ConnectivityState newState = channel.getState(false); + logger.fine( + String.format("Channel %d state changed: %s -> %s", i, currentState, newState)); + watchStateChange(channel, newState, i); + }); } private Channel createChannel(int i) throws SSLException { @@ -113,6 +223,10 @@ private Channel createChannel(int i) throws SSLException { ClientInterceptor 
interceptor = new HeaderClientInterceptor(args); channel = ClientInterceptors.intercept(channel, interceptor); } + if (MetricsClientInterceptor.needsInterception(args)) { + ClientInterceptor interceptor = new MetricsClientInterceptor(this); + channel = ClientInterceptors.intercept(channel, interceptor); + } if (i == 0) { blockingChannelCreated = System.currentTimeMillis(); } @@ -142,38 +256,44 @@ private GrpcCloudapiStub getNextAsyncStub() { return next; } - public void asyncEcho(int id, CountDownLatch latch, Histogram histogram) { - EchoWithResponseSizeRequest request = EchoWithResponseSizeRequest.newBuilder() - .setEchoMsg(generatePayload(args.reqSize * 1024)) - .setResponseSize(args.resSize) - .build(); + EchoWithResponseSizeRequest request = + EchoWithResponseSizeRequest.newBuilder() + .setEchoMsg(generatePayload(args.reqSize * 1024)) + .setResponseSize(args.resSize) + .build(); GrpcCloudapiStub stub = getNextAsyncStub(); - stub.withDeadlineAfter(args.timeout, TimeUnit.MILLISECONDS).echoWithResponseSize( - request, - new StreamObserver() { - long start = System.currentTimeMillis(); - - @Override - public void onNext(EchoResponse value) {} - - @Override - public void onError(Throwable t) { - if (latch != null) latch.countDown(); - Status status = Status.fromThrowable(t); - long elapsed = System.currentTimeMillis() - start; - logger.warning(String.format("Encountered an error in %dth echo RPC (startTime: %s, elapsed: %dms). Status: %s", id, new Timestamp(start), elapsed, status)); - t.printStackTrace(); - } - - @Override - public void onCompleted() { - long now = System.currentTimeMillis(); - if (histogram != null) histogram.recordValue(now - start); - if (latch != null) latch.countDown(); - //logger.info(String.format("%dth echo RPC succeeded. Start time: %s. 
Requests left: %d", id, new Timestamp(start), latch.getCount())); - } - }); + stub.withDeadlineAfter(args.timeout, TimeUnit.MILLISECONDS) + .echoWithResponseSize( + request, + new StreamObserver() { + long start = System.currentTimeMillis(); + + @Override + public void onNext(EchoResponse value) {} + + @Override + public void onError(Throwable t) { + if (latch != null) latch.countDown(); + Status status = Status.fromThrowable(t); + long elapsed = System.currentTimeMillis() - start; + logger.warning( + String.format( + "Encountered an error in %dth echo RPC (startTime: %s, elapsed: %dms)." + + " Status: %s", + id, new Timestamp(start), elapsed, status)); + t.printStackTrace(); + } + + @Override + public void onCompleted() { + long now = System.currentTimeMillis(); + if (histogram != null) histogram.recordValue(now - start); + if (latch != null) latch.countDown(); + // logger.info(String.format("%dth echo RPC succeeded. Start time: %s. Requests + // left: %d", id, new Timestamp(start), latch.getCount())); + } + }); } private String generatePayload(int numBytes) { @@ -187,10 +307,12 @@ private String generatePayload(int numBytes) { void streamingEcho() { long start = 0; try { - StreamEchoRequest request = StreamEchoRequest.newBuilder() - .setMessageCount(args.numRpcs) - .setMessageInterval(Math.max(args.interval, STREAMING_MIN_INTERVAL)) - .build(); + StreamEchoRequest request = + StreamEchoRequest.newBuilder() + .setMessageCount(args.numMsgs) + .setMessageInterval(args.msgsInterval) + .setResponseSizePerMsg(args.resSize) + .build(); start = System.currentTimeMillis(); Iterator iter = blockingStub.echoStream(request); for (long counter = 1; iter.hasNext(); ++counter) { @@ -209,26 +331,28 @@ void blockingEcho(Histogram histogram) throws SSLException { long start = 0; try { if (args.resType == 0) { - EchoWithResponseSizeRequest request = EchoWithResponseSizeRequest.newBuilder() - .setEchoMsg(generatePayload(args.reqSize * 1024)) - .setResponseSize(args.resSize) - 
.build(); + EchoWithResponseSizeRequest request = + EchoWithResponseSizeRequest.newBuilder() + .setEchoMsg(generatePayload(args.reqSize * 1024)) + .setResponseSize(args.resSize) + .build(); start = System.currentTimeMillis(); - if (args.recreateChannelSeconds >= 0 && blockingChannelCreated < start - args.recreateChannelSeconds * 1000) { + if (args.recreateChannelSeconds >= 0 + && blockingChannelCreated < start - args.recreateChannelSeconds * 1000) { reCreateBlockingStub(); } blockingStub .withDeadlineAfter(args.timeout, TimeUnit.MILLISECONDS) .echoWithResponseSize(request); } else { - BatchEchoRequest request = BatchEchoRequest.newBuilder() - .setEchoMsg(generatePayload(args.reqSize * 1024)) - .setResponseType(args.resType) - .build(); + BatchEchoRequest request = + BatchEchoRequest.newBuilder() + .setEchoMsg(generatePayload(args.reqSize * 1024)) + .setResponseType(args.resType) + .build(); start = System.currentTimeMillis(); - BatchEchoResponse response = blockingStub - .withDeadlineAfter(args.timeout, TimeUnit.MILLISECONDS) - .batchEcho(request); + BatchEchoResponse response = + blockingStub.withDeadlineAfter(args.timeout, TimeUnit.MILLISECONDS).batchEcho(request); List sizeList = new ArrayList<>(); for (EchoResponse r : response.getEchoResponsesList()) { sizeList.add(r.getSerializedSize()); @@ -254,10 +378,31 @@ public void echo(int id, CountDownLatch latch, Histogram histogram) throws SSLEx } if (args.async) { asyncEcho(id, latch, histogram); - //logger.info("Async request: sent rpc#: " + rpcIndex); + // logger.info("Async request: sent rpc#: " + rpcIndex); } else { blockingEcho(histogram); } - //logger.info("Sync request: sent rpc#: " + rpcIndex); + // logger.info("Sync request: sent rpc#: " + rpcIndex); + } + + private String statusToMetricLabel(Status status) { + switch (status.getCode()) { + case ABORTED: + case CANCELLED: + case DEADLINE_EXCEEDED: + case UNAVAILABLE: + case INTERNAL: + return status.getCode().toString(); + default: + return OTHER_STATUS; + 
} } + + void registerRpcError(Status status, boolean sawGfe) { + errorCounts.get(statusToMetricLabel(status)).get(sawGfe).incrementAndGet(); + } + + private long reportRpcErrors(String status, boolean sawGfe) { + return errorCounts.get(status).get(sawGfe).get(); } } diff --git a/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/MetricsClientInterceptor.java b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/MetricsClientInterceptor.java new file mode 100644 index 00000000..21b94e04 --- /dev/null +++ b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/MetricsClientInterceptor.java @@ -0,0 +1,45 @@ +package io.grpc.echo; + +import io.grpc.*; + +public class MetricsClientInterceptor implements ClientInterceptor { + final private EchoClient echoClient; + + public MetricsClientInterceptor(EchoClient echoClient) { + this.echoClient = echoClient; + } + + public static boolean needsInterception(Args args) { + return !args.metricName.isEmpty(); + } + + @Override + public <ReqT, RespT> ClientCall<ReqT, RespT> interceptCall(MethodDescriptor<ReqT, RespT> method, CallOptions callOptions, Channel next) { + return new ForwardingClientCall.SimpleForwardingClientCall<ReqT, RespT>(next.newCall(method, callOptions)) { + private boolean sawGfe = false; + + @Override + public void start(Listener<RespT> responseListener, Metadata headers) { + headers.put(Metadata.Key.of("x-return-encrypted-headers", Metadata.ASCII_STRING_MARSHALLER), "true"); + super.start(new ForwardingClientCallListener.SimpleForwardingClientCallListener<RespT>(responseListener) { + @Override + public void onHeaders(Metadata headers) { + if (!sawGfe) { + sawGfe = headers.containsKey(Metadata.Key.of("x-encrypted-debug-headers", Metadata.ASCII_STRING_MARSHALLER)); + } + super.onHeaders(headers); + } + + @Override + public void onClose(Status status, Metadata trailers) { + // Report status, saw GFE; use isOk() — Status does not define value equality + if (!status.isOk()) { + echoClient.registerRpcError(status, sawGfe); + } + super.onClose(status, trailers); + } + }, headers); + } + }; + } +} 
diff --git a/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/TestMain.java b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/TestMain.java index d7e7708b..6c7681b4 100644 --- a/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/TestMain.java +++ b/end2end-test-examples/echo-client/src/main/java/io/grpc/echo/TestMain.java @@ -76,7 +76,7 @@ private static void runTest(Args args, EchoClient client) throws SSLException, I long totalPayloadSize = 0; long startFirst = System.currentTimeMillis(); - for (int i = 0; args.stream || rpcsToDo == 0 || i < rpcsToDo; i++) { + for (int i = 0; rpcsToDo == 0 || i < rpcsToDo; i++) { if (args.async) { if (args.distrib != null) { int sample = args.distrib.sample(); @@ -111,7 +111,7 @@ private static void runTest(Args args, EchoClient client) throws SSLException, I printResult(args, totalPayloadSize, totalRecvTime, histogram); } - private static void execTask(Args argObj) throws InterruptedException, SSLException { + private static void execTask(Args argObj) throws InterruptedException, IOException { EchoClient client = new EchoClient(argObj); try { logger.info("Start warm up..."); diff --git a/end2end-test-examples/echo-client/src/main/proto/echo.proto b/end2end-test-examples/echo-client/src/main/proto/echo.proto index b9ab888d..6695fbc0 100644 --- a/end2end-test-examples/echo-client/src/main/proto/echo.proto +++ b/end2end-test-examples/echo-client/src/main/proto/echo.proto @@ -2,10 +2,10 @@ syntax = "proto3"; package e2e_service; -option java_package = "io.grpc.echo"; - import "google/api/annotations.proto"; +option java_package = "io.grpc.echo"; + // Request message type for simple echo. 
message EchoRequest { string string_to_echo = 1; @@ -33,6 +33,7 @@ message EchoWithResponseSizeRequest { message StreamEchoRequest { int32 message_count = 1; int32 message_interval = 2; + int32 response_size_per_msg = 3; } // A simple service to test and debug in an E2E environment @@ -55,7 +56,7 @@ service GrpcCloudapi { // A simple stream endpoint rpc EchoStream(StreamEchoRequest) returns (stream EchoResponse) { option (google.api.http) = { - get: "/v1/stream/{message_count}/{message_interval}" + get: "/v1/stream/{message_count}/{message_interval}/{response_size_per_msg}" }; } @@ -65,5 +66,4 @@ service GrpcCloudapi { get: "/v1/batch/{echo_msg}/{response_type}" }; } - } diff --git a/end2end-test-examples/gcs/build.gradle b/end2end-test-examples/gcs/build.gradle index 27d62e70..0d8c1ef0 100644 --- a/end2end-test-examples/gcs/build.gradle +++ b/end2end-test-examples/gcs/build.gradle @@ -9,11 +9,11 @@ version '1.0-SNAPSHOT' sourceCompatibility = 1.8 -def gcsioVersion = '2.2.3-SNAPSHOT' -def grpcVersion = '1.39.0' -def protobufVersion = '3.17.3' +def gcsioVersion = '2.2.7' +def grpcVersion = '1.47.0' +def protobufVersion = '3.19.2' def protocVersion = protobufVersion -def conscryptVersion = '2.5.1' +def conscryptVersion = '2.5.2' repositories { mavenLocal() @@ -21,22 +21,29 @@ repositories { } dependencies { + compile "io.grpc:grpc-alts:${grpcVersion}" + compile "io.grpc:grpc-api:${grpcVersion}" + compile "io.grpc:grpc-auth:${grpcVersion}" + compile "io.grpc:grpc-census:${grpcVersion}" + compile "io.grpc:grpc-context:${grpcVersion}" + compile "io.grpc:grpc-googleapis:${grpcVersion}" + compile "io.grpc:grpc-netty-shaded:${grpcVersion}" compile "io.grpc:grpc-protobuf:${grpcVersion}" + compile "io.grpc:grpc-protobuf-lite:${grpcVersion}" compile "io.grpc:grpc-stub:${grpcVersion}" compile "io.grpc:grpc-testing:${grpcVersion}" - compile "io.grpc:grpc-netty-shaded:${grpcVersion}" - compile "io.grpc:grpc-auth:${grpcVersion}" - compile "io.grpc:grpc-alts:${grpcVersion}" + 
compile "io.grpc:grpc-xds:${grpcVersion}" compile "org.conscrypt:conscrypt-openjdk-uber:${conscryptVersion}" compile "com.google.protobuf:protobuf-java-util:${protobufVersion}" + compile "com.google.api.grpc:grpc-google-cloud-storage-v2:latest.release" compile "com.google.api.grpc:proto-google-iam-v1:latest.release" compile "com.google.api.grpc:proto-google-common-protos:latest.release" - compile "net.sourceforge.argparse4j:argparse4j:0.8.1" - compile "com.google.auth:google-auth-library-oauth2-http:0.22.2" - compile "com.google.cloud:google-cloud-storage:1.113.9" + compile "com.google.auth:google-auth-library-oauth2-http:latest.release" + compile "com.google.cloud:google-cloud-storage:latest.release" compile "com.google.cloud.bigdataoss:gcsio:${gcsioVersion}" compile "com.google.cloud.bigdataoss:bigdataoss-parent:${gcsioVersion}" - compile "com.google.guava:guava:30.1.1-jre" + compile "com.google.guava:guava:31.0.1-jre" + compile "net.sourceforge.argparse4j:argparse4j:0.9.0" testCompile group: 'junit', name: 'junit', version: '4.12' } diff --git a/end2end-test-examples/gcs/logging.properties b/end2end-test-examples/gcs/logging.properties index 396eb81d..10fa920a 100644 --- a/end2end-test-examples/gcs/logging.properties +++ b/end2end-test-examples/gcs/logging.properties @@ -1,6 +1,8 @@ handlers = java.util.logging.ConsoleHandler java.util.logging.ConsoleHandler.level = ALL io.netty.handler.codec.http2.Http2FrameLogger.level = FINE +io.grpc.xds.XdsLogger.level = FINEST +io.grpc.ChannelLogger.level = FINEST .level = FINE io.grpc.netty.NettyClientHandler = ALL io.grpc.netty.NettyServerHandler = ALL diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/Args.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/Args.java index 9faee081..46660991 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/Args.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/Args.java @@ -19,13 +19,21 @@ public class Args { private static final int PORT 
= 443; final int calls; + final int warmups; final String cookie; final String host; + final String host2; final int port; final String service_path; final String access_token; - final String bkt, obj; + final String bkt; + final String obj; + final String objFormat; + final int objStart; + final int objStop; final boolean dp; + final boolean rr; + final boolean td; final int size; final int buffSize; final String method; @@ -41,21 +49,25 @@ public class Args { final int zeroCopy; // 0=auto, 1=on, -1=off Args(String[] args) throws ArgumentParserException { - ArgumentParser parser = - ArgumentParsers.newFor("GCS client test") - .build() - .defaultHelp(true) - .description("GCS client java binary"); + ArgumentParser parser = ArgumentParsers.newFor("GCS client test").build().defaultHelp(true) + .description("GCS client java binary"); parser.addArgument("--calls").type(Integer.class).setDefault(1); + parser.addArgument("--warmups").type(Integer.class).setDefault(0); parser.addArgument("--cookie").type(String.class).setDefault(""); parser.addArgument("--host").type(String.class).setDefault(DEFAULT_HOST); + parser.addArgument("--host2").type(String.class).setDefault(""); parser.addArgument("--port").type(Integer.class).setDefault(PORT); parser.addArgument("--service_path").type(String.class).setDefault("storage/v1/"); parser.addArgument("--access_token").type(String.class).setDefault(""); - parser.addArgument("--bkt").type(String.class).setDefault("gcs-grpc-team-weiranf"); - parser.addArgument("--obj").type(String.class).setDefault("a"); + parser.addArgument("--bkt").type(String.class).setDefault("gcs-grpc-team-veblush1"); + parser.addArgument("--obj").type(String.class).setDefault("1G"); + parser.addArgument("--obj_format").type(String.class).setDefault(""); + parser.addArgument("--obj_start").type(Integer.class).setDefault(0); + parser.addArgument("--obj_stop").type(Integer.class).setDefault(0); parser.addArgument("--dp").type(Boolean.class).setDefault(false); + 
parser.addArgument("--rr").type(Boolean.class).setDefault(false); + parser.addArgument("--td").type(Boolean.class).setDefault(false); parser.addArgument("--size").type(Integer.class).setDefault(0); parser.addArgument("--buffSize").type(Integer.class).setDefault(0); parser.addArgument("--method").type(String.class).setDefault(METHOD_READ); @@ -74,14 +86,21 @@ public class Args { // Read args calls = ns.getInt("calls"); + warmups = ns.getInt("warmups"); cookie = ns.getString("cookie"); host = ns.getString("host"); + host2 = ns.getString("host2"); port = ns.getInt("port"); service_path = ns.getString("service_path"); access_token = ns.getString("access_token"); bkt = ns.getString("bkt"); obj = ns.getString("obj"); + objFormat = ns.getString("obj_format"); + objStart = ns.getInt("obj_start"); + objStop = ns.getInt("obj_stop"); dp = ns.getBoolean("dp"); + rr = ns.getBoolean("rr"); + td = ns.getBoolean("td"); size = ns.getInt("size"); buffSize = ns.getInt("buffSize"); method = ns.getString("method"); diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GcsioClient.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GcsioClient.java index 16bc436c..606c5658 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GcsioClient.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GcsioClient.java @@ -10,6 +10,7 @@ import com.google.cloud.hadoop.gcsio.GoogleCloudStorageOptions; import com.google.cloud.hadoop.gcsio.GoogleCloudStorageReadOptions; import com.google.cloud.hadoop.util.AsyncWriteChannelOptions; +import com.google.common.base.Strings; import java.io.IOException; import java.net.URI; import java.nio.ByteBuffer; @@ -17,23 +18,24 @@ import java.nio.channels.SeekableByteChannel; import java.nio.channels.WritableByteChannel; import java.util.Arrays; +import java.util.Random; import java.util.concurrent.Executors; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.List; import 
java.util.logging.Logger; -import java.util.Random; public class GcsioClient { private static final Logger logger = Logger.getLogger(GcsioClient.class.getName()); private static final String SCOPE = "https://www.googleapis.com/auth/cloud-platform"; private Args args; + private ObjectResolver objectResolver; private GoogleCloudStorageOptions gcsOpts; private GoogleCredential creds; public GcsioClient(Args args, boolean grpcEnabled) throws IOException { this.args = args; + this.objectResolver = new ObjectResolver(args.obj, args.objFormat, args.objStart, args.objStop); if (args.access_token.equals("")) { this.creds = GoogleCredential.getApplicationDefault().createScoped(Arrays.asList(SCOPE)); } else if (args.access_token.equals("-")) { @@ -42,28 +44,37 @@ public GcsioClient(Args args, boolean grpcEnabled) throws IOException { logger.warning("Please provide valid --access_token"); } - this.gcsOpts = GoogleCloudStorageOptions.builder() + GoogleCloudStorageOptions.Builder optsBuilder = + GoogleCloudStorageOptions.builder() .setAppName("weiranf-app") .setGrpcEnabled(grpcEnabled) .setStorageRootUrl("https://" + args.host) .setStorageServicePath(args.service_path) - .setDirectPathPreffered(args.dp) - .setReadChannelOptions(GoogleCloudStorageReadOptions.builder().setGrpcChecksumsEnabled(args.checksum).build()) - .setWriteChannelOptions(AsyncWriteChannelOptions.builder().setGrpcChecksumsEnabled(args.checksum).build()) - .build(); + .setTrafficDirectorEnabled(args.td) + .setDirectPathPreferred(args.dp) + .setReadChannelOptions( + GoogleCloudStorageReadOptions.builder() + .setGrpcChecksumsEnabled(args.checksum) + .build()) + .setWriteChannelOptions( + AsyncWriteChannelOptions.builder().setGrpcChecksumsEnabled(args.checksum).build()); + if (!Strings.isNullOrEmpty(args.host2)) { + optsBuilder.setGrpcServerAddress(args.host2); + } + this.gcsOpts = optsBuilder.build(); } public void startCalls(ResultTable results) throws InterruptedException, IOException { if (args.threads == 1) { 
switch (args.method) { case METHOD_READ: - makeMediaRequest(results); - break; - case METHOD_WRITE: - makeWriteRequest(results, 0); + makeMediaRequest(results, /*threadId=*/ 1); break; case METHOD_RANDOM: - makeRandomMediaRequest(results); + makeRandomMediaRequest(results, /*threadId=*/ 1); + break; + case METHOD_WRITE: + makeWriteRequest(results, /*threadId=*/ 1); break; default: logger.warning("Please provide valid methods with --method"); @@ -75,29 +86,46 @@ public void startCalls(ResultTable results) throws InterruptedException, IOExcep switch (args.method) { case METHOD_READ: for (int i = 0; i < args.threads; i++) { - Runnable task = () -> { - try { - makeMediaRequest(results); - } catch (IOException e) { - e.printStackTrace(); - } - }; + int finalI = i; + Runnable task = + () -> { + try { + makeMediaRequest(results, finalI + 1); + } catch (IOException e) { + e.printStackTrace(); + } + }; + threadPoolExecutor.execute(task); + } + break; + case METHOD_RANDOM: + for (int i = 0; i < args.threads; i++) { + int finalI = i; + Runnable task = + () -> { + try { + makeRandomMediaRequest(results, finalI + 1); + } catch (IOException e) { + e.printStackTrace(); + } + }; threadPoolExecutor.execute(task); } break; - case METHOD_WRITE: + case METHOD_WRITE: for (int i = 0; i < args.threads; i++) { int finalI = i; - Runnable task = () -> { - try { - makeWriteRequest(results, finalI); - } catch (IOException | InterruptedException e) { - e.printStackTrace(); - } - }; + Runnable task = + () -> { + try { + makeWriteRequest(results, finalI + 1); + } catch (IOException | InterruptedException e) { + e.printStackTrace(); + } + }; threadPoolExecutor.execute(task); } - break; + break; default: logger.warning("Please provide valid methods with --method"); } @@ -110,76 +138,45 @@ public void startCalls(ResultTable results) throws InterruptedException, IOExcep } } - private void makeMediaRequest(ResultTable results) throws IOException { - GoogleCloudStorageFileSystem gcsfs = new 
GoogleCloudStorageFileSystem(creds, - GoogleCloudStorageFileSystemOptions.builder() - .setCloudStorageOptions(gcsOpts) - .build() - ); - - int size = args.size * 1024; - - URI uri = URI.create("gs://" + args.bkt + "/" + args.obj); - - ByteBuffer buff = ByteBuffer.allocate(size); + private void makeMediaRequest(ResultTable results, int threadId) throws IOException { + GoogleCloudStorageFileSystem gcsfs = + new GoogleCloudStorageFileSystem( + creds, + GoogleCloudStorageFileSystemOptions.builder().setCloudStorageOptions(gcsOpts).build()); + long totalSize = args.size * 1024L; + int buffSize = (args.buffSize == 0 ? 32 * 1024 : args.buffSize) * 1024; + ByteBuffer buff = ByteBuffer.allocate(buffSize); for (int i = 0; i < args.calls; i++) { - ReadableByteChannel readChannel = gcsfs.open(uri); + long receivedSize = 0; long start = System.currentTimeMillis(); - readChannel.read(buff); - long dur = System.currentTimeMillis() - start; - if (buff.remaining() > 0) { - logger.warning("Got remaining bytes: " + buff.remaining()); + String object = objectResolver.Resolve(threadId, i); + URI uri = URI.create("gs://" + args.bkt + "/" + object); + ReadableByteChannel readChannel = gcsfs.open(uri); + while (receivedSize < totalSize) { + int r = readChannel.read(buff); + if (r < 0) break; + buff.clear(); + receivedSize += r; } + long dur = System.currentTimeMillis() - start; buff.clear(); readChannel.close(); - //logger.info("time cost for reading bytes: " + dur + "ms"); - results.reportResult(dur); + results.reportResult(args.bkt, object, receivedSize, dur); } gcsfs.close(); } - private void makeWriteRequest(ResultTable results, int idx) throws IOException, InterruptedException { - GoogleCloudStorageFileSystem gcsfs = new GoogleCloudStorageFileSystem(creds, - GoogleCloudStorageFileSystemOptions.builder() - .setCloudStorageOptions(gcsOpts) - .build() - ); - - int size = args.size * 1024; - Random rd = new Random(); - byte[] randBytes = new byte[size]; - rd.nextBytes(randBytes); - - URI 
uri = URI.create("gs://" + args.bkt + "/" + args.obj + "_" + idx); - for (int i = 0; i < args.calls; i++) { - WritableByteChannel writeChannel = gcsfs.create(uri); - ByteBuffer buff = ByteBuffer.wrap(randBytes); - long start = System.currentTimeMillis(); - writeChannel.write(buff); - writeChannel.close(); - // write operation is async, need to call close() to wait for finish. - long dur = System.currentTimeMillis() - start; - results.reportResult(dur); - if (dur < 1000) { - Thread.sleep(1000 - dur); // Avoid limit of 1 qps for updating the same object - } - } - - gcsfs.close(); - } - - private void makeRandomMediaRequest(ResultTable results) throws IOException { - GoogleCloudStorageFileSystem gcsfs = new GoogleCloudStorageFileSystem(creds, - GoogleCloudStorageFileSystemOptions.builder() - .setCloudStorageOptions(gcsOpts) - .build() - ); + private void makeRandomMediaRequest(ResultTable results, int threadId) throws IOException { + GoogleCloudStorageFileSystem gcsfs = + new GoogleCloudStorageFileSystem( + creds, + GoogleCloudStorageFileSystemOptions.builder().setCloudStorageOptions(gcsOpts).build()); Random r = new Random(); - URI uri = URI.create("gs://" + args.bkt + "/" + args.obj); - + String object = objectResolver.Resolve(threadId, /*objectId=*/ 0); + URI uri = URI.create("gs://" + args.bkt + "/" + object); GoogleCloudStorageReadOptions readOpts = gcsOpts.getReadChannelOptions(); SeekableByteChannel reader = gcsfs.open(uri, readOpts); @@ -193,11 +190,38 @@ private void makeRandomMediaRequest(ResultTable results) throws IOException { if (buff.remaining() > 0) { logger.warning("Got remaining bytes: " + buff.remaining()); } - logger.info("time cost for reading bytes: " + dur + "ms"); - results.reportResult(dur); + results.reportResult(args.bkt, object, args.buffSize * 1024, dur); } reader.close(); gcsfs.close(); } + + private void makeWriteRequest(ResultTable results, int threadId) + throws IOException, InterruptedException { + GoogleCloudStorageFileSystem gcsfs = 
+ new GoogleCloudStorageFileSystem( + creds, + GoogleCloudStorageFileSystemOptions.builder().setCloudStorageOptions(gcsOpts).build()); + + int size = args.size * 1024; + Random rd = new Random(); + byte[] randBytes = new byte[size]; + rd.nextBytes(randBytes); + + for (int i = 0; i < args.calls; i++) { + long start = System.currentTimeMillis(); + String object = objectResolver.Resolve(threadId, i); + URI uri = URI.create("gs://" + args.bkt + "/" + object); + WritableByteChannel writeChannel = gcsfs.create(uri); + ByteBuffer buff = ByteBuffer.wrap(randBytes); + writeChannel.write(buff); + // write operation is async, need to call close() to wait for finish. + writeChannel.close(); + long dur = System.currentTimeMillis() - start; + results.reportResult(args.bkt, object, size, dur); + } + + gcsfs.close(); + } } diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GrpcClient.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GrpcClient.java index 7d1c1be2..74323894 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GrpcClient.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/GrpcClient.java @@ -3,61 +3,70 @@ import static io.grpc.gcs.Args.METHOD_RANDOM; import static io.grpc.gcs.Args.METHOD_READ; import static io.grpc.gcs.Args.METHOD_WRITE; -import static io.grpc.gcs.Args.DEFAULT_HOST; import com.google.auth.oauth2.GoogleCredentials; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import com.google.google.storage.v1.ChecksummedData; -import com.google.google.storage.v1.GetObjectMediaRequest; -import com.google.google.storage.v1.GetObjectMediaResponse; -import com.google.google.storage.v1.InsertObjectRequest; -import com.google.google.storage.v1.InsertObjectSpec; -import com.google.google.storage.v1.Object; -import com.google.google.storage.v1.ServiceConstants.Values; -import com.google.google.storage.v1.StorageGrpc; import com.google.protobuf.ByteString; +import 
com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.ServiceConstants.Values; +import com.google.storage.v2.StorageGrpc; +import com.google.storage.v2.StorageGrpc.StorageBlockingStub; +import com.google.storage.v2.WriteObjectRequest; +import com.google.storage.v2.WriteObjectResponse; +import com.google.storage.v2.WriteObjectSpec; +import io.grpc.Grpc; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; import io.grpc.Status; import io.grpc.alts.ComputeEngineChannelBuilder; +import io.grpc.alts.GoogleDefaultChannelCredentials; import io.grpc.auth.MoreCallCredentials; import io.grpc.netty.shaded.io.grpc.netty.NettyChannelBuilder; import io.grpc.stub.StreamObserver; -import io.grpc.MethodDescriptor; import java.io.IOException; -import java.lang.reflect.Field; import java.io.InputStream; +import java.lang.reflect.Field; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import java.util.Iterator; -import java.util.List; import java.util.logging.Logger; -import java.util.NoSuchElementException; -import java.util.Random; public class GrpcClient { private static final Logger logger = Logger.getLogger(GrpcClient.class.getName()); // ZeroCopy version of GetObjectMedia Method - private static final ZeroCopyMessageMarshaller getObjectMediaResponseMarshaller = - new ZeroCopyMessageMarshaller(GetObjectMediaResponse.getDefaultInstance()); - private static final MethodDescriptor getObjectMediaMethod = - StorageGrpc.getGetObjectMediaMethod() - .toBuilder().setResponseMarshaller(getObjectMediaResponseMarshaller) - .build(); + private static final 
ZeroCopyMessageMarshaller ReadObjectResponseMarshaller = + new ZeroCopyMessageMarshaller(ReadObjectResponse.getDefaultInstance()); + private static final MethodDescriptor readObjectMethod = + StorageGrpc.getReadObjectMethod().toBuilder() + .setResponseMarshaller(ReadObjectResponseMarshaller) + .build(); private final boolean useZeroCopy; - private ManagedChannel[] channels; private Args args; + private ObjectResolver objectResolver; + private ManagedChannel[] channels; private GoogleCredentials creds; private static final String SCOPE = "https://www.googleapis.com/auth/cloud-platform"; + private static final String V2_BUCKET_NAME_PREFIX = "projects/_/buckets/"; + + private static String toV2BucketName(String v1BucketName) { + return V2_BUCKET_NAME_PREFIX + v1BucketName; + } public GrpcClient(Args args) throws IOException { this.args = args; + this.objectResolver = new ObjectResolver(args.obj, args.objFormat, args.objStart, args.objStop); if (args.access_token.equals("")) { this.creds = GoogleCredentials.getApplicationDefault().createScoped(SCOPE); } else if (args.access_token.equals("-")) { @@ -67,18 +76,26 @@ public GrpcClient(Args args) throws IOException { } ManagedChannelBuilder channelBuilder; - if (args.dp) { - ComputeEngineChannelBuilder gceChannelBuilder = ComputeEngineChannelBuilder.forAddress(args.host, args.port); - - ImmutableMap pickFirstStrategy = - ImmutableMap.of("pick_first", ImmutableMap.of()); + if (args.td) { + // TODO(veblush): Remove experimental suffix once this code is proven stable. + String target = "google-c2p-experimental:///" + args.host; + channelBuilder = + Grpc.newChannelBuilder(target, GoogleDefaultChannelCredentials.newBuilder().build()); + } else if (args.dp) { + ComputeEngineChannelBuilder gceChannelBuilder = + ComputeEngineChannelBuilder.forAddress(args.host, args.port); + + String policy = args.rr ? 
"round_robin" : "pick_first"; + ImmutableMap policyStrategy = + ImmutableMap.of(policy, ImmutableMap.of()); ImmutableMap childPolicy = - ImmutableMap.of("childPolicy", ImmutableList.of(pickFirstStrategy)); + ImmutableMap.of( + "childPolicy", ImmutableList.of(policyStrategy)); ImmutableMap grpcLbPolicy = ImmutableMap.of("grpclb", childPolicy); ImmutableMap loadBalancingConfig = - ImmutableMap.of("loadBalancingConfig", ImmutableList.of(grpcLbPolicy)); - + ImmutableMap.of( + "loadBalancingConfig", ImmutableList.of(grpcLbPolicy)); gceChannelBuilder.defaultServiceConfig(loadBalancingConfig); if (args.flowControlWindow > 0) { @@ -87,7 +104,8 @@ public GrpcClient(Args args) throws IOException { delegateField = ComputeEngineChannelBuilder.class.getDeclaredField("delegate"); delegateField.setAccessible(true); - NettyChannelBuilder delegateBuilder = (NettyChannelBuilder) delegateField.get(gceChannelBuilder); + NettyChannelBuilder delegateBuilder = + (NettyChannelBuilder) delegateField.get(gceChannelBuilder); delegateBuilder.flowControlWindow(args.flowControlWindow); } catch (NoSuchFieldException | IllegalAccessException e) { e.printStackTrace(); @@ -96,7 +114,8 @@ public GrpcClient(Args args) throws IOException { } channelBuilder = gceChannelBuilder; } else { - NettyChannelBuilder nettyChannelBuilder = NettyChannelBuilder.forAddress(args.host, args.port); + NettyChannelBuilder nettyChannelBuilder = + NettyChannelBuilder.forAddress(args.host, args.port); if (args.flowControlWindow > 0) { nettyChannelBuilder.flowControlWindow(args.flowControlWindow); } @@ -105,8 +124,17 @@ public GrpcClient(Args args) throws IOException { // Create the same number of channels as the number of threads. this.channels = new ManagedChannel[args.threads]; - for (int i = 0; i < args.threads; i++) { - channels[i] = channelBuilder.build(); + if (args.rr) { + // For round-robin, all threads share the same channel. 
+ ManagedChannel singleChannel = channelBuilder.build(); + for (int i = 0; i < args.threads; i++) { + channels[i] = singleChannel; + } + } else { + // For pick-first, each thread has its own unique channel. + for (int i = 0; i < args.threads; i++) { + channels[i] = channelBuilder.build(); + } } if (args.zeroCopy == 0) { @@ -123,13 +151,13 @@ public void startCalls(ResultTable results) throws InterruptedException { try { switch (args.method) { case METHOD_READ: - makeMediaRequest(channel, results); + makeReadObjectRequest(channel, results, /*threadId=*/ 1); break; case METHOD_RANDOM: - makeRandomMediaRequest(channel, results); + makeRandomReadRequest(channel, results, /*threadId=*/ 1); break; case METHOD_WRITE: - makeInsertRequest(channel, results, 0); + makeInsertRequest(channel, results, /*threadId=*/ 1); break; default: logger.warning("Please provide valid methods with --method"); @@ -145,27 +173,30 @@ public void startCalls(ResultTable results) throws InterruptedException { case METHOD_READ: for (int i = 0; i < args.threads; i++) { int finalI = i; - Runnable task = () -> makeMediaRequest(this.channels[finalI], results); + Runnable task = + () -> makeReadObjectRequest(this.channels[finalI], results, finalI + 1); threadPoolExecutor.execute(task); } break; case METHOD_RANDOM: for (int i = 0; i < args.threads; i++) { int finalI = i; - Runnable task = () -> makeRandomMediaRequest(this.channels[finalI], results); + Runnable task = + () -> makeRandomReadRequest(this.channels[finalI], results, finalI + 1); threadPoolExecutor.execute(task); } break; case METHOD_WRITE: for (int i = 0; i < args.threads; i++) { int finalI = i; - Runnable task = () -> { - try { - makeInsertRequest(this.channels[finalI], results, finalI); - } catch (InterruptedException e) { - e.printStackTrace(); - } - }; + Runnable task = + () -> { + try { + makeInsertRequest(this.channels[finalI], results, finalI + 1); + } catch (InterruptedException e) { + e.printStackTrace(); + } + }; 
threadPoolExecutor.execute(task); } break; @@ -181,139 +212,138 @@ public void startCalls(ResultTable results) throws InterruptedException { } } - private void makeMediaRequest(ManagedChannel channel, ResultTable results) { - StorageGrpc.StorageBlockingStub blockingStub = - StorageGrpc.newBlockingStub(channel); + private void makeReadObjectRequest(ManagedChannel channel, ResultTable results, int threadId) { + StorageGrpc.StorageBlockingStub blockingStub = StorageGrpc.newBlockingStub(channel); if (creds != null) { - blockingStub = blockingStub.withCallCredentials( - MoreCallCredentials.from(creds)); + blockingStub = blockingStub.withCallCredentials(MoreCallCredentials.from(creds)); } - GetObjectMediaRequest mediaRequest = - GetObjectMediaRequest.newBuilder().setBucket(args.bkt).setObject(args.obj).build(); - byte[] scratch = new byte[4*1024*1024]; + byte[] scratch = new byte[4 * 1024 * 1024]; for (int i = 0; i < args.calls; i++) { + String object = objectResolver.Resolve(threadId, i); + ReadObjectRequest readRequest = + ReadObjectRequest.newBuilder() + .setBucket(toV2BucketName(args.bkt)) + .setObject(object) + .build(); + long start = System.currentTimeMillis(); - Iterator resIterator; + long totalBytes = 0; + Iterator resIterator; if (useZeroCopy) { - resIterator = io.grpc.stub.ClientCalls.blockingServerStreamingCall( - blockingStub.getChannel(), getObjectMediaMethod, blockingStub.getCallOptions(), mediaRequest); + resIterator = + io.grpc.stub.ClientCalls.blockingServerStreamingCall( + blockingStub.getChannel(), + readObjectMethod, + blockingStub.getCallOptions(), + readRequest); } else { - resIterator = blockingStub.getObjectMedia(mediaRequest); + resIterator = blockingStub.readObject(readRequest); } try { while (true) { - GetObjectMediaResponse res = resIterator.next(); - InputStream stream = getObjectMediaResponseMarshaller.popStream(res); - // Just copy to scratch memory to ensure its data is consumed. 
- ByteString content = res.getChecksummedData().getContent(); - content.copyTo(scratch, 0); - // When zero-copy mashaller is used, the stream that backs GetObjectMediaResponse + ReadObjectResponse res = resIterator.next(); + // When zero-copy mashaller is used, the stream that backs ReadObjectResponse // should be closed when the mssage is no longed needed so that all buffers in the - // stream can be reclaimed. - if (stream != null) { - try { - stream.close(); - } catch (IOException e) { - throw new RuntimeException(e); + // stream can be reclaimed. If zero-copy is not used, stream will be null. + InputStream stream = ReadObjectResponseMarshaller.popStream(res); + try { + // Just copy to scratch memory to ensure its data is consumed. + ByteString content = res.getChecksummedData().getContent(); + totalBytes += content.size(); + content.copyTo(scratch, 0); + } finally { + if (stream != null) { + try { + stream.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } } } } } catch (NoSuchElementException e) { } long dur = System.currentTimeMillis() - start; - results.reportResult(dur); + results.reportResult(args.bkt, object, totalBytes, dur); } } - private void makeRandomMediaRequest(ManagedChannel channel, ResultTable results) { - StorageGrpc.StorageBlockingStub blockingStub = - StorageGrpc.newBlockingStub(channel); + private void makeRandomReadRequest(ManagedChannel channel, ResultTable results, int threadId) { + StorageBlockingStub blockingStub = StorageGrpc.newBlockingStub(channel); if (creds != null) { - blockingStub = blockingStub.withCallCredentials( - MoreCallCredentials.from(creds)); + blockingStub = blockingStub.withCallCredentials(MoreCallCredentials.from(creds)); } - GetObjectMediaRequest.Builder reqBuilder = - GetObjectMediaRequest.newBuilder().setBucket(args.bkt).setObject(args.obj); + String object = objectResolver.Resolve(threadId, /*objectId=*/ 0); + ReadObjectRequest.Builder reqBuilder = + 
ReadObjectRequest.newBuilder().setBucket(toV2BucketName(args.bkt)).setObject(object); Random r = new Random(); long buffSize = args.buffSize * 1024; - + byte[] scratch = new byte[4 * 1024 * 1024]; for (int i = 0; i < args.calls; i++) { long offset = (long) r.nextInt(args.size - args.buffSize) * 1024; reqBuilder.setReadOffset(offset); reqBuilder.setReadLimit(buffSize); - GetObjectMediaRequest req = reqBuilder.build(); + ReadObjectRequest req = reqBuilder.build(); long start = System.currentTimeMillis(); - Iterator resIterator = blockingStub.getObjectMedia(req); - int itr = 0; - long bytesRead = 0; + Iterator resIterator = blockingStub.readObject(req); while (resIterator.hasNext()) { - itr++; - GetObjectMediaResponse res = resIterator.next(); - bytesRead += res.getChecksummedData().getSerializedSize(); - //logger.info("result: " + res.getChecksummedData()); + ReadObjectResponse res = resIterator.next(); + ByteString content = res.getChecksummedData().getContent(); + content.copyTo(scratch, 0); } long dur = System.currentTimeMillis() - start; - logger.info("time cost for getObjectMedia: " + dur + "ms"); - logger.info("total iterations: " + itr); - logger.info("start pos: " + offset + ", read lenth: " + buffSize + ", total KB read: " + bytesRead / 1024); - results.reportResult(dur); + results.reportResult(args.bkt, object, buffSize, dur); } } - private void makeInsertRequest(ManagedChannel channel, ResultTable results, int idx) throws InterruptedException { + private void makeInsertRequest(ManagedChannel channel, ResultTable results, int threadId) + throws InterruptedException { StorageGrpc.StorageStub asyncStub = StorageGrpc.newStub(channel); if (creds != null) { - asyncStub = asyncStub.withCallCredentials( - MoreCallCredentials.from(creds)); + asyncStub = asyncStub.withCallCredentials(MoreCallCredentials.from(creds)); } int totalBytes = args.size * 1024; byte[] data = new byte[totalBytes]; for (int i = 0; i < args.calls; i++) { + String obj = 
objectResolver.Resolve(threadId, i); + int offset = 0; boolean isFirst = true; boolean isLast = false; final CountDownLatch finishLatch = new CountDownLatch(1); - StreamObserver responseObserver = new StreamObserver() { - long start = System.currentTimeMillis(); + StreamObserver responseObserver = + new StreamObserver() { + long start = System.currentTimeMillis(); - @Override - public void onNext(Object value) { - } + @Override + public void onNext(WriteObjectResponse value) {} - @Override - public void onError(Throwable t) { - logger.warning("InsertObject failed with: " + Status.fromThrowable(t)); - finishLatch.countDown(); - } + @Override + public void onError(Throwable t) { + logger.warning("InsertObject failed with: " + Status.fromThrowable(t)); + finishLatch.countDown(); + } - @Override - public void onCompleted() { - long dur = System.currentTimeMillis() - start; - results.reportResult(dur); - if (dur < 1000) { - try { - Thread.sleep(1000 - dur); // Avoid limit of 1 qps for updating the same object - } catch (InterruptedException e) { - e.printStackTrace(); + @Override + public void onCompleted() { + long dur = System.currentTimeMillis() - start; + results.reportResult(args.bkt, obj, totalBytes, dur); finishLatch.countDown(); } - } - finishLatch.countDown(); - } - }; + }; - StreamObserver requestObserver = asyncStub.insertObject(responseObserver); + StreamObserver requestObserver = asyncStub.writeObject(responseObserver); while (offset < totalBytes) { int add; if (offset + Values.MAX_WRITE_CHUNK_BYTES_VALUE <= totalBytes) { - add = Values.MAX_WRITE_CHUNK_BYTES_VALUE; + add = Values.MAX_WRITE_CHUNK_BYTES_VALUE; } else { add = totalBytes - offset; } @@ -321,7 +351,8 @@ public void onCompleted() { isLast = true; } - InsertObjectRequest req = getInsertRequest(isFirst, isLast, offset, ByteString.copyFrom(data, offset, add), idx); + WriteObjectRequest req = + getWriteRequest(isFirst, isLast, offset, ByteString.copyFrom(data, offset, add), obj); 
requestObserver.onNext(req); if (finishLatch.getCount() == 0) { logger.warning("Stream completed before finishing sending requests"); @@ -336,17 +367,16 @@ public void onCompleted() { logger.warning("insertObject cannot finish within 20 minutes"); } } - } - private InsertObjectRequest getInsertRequest(boolean first, boolean last, int offset, ByteString bytes, int idx) { - InsertObjectRequest.Builder builder = InsertObjectRequest.newBuilder(); + private WriteObjectRequest getWriteRequest( + boolean first, boolean last, int offset, ByteString bytes, String obj) { + WriteObjectRequest.Builder builder = WriteObjectRequest.newBuilder(); if (first) { - builder.setInsertObjectSpec( - InsertObjectSpec.newBuilder().setResource( - Object.newBuilder().setBucket(args.bkt).setName(args.obj + "_" + idx) - ).build() - ); + builder.setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource(Object.newBuilder().setBucket(toV2BucketName(args.bkt)).setName(obj)) + .build()); } builder.setChecksummedData(ChecksummedData.newBuilder().setContent(bytes).build()); diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/HttpClient.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/HttpClient.java index f3334373..118f7bf1 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/HttpClient.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/HttpClient.java @@ -3,33 +3,30 @@ import static io.grpc.gcs.Args.METHOD_RANDOM; import static io.grpc.gcs.Args.METHOD_READ; import static io.grpc.gcs.Args.METHOD_WRITE; -import static java.nio.charset.StandardCharsets.UTF_8; import com.google.cloud.ReadChannel; import com.google.cloud.storage.BlobId; import com.google.cloud.storage.BlobInfo; -import com.google.cloud.storage.Bucket; -import com.google.cloud.storage.StorageOptions; import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.ArrayList; -import 
java.util.List; import java.util.Random; import java.util.concurrent.Executors; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; - public class HttpClient { private static final Logger logger = Logger.getLogger(HttpClient.class.getName()); private Args args; + private ObjectResolver objectResolver; private Storage client; public HttpClient(Args args) { this.args = args; + this.objectResolver = new ObjectResolver(args.obj, args.objFormat, args.objStart, args.objStop); this.client = StorageOptions.getDefaultInstance().getService(); } @@ -37,13 +34,13 @@ public void startCalls(ResultTable results) throws InterruptedException, IOExcep if (args.threads == 0) { switch (args.method) { case METHOD_READ: - makeMediaRequest(results); + makeMediaRequest(results, /*threadId=*/ 1); break; case METHOD_RANDOM: - makeRandomMediaRequest(results); + makeRandomMediaRequest(results, /*threadId=*/ 1); break; case METHOD_WRITE: - makeInsertRequest(results); + makeInsertRequest(results, /*threadId=*/ 1); break; default: logger.warning("Please provide valid methods with --method"); @@ -54,7 +51,29 @@ public void startCalls(ResultTable results) throws InterruptedException, IOExcep switch (args.method) { case METHOD_READ: for (int i = 0; i < args.threads; i++) { - Runnable task = () -> makeMediaRequest(results); + int finalI = i; + Runnable task = () -> makeMediaRequest(results, finalI + 1); + threadPoolExecutor.execute(task); + } + break; + case METHOD_RANDOM: + for (int i = 0; i < args.threads; i++) { + int finalI = i; + Runnable task = + () -> { + try { + makeRandomMediaRequest(results, finalI + 1); + } catch (IOException e) { + e.printStackTrace(); + } + }; + threadPoolExecutor.execute(task); + } + break; + case METHOD_WRITE: + for (int i = 0; i < args.threads; i++) { + int finalI = i; + Runnable task = () -> makeInsertRequest(results, finalI + 1); threadPoolExecutor.execute(task); } break; @@ -68,24 +87,26 @@ public 
void startCalls(ResultTable results) throws InterruptedException, IOExcep } } - public void makeMediaRequest(ResultTable results) { - BlobId blobId = BlobId.of(args.bkt, args.obj); + public void makeMediaRequest(ResultTable results, int threadId) { for (int i = 0; i < args.calls; i++) { + String object = objectResolver.Resolve(threadId, i); + BlobId blobId = BlobId.of(args.bkt, object); long start = System.currentTimeMillis(); byte[] content = client.readAllBytes(blobId); - //String contentString = new String(content, UTF_8); - //logger.info("contentString: " + contentString); + // String contentString = new String(content, UTF_8); + // logger.info("contentString: " + contentString); long dur = System.currentTimeMillis() - start; - //logger.info("time cost for readAllBytes: " + dur + "ms"); - //logger.info("total KB received: " + content.length/1024); - results.reportResult(dur); + // logger.info("time cost for readAllBytes: " + dur + "ms"); + // logger.info("total KB received: " + content.length/1024); + results.reportResult(args.bkt, object, content.length, dur); } } - public void makeRandomMediaRequest(ResultTable results) throws IOException { + public void makeRandomMediaRequest(ResultTable results, int threadId) throws IOException { Random r = new Random(); - BlobId blobId = BlobId.of(args.bkt, args.obj); + String object = objectResolver.Resolve(threadId, /*objectId=*/ 0); + BlobId blobId = BlobId.of(args.bkt, object); ReadChannel reader = client.reader(blobId); for (int i = 0; i < args.calls; i++) { long offset = (long) r.nextInt(args.size - args.buffSize) * 1024; @@ -98,27 +119,25 @@ public void makeRandomMediaRequest(ResultTable results) throws IOException { if (buff.remaining() > 0) { logger.warning("Got remaining bytes: " + buff.remaining()); } - logger.info("total KB received: " + buff.position()/1024); + logger.info("total KB received: " + buff.position() / 1024); logger.info("time cost for random reading: " + dur + "ms"); buff.clear(); - 
results.reportResult(dur); + results.reportResult(args.bkt, object, args.buffSize * 1024, dur); } reader.close(); } - public void makeInsertRequest(ResultTable results) { + public void makeInsertRequest(ResultTable results, int threadId) { int totalBytes = args.size * 1024; byte[] data = new byte[totalBytes]; - BlobId blobId = BlobId.of(args.bkt, args.obj); for (int i = 0; i < args.calls; i++) { + String object = objectResolver.Resolve(threadId, i); + BlobId blobId = BlobId.of(args.bkt, object); long start = System.currentTimeMillis(); - client.create( - BlobInfo.newBuilder(blobId).build(), - data - ); + client.create(BlobInfo.newBuilder(blobId).build(), data); long dur = System.currentTimeMillis() - start; logger.info("time cost for creating blob: " + dur + "ms"); - results.reportResult(dur); + results.reportResult(args.bkt, object, totalBytes, dur); } } } diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ObjectResolver.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ObjectResolver.java new file mode 100644 index 00000000..f7cdba26 --- /dev/null +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ObjectResolver.java @@ -0,0 +1,24 @@ +package io.grpc.gcs; + +public class ObjectResolver { + public ObjectResolver(String object, String objectFormat, int objectStart, int objectStop) { + this.object = object; + this.objectFormat = objectFormat; + this.objectStart = objectStart; + this.objectStop = objectStop; + } + + public String Resolve(int threadId, int objectId) { + if (objectFormat == null || objectFormat.equals("")) { + return object; + } + int oid = objectStop == 0 ? 
objectId : (objectId % (objectStop - objectStart)) + objectStart; + return objectFormat.replaceAll("\\{t\\}", String.valueOf(threadId)).replaceAll("\\{o\\}", + String.valueOf(oid)); + } + + private String object; + private String objectFormat; + private int objectStart; + private int objectStop; +} diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ResultTable.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ResultTable.java index e85c11b8..b830bff3 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ResultTable.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ResultTable.java @@ -1,26 +1,29 @@ package io.grpc.gcs; import com.google.gson.Gson; -import java.io.FileInputStream; import java.io.FileWriter; import java.io.IOException; -import java.security.Security; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.List; -import java.util.logging.LogManager; -import java.util.logging.Logger; -import org.conscrypt.Conscrypt; - public class ResultTable { private Args args; - private Long startTime; - private Long endTime; - private List results; + private int warmupCount; + private long startTime; + private long endTime; + + private static class Result { + public long startTime; + public long duration; + } + + private List results; public ResultTable(Args args) { this.args = args; + this.warmupCount = args.warmups * args.threads; this.results = new ArrayList<>(); } @@ -32,18 +35,22 @@ public void start() { public void stop() { synchronized (this) { - endTime = System.currentTimeMillis(); + endTime = System.currentTimeMillis(); } } - public void reportResult(long duration) { + public void reportResult(String bucket, String object, long bytes, long duration) { int ord; synchronized (this) { - results.add(duration); + Result result = new Result(); + result.startTime = System.currentTimeMillis() - duration; + result.duration = duration; + results.add(result); ord = 
results.size(); } if (this.args.verboseResult) { - System.out.format("### Result: ord=%d elapsed=%d\n", ord, duration); + System.out.format("### Result: ord=%d bucket=%s object=%s bytes=%d elapsed=%d%s\n", ord, + bucket, object, bytes, duration, results.size() <= warmupCount ? " [WARM-UP]" : ""); System.out.flush(); } } @@ -55,49 +62,46 @@ private static class BenchmarkResult { public void printResult() throws IOException { synchronized (this) { - if (results.size() == 0) return; - Collections.sort(results); + results.subList(0, Math.min(results.size(), warmupCount)).clear(); + + if (results.size() == 0) + return; + int n = results.size(); - double totalSeconds = 0; - long totalDur = endTime - startTime; - for (Long ms : results) { - totalSeconds += ms / 1000.0; - } + long totalDur = + results.get(n - 1).startTime + results.get(n - 1).duration - results.get(0).startTime; + double totalSec = totalDur / 1000.0; + + Collections.sort(results, new Comparator() { + @Override + public int compare(Result o1, Result o2) { + return Long.compare(o1.duration, o2.duration); + } + }); Gson gson = new Gson(); BenchmarkResult benchmarkResult = new BenchmarkResult(); - benchmarkResult.min = results.get(0); - benchmarkResult.p50 = results.get((int) (n * 0.05)); - benchmarkResult.p90 = results.get((int) (n * 0.90)); - benchmarkResult.p99 = results.get((int) (n * 0.99)); - benchmarkResult.p999 = results.get((int) (n * 0.999)); - benchmarkResult.qps = n / totalSeconds; + benchmarkResult.min = results.get(0).duration; + benchmarkResult.p50 = results.get((int) (n * 0.50)).duration; + benchmarkResult.p90 = results.get((int) (n * 0.90)).duration; + benchmarkResult.p99 = results.get((int) (n * 0.99)).duration; + benchmarkResult.p999 = results.get((int) (n * 0.999)).duration; + benchmarkResult.qps = n / totalSec; if (!args.latencyFilename.isEmpty()) { FileWriter writer = new FileWriter(args.latencyFilename); gson.toJson(benchmarkResult, writer); writer.close(); }
System.out.println(String.format( - "****** Test Results [client: %s, method: %s, size: %d, threads: %d, dp: %s, calls: %d]: \n" - + "\t\tMin\tp5\tp10\tp25\tp50\tp75\tp90\tp99\tMax\tTotal\n" + "****** Test Results [client: %s, method: %s, size: %d, threads: %d, dp: %s, calls:" + + " %d, qps: %f]: \n" + "\t\tMin\tp5\tp10\tp25\tp50\tp75\tp90\tp99\tMax\tTotal\n" + " Time(ms)\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", - args.client, - args.method, - args.size, - args.threads, - args.dp, - n, - results.get(0), - results.get((int) (n * 0.05)), - results.get((int) (n * 0.1)), - results.get((int) (n * 0.25)), - results.get((int) (n * 0.50)), - results.get((int) (n * 0.75)), - results.get((int) (n * 0.90)), - results.get((int) (n * 0.99)), - results.get(n - 1), - totalDur - )); - } + args.client, args.method, args.size, args.threads, args.dp, n, n / totalSec, + results.get(0).duration, results.get((int) (n * 0.05)).duration, + results.get((int) (n * 0.1)).duration, results.get((int) (n * 0.25)).duration, + results.get((int) (n * 0.50)).duration, results.get((int) (n * 0.75)).duration, + results.get((int) (n * 0.90)).duration, results.get((int) (n * 0.99)).duration, + results.get(n - 1).duration, totalDur)); + } } } diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/TestMain.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/TestMain.java index 73e9df1e..b5122087 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/TestMain.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/TestMain.java @@ -22,7 +22,8 @@ public static void main(String[] args) throws Exception { if (a.conscrypt) { Security.insertProviderAt(Conscrypt.newProvider(), 1); } else if (a.conscrypt_notm) { - Security.insertProviderAt(Conscrypt.newProviderBuilder().provideTrustManager(false).build(), 1); + Security.insertProviderAt( + Conscrypt.newProviderBuilder().provideTrustManager(false).build(), 1); } ResultTable results = new ResultTable(a); long start = 0; diff 
--git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyMessageMarshaller.java b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyMessageMarshaller.java index 82d403b7..fa15fd50 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyMessageMarshaller.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyMessageMarshaller.java @@ -10,11 +10,10 @@ import io.grpc.HasByteBuffer; import io.grpc.KnownLength; import io.grpc.MethodDescriptor.PrototypeMarshaller; -import io.grpc.protobuf.lite.ProtoLiteUtils; import io.grpc.Status; -import io.grpc.MethodDescriptor; -import java.io.InputStream; +import io.grpc.protobuf.lite.ProtoLiteUtils; import java.io.IOException; +import java.io.InputStream; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; @@ -22,76 +21,86 @@ import java.util.List; import java.util.Map; -// Custom gRPC marshaller to use zero memory copy feature of gRPC when deserializing messages. -// This achieves zero-copy by deserializing proto messages pointing to the buffers in the input -// stream to avoid memory copy so stream should live as long as the message can be referenced. -// Hence, it exposes the input stream to applications (through popStream) and applications are -// responsible to close it when it's no longer needed. Otherwise, it'd cause memory leak. +/** + * Custom gRPC marshaller to use zero memory copy feature of gRPC when deserializing messages. This + * achieves zero-copy by deserializing proto messages pointing to the buffers in the input stream to + * avoid memory copy so stream should live as long as the message can be referenced. Hence, it + * exposes the input stream to applications (through popStream) and applications are responsible to + * close it when it's no longer needed. Otherwise, it'd cause memory leak. 
+ */ class ZeroCopyMessageMarshaller implements PrototypeMarshaller { - private Map unclosedStreams = Collections.synchronizedMap(new IdentityHashMap<>()); + private Map unclosedStreams = + Collections.synchronizedMap(new IdentityHashMap<>()); private final Parser parser; - private final PrototypeMarshaller baseMarshaller; + private final PrototypeMarshaller marshaller; ZeroCopyMessageMarshaller(T defaultInstance) { parser = (Parser) defaultInstance.getParserForType(); - baseMarshaller = (PrototypeMarshaller) ProtoLiteUtils.marshaller(defaultInstance); + marshaller = (PrototypeMarshaller) ProtoLiteUtils.marshaller(defaultInstance); } @Override public Class getMessageClass() { - return baseMarshaller.getMessageClass(); + return marshaller.getMessageClass(); } @Override public T getMessagePrototype() { - return baseMarshaller.getMessagePrototype(); + return marshaller.getMessagePrototype(); } @Override public InputStream stream(T value) { - return baseMarshaller.stream(value); + return marshaller.stream(value); } @Override public T parse(InputStream stream) { - CodedInputStream cis = null; try { - if (stream instanceof KnownLength) { + if (stream instanceof KnownLength + && stream instanceof Detachable + && stream instanceof HasByteBuffer + && ((HasByteBuffer) stream).byteBufferSupported()) { int size = stream.available(); - if (stream instanceof Detachable && ((HasByteBuffer) stream).byteBufferSupported()) { - // Stream is now detached here and should be closed later. - stream = ((Detachable) stream).detach(); + // Stream is now detached here and should be closed later. + InputStream detachedStream = ((Detachable) stream).detach(); + try { // This mark call is to keep buffer while traversing buffers using skip. 
- stream.mark(size); + detachedStream.mark(size); List byteStrings = new ArrayList<>(); - while (stream.available() != 0) { - ByteBuffer buffer = ((HasByteBuffer) stream).getByteBuffer(); + while (detachedStream.available() != 0) { + ByteBuffer buffer = ((HasByteBuffer) detachedStream).getByteBuffer(); byteStrings.add(UnsafeByteOperations.unsafeWrap(buffer)); - stream.skip(buffer.remaining()); + detachedStream.skip(buffer.remaining()); + } + detachedStream.reset(); + CodedInputStream codedInputStream = ByteString.copyFrom(byteStrings).newCodedInput(); + codedInputStream.enableAliasing(true); + codedInputStream.setSizeLimit(Integer.MAX_VALUE); + // fast path (no memory copy) + T message; + try { + message = parseFrom(codedInputStream); + } catch (InvalidProtocolBufferException ipbe) { + throw Status.INTERNAL + .withDescription("Invalid protobuf byte sequence") + .withCause(ipbe) + .asRuntimeException(); + } + unclosedStreams.put(message, detachedStream); + detachedStream = null; + return message; + } finally { + if (detachedStream != null) { + detachedStream.close(); } - stream.reset(); - cis = ByteString.copyFrom(byteStrings).newCodedInput(); - cis.enableAliasing(true); - cis.setSizeLimit(Integer.MAX_VALUE); } } } catch (IOException e) { throw new RuntimeException(e); } - if (cis != null) { - // fast path (no memory copy) - T message; - try { - message = parseFrom(cis); - } catch (InvalidProtocolBufferException ipbe) { - throw Status.INTERNAL.withDescription("Invalid protobuf byte sequence").withCause(ipbe).asRuntimeException(); - } - unclosedStreams.put(message, stream); - return message; - } else { - // slow path - return baseMarshaller.parse(stream); - } + // slow path + return marshaller.parse(stream); } private T parseFrom(CodedInputStream stream) throws InvalidProtocolBufferException { diff --git a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyReadinessChecker.java 
b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyReadinessChecker.java index 825545a8..ebecd233 100644 --- a/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyReadinessChecker.java +++ b/end2end-test-examples/gcs/src/main/java/io/grpc/gcs/ZeroCopyReadinessChecker.java @@ -1,12 +1,15 @@ package io.grpc.gcs; +import com.google.common.flogger.GoogleLogger; import com.google.protobuf.MessageLite; import io.grpc.KnownLength; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.security.Provider; -public class ZeroCopyReadinessChecker { +/** + * Checker to test whether a zero-copy marshaller is available from the versions of gRPC and + * Protobuf. + */ +class ZeroCopyReadinessChecker { + private static final GoogleLogger logger = GoogleLogger.forEnclosingClass(); private static final boolean isZeroCopyReady; static { @@ -18,22 +21,26 @@ public class ZeroCopyReadinessChecker { // done indirectly to handle the case where gRPC is being shaded in a // different package. String knownLengthClassName = KnownLength.class.getName(); - String detachableClassName = knownLengthClassName.substring(0, knownLengthClassName.lastIndexOf('.') + 1) - + "Detachable"; + String detachableClassName = + knownLengthClassName.substring(0, knownLengthClassName.lastIndexOf('.') + 1) + + "Detachable"; Class detachableClass = Class.forName(detachableClassName); detachableClassExists = (detachableClass != null); } catch (ClassNotFoundException ex) { + logger.atFine().withCause(ex).log("io.grpc.Detachable not found"); } // Check whether com.google.protobuf.UnsafeByteOperations exists?
boolean unsafeByteOperationsClassExists = false; try { // Same above String messageLiteClassName = MessageLite.class.getName(); - String unsafeByteOperationsClassName = messageLiteClassName.substring(0, - messageLiteClassName.lastIndexOf('.') + 1) + "UnsafeByteOperations"; + String unsafeByteOperationsClassName = + messageLiteClassName.substring(0, messageLiteClassName.lastIndexOf('.') + 1) + + "UnsafeByteOperations"; Class unsafeByteOperationsClass = Class.forName(unsafeByteOperationsClassName); unsafeByteOperationsClassExists = (unsafeByteOperationsClass != null); } catch (ClassNotFoundException ex) { + logger.atFine().withCause(ex).log("com.google.protobuf.UnsafeByteOperations not found"); } isZeroCopyReady = detachableClassExists && unsafeByteOperationsClassExists; } diff --git a/end2end-test-examples/gcs/src/main/proto/google/storage/v1/storage.proto b/end2end-test-examples/gcs/src/main/proto/google/storage/v1/storage.proto deleted file mode 100644 index 2d0a7a0f..00000000 --- a/end2end-test-examples/gcs/src/main/proto/google/storage/v1/storage.proto +++ /dev/null @@ -1,1986 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// - -syntax = "proto3"; - -package google.storage.v1; - -import "google/iam/v1/iam_policy.proto"; -import "google/iam/v1/policy.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/wrappers.proto"; -import "google/storage/v1/storage_resources.proto"; -import "google/api/client.proto"; - -option go_package = "google.golang.org/genproto/googleapis/storage/v1;storage"; -option java_multiple_files = true; -option java_package = "com.google.google.storage.v1"; - -// Manages Google Cloud Storage resources. -service Storage { - option (google.api.default_host) = "storage.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/cloud-platform.read-only," - "https://www.googleapis.com/auth/devstorage.full_control," - "https://www.googleapis.com/auth/devstorage.read_only," - "https://www.googleapis.com/auth/devstorage.read_write"; - - // Permanently deletes the ACL entry for the specified entity on the specified - // bucket. - rpc DeleteBucketAccessControl(DeleteBucketAccessControlRequest) returns (google.protobuf.Empty) { - } - - // Returns the ACL entry for the specified entity on the specified bucket. - rpc GetBucketAccessControl(GetBucketAccessControlRequest) returns (BucketAccessControl) { - } - - // Creates a new ACL entry on the specified bucket. - rpc InsertBucketAccessControl(InsertBucketAccessControlRequest) returns (BucketAccessControl) { - } - - // Retrieves ACL entries on the specified bucket. - rpc ListBucketAccessControls(ListBucketAccessControlsRequest) returns (ListBucketAccessControlsResponse) { - } - - // Updates an ACL entry on the specified bucket. Equivalent to - // PatchBucketAccessControl, but all unspecified fields will be - // reset to their default values. 
- rpc UpdateBucketAccessControl(UpdateBucketAccessControlRequest) returns (BucketAccessControl) { - } - - // Updates an ACL entry on the specified bucket. - rpc PatchBucketAccessControl(PatchBucketAccessControlRequest) returns (BucketAccessControl) { - } - - // Permanently deletes an empty bucket. - rpc DeleteBucket(DeleteBucketRequest) returns (google.protobuf.Empty) { - } - - // Returns metadata for the specified bucket. - rpc GetBucket(GetBucketRequest) returns (Bucket) { - } - - // Creates a new bucket. - rpc InsertBucket(InsertBucketRequest) returns (Bucket) { - } - - // Retrieves a list of buckets for a given project. - rpc ListBuckets(ListBucketsRequest) returns (ListBucketsResponse) { - } - - // Locks retention policy on a bucket. - rpc LockBucketRetentionPolicy(LockRetentionPolicyRequest) returns (Bucket) { - } - - // Gets the IAM policy for the specified bucket. - rpc GetBucketIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - } - - // Updates an IAM policy for the specified bucket. - rpc SetBucketIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - } - - // Tests a set of permissions on the given bucket to see which, if - // any, are held by the caller. - rpc TestBucketIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - } - - // Updates a bucket. Changes to the bucket will be readable immediately after - // writing, but configuration changes may take time to propagate. - rpc PatchBucket(PatchBucketRequest) returns (Bucket) { - } - - // Updates a bucket. Equivalent to PatchBucket, but always replaces all - // mutatable fields of the bucket with new values, reverting all - // unspecified fields to their default values. - // Like PatchBucket, Changes to the bucket will be readable immediately after - // writing, but configuration changes may take time to propagate. 
- rpc UpdateBucket(UpdateBucketRequest) returns (Bucket) { - } - - // Halts "Object Change Notification" push messagages. - // See https://cloud.google.com/storage/docs/object-change-notification - // Note: this is not related to the newer "Notifications" resource, which - // are stopped using DeleteNotification. - rpc StopChannel(StopChannelRequest) returns (google.protobuf.Empty) { - } - - // Permanently deletes the default object ACL entry for the specified entity - // on the specified bucket. - rpc DeleteDefaultObjectAccessControl(DeleteDefaultObjectAccessControlRequest) returns (google.protobuf.Empty) { - } - - // Returns the default object ACL entry for the specified entity on the - // specified bucket. - rpc GetDefaultObjectAccessControl(GetDefaultObjectAccessControlRequest) returns (ObjectAccessControl) { - } - - // Creates a new default object ACL entry on the specified bucket. - rpc InsertDefaultObjectAccessControl(InsertDefaultObjectAccessControlRequest) returns (ObjectAccessControl) { - } - - // Retrieves default object ACL entries on the specified bucket. - rpc ListDefaultObjectAccessControls(ListDefaultObjectAccessControlsRequest) returns (ListObjectAccessControlsResponse) { - } - - // Updates a default object ACL entry on the specified bucket. - rpc PatchDefaultObjectAccessControl(PatchDefaultObjectAccessControlRequest) returns (ObjectAccessControl) { - } - - // Updates a default object ACL entry on the specified bucket. Equivalent to - // PatchDefaultObjectAccessControl, but modifies all unspecified fields to - // their default values. - rpc UpdateDefaultObjectAccessControl(UpdateDefaultObjectAccessControlRequest) returns (ObjectAccessControl) { - } - - // Permanently deletes a notification subscription. - // Note: Older, "Object Change Notification" push subscriptions should be - // deleted using StopChannel instead. - rpc DeleteNotification(DeleteNotificationRequest) returns (google.protobuf.Empty) { - } - - // View a notification configuration. 
- rpc GetNotification(GetNotificationRequest) returns (Notification) { - } - - // Creates a notification subscription for a given bucket. - // These notifications, when triggered, publish messages to the specified - // Cloud Pub/Sub topics. - // See https://cloud.google.com/storage/docs/pubsub-notifications. - rpc InsertNotification(InsertNotificationRequest) returns (Notification) { - } - - // Retrieves a list of notification subscriptions for a given bucket. - rpc ListNotifications(ListNotificationsRequest) returns (ListNotificationsResponse) { - } - - // Permanently deletes the ACL entry for the specified entity on the specified - // object. - rpc DeleteObjectAccessControl(DeleteObjectAccessControlRequest) returns (google.protobuf.Empty) { - } - - // Returns the ACL entry for the specified entity on the specified object. - rpc GetObjectAccessControl(GetObjectAccessControlRequest) returns (ObjectAccessControl) { - } - - // Creates a new ACL entry on the specified object. - rpc InsertObjectAccessControl(InsertObjectAccessControlRequest) returns (ObjectAccessControl) { - } - - // Retrieves ACL entries on the specified object. - rpc ListObjectAccessControls(ListObjectAccessControlsRequest) returns (ListObjectAccessControlsResponse) { - } - - // Updates an ACL entry on the specified object. - rpc UpdateObjectAccessControl(UpdateObjectAccessControlRequest) returns (ObjectAccessControl) { - } - - // Concatenates a list of existing objects into a new object in the same - // bucket. - rpc ComposeObject(ComposeObjectRequest) returns (Object) { - } - - // Copies a source object to a destination object. Optionally overrides - // metadata. - rpc CopyObject(CopyObjectRequest) returns (Object) { - } - - // Deletes an object and its metadata. Deletions are permanent if versioning - // is not enabled for the bucket, or if the generation parameter - // is used. - rpc DeleteObject(DeleteObjectRequest) returns (google.protobuf.Empty) { - } - - // Retrieves an object's metadata. 
- rpc GetObject(GetObjectRequest) returns (Object) { - } - - // Reads an object's data. - rpc GetObjectMedia(GetObjectMediaRequest) returns (stream GetObjectMediaResponse) { - } - - // Stores a new object and metadata. - // - // An object can be written either in a single message stream or in a - // resumable sequence of message streams. To write using a single stream, - // the client should include in the first message of the stream an - // `InsertObjectSpec` describing the destination bucket, object, and any - // preconditions. Additionally, the final message must set 'finish_write' to - // true, or else it is an error. - // - // For a resumable write, the client should instead call - // `StartResumableWrite()` and provide that method an `InsertObjectSpec.` - // They should then attach the returned `upload_id` to the first message of - // each following call to `Insert`. If there is an error or the connection is - // broken during the resumable `Insert()`, the client should check the status - // of the `Insert()` by calling `QueryWriteStatus()` and continue writing from - // the returned `committed_size`. This may be less than the amount of data the - // client previously sent. - // - // The service will not view the object as complete until the client has - // sent an `Insert` with `finish_write` set to `true`. Sending any - // requests on a stream after sending a request with `finish_write` set to - // `true` will cause an error. The client **should** check the - // `Object` it receives to determine how much data the service was - // able to commit and whether the service views the object as complete. - rpc InsertObject(stream InsertObjectRequest) returns (Object) { - } - - // Retrieves a list of objects matching the criteria. - rpc ListObjects(ListObjectsRequest) returns (ListObjectsResponse) { - } - - // Rewrites a source object to a destination object. Optionally overrides - // metadata. 
- rpc RewriteObject(RewriteObjectRequest) returns (RewriteResponse) { - } - - // Starts a resumable write. How long the write operation remains valid, and - // what happens when the write operation becomes invalid, are - // service-dependent. - rpc StartResumableWrite(StartResumableWriteRequest) returns (StartResumableWriteResponse) { - } - - // Determines the `committed_size` for an object that is being written, which - // can then be used as the `write_offset` for the next `Write()` call. - // - // If the object does not exist (i.e., the object has been deleted, or the - // first `Write()` has not yet reached the service), this method returns the - // error `NOT_FOUND`. - // - // The client **may** call `QueryWriteStatus()` at any time to determine how - // much data has been processed for this object. This is useful if the - // client is buffering data and needs to know which data can be safely - // evicted. For any sequence of `QueryWriteStatus()` calls for a given - // object name, the sequence of returned `committed_size` values will be - // non-decreasing. - rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse) { - } - - // Updates an object's metadata. - rpc PatchObject(PatchObjectRequest) returns (Object) { - } - - // Updates an object's metadata. Equivalent to PatchObject, but always - // replaces all mutatable fields of the bucket with new values, reverting all - // unspecified fields to their default values. - rpc UpdateObject(UpdateObjectRequest) returns (Object) { - } - - // Gets the IAM policy for the specified object. - rpc GetObjectIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { - } - - // Updates an IAM policy for the specified object. - rpc SetObjectIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { - } - - // Tests a set of permissions on the given object to see which, if - // any, are held by the caller. 
- rpc TestObjectIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { - } - - // Watch for changes on all objects in a bucket. - rpc WatchAllObjects(WatchAllObjectsRequest) returns (Channel) { - } - - // Retrieves the name of a project's Google Cloud Storage service account. - rpc GetServiceAccount(GetProjectServiceAccountRequest) returns (ServiceAccount) { - } -} - -// Request message for DeleteBucketAccessControl. -message DeleteBucketAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for GetBucketAccessControl. -message GetBucketAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for InsertBucketAccessControl. -message InsertBucketAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 2; - - // Properties of the new bucket access control being inserted. 
- BucketAccessControl bucket_access_control = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for ListBucketAccessControl. -message ListBucketAccessControlsRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 2; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 3; -} - -// Request for PatchBucketAccessControl. -message PatchBucketAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 3; - - // The BucketAccessControl for updating. - BucketAccessControl bucket_access_control = 4; - - // List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. - // - google.protobuf.FieldMask update_mask = 5; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 6; -} - -// Request for UpdateBucketAccessControl. -message UpdateBucketAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 3; - - // The BucketAccessControl for updating. 
- BucketAccessControl bucket_access_control = 4; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 5; -} - -// Request message for DeleteBucket. -message DeleteBucketRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // If set, only deletes the bucket if its metageneration matches this value. - google.protobuf.Int64Value if_metageneration_match = 2; - - // If set, only deletes the bucket if its metageneration does not match this - // value. - google.protobuf.Int64Value if_metageneration_not_match = 3; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 4; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 5; -} - -// Request message for GetBucket. -message GetBucketRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // Makes the return of the bucket metadata conditional on whether the bucket's - // current metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 2; - - // Makes the return of the bucket metadata conditional on whether the bucket's - // current metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 3; - - // Set of properties to return. Defaults to NO_ACL. - CommonEnums.Projection projection = 4; - - // The project to be billed for this request. Required for Requester Pays - // buckets. - string user_project = 5; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 6; -} - -// Request message for InsertBucket. -message InsertBucketRequest { - // Apply a predefined set of access controls to this bucket. - CommonEnums.PredefinedBucketAcl predefined_acl = 1; - - // Apply a predefined set of default object access controls to this bucket. 
- CommonEnums.PredefinedObjectAcl predefined_default_object_acl = 2; - - // A valid API project identifier. - // Required. - string project = 3; - - // Set of properties to return. Defaults to NO_ACL, unless the - // bucket resource specifies acl or defaultObjectAcl - // properties, when it defaults to FULL. - CommonEnums.Projection projection = 4; - - // The project to be billed for this request. - string user_project = 5; - - // Properties of the new bucket being inserted, including its name. - Bucket bucket = 6; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 7; -} - -// Request message for ListBuckets. -message ListBucketsRequest { - // Maximum number of buckets to return in a single response. The service will - // use this parameter or 1,000 items, whichever is smaller. - int32 max_results = 1; - - // A previously-returned page token representing part of the larger set of - // results to view. - string page_token = 2; - - // Filter results to buckets whose names begin with this prefix. - string prefix = 3; - - // A valid API project identifier. - // Required. - string project = 4; - - // Set of properties to return. Defaults to NO_ACL. - CommonEnums.Projection projection = 5; - - // The project to be billed for this request. - string user_project = 6; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 7; -} - -// Request message for LockRetentionPolicy. -message LockRetentionPolicyRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // Makes the operation conditional on whether bucket's current metageneration - // matches the given value. Must be positive. - int64 if_metageneration_match = 2; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 3; - - // A set of parameters common to all Storage API requests. 
- CommonRequestParams common_request_params = 4; -} - -// Request for PatchBucket method. -message PatchBucketRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // Makes the return of the bucket metadata conditional on whether the bucket's - // current metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 2; - - // Makes the return of the bucket metadata conditional on whether the bucket's - // current metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 3; - - // Apply a predefined set of access controls to this bucket. - CommonEnums.PredefinedBucketAcl predefined_acl = 4; - - // Apply a predefined set of default object access controls to this bucket. - CommonEnums.PredefinedObjectAcl predefined_default_object_acl = 5; - - // Set of properties to return. Defaults to FULL. - CommonEnums.Projection projection = 6; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 7; - - // The Bucket metadata for updating. - Bucket metadata = 8; - - // List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. - google.protobuf.FieldMask update_mask = 9; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 10; -} - -// Request for UpdateBucket method. -message UpdateBucketRequest { - // Name of a bucket. - // Required. 
- string bucket = 1; - - // Makes the return of the bucket metadata conditional on whether the bucket's - // current metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 2; - - // Makes the return of the bucket metadata conditional on whether the bucket's - // current metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 3; - - // Apply a predefined set of access controls to this bucket. - CommonEnums.PredefinedBucketAcl predefined_acl = 4; - - // Apply a predefined set of default object access controls to this bucket. - CommonEnums.PredefinedObjectAcl predefined_default_object_acl = 5; - - // Set of properties to return. Defaults to FULL. - CommonEnums.Projection projection = 6; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 7; - - // The Bucket metadata for updating. - Bucket metadata = 8; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 9; -} - -// Request message for StopChannel. -message StopChannelRequest { - // The channel to be stopped. - Channel channel = 1; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 2; -} - -// Request message for DeleteDefaultObjectAccessControl. -message DeleteDefaultObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for GetDefaultObjectAccessControl. 
-message GetDefaultObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for InsertDefaultObjectAccessControl. -message InsertDefaultObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 2; - - // Properties of the object access control being inserted. - ObjectAccessControl object_access_control = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for ListDefaultObjectAccessControls. -message ListDefaultObjectAccessControlsRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // If present, only return default ACL listing if the bucket's current - // metageneration matches this value. - google.protobuf.Int64Value if_metageneration_match = 2; - - // If present, only return default ACL listing if the bucket's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 3; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 4; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 5; -} - -// Request message for PatchDefaultObjectAccessControl. -message PatchDefaultObjectAccessControlRequest { - // Name of a bucket. - // Required. 
- string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 3; - - // The ObjectAccessControl for updating. - ObjectAccessControl object_access_control = 4; - - // List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. - google.protobuf.FieldMask update_mask = 5; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 6; -} - -// Request message for UpdateDefaultObjectAccessControl. -message UpdateDefaultObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 3; - - // The ObjectAccessControl for updating. - ObjectAccessControl object_access_control = 4; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 5; -} - -// Request message for DeleteNotification. -message DeleteNotificationRequest { - // The parent bucket of the notification. - // Required. - string bucket = 1; - - // ID of the notification to delete. 
- // Required. - string notification = 2; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for GetNotification. -message GetNotificationRequest { - // The parent bucket of the notification. - // Required. - string bucket = 1; - - // Notification ID. - // Required. - string notification = 2; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for InsertNotification. -message InsertNotificationRequest { - // The parent bucket of the notification. - // Required. - string bucket = 1; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 2; - - // Properties of the notification to be inserted. - Notification notification = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Request message for ListNotifications. -message ListNotificationsRequest { - // Name of a Google Cloud Storage bucket. - // Required. - string bucket = 1; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 2; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 3; -} - -// Request message for DeleteObjectAccessControl. -message DeleteObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. 
- string entity = 2; - - // Name of the object. - // Required. - string object = 3; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 4; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 5; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 6; -} - -// Request message for GetObjectAccessControl. -message GetObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // Name of the object. - // Required. - string object = 3; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 4; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 5; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 6; -} - -// Request message for InsertObjectAccessControl. -message InsertObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // Name of the object. - // Required. - string object = 2; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 3; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 4; - - // Properties of the object access control to be inserted. - ObjectAccessControl object_access_control = 5; - - // A set of parameters common to all Storage API requests. 
- CommonRequestParams common_request_params = 6; -} - -// Request message for ListObjectAccessControls. -message ListObjectAccessControlsRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // Name of the object. - // Required. - string object = 2; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 3; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 4; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 5; -} - -// Request message for UpdateObjetAccessControl. -message UpdateObjectAccessControlRequest { - // Name of a bucket. - // Required. - string bucket = 1; - - // The entity holding the permission. Can be - // user-userId, - // user-emailAddress, - // group-groupId, - // group-emailAddress, allUsers, or - // allAuthenticatedUsers. - // Required. - string entity = 2; - - // Name of the object. - // Required. - string object = 3; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 4; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 5; - - // The ObjectAccessControl for updating. - ObjectAccessControl object_access_control = 6; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 7; - - // List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. 
- // Not specifying a field while setting that field to a non-default value is - // an error. - google.protobuf.FieldMask update_mask = 8; -} - -// Request message for ComposeObject. -message ComposeObjectRequest { - // Description of a source object for a composition request. - message SourceObjects { - // Preconditions for a source object of a composition request. - message ObjectPreconditions { - // Only perform the composition if the generation of the source object - // that would be used matches this value. If this value and a generation - // are both specified, they must be the same value or the call will fail. - google.protobuf.Int64Value if_generation_match = 1; - } - - // The source object's name. All source objects must reside in the same - // bucket. - string name = 1; - - // The generation of this object to use as the source. - int64 generation = 2; - - // Conditions that must be met for this operation to execute. - ObjectPreconditions object_preconditions = 3; - } - - // Name of the bucket containing the source objects. The destination object is - // stored in this bucket. - // Required. - string destination_bucket = 1; - - // Name of the new object. - // Required. - string destination_object = 2; - - // Apply a predefined set of access controls to the destination object. - CommonEnums.PredefinedObjectAcl destination_predefined_acl = 3; - - // Properties of the resulting object. - Object destination = 11; - - // The list of source objects that will be concatenated into a single object. - repeated SourceObjects source_objects = 12; - - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 4; - - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. 
- google.protobuf.Int64Value if_metageneration_match = 5; - - // Resource name of the Cloud KMS key, of the form - // projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key, - // that will be used to encrypt the object. Overrides the object - // metadata's kms_key_name value, if any. - string kms_key_name = 6; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 7; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 9; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 10; -} - -// Request message for CopyObject. -message CopyObjectRequest { - // Name of the bucket in which to store the new object. Overrides the provided - // object - // metadata's bucket value, if any. - // Required. - string destination_bucket = 1; - - // Name of the new object. - // Required when the object metadata is not otherwise provided. Overrides the - // object metadata's name value, if any. - // Required. - string destination_object = 2; - - // Apply a predefined set of access controls to the destination object. - CommonEnums.PredefinedObjectAcl destination_predefined_acl = 3; - - // Makes the operation conditional on whether the destination object's current - // generation matches the given value. Setting to 0 makes the operation - // succeed only if there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 4; - - // Makes the operation conditional on whether the destination object's current - // generation does not match the given value. If no live object exists, the - // precondition fails. Setting to 0 makes the operation succeed only if there - // is a live version of the object. 
- google.protobuf.Int64Value if_generation_not_match = 5; - - // Makes the operation conditional on whether the destination object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 6; - - // Makes the operation conditional on whether the destination object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 7; - - // Makes the operation conditional on whether the source object's current - // generation matches the given value. - google.protobuf.Int64Value if_source_generation_match = 8; - - // Makes the operation conditional on whether the source object's current - // generation does not match the given value. - google.protobuf.Int64Value if_source_generation_not_match = 9; - - // Makes the operation conditional on whether the source object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_source_metageneration_match = 10; - - // Makes the operation conditional on whether the source object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_source_metageneration_not_match = 11; - - // Set of properties to return. Defaults to NO_ACL, unless the - // object resource specifies the acl property, when it defaults - // to full. - CommonEnums.Projection projection = 12; - - // Name of the bucket in which to find the source object. - // Required. - string source_bucket = 13; - - // Name of the source object. - // Required. - string source_object = 14; - - // If present, selects a specific revision of the source object (as opposed to - // the latest version, the default). - int64 source_generation = 15; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 16; - - // Properties of the resulting object. If not set, duplicate properties of - // source object. 
- Object destination = 17; - - // Resource name of the Cloud KMS key, of the form - // projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key, - // that will be used to encrypt the object. Overrides the object - // metadata's kms_key_name value, if any. - string destination_kms_key_name = 20; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 18; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 19; -} - -// Message for deleting an object. -// Either `bucket` and `object` *or* `upload_id` **must** be set (but not both). -message DeleteObjectRequest { - // Name of the bucket in which the object resides. - // Required. - string bucket = 1; - - // The name of the object to delete (when not using a resumable write). - // Required. - string object = 2; - - // The resumable upload_id of the object to delete (when using a - // resumable write). This should be copied from the `upload_id` field of - // `StartResumableWriteResponse`. - string upload_id = 3; - - // If present, permanently deletes a specific revision of this object (as - // opposed to the latest version, the default). - int64 generation = 4; - - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 5; - - // Makes the operation conditional on whether the object's current generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - google.protobuf.Int64Value if_generation_not_match = 6; - - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. 
- google.protobuf.Int64Value if_metageneration_match = 7; - - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 8; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 9; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 10; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 11; -} - -// Request message for GetObjectMedia. -message GetObjectMediaRequest { - // The name of the bucket containing the object to read. - string bucket = 1; - - // The name of the object to read. - string object = 2; - - // If present, selects a specific revision of this object (as opposed - // to the latest version, the default). - int64 generation = 3; - - // The offset for the first byte to return in the read, relative to the start - // of the object. - // - // A `read_offset` that is negative or greater than the size of the object - // will cause an `OUT_OF_RANGE` error. - int64 read_offset = 4; - - // The maximum number of `data` bytes the server is allowed to return in the - // sum of all `Object` messages. A `read_limit` of zero indicates that there - // is no limit, and a negative `read_limit` will cause an error. - // - // If the stream returns fewer bytes than allowed by the `read_limit` and no - // error occurred, the stream includes all data from the `read_offset` to the - // end of the resource. - int64 read_limit = 5; - - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. 
- google.protobuf.Int64Value if_generation_match = 6; - - // Makes the operation conditional on whether the object's current generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - google.protobuf.Int64Value if_generation_not_match = 7; - - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 8; - - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 9; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 10; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 11; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 12; -} - -// Request message for GetObject. -message GetObjectRequest { - // Name of the bucket in which the object resides. - // Required. - string bucket = 1; - - // Name of the object. - // Required. - string object = 2; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 3; - - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 4; - - // Makes the operation conditional on whether the object's current generation - // does not match the given value. If no live object exists, the precondition - // fails. 
Setting to 0 makes the operation succeed only if there is a live - // version of the object. - google.protobuf.Int64Value if_generation_not_match = 5; - - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 6; - - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 7; - - // Set of properties to return. Defaults to NO_ACL. - CommonEnums.Projection projection = 8; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 9; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 10; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 11; -} - -// Response message for GetObject. -message GetObjectMediaResponse { - // A portion of the data for the object. The service **may** leave `data` - // empty for any given `ReadResponse`. This enables the service to inform the - // client that the request is still live while it is running an operation to - // generate more data. - ChecksummedData checksummed_data = 1; - - // The checksums of the complete object. The client should compute one of - // these checksums over the downloaded object and compare it against the value - // provided here. - ObjectChecksums object_checksums = 2; - - // If read_offset and or read_limit was specified on the - // GetObjectMediaRequest, ContentRange will be populated on the first - // GetObjectMediaResponse message of the read stream. - ContentRange content_range = 3; -} - -// Describes an attempt to insert an object, possibly over multiple requests. -message InsertObjectSpec { - // Destination object, including its name and its metadata. 
- Object resource = 1; - - // Apply a predefined set of access controls to this object. - CommonEnums.PredefinedObjectAcl predefined_acl = 2; - - // Makes the operation conditional on whether the object's current - // generation matches the given value. Setting to 0 makes the operation - // succeed only if there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 3; - - // Makes the operation conditional on whether the object's current - // generation does not match the given value. If no live object exists, the - // precondition fails. Setting to 0 makes the operation succeed only if - // there is a live version of the object. - google.protobuf.Int64Value if_generation_not_match = 4; - - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 5; - - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 6; - - // Set of properties to return. Defaults to NO_ACL, unless the - // object resource specifies the acl property, when it defaults - // to full. - CommonEnums.Projection projection = 7; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 8; -} - -// Message for writing an object. -message InsertObjectRequest { - // The first message of each stream should set one of the following. - oneof first_message { - // For resumable uploads. This should be the `upload_id` returned from a - // call to `StartResumableWriteResponse`. - string upload_id = 1; - - // For non-resumable uploads. Describes the overall upload, including the - // destination bucket and object name, preconditions, etc. - InsertObjectSpec insert_object_spec = 2; - } - - // The offset from the beginning of the object at which the data should be - // written. - // Required. 
- // - // In the first `InsertObjectRequest` of a `InsertObject()` action, it - // indicates the initial offset for the `Insert()` call. The value **must** be - // equal to the `committed_size` that a call to `QueryWriteStatus()` would - // return (0 if this is the first write to the object). - // - // On subsequent calls, this value **must** be no larger than the sum of the - // first `write_offset` and the sizes of all `data` chunks sent previously on - // this stream. - // - // An incorrect value will cause an error. - int64 write_offset = 3; - - // A portion of the data for the object. - oneof data { - // The data to insert. If a crc32c checksum is provided that doesn't match - // the checksum computed by the service, the request will fail. - ChecksummedData checksummed_data = 4; - - // A reference to an existing object. This can be used to support - // several use cases: - // - Writing a sequence of data buffers supports the basic use case of - // uploading a complete object, chunk by chunk. - // - Writing a sequence of references to existing objects allows an - // object to be composed from a collection of objects, which can be - // used to support parallel object writes. - // - Writing a single reference with a given offset and size can be used - // to create an object from a slice of an existing object. - // - Writing an object referencing a object slice (created as noted - // above) followed by a data buffer followed by another object - // slice can be used to support delta upload functionality. - GetObjectMediaRequest reference = 5; - } - - // Checksums for the complete object. If the checksums computed by the service - // don't match the specifified checksums the call will fail. May only be - // provided in the first or last request (either with first_message, or - // finish_write set). - ObjectChecksums object_checksums = 6; - - // If `true`, this indicates that the write is complete. 
Sending any - // `InsertObjectRequest`s subsequent to one in which `finish_write` is `true` - // will cause an error. - // For a non-resumable write (where the upload_id was not set in the first - // message), it is an error not to set this field in the final message of the - // stream. - bool finish_write = 7; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 8; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 9; -} - -// Request message for ListObjects. -message ListObjectsRequest { - // Name of the bucket in which to look for objects. - // Required. - string bucket = 1; - - // Returns results in a directory-like mode. items will contain - // only objects whose names, aside from the prefix, do not - // contain delimiter. Objects whose names, aside from the - // prefix, contain delimiter will have their name, - // truncated after the delimiter, returned in - // prefixes. Duplicate prefixes are omitted. - string delimiter = 2; - - // If true, objects that end in exactly one instance of delimiter - // will have their metadata included in items in addition to - // prefixes. - bool include_trailing_delimiter = 3; - - // Maximum number of items plus prefixes to return - // in a single page of responses. As duplicate prefixes are - // omitted, fewer total results may be returned than requested. The service - // will use this parameter or 1,000 items, whichever is smaller. - int32 max_results = 4; - - // A previously-returned page token representing part of the larger set of - // results to view. - string page_token = 5; - - // Filter results to objects whose names begin with this prefix. - string prefix = 6; - - // Set of properties to return. Defaults to NO_ACL. - CommonEnums.Projection projection = 7; - - // The project to be billed for this request. - // Required for Requester Pays buckets. 
- string user_project = 8; - - // If true, lists all versions of an object as distinct results. - // The default is false. For more information, see Object Versioning. - bool versions = 9; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 10; -} - -// Request object for `ByteStream.QueryWriteStatus`. -message QueryWriteStatusRequest { - // The name of the resume token for the object whose write status is being - // requested. - // Required. - string upload_id = 1; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 2; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 3; -} - -// Response object for `ByteStream.QueryWriteStatus`. -message QueryWriteStatusResponse { - // The number of bytes that have been processed for the given object. - int64 committed_size = 1; - - // `complete` is `true` only if the client has sent a `InsertObjectRequest` - // with `finish_write` set to true, and the server has processed that request. - bool complete = 2; -} - -// Request message for RewriteObject. -message RewriteObjectRequest { - // Name of the bucket in which to store the new object. Overrides the provided - // object metadata's bucket value, if any. - // Required. - string destination_bucket = 1; - - // Name of the new object. - // Required when the object metadata is not otherwise provided. Overrides the - // object metadata's name value, if any. - // Required. - string destination_object = 2; - - // Resource name of the Cloud KMS key, of the form - // projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key, - // that will be used to encrypt the object. Overrides the object - // metadata's kms_key_name value, if any. - string destination_kms_key_name = 3; - - // Apply a predefined set of access controls to the destination object. 
- CommonEnums.PredefinedObjectAcl destination_predefined_acl = 4; - - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 5; - - // Makes the operation conditional on whether the object's current generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - google.protobuf.Int64Value if_generation_not_match = 6; - - // Makes the operation conditional on whether the destination object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 7; - - // Makes the operation conditional on whether the destination object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 8; - - // Makes the operation conditional on whether the source object's current - // generation matches the given value. - google.protobuf.Int64Value if_source_generation_match = 9; - - // Makes the operation conditional on whether the source object's current - // generation does not match the given value. - google.protobuf.Int64Value if_source_generation_not_match = 10; - - // Makes the operation conditional on whether the source object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_source_metageneration_match = 11; - - // Makes the operation conditional on whether the source object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_source_metageneration_not_match = 12; - - // The maximum number of bytes that will be rewritten per rewrite request. - // Most callers - // shouldn't need to specify this parameter - it is primarily in place to - // support testing. 
If specified the value must be an integral multiple of - // 1 MiB (1048576). Also, this only applies to requests where the source and - // destination span locations and/or storage classes. Finally, this value must - // not change across rewrite calls else you'll get an error that the - // rewriteToken is invalid. - int64 max_bytes_rewritten_per_call = 13; - - // Set of properties to return. Defaults to NO_ACL, unless the - // object resource specifies the acl property, when it defaults - // to full. - CommonEnums.Projection projection = 14; - - // Include this field (from the previous rewrite response) on each rewrite - // request after the first one, until the rewrite response 'done' flag is - // true. Calls that provide a rewriteToken can omit all other request fields, - // but if included those fields must match the values provided in the first - // rewrite request. - string rewrite_token = 15; - - // Name of the bucket in which to find the source object. - // Required. - string source_bucket = 16; - - // Name of the source object. - // Required. - string source_object = 17; - - // If present, selects a specific revision of the source object (as opposed to - // the latest version, the default). - int64 source_generation = 18; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 19; - - // Properties of the destination, post-rewrite object. - Object object = 20; - - // The algorithm used to encrypt the source object, if any. - string copy_source_encryption_algorithm = 21; - - // The encryption key used to encrypt the source object, if any. - string copy_source_encryption_key = 22; - - // The SHA-256 hash of the key used to encrypt the source object, if any. - string copy_source_encryption_key_sha256 = 23; - - // A set of parameters common to Storage API requests concerning an object. 
- CommonObjectRequestParams common_object_request_params = 24; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 25; -} - -// A rewrite response. -message RewriteResponse { - // The total bytes written so far, which can be used to provide a waiting user - // with a progress indicator. This property is always present in the response. - int64 total_bytes_rewritten = 1; - - // The total size of the object being copied in bytes. This property is always - // present in the response. - int64 object_size = 2; - - // true if the copy is finished; otherwise, false if - // the copy is in progress. This property is always present in the response. - bool done = 3; - - // A token to use in subsequent requests to continue copying data. This token - // is present in the response only when there is more data to copy. - string rewrite_token = 4; - - // A resource containing the metadata for the copied-to object. This property - // is present in the response only when copying completes. - Object resource = 5; -} - -// Request message StartResumableWrite. -message StartResumableWriteRequest { - // The destination bucket, object, and metadata, as well as any preconditions. - InsertObjectSpec insert_object_spec = 1; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 3; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 4; -} - -// Response object for ByteStream.StartResumableWrite. -message StartResumableWriteResponse { - // The upload_id of the newly started resumable write operation. This - // value should be copied into the `InsertObjectRequest.upload_id` field. - string upload_id = 1; -} - -// Request message for PatchObject. -message PatchObjectRequest { - // Name of the bucket in which the object resides. - // Required. - string bucket = 1; - - // Name of the object. 
- // Required. - string object = 2; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 3; - - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 4; - - // Makes the operation conditional on whether the object's current generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - google.protobuf.Int64Value if_generation_not_match = 5; - - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 6; - - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 7; - - // Apply a predefined set of access controls to this object. - CommonEnums.PredefinedObjectAcl predefined_acl = 8; - - // Set of properties to return. Defaults to FULL. - CommonEnums.Projection projection = 9; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 10; - - // The Object metadata for updating. - Object metadata = 11; - - // List of fields to be updated. - // - // To specify ALL fields, equivalent to the JSON API's "update" function, - // specify a single field with the value `*`. Note: not recommended. If a new - // field is introduced at a later time, an older client updating with the `*` - // may accidentally reset the new field's value. - // - // Not specifying any fields is an error. - // Not specifying a field while setting that field to a non-default value is - // an error. 
- google.protobuf.FieldMask update_mask = 12; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 13; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 14; -} - -// Request message for UpdateObject. -message UpdateObjectRequest { - // Name of the bucket in which the object resides. - // Required. - string bucket = 1; - - // Name of the object. - // Required. - string object = 2; - - // If present, selects a specific revision of this object (as opposed to the - // latest version, the default). - int64 generation = 3; - - // Makes the operation conditional on whether the object's current generation - // matches the given value. Setting to 0 makes the operation succeed only if - // there are no live versions of the object. - google.protobuf.Int64Value if_generation_match = 4; - - // Makes the operation conditional on whether the object's current generation - // does not match the given value. If no live object exists, the precondition - // fails. Setting to 0 makes the operation succeed only if there is a live - // version of the object. - google.protobuf.Int64Value if_generation_not_match = 5; - - // Makes the operation conditional on whether the object's current - // metageneration matches the given value. - google.protobuf.Int64Value if_metageneration_match = 6; - - // Makes the operation conditional on whether the object's current - // metageneration does not match the given value. - google.protobuf.Int64Value if_metageneration_not_match = 7; - - // Apply a predefined set of access controls to this object. - CommonEnums.PredefinedObjectAcl predefined_acl = 8; - - // Set of properties to return. Defaults to FULL. - CommonEnums.Projection projection = 9; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 10; - - // The Object metadata for updating. 
- Object metadata = 11; - - // A set of parameters common to Storage API requests concerning an object. - CommonObjectRequestParams common_object_request_params = 12; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 13; -} - -// Request message for WatchAllObjects. -message WatchAllObjectsRequest { - // Name of the bucket in which to look for objects. - string bucket = 1; - - // If true, lists all versions of an object as distinct results. - // The default is false. For more information, see Object Versioning. - bool versions = 2; - - // Returns results in a directory-like mode. items will contain - // only objects whose names, aside from the prefix, do not - // contain delimiter. Objects whose names, aside from the - // prefix, contain delimiter will have their name, - // truncated after the delimiter, returned in - // prefixes. Duplicate prefixes are omitted. - string delimiter = 3; - - // Maximum number of items plus prefixes to return - // in a single page of responses. As duplicate prefixes are - // omitted, fewer total results may be returned than requested. The service - // will use this parameter or 1,000 items, whichever is smaller. - int32 max_results = 4; - - // Filter results to objects whose names begin with this prefix. - string prefix = 5; - - // If true, objects that end in exactly one instance of delimiter - // will have their metadata included in items in addition to - // prefixes. - bool include_trailing_delimiter = 6; - - // A previously-returned page token representing part of the larger set of - // results to view. - string page_token = 7; - - // Set of properties to return. Defaults to NO_ACL. - CommonEnums.Projection projection = 8; - - // The project to be billed for this request. - // Required for Requester Pays buckets. - string user_project = 9; - - // Properties of the channel to be inserted. - Channel channel = 10; - - // A set of parameters common to all Storage API requests. 
- CommonRequestParams common_request_params = 11; -} - -// Request message for GetProjectServiceAccount. -message GetProjectServiceAccountRequest { - // Project ID. - // Required. - string project_id = 1; - - // The project to be billed for this request. - string user_project = 2; - - // A set of parameters common to all Storage API requests. - CommonRequestParams common_request_params = 3; -} - -// Parameters that can be passed to any object request. -message CommonObjectRequestParams { - // Encryption algorithm used with Customer-Supplied Encryption Keys feature. - string encryption_algorithm = 1; - - // Encryption key used with Customer-Supplied Encryption Keys feature. - string encryption_key = 2; - - // SHA256 hash of encryption key used with Customer-Supplied Encryption Keys - // feature. - string encryption_key_sha256 = 3; -} - -// Parameters that can be passed to any request. -message CommonRequestParams { - // Required when using buckets with Requestor Pays feature enabled. - string user_project = 1; - - // Lets you enforce per-user quotas from a server-side application even in - // cases when the user's IP address is unknown. This can occur, for example, - // with applications that run cron jobs on App Engine on a user's behalf. - // You can choose any arbitrary string that uniquely identifies a user, but it - // is limited to 40 characters. - // Overrides user_ip if both are provided. - string quota_user = 2; - - // IP address of the end user for whom the API call is being made. - // Lets you enforce per-user quotas when calling the API from a server-side - // application. - string user_ip = 3; - - // Subset of fields to include in the response. - google.protobuf.FieldMask fields = 4; -} - -// Shared constants. -message ServiceConstants { - // A collection of constant values meaningful to the Storage API. - enum Values { - option allow_alias = true; - - // Unused. Proto3 requires first enum to be 0. 
- SIZE_UNSPECIFIED = 0; - - // The maximum size chunk that can will be returned in a single - // ReadRequest. - // 2 MiB. - MAX_READ_CHUNK_BYTES = 2097152; - - // The maximum size chunk that can be sent in a single InsertObjectRequest. - // 2 MiB. - MAX_WRITE_CHUNK_BYTES = 2097152; - - // The maximum size of an object in MB - whether written in a single stream - // or composed from multiple other objects. - // 5 TiB. - MAX_OBJECT_SIZE_MB = 5242880; - - // The maximum length field name that can be sent in a single - // custom metadata field. - // 1 KiB. - MAX_CUSTOM_METADATA_FIELD_NAME_BYTES = 1024; - - // The maximum length field value that can be sent in a single - // custom_metadata field. - // 4 KiB. - MAX_CUSTOM_METADATA_FIELD_VALUE_BYTES = 4096; - - // The maximum total bytes that can be populated into all field names and - // values of the custom_metadata for one object. - // 8 KiB. - MAX_CUSTOM_METADATA_TOTAL_SIZE_BYTES = 8192; - - // The maximum total bytes that can be populated into all bucket metadata - // fields. - // 20 KiB. - MAX_BUCKET_METADATA_TOTAL_SIZE_BYTES = 20480; - - // The maximum number of NotificationConfigurations that can be registered - // for a given bucket. - MAX_NOTIFICATION_CONFIGS_PER_BUCKET = 100; - - // The maximum number of LifecycleRules that can be registered for a given - // bucket. - MAX_LIFECYCLE_RULES_PER_BUCKET = 100; - - // The maximum number of custom attributes per NotificationConfig. - MAX_NOTIFICATION_CUSTOM_ATTRIBUTES = 5; - - // The maximum length of a custom attribute key included in - // NotificationConfig. - MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_KEY_LENGTH = 256; - - // The maximum length of a custom attribute value included in a - // NotificationConfig. - MAX_NOTIFICATION_CUSTOM_ATTRIBUTE_VALUE_LENGTH = 1024; - - // The maximum number of key/value entries per bucket label. - MAX_LABELS_ENTRIES_COUNT = 64; - - // The maximum character length of the key or value in a bucket - // label map. 
- MAX_LABELS_KEY_VALUE_LENGTH = 63; - - // The maximum byte size of the key or value in a bucket label - // map. - MAX_LABELS_KEY_VALUE_BYTES = 128; - - // The maximum number of object IDs that can be included in a - // DeleteObjectsRequest. - MAX_OBJECT_IDS_PER_DELETE_OBJECTS_REQUEST = 1000; - - // The maximum number of days for which a token returned by the - // GetListObjectsSplitPoints RPC is valid. - SPLIT_TOKEN_MAX_VALID_DAYS = 14; - } - - -} diff --git a/end2end-test-examples/gcs/src/main/proto/google/storage/v1/storage_resources.proto b/end2end-test-examples/gcs/src/main/proto/google/storage/v1/storage_resources.proto deleted file mode 100644 index 77a01785..00000000 --- a/end2end-test-examples/gcs/src/main/proto/google/storage/v1/storage_resources.proto +++ /dev/null @@ -1,793 +0,0 @@ -// Copyright 2019 Google LLC. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -syntax = "proto3"; - -package google.storage.v1; - -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - -option go_package = "google.golang.org/genproto/googleapis/storage/v1;storage"; -option java_multiple_files = true; -option java_outer_classname = "CloudStorageResourcesProto"; -option java_package = "com.google.google.storage.v1"; - -// A bucket. -message Bucket { - // Billing properties of a bucket. - message Billing { - // When set to true, Requester Pays is enabled for this bucket. 
- bool requester_pays = 1; - } - - // Cross-Origin Response sharing (CORS) properties for a bucket. - // For more on GCS and CORS, see - // https://cloud.google.com/storage/docs/cross-origin. - // For more on CORS in general, see https://tools.ietf.org/html/rfc6454. - message Cors { - // The list of Origins eligible to receive CORS response headers. See - // [https://tools.ietf.org/html/rfc6454][RFC 6454] for more on origins. - // Note: "*" is permitted in the list of origins, and means "any Origin". - repeated string origin = 1; - - // The list of HTTP methods on which to include CORS response headers, - // (`GET`, `OPTIONS`, `POST`, etc) Note: "*" is permitted in the list of - // methods, and means "any method". - repeated string method = 2; - - // The list of HTTP headers other than the - // [https://www.w3.org/TR/cors/#simple-response-header][simple response - // headers] to give permission for the user-agent to share across domains. - repeated string response_header = 3; - - // The value, in seconds, to return in the - // [https://www.w3.org/TR/cors/#access-control-max-age-response-header][Access-Control-Max-Age - // header] used in preflight responses. - int32 max_age_seconds = 4; - } - - // Encryption properties of a bucket. - message Encryption { - // A Cloud KMS key that will be used to encrypt objects inserted into this - // bucket, if no encryption method is specified. - string default_kms_key_name = 1; - } - - // Lifecycle properties of a bucket. - // For more information, see https://cloud.google.com/storage/docs/lifecycle. - message Lifecycle { - // A lifecycle Rule, combining an action to take on an object and a - // condition which will trigger that action. - message Rule { - // An action to take on an object. - message Action { - // Type of the action. Currently, only `Delete` and - // `SetStorageClass` are supported. - string type = 1; - - // Target storage class. Required iff the type of the action is - // SetStorageClass. 
- string storage_class = 2; - } - - // A condition of an object which triggers some action. - message Condition { - // Age of an object (in days). This condition is satisfied when an - // object reaches the specified age. - int32 age = 1; - - // A date in [RFC 3339][1] format with only the date part (for - // instance, "2013-01-15"). This condition is satisfied when an - // object is created before midnight of the specified date in UTC. - // [1]: https://tools.ietf.org/html/rfc3339 - google.protobuf.Timestamp created_before = 2; - - // Relevant only for versioned objects. If the value is - // `true`, this condition matches live objects; if the value - // is `false`, it matches archived objects. - google.protobuf.BoolValue is_live = 3; - - // Relevant only for versioned objects. If the value is N, this - // condition is satisfied when there are at least N versions (including - // the live version) newer than this version of the object. - int32 num_newer_versions = 4; - - // Objects having any of the storage classes specified by this condition - // will be matched. Values include `MULTI_REGIONAL`, `REGIONAL`, - // `NEARLINE`, `COLDLINE`, `STANDARD`, and - // `DURABLE_REDUCED_AVAILABILITY`. - repeated string matches_storage_class = 5; - - // A regular expression that satisfies the RE2 syntax. This condition is - // satisfied when the name of the object matches the RE2 pattern. Note: - // This feature is currently in the "Early Access" launch stage and is - // only available to a whitelisted set of users; that means that this - // feature may be changed in backward-incompatible ways and that it is - // not guaranteed to be released. - string matches_pattern = 6; - } - - // The action to take. - Action action = 1; - - // The condition(s) under which the action will be taken. - Condition condition = 2; - } - - // A lifecycle management rule, which is made of an action to take and the - // condition(s) under which the action will be taken. 
- repeated Rule rule = 1; - } - - // Logging-related properties of a bucket. - message Logging { - // The destination bucket where the current bucket's logs should be placed. - string log_bucket = 1; - - // A prefix for log object names. - string log_object_prefix = 2; - } - - // Retention policy properties of a bucket. - message RetentionPolicy { - // Server-determined value that indicates the time from which policy was - // enforced and effective. This value is in - // [https://tools.ietf.org/html/rfc3339][RFC 3339] format. - google.protobuf.Timestamp effective_time = 1; - - // Once locked, an object retention policy cannot be modified. - bool is_locked = 2; - - // The duration in seconds that objects need to be retained. Retention - // duration must be greater than zero and less than 100 years. Note that - // enforcement of retention periods less than a day is not guaranteed. Such - // periods should only be used for testing purposes. - int64 retention_period = 3; - } - - // Properties of a bucket related to versioning. - // For more on GCS versioning, see - // https://cloud.google.com/storage/docs/object-versioning. - message Versioning { - // While set to true, versioning is fully enabled for this bucket. - bool enabled = 1; - } - - // Properties of a bucket related to accessing the contents as a static - // website. For more on hosting a static website via GCS, see - // https://cloud.google.com/storage/docs/hosting-static-website. - message Website { - // If the requested object path is missing, the service will ensure the path - // has a trailing '/', append this suffix, and attempt to retrieve the - // resulting object. This allows the creation of `index.html` - // objects to represent directory pages. 
- string main_page_suffix = 1; - - // If the requested object path is missing, and any - // `mainPageSuffix` object is missing, if applicable, the service - // will return the named object from this bucket as the content for a - // [https://tools.ietf.org/html/rfc7231#section-6.5.4][404 Not Found] - // result. - string not_found_page = 2; - } - - // Access controls on the bucket. - repeated BucketAccessControl acl = 1; - - // Default access controls to apply to new objects when no ACL is provided. - repeated ObjectAccessControl default_object_acl = 2; - - // The bucket's lifecycle configuration. See - // [https://developers.google.com/storage/docs/lifecycle]Lifecycle Management] - // for more information. - Lifecycle lifecycle = 3; - - // The creation time of the bucket in - // [https://tools.ietf.org/html/rfc3339][RFC 3339] format. - // Attempting to set this field will result in an error. - google.protobuf.Timestamp time_created = 4; - - // The ID of the bucket. For buckets, the `id` and `name` properties are the - // same. - // Attempting to update this field after the bucket is created will result in - // an error. - string id = 5; - - // The name of the bucket. - // Attempting to update this field after the bucket is created will result in - // an error. - string name = 6; - - // The project number of the project the bucket belongs to. - // Attempting to set this field will result in an error. - int64 project_number = 7; - - // The metadata generation of this bucket. - // Attempting to set this field will result in an error. - int64 metageneration = 8; - - // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] - // (CORS) configuration. - repeated Cors cors = 9; - - // The location of the bucket. Object data for objects in the bucket resides - // in physical storage within this region. Defaults to `US`. 
See the - // [https://developers.google.com/storage/docs/concepts-techniques#specifyinglocations"][developer's - // guide] for the authoritative list. Attempting to update this field after - // the bucket is created will result in an error. - string location = 10; - - // The bucket's default storage class, used whenever no storageClass is - // specified for a newly-created object. This defines how objects in the - // bucket are stored and determines the SLA and the cost of storage. - // If this value is not specified when the bucket is created, it will default - // to `STANDARD`. For more information, see - // https://developers.google.com/storage/docs/storage-classes. - string storage_class = 11; - - // HTTP 1.1 [https://tools.ietf.org/html/rfc7232#section-2.3"]Entity tag] - // for the bucket. - // Attempting to set this field will result in an error. - string etag = 12; - - // The modification time of the bucket. - // Attempting to set this field will result in an error. - google.protobuf.Timestamp updated = 13; - - // The default value for event-based hold on newly created objects in this - // bucket. Event-based hold is a way to retain objects indefinitely until an - // event occurs, signified by the - // hold's release. After being released, such objects will be subject to - // bucket-level retention (if any). One sample use case of this flag is for - // banks to hold loan documents for at least 3 years after loan is paid in - // full. Here, bucket-level retention is 3 years and the event is loan being - // paid in full. In this example, these objects will be held intact for any - // number of years until the event has occurred (event-based hold on the - // object is released) and then 3 more years after that. That means retention - // duration of the objects begins from the moment event-based hold - // transitioned from true to false. Objects under event-based hold cannot be - // deleted, overwritten or archived until the hold is removed. 
- bool default_event_based_hold = 14; - - // User-provided labels, in key/value pairs. - map labels = 15; - - // The bucket's website configuration, controlling how the service behaves - // when accessing bucket contents as a web site. See the - // [https://cloud.google.com/storage/docs/static-website][Static Website - // Examples] for more information. - Website website = 16; - - // The bucket's versioning configuration. - Versioning versioning = 17; - - // The bucket's logging configuration, which defines the destination bucket - // and optional name prefix for the current bucket's logs. - Logging logging = 18; - - // The owner of the bucket. This is always the project team's owner group. - Owner owner = 19; - - // Encryption configuration for a bucket. - Encryption encryption = 20; - - // The bucket's billing configuration. - Billing billing = 21; - - // The bucket's retention policy. The retention policy enforces a minimum - // retention time for all objects contained in the bucket, based on their - // creation time. Any attempt to overwrite or delete objects younger than the - // retention period will result in a PERMISSION_DENIED error. An unlocked - // retention policy can be modified or removed from the bucket via a - // storage.buckets.update operation. A locked retention policy cannot be - // removed or shortened in duration for the lifetime of the bucket. - // Attempting to remove or decrease period of a locked retention policy will - // result in a PERMISSION_DENIED error. - RetentionPolicy retention_policy = 22; -} - -// An access-control entry. -message BucketAccessControl { - // The access permission for the entity. - string role = 1; - - // HTTP 1.1 ["https://tools.ietf.org/html/rfc7232#section-2.3][Entity tag] - // for the access-control entry. - string etag = 2; - - // The ID of the access-control entry. - string id = 3; - - // The name of the bucket. 
- string bucket = 4; - - // The entity holding the permission, in one of the following forms: - // * `user-{userid}` - // * `user-{email}` - // * `group-{groupid}` - // * `group-{email}` - // * `domain-{domain}` - // * `project-{team-projectid}` - // * `allUsers` - // * `allAuthenticatedUsers` - // Examples: - // * The user `liz@example.com` would be `user-liz@example.com`. - // * The group `example@googlegroups.com` would be - // `group-example@googlegroups.com` - // * All members of the Google Apps for Business domain `example.com` would be - // `domain-example.com` - string entity = 6; - - // The ID for the entity, if any. - string entity_id = 7; - - // The email address associated with the entity, if any. - string email = 8; - - // The domain associated with the entity, if any. - string domain = 9; - - // The project team associated with the entity, if any. - ProjectTeam project_team = 10; -} - -// The response to a call to BucketAccessControls.ListBucketAccessControls. -message ListBucketAccessControlsResponse { - // The list of items. - repeated BucketAccessControl items = 1; -} - -// The result of a call to Buckets.ListBuckets -message ListBucketsResponse { - // The list of items. - repeated Bucket items = 1; - - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - string next_page_token = 2; -} - -// An notification channel used to watch for resource changes. -message Channel { - // A UUID or similar unique string that identifies this channel. - string id = 1; - - // An opaque ID that identifies the resource being watched on this channel. - // Stable across different API versions. - string resource_id = 2; - - // A version-specific identifier for the watched resource. - string resource_uri = 3; - - // An arbitrary string delivered to the target address with each notification - // delivered over this channel. Optional. 
- string token = 4; - - // Date and time of notification channel expiration. Optional. - google.protobuf.Timestamp expiration = 5; - - // The type of delivery mechanism used for this channel. - string type = 6; - - // The address where notifications are delivered for this channel. - string address = 7; - - // Additional parameters controlling delivery channel behavior. Optional. - map params = 8; - - // A Boolean value to indicate whether payload is wanted. Optional. - bool payload = 9; -} - -// Message used to convey content being read or written, along with its -// checksum. -message ChecksummedData { - // The data. - bytes content = 1; - - // CRC32C digest of the contents. - google.protobuf.UInt32Value crc32c = 2; -} - -// Message used for storing full (not subrange) object checksums. -message ObjectChecksums { - // CRC32C digest of the object data. Computed by the GCS service for - // all written objects, and validated by the GCS service against - // client-supplied values if present in an InsertObjectRequest. - google.protobuf.UInt32Value crc32c = 1; - - // Hex-encoded MD5 hash of the object data (hexdigest). Whether/how this - // checksum is provided and validated is service-dependent. - string md5_hash = 2; -} - -// A collection of enums used in multiple places throughout the API. -message CommonEnums { - // A set of properties to return in a response. - enum Projection { - // No specified projection. - PROJECTION_UNSPECIFIED = 0; - - // Omit `owner`, `acl`, and `defaultObjectAcl` properties. - NO_ACL = 1; - - // Include all properties. - FULL = 2; - } - - // Predefined or "canned" aliases for sets of specific bucket ACL entries. - enum PredefinedBucketAcl { - // No predefined ACL. - PREDEFINED_BUCKET_ACL_UNSPECIFIED = 0; - - // Project team owners get `OWNER` access, and - // `allAuthenticatedUsers` get `READER` access. - BUCKET_ACL_AUTHENTICATED_READ = 1; - - // Project team owners get `OWNER` access. 
- BUCKET_ACL_PRIVATE = 2; - - // Project team members get access according to their roles. - BUCKET_ACL_PROJECT_PRIVATE = 3; - - // Project team owners get `OWNER` access, and - // `allUsers` get `READER` access. - BUCKET_ACL_PUBLIC_READ = 4; - - // Project team owners get `OWNER` access, and - // `allUsers` get `WRITER` access. - BUCKET_ACL_PUBLIC_READ_WRITE = 5; - } - - // Predefined or "canned" aliases for sets of specific object ACL entries. - enum PredefinedObjectAcl { - // No predefined ACL. - PREDEFINED_OBJECT_ACL_UNSPECIFIED = 0; - - // Object owner gets `OWNER` access, and - // `allAuthenticatedUsers` get `READER` access. - OBJECT_ACL_AUTHENTICATED_READ = 1; - - // Object owner gets `OWNER` access, and project team owners get - // `OWNER` access. - OBJECT_ACL_BUCKET_OWNER_FULL_CONTROL = 2; - - // Object owner gets `OWNER` access, and project team owners get - // `READER` access. - OBJECT_ACL_BUCKET_OWNER_READ = 3; - - // Object owner gets `OWNER` access. - OBJECT_ACL_PRIVATE = 4; - - // Object owner gets `OWNER` access, and project team members get - // access according to their roles. - OBJECT_ACL_PROJECT_PRIVATE = 5; - - // Object owner gets `OWNER` access, and `allUsers` - // get `READER` access. - OBJECT_ACL_PUBLIC_READ = 6; - } - - -} - -// Specifies a requested range of bytes to download. -message ContentRange { - // The starting offset of the object data. - int64 start = 1; - - // The ending offset of the object data. - int64 end = 2; - - // The complete length of the object data. - int64 complete_length = 3; -} - -// A subscription to receive Google PubSub notifications. -message Notification { - // The Cloud PubSub topic to which this subscription publishes. Formatted as: - // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}' - string topic = 1; - - // If present, only send notifications about listed event types. If empty, - // sent notifications for all event types. 
- repeated string event_types = 2; - - // An optional list of additional attributes to attach to each Cloud PubSub - // message published for this notification subscription. - map custom_attributes = 3; - - // HTTP 1.1 [https://tools.ietf.org/html/rfc7232#section-2.3][Entity tag] - // for this subscription notification. - string etag = 4; - - // If present, only apply this notification configuration to object names that - // begin with this prefix. - string object_name_prefix = 5; - - // The desired content of the Payload. - string payload_format = 6; - - // The ID of the notification. - string id = 7; -} - -// The result of a call to Notifications.ListNotifications -message ListNotificationsResponse { - // The list of items. - repeated Notification items = 1; -} - -// An object. -message Object { - // Describes the customer-specified mechanism used to store the data at rest. - message CustomerEncryption { - // The encryption algorithm. - string encryption_algorithm = 1; - - // SHA256 hash value of the encryption key. - string key_sha256 = 2; - } - - // Content-Encoding of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] - string content_encoding = 1; - - // Content-Disposition of the object data, matching - // [https://tools.ietf.org/html/rfc6266][RFC 6266]. - string content_disposition = 2; - - // Cache-Control directive for the object data, matching - // [https://tools.ietf.org/html/rfc7234#section-5.2"][RFC 7234 §5.2]. - // If omitted, and the object is accessible to all anonymous users, the - // default will be `public, max-age=3600`. - string cache_control = 3; - - // Access controls on the object. - repeated ObjectAccessControl acl = 4; - - // Content-Language of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. - string content_language = 5; - - // The version of the metadata for this object at this generation. 
Used for - // preconditions and for detecting changes in metadata. A metageneration - // number is only meaningful in the context of a particular generation of a - // particular object. - // Attempting to set this field will result in an error. - int64 metageneration = 6; - - // The deletion time of the object. Will be returned if and only if this - // version of the object has been deleted. - // Attempting to set this field will result in an error. - google.protobuf.Timestamp time_deleted = 7; - - // Content-Type of the object data, matching - // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. - // If an object is stored without a Content-Type, it is served as - // `application/octet-stream`. - string content_type = 8; - - // Content-Length of the object data in bytes, matching - // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. - // Attempting to set this field will result in an error. - int64 size = 9; - - // The creation time of the object. - // Attempting to set this field will result in an error. - google.protobuf.Timestamp time_created = 10; - - // CRC32c checksum. For more information about using the CRC32c - // checksum, see - // [https://cloud.google.com/storage/docs/hashes-etags#_JSONAPI][Hashes and - // ETags: Best Practices]. This is a server determined value and should not be - // supplied by the user when sending an Object. The server will ignore any - // value provided. Users should instead use the object_checksums field on the - // InsertObjectRequest when uploading an object. - google.protobuf.UInt32Value crc32c = 11; - - // Number of underlying components that make up this object. Components are - // accumulated by compose operations. - // Attempting to set this field will result in an error. - int32 component_count = 12; - - // MD5 hash of the data; encoded using base64 as per - // [https://tools.ietf.org/html/rfc4648#section-4][RFC 4648 §4]. 
For more - // information about using the MD5 hash, see - // [https://cloud.google.com/storage/docs/hashes-etags#_JSONAPI][Hashes and - // ETags: Best Practices]. This is a server determined value and should not be - // supplied by the user when sending an Object. The server will ignore any - // value provided. Users should instead use the object_checksums field on the - // InsertObjectRequest when uploading an object. - string md5_hash = 13; - - // HTTP 1.1 Entity tag for the object. See - // [https://tools.ietf.org/html/rfc7232#section-2.3][RFC 7232 §2.3]. - // Attempting to set this field will result in an error. - string etag = 14; - - // The modification time of the object metadata. - // Attempting to set this field will result in an error. - google.protobuf.Timestamp updated = 15; - - // Storage class of the object. - string storage_class = 16; - - // Cloud KMS Key used to encrypt this object, if the object is encrypted by - // such a key. - string kms_key_name = 17; - - // The time at which the object's storage class was last changed. When the - // object is initially created, it will be set to time_created. - // Attempting to set this field will result in an error. - google.protobuf.Timestamp time_storage_class_updated = 18; - - // Whether an object is under temporary hold. While this flag is set to true, - // the object is protected against deletion and overwrites. A common use case - // of this flag is regulatory investigations where objects need to be retained - // while the investigation is ongoing. Note that unlike event-based hold, - // temporary hold does not impact retention expiration time of an object. - bool temporary_hold = 19; - - // A server-determined value that specifies the earliest time that the - // object's retention period expires. This value is in - // [https://tools.ietf.org/html/rfc3339][RFC 3339] format. 
- // Note 1: This field is not provided for objects with an active event-based - // hold, since retention expiration is unknown until the hold is removed. - // Note 2: This value can be provided even when temporary hold is set (so that - // the user can reason about policy without having to first unset the - // temporary hold). - google.protobuf.Timestamp retention_expiration_time = 20; - - // User-provided metadata, in key/value pairs. - map metadata = 21; - - // Whether an object is under event-based hold. Event-based hold is a way to - // retain objects until an event occurs, which is signified by the - // hold's release (i.e. this value is set to false). After being released (set - // to false), such objects will be subject to bucket-level retention (if any). - // One sample use case of this flag is for banks to hold loan documents for at - // least 3 years after loan is paid in full. Here, bucket-level retention is 3 - // years and the event is the loan being paid in full. In this example, these - // objects will be held intact for any number of years until the event has - // occurred (event-based hold on the object is released) and then 3 more years - // after that. That means retention duration of the objects begins from the - // moment event-based hold transitioned from true to false. - google.protobuf.BoolValue event_based_hold = 29; - - // The name of the object. Required if not specified by URL parameter. - // Attempting to update this field after the object is created will result in - // an error. - string name = 23; - - // The ID of the object, including the bucket name, object name, and - // generation number. - // Attempting to update this field after the object is created will result in - // an error. - string id = 24; - - // The name of the bucket containing this object. - // Attempting to update this field after the object is created will result in - // an error. - string bucket = 25; - - // The content generation of this object. 
Used for object versioning. - // Attempting to set this field will result in an error. - int64 generation = 26; - - // The owner of the object. This will always be the uploader of the object. - // Attempting to set this field will result in an error. - Owner owner = 27; - - // Metadata of customer-supplied encryption key, if the object is encrypted by - // such a key. - CustomerEncryption customer_encryption = 28; -} - -// An access-control entry. -message ObjectAccessControl { - // The access permission for the entity. - string role = 1; - - // HTTP 1.1 Entity tag for the access-control entry. - // See [https://tools.ietf.org/html/rfc7232#section-2.3][RFC 7232 §2.3]. - string etag = 2; - - // The ID of the access-control entry. - string id = 3; - - // The name of the bucket. - string bucket = 4; - - // The name of the object, if applied to an object. - string object = 5; - - // The content generation of the object, if applied to an object. - int64 generation = 6; - - // The entity holding the permission, in one of the following forms: - // * `user-{userid}` - // * `user-{email}` - // * `group-{groupid}` - // * `group-{email}` - // * `domain-{domain}` - // * `project-{team-projectid}` - // * `allUsers` - // * `allAuthenticatedUsers` - // Examples: - // * The user `liz@example.com` would be `user-liz@example.com`. - // * The group `example@googlegroups.com` would be - // `group-example@googlegroups.com`. - // * All members of the Google Apps for Business domain `example.com` would be - // `domain-example.com`. - string entity = 7; - - // The ID for the entity, if any. - string entity_id = 8; - - // The email address associated with the entity, if any. - string email = 9; - - // The domain associated with the entity, if any. - string domain = 10; - - // The project team associated with the entity, if any. - ProjectTeam project_team = 11; -} - -// The result of a call to ObjectAccessControls.ListObjectAccessControls. 
-message ListObjectAccessControlsResponse { - // The list of items. - repeated ObjectAccessControl items = 1; -} - -// The result of a call to Objects.ListObjects -message ListObjectsResponse { - // The list of prefixes of objects matching-but-not-listed up to and including - // the requested delimiter. - repeated string prefixes = 1; - - // The list of items. - repeated Object items = 2; - - // The continuation token, used to page through large result sets. Provide - // this value in a subsequent request to return the next page of results. - string next_page_token = 3; -} - -// Represents the Viewers, Editors, or Owners of a given project. -message ProjectTeam { - // The project number. - string project_number = 1; - - // The team. - string team = 2; -} - -// A subscription to receive Google PubSub notifications. -message ServiceAccount { - // The ID of the notification. - string email_address = 1; -} - -// The owner of a specific resource. -message Owner { - // The entity, in the form `user-`*userId*. - string entity = 1; - - // The ID for the entity. - string entity_id = 2; -} diff --git a/grpc-gcp/CHANGELOG.md b/grpc-gcp/CHANGELOG.md index 20597001..94d17bc7 100644 --- a/grpc-gcp/CHANGELOG.md +++ b/grpc-gcp/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 1.2.0 (2021-07-18) + +### Features + +* multi-endpoint (#135). +* round-robin for bind calls (#127). +* minSize of the channel pool (#134). +* detailed logs (#133). +* log metrics (#131). +* map a key to current channel when bound call arrives but no mapping for the +key exists (#132). +* consolidate channel pool config in the GcpChannelPoolOptions (#109). 
+ ## 1.1.0 (2021-07-20) ### Features diff --git a/grpc-gcp/build.gradle b/grpc-gcp/build.gradle index 5d7e3587..63ab6499 100644 --- a/grpc-gcp/build.gradle +++ b/grpc-gcp/build.gradle @@ -15,7 +15,7 @@ repositories { mavenLocal() } -version = '1.1.0' +version = '1.2.0' group = 'com.google.cloud' description = 'GRPC-GCP-Extension Java' sourceCompatibility = '1.8' @@ -34,6 +34,7 @@ dependencies { implementation "io.grpc:grpc-protobuf:${grpcVersion}" implementation "io.grpc:grpc-stub:${grpcVersion}" implementation "io.opencensus:opencensus-api:${opencensusVersion}" + implementation "com.google.api:api-common:2.1.5" compileOnly "org.apache.tomcat:annotations-api:6.0.53" // necessary for Java 9+ diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpClientCall.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpClientCall.java index 60c6c1a8..edd853cc 100644 --- a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpClientCall.java +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpClientCall.java @@ -115,7 +115,12 @@ public void sendMessage(ReqT message) { && delegateChannel.getChannelRef(keys.get(0)) != null) { key = keys.get(0); } - delegateChannelRef = delegateChannel.getChannelRef(key); + + if (affinity != null && affinity.getCommand().equals(AffinityConfig.Command.BIND)) { + delegateChannelRef = delegateChannel.getChannelRefForBind(); + } else { + delegateChannelRef = delegateChannel.getChannelRef(key); + } delegateChannelRef.activeStreamsCountIncr(); // Create the client call and do the previous operations. 
diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannel.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannel.java index 44bfd7ff..4354cce7 100644 --- a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannel.java +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannel.java @@ -17,6 +17,7 @@ package com.google.cloud.grpc; import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; import com.google.cloud.grpc.GcpManagedChannelOptions.GcpResiliencyOptions; @@ -24,8 +25,12 @@ import com.google.cloud.grpc.proto.ApiConfig; import com.google.cloud.grpc.proto.MethodConfig; import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import com.google.common.base.Joiner; +import com.google.errorprone.annotations.concurrent.GuardedBy; import com.google.protobuf.Descriptors.FieldDescriptor; import com.google.protobuf.MessageOrBuilder; +import com.google.protobuf.TextFormat; import io.grpc.CallOptions; import io.grpc.ClientCall; import io.grpc.ConnectivityState; @@ -44,14 +49,19 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.LinkedList; import java.util.List; import java.util.LongSummaryStatistics; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Supplier; import java.util.logging.Logger; import javax.annotation.Nullable; @@ -59,8 +69,11 @@ public class GcpManagedChannel extends ManagedChannel { private static final 
Logger logger = Logger.getLogger(GcpManagedChannel.class.getName()); static final AtomicInteger channelPoolIndex = new AtomicInteger(); - private static final int DEFAULT_MAX_CHANNEL = 10; - private static final int DEFAULT_MAX_STREAM = 100; + static final int DEFAULT_MAX_CHANNEL = 10; + static final int DEFAULT_MAX_STREAM = 100; + + @GuardedBy("this") + private Integer bindingIndex = -1; private final ManagedChannelBuilder delegateChannelBuilder; private final GcpManagedChannelOptions options; @@ -69,6 +82,7 @@ public class GcpManagedChannel extends ManagedChannel { private final int unresponsiveMs; private final int unresponsiveDropCount; private int maxSize = DEFAULT_MAX_CHANNEL; + private int minSize = 0; private int maxConcurrentStreamsLowWatermark = DEFAULT_MAX_STREAM; @VisibleForTesting final Map methodToAffinity = new HashMap<>(); @@ -81,6 +95,10 @@ public class GcpManagedChannel extends ManagedChannel { @VisibleForTesting final List channelRefs = new CopyOnWriteArrayList<>(); + private final ExecutorService stateNotificationExecutor = Executors.newCachedThreadPool( + new ThreadFactoryBuilder().setNameFormat("gcp-mc-state-notifications-%d").build()); + private List stateChangeCallbacks = Collections.synchronizedList(new LinkedList<>()); + // Metrics configuration. private MetricRegistry metricRegistry; private final List labelKeys = new ArrayList<>(); @@ -96,6 +114,10 @@ public class GcpManagedChannel extends ManagedChannel { new ArrayList<>( Collections.singletonList(LabelValue.create(GcpMetricsConstants.RESULT_ERROR))); private String metricPrefix; + private final String metricPoolIndex = + String.format("pool-%d", channelPoolIndex.incrementAndGet()); + private final Map cumulativeMetricValues = new ConcurrentHashMap<>(); + private ScheduledExecutorService logMetricService; // Metrics counters. 
private final AtomicInteger readyChannels = new AtomicInteger(); @@ -122,8 +144,8 @@ public class GcpManagedChannel extends ManagedChannel { private final AtomicLong totalErrCalls = new AtomicLong(); private boolean minErrReported = false; private boolean maxErrReported = false; - private int minAffinity = 0; - private int maxAffinity = 0; + private final AtomicInteger minAffinity = new AtomicInteger(); + private final AtomicInteger maxAffinity = new AtomicInteger(); private final AtomicInteger totalAffinityCount = new AtomicInteger(); private final AtomicLong fallbacksSucceeded = new AtomicLong(); private final AtomicLong fallbacksFailed = new AtomicLong(); @@ -141,21 +163,22 @@ public class GcpManagedChannel extends ManagedChannel { * @param options the options for GcpManagedChannel. */ public GcpManagedChannel( - ManagedChannelBuilder delegateChannelBuilder, - ApiConfig apiConfig, - int poolSize, - GcpManagedChannelOptions options) { + ManagedChannelBuilder delegateChannelBuilder, + ApiConfig apiConfig, + GcpManagedChannelOptions options) { loadApiConfig(apiConfig); - if (poolSize != 0) { - this.maxSize = poolSize; - } this.delegateChannelBuilder = delegateChannelBuilder; this.options = options; + logger.finer(log( + "Created with api config: %s, and options: %s", + apiConfig == null ? 
"null" : TextFormat.shortDebugString(apiConfig), + options + )); initOptions(); if (options.getResiliencyOptions() != null) { fallbackEnabled = options.getResiliencyOptions().isNotReadyFallbackEnabled(); unresponsiveDetectionEnabled = - options.getResiliencyOptions().isUnresponsiveDetectionEnabled(); + options.getResiliencyOptions().isUnresponsiveDetectionEnabled(); unresponsiveMs = options.getResiliencyOptions().getUnresponsiveDetectionMs(); unresponsiveDropCount = options.getResiliencyOptions().getUnresponsiveDetectionDroppedCount(); } else { @@ -164,23 +187,102 @@ public GcpManagedChannel( unresponsiveMs = 0; unresponsiveDropCount = 0; } + initMinChannels(); + } + + /** + * Constructor for GcpManagedChannel. + * Deprecated. Use the one without the poolSize and set the maximum pool size in options. However, note that if + * setting the pool size from options then concurrent streams low watermark (even the default one) will be also taken + * from the options and not apiConfig. + * + * @param delegateChannelBuilder the underlying delegate ManagedChannelBuilder. + * @param apiConfig the ApiConfig object for configuring GcpManagedChannel. + * @param poolSize maximum number of channels the pool can have. + * @param options the options for GcpManagedChannel. + */ + @Deprecated + public GcpManagedChannel( + ManagedChannelBuilder delegateChannelBuilder, + ApiConfig apiConfig, + int poolSize, + GcpManagedChannelOptions options) { + this(delegateChannelBuilder, apiConfig, options); + if (poolSize != 0) { + logger.finer(log("Pool size adjusted to %d", poolSize)); + this.maxSize = poolSize; + } + } + + private Supplier log(Supplier messageSupplier) { + return () -> String.format("%s: %s", metricPoolIndex, messageSupplier.get()); + } + + private String log(String message) { + return String.format("%s: %s", metricPoolIndex, message); + } + + private String log(String format, Object... 
args) { + return String.format("%s: %s", metricPoolIndex, String.format(format, args)); + } + + private synchronized void initMinChannels() { + while (minSize - getNumberOfChannels() > 0) { + createNewChannel(); + } } private void initOptions() { + GcpManagedChannelOptions.GcpChannelPoolOptions poolOptions = options.getChannelPoolOptions(); + if (poolOptions != null) { + maxSize = poolOptions.getMaxSize(); + minSize = poolOptions.getMinSize(); + maxConcurrentStreamsLowWatermark = poolOptions.getConcurrentStreamsLowWatermark(); + } initMetrics(); } + private synchronized void initLogMetrics() { + if (logMetricService != null) { + return; + } + logMetricService = Executors.newSingleThreadScheduledExecutor(); + logMetricService.scheduleAtFixedRate(this::logMetrics, 60, 60, SECONDS); + } + + private void logMetricsOptions() { + if (options.getMetricsOptions() != null) { + logger.fine(log("Metrics options: %s", options.getMetricsOptions())); + } + } + + private void logChannelsStats() { + logger.fine(log( + "Active streams counts: [%s]", Joiner.on(", ").join( + channelRefs.stream().mapToInt(ChannelRef::getActiveStreamsCount).iterator() + ) + )); + logger.fine(log( + "Affinity counts: [%s]", Joiner.on(", ").join( + channelRefs.stream().mapToInt(ChannelRef::getAffinityCount).iterator() + ) + )); + } + private void initMetrics() { final GcpMetricsOptions metricsOptions = options.getMetricsOptions(); if (metricsOptions == null) { - logger.info("Metrics options are empty. Metrics disabled."); + logger.info(log("Metrics options are empty. Metrics disabled.")); + initLogMetrics(); return; } + logMetricsOptions(); if (metricsOptions.getMetricRegistry() == null) { - logger.info("Metric registry is null. Metrics disabled."); + logger.info(log("Metric registry is null. 
Metrics disabled.")); + initLogMetrics(); return; } - logger.info("Metrics enabled."); + logger.info(log("Metrics enabled.")); metricRegistry = metricsOptions.getMetricRegistry(); labelKeys.addAll(metricsOptions.getLabelKeys()); @@ -193,8 +295,7 @@ private void initMetrics() { LabelKey.create(GcpMetricsConstants.POOL_INDEX_LABEL, GcpMetricsConstants.POOL_INDEX_DESC); labelKeys.add(poolKey); labelKeysWithResult.add(poolKey); - final LabelValue poolIndex = - LabelValue.create(String.format("pool-%d", channelPoolIndex.incrementAndGet())); + final LabelValue poolIndex = LabelValue.create(metricPoolIndex); labelValues.add(poolIndex); labelValuesSuccess.add(poolIndex); labelValuesError.add(poolIndex); @@ -381,6 +482,53 @@ private void initMetrics() { GcpManagedChannel::reportMaxUnresponsiveDrops); } + private void logGauge(String key, long value) { + logger.fine(log("stat: %s = %d", key, value)); + } + + private void logCumulative(String key, long value) { + logger.fine(log(() -> { + Long prevValue = cumulativeMetricValues.put(key, value); + long logValue = prevValue == null ? 
value : value - prevValue; + return String.format("stat: %s = %d", key, logValue); + })); + } + + @VisibleForTesting + void logMetrics() { + logMetricsOptions(); + logChannelsStats(); + reportMinReadyChannels(); + reportMaxReadyChannels(); + reportMaxChannels(); + reportMaxAllowedChannels(); + reportNumChannelDisconnect(); + reportNumChannelConnect(); + reportMinReadinessTime(); + reportAvgReadinessTime(); + reportMaxReadinessTime(); + reportMinActiveStreams(); + reportMaxActiveStreams(); + reportMinTotalActiveStreams(); + reportMaxTotalActiveStreams(); + reportMinAffinity(); + reportMaxAffinity(); + reportNumAffinity(); + reportMinOkCalls(); + reportMinErrCalls(); + reportMaxOkCalls(); + reportMaxErrCalls(); + reportTotalOkCalls(); + reportTotalErrCalls(); + reportSucceededFallbacks(); + reportFailedFallbacks(); + reportUnresponsiveDetectionCount(); + reportMinUnresponsiveMs(); + reportMaxUnresponsiveMs(); + reportMinUnresponsiveDrops(); + reportMaxUnresponsiveDrops(); + } + private MetricOptions createMetricOptions( String description, List labelKeys, String unit) { return MetricOptions.builder() @@ -444,37 +592,48 @@ private void createDerivedLongCumulativeTimeSeriesWithResult( metric.createTimeSeries(labelValuesError, obj, funcErr); } + // TODO: When introducing pool downscaling feature this method must be changed accordingly. 
private long reportMaxChannels() { - return getNumberOfChannels(); + int value = getNumberOfChannels(); + logGauge(GcpMetricsConstants.METRIC_MAX_CHANNELS, value); + return value; } private long reportMaxAllowedChannels() { + logGauge(GcpMetricsConstants.METRIC_MAX_ALLOWED_CHANNELS, maxSize); return maxSize; } private long reportMinReadyChannels() { int value = minReadyChannels; minReadyChannels = readyChannels.get(); + logGauge(GcpMetricsConstants.METRIC_MIN_READY_CHANNELS, value); return value; } private long reportMaxReadyChannels() { int value = maxReadyChannels; maxReadyChannels = readyChannels.get(); + logGauge(GcpMetricsConstants.METRIC_MAX_READY_CHANNELS, value); return value; } private long reportNumChannelConnect() { - return numChannelConnect.get(); + long value = numChannelConnect.get(); + logCumulative(GcpMetricsConstants.METRIC_NUM_CHANNEL_CONNECT, value); + return value; } private long reportNumChannelDisconnect() { - return numChannelDisconnect.get(); + long value = numChannelDisconnect.get(); + logCumulative(GcpMetricsConstants.METRIC_NUM_CHANNEL_DISCONNECT, value); + return value; } private long reportMinReadinessTime() { long value = minReadinessTime; minReadinessTime = 0; + logGauge(GcpMetricsConstants.METRIC_MIN_CHANNEL_READINESS_TIME, value); return value; } @@ -485,12 +644,14 @@ private long reportAvgReadinessTime() { if (occ != 0) { value = total / occ; } + logGauge(GcpMetricsConstants.METRIC_AVG_CHANNEL_READINESS_TIME, value); return value; } private long reportMaxReadinessTime() { long value = maxReadinessTime; maxReadinessTime = 0; + logGauge(GcpMetricsConstants.METRIC_MAX_CHANNEL_READINESS_TIME, value); return value; } @@ -498,6 +659,7 @@ private int reportMinActiveStreams() { int value = minActiveStreams; minActiveStreams = channelRefs.stream().mapToInt(ChannelRef::getActiveStreamsCount).min().orElse(0); + logGauge(GcpMetricsConstants.METRIC_MIN_ACTIVE_STREAMS, value); return value; } @@ -505,51 +667,81 @@ private int 
reportMaxActiveStreams() { int value = maxActiveStreams; maxActiveStreams = channelRefs.stream().mapToInt(ChannelRef::getActiveStreamsCount).max().orElse(0); + logGauge(GcpMetricsConstants.METRIC_MAX_ACTIVE_STREAMS, value); return value; } private int reportMinTotalActiveStreams() { int value = minTotalActiveStreams; minTotalActiveStreams = totalActiveStreams.get(); + logGauge(GcpMetricsConstants.METRIC_MIN_TOTAL_ACTIVE_STREAMS, value); return value; } private int reportMaxTotalActiveStreams() { int value = maxTotalActiveStreams; maxTotalActiveStreams = totalActiveStreams.get(); + logGauge(GcpMetricsConstants.METRIC_MAX_TOTAL_ACTIVE_STREAMS, value); return value; } private int reportMinAffinity() { - int value = minAffinity; - minAffinity = channelRefs.stream().mapToInt(ChannelRef::getAffinityCount).min().orElse(0); + int value = minAffinity.getAndSet( + channelRefs.stream().mapToInt(ChannelRef::getAffinityCount).min().orElse(0) + ); + logGauge(GcpMetricsConstants.METRIC_MIN_AFFINITY, value); return value; } private int reportMaxAffinity() { - int value = maxAffinity; - maxAffinity = channelRefs.stream().mapToInt(ChannelRef::getAffinityCount).max().orElse(0); + int value = maxAffinity.getAndSet( + channelRefs.stream().mapToInt(ChannelRef::getAffinityCount).max().orElse(0) + ); + logGauge(GcpMetricsConstants.METRIC_MAX_AFFINITY, value); return value; } private int reportNumAffinity() { - return totalAffinityCount.get(); + int value = totalAffinityCount.get(); + logGauge(GcpMetricsConstants.METRIC_NUM_AFFINITY, value); + return value; } private synchronized long reportMinOkCalls() { minOkReported = true; calcMinMaxOkCalls(); + logGauge(GcpMetricsConstants.METRIC_MIN_CALLS + "_ok", minOkCalls); return minOkCalls; } private synchronized long reportMaxOkCalls() { maxOkReported = true; calcMinMaxOkCalls(); + logGauge(GcpMetricsConstants.METRIC_MAX_CALLS + "_ok", maxOkCalls); return maxOkCalls; } private long reportTotalOkCalls() { - return totalOkCalls.get(); + long 
value = totalOkCalls.get(); + logCumulative(GcpMetricsConstants.METRIC_NUM_CALLS_COMPLETED + "_ok", value); + return value; + } + + private LongSummaryStatistics calcStatsAndLog(String logLabel, ToLongFunction func) { + StringBuilder str = new StringBuilder(logLabel + ": ["); + final LongSummaryStatistics stats = + channelRefs.stream().mapToLong(ch -> { + long count = func.applyAsLong(ch); + if (str.charAt(str.length() - 1) != '[') { + str.append(", "); + } + str.append(count); + return count; + }).summaryStatistics(); + + str.append("]"); + logger.fine(log(str.toString())); + return stats; } private void calcMinMaxOkCalls() { @@ -559,7 +751,7 @@ private void calcMinMaxOkCalls() { return; } final LongSummaryStatistics stats = - channelRefs.stream().mapToLong(ChannelRef::getAndResetOkCalls).summaryStatistics(); + calcStatsAndLog("Ok calls", ChannelRef::getAndResetOkCalls); minOkCalls = stats.getMin(); maxOkCalls = stats.getMax(); } @@ -567,17 +759,21 @@ private void calcMinMaxOkCalls() { private synchronized long reportMinErrCalls() { minErrReported = true; calcMinMaxErrCalls(); + logGauge(GcpMetricsConstants.METRIC_MIN_CALLS + "_err", minErrCalls); return minErrCalls; } private synchronized long reportMaxErrCalls() { maxErrReported = true; calcMinMaxErrCalls(); + logGauge(GcpMetricsConstants.METRIC_MAX_CALLS + "_err", maxErrCalls); return maxErrCalls; } private long reportTotalErrCalls() { - return totalErrCalls.get(); + long value = totalErrCalls.get(); + logCumulative(GcpMetricsConstants.METRIC_NUM_CALLS_COMPLETED + "_err", value); + return value; } private void calcMinMaxErrCalls() { @@ -587,44 +783,54 @@ private void calcMinMaxErrCalls() { return; } final LongSummaryStatistics stats = - channelRefs.stream().mapToLong(ChannelRef::getAndResetErrCalls).summaryStatistics(); + calcStatsAndLog("Failed calls", ChannelRef::getAndResetErrCalls); minErrCalls = stats.getMin(); maxErrCalls = stats.getMax(); } private long reportSucceededFallbacks() { - return 
fallbacksSucceeded.get(); + long value = fallbacksSucceeded.get(); + logCumulative(GcpMetricsConstants.METRIC_NUM_FALLBACKS + "_ok", value); + return value; } private long reportFailedFallbacks() { - return fallbacksFailed.get(); + long value = fallbacksFailed.get(); + logCumulative(GcpMetricsConstants.METRIC_NUM_FALLBACKS + "_fail", value); + return value; } private long reportUnresponsiveDetectionCount() { - return unresponsiveDetectionCount.get(); + long value = unresponsiveDetectionCount.get(); + logCumulative(GcpMetricsConstants.METRIC_NUM_UNRESPONSIVE_DETECTIONS, value); + return value; } private long reportMinUnresponsiveMs() { long value = minUnresponsiveMs; minUnresponsiveMs = 0; + logGauge(GcpMetricsConstants.METRIC_MIN_UNRESPONSIVE_DETECTION_TIME, value); return value; } private long reportMaxUnresponsiveMs() { long value = maxUnresponsiveMs; maxUnresponsiveMs = 0; + logGauge(GcpMetricsConstants.METRIC_MAX_UNRESPONSIVE_DETECTION_TIME, value); return value; } private long reportMinUnresponsiveDrops() { long value = minUnresponsiveDrops; minUnresponsiveDrops = 0; + logGauge(GcpMetricsConstants.METRIC_MIN_UNRESPONSIVE_DROPPED_CALLS, value); return value; } private long reportMaxUnresponsiveDrops() { long value = maxUnresponsiveDrops; maxUnresponsiveDrops = 0; + logGauge(GcpMetricsConstants.METRIC_MAX_UNRESPONSIVE_DROPPED_CALLS, value); return value; } @@ -673,6 +879,15 @@ private void recordUnresponsiveDetection(long nanos, long dropCount) { } } + @Override + public void notifyWhenStateChanged(ConnectivityState source, Runnable callback) { + if (!getState(false).equals(source)) { + stateNotificationExecutor.execute(callback); + return; + } + stateChangeCallbacks.add(callback); + } + /** * ChannelStateMonitor subscribes to channel's state changes and informs {@link GcpManagedChannel} * on any new state. 
This monitor allows to detect when a channel is not ready and temporarily @@ -695,7 +910,12 @@ public void run() { if (channel == null) { return; } - ConnectivityState newState = channel.getState(false); + // Keep minSize channels always connected. + boolean requestConnection = channelId < minSize; + ConnectivityState newState = channel.getState(requestConnection); + logger.finer( + log("Channel %d state change detected: %s -> %s", channelId, currentState, newState) + ); if (newState == ConnectivityState.READY && currentState != ConnectivityState.READY) { incReadyChannels(); saveReadinessTime(System.nanoTime() - connectingStartNanos); @@ -715,7 +935,14 @@ public void run() { } } + private synchronized void executeStateChangeCallbacks() { + List callbacksToTrigger = stateChangeCallbacks; + stateChangeCallbacks = new LinkedList<>(); + callbacksToTrigger.forEach(stateNotificationExecutor::execute); + } + void processChannelStateChange(int channelId, ConnectivityState state) { + executeStateChangeCallbacks(); if (!fallbackEnabled) { return; } @@ -732,6 +959,10 @@ public int getMaxSize() { return maxSize; } + public int getMinSize() { + return minSize; + } + public int getNumberOfChannels() { return channelRefs.size(); } @@ -748,6 +979,43 @@ public int getMaxActiveStreams() { return channelRefs.stream().mapToInt(ChannelRef::getActiveStreamsCount).max().orElse(0); } + /** + * Returns a {@link ChannelRef} from the pool for a binding call. + * If round-robin on bind is enabled, uses {@link #getChannelRefRoundRobin()} + * otherwise {@link #getChannelRef(String)} + * + * @return {@link ChannelRef} channel to use for a call. 
+ */ + protected ChannelRef getChannelRefForBind() { + ChannelRef channelRef; + if (options.getChannelPoolOptions() != null && options.getChannelPoolOptions().isUseRoundRobinOnBind()) { + channelRef = getChannelRefRoundRobin(); + logger.finest(log( + "Channel %d picked for bind operation using round-robin.", channelRef.getId())); + } else { + channelRef = getChannelRef(null); + logger.finest(log("Channel %d picked for bind operation.", channelRef.getId())); + } + return channelRef; + } + + /** + * Returns a {@link ChannelRef} from the pool in round-robin manner. + * Creates a new channel in the pool until the pool reaches its max size. + * + * @return {@link ChannelRef} + */ + protected synchronized ChannelRef getChannelRefRoundRobin() { + if (channelRefs.size() < maxSize) { + return createNewChannel(); + } + bindingIndex++; + if (bindingIndex >= channelRefs.size()) { + bindingIndex = 0; + } + return channelRefs.get(bindingIndex); + } + /** * Pick a {@link ChannelRef} (and create a new one if necessary). If notReadyFallbackEnabled is * true in the {@link GcpResiliencyOptions} then instead of a channel in a non-READY state another @@ -760,10 +1028,15 @@ public int getMaxActiveStreams() { */ protected ChannelRef getChannelRef(@Nullable String key) { if (key == null || key.isEmpty()) { - return pickLeastBusyChannel(); + return pickLeastBusyChannel(/* forFallback= */ false); } ChannelRef mappedChannel = affinityKeyToChannelRef.get(key); - if (mappedChannel == null || !fallbackEnabled) { + if (mappedChannel == null) { + ChannelRef channelRef = pickLeastBusyChannel(/*forFallback= */ false); + bind(channelRef, Collections.singletonList(key)); + return channelRef; + } + if (!fallbackEnabled) { return mappedChannel; } // Look up if the channelRef is not ready. @@ -776,18 +1049,25 @@ protected ChannelRef getChannelRef(@Nullable String key) { Integer channelId = tempMap.get(key); if (channelId != null && !fallbackMap.containsKey(channelId)) { // Fallback channel is ready. 
+ logger.finest(log("Using fallback channel: %d -> %d", mappedChannel.getId(), channelId)); fallbacksSucceeded.incrementAndGet(); return channelRefs.get(channelId); } // No temp mapping for this key or fallback channel is also broken. - ChannelRef channelRef = pickLeastBusyChannel(); + ChannelRef channelRef = pickLeastBusyChannel(/* forFallback= */ true); if (!fallbackMap.containsKey(channelRef.getId()) && channelRef.getActiveStreamsCount() < DEFAULT_MAX_STREAM) { // Got a ready and not an overloaded channel. - fallbacksSucceeded.incrementAndGet(); - tempMap.put(key, channelRef.getId()); + if (channelRef.getId() != mappedChannel.getId()) { + logger.finest(log( + "Setting fallback channel: %d -> %d", mappedChannel.getId(), channelRef.getId() + )); + fallbacksSucceeded.incrementAndGet(); + tempMap.put(key, channelRef.getId()); + } return channelRef; } + logger.finest(log("Failed to find fallback for channel %d", mappedChannel.getId())); fallbacksFailed.incrementAndGet(); if (channelId != null) { // Stick with previous mapping if fallback has failed. @@ -802,18 +1082,49 @@ private synchronized ChannelRef createNewChannel() { final int size = channelRefs.size(); ChannelRef channelRef = new ChannelRef(delegateChannelBuilder.build(), size); channelRefs.add(channelRef); + logger.finer(log("Channel %d created.", channelRef.getId())); return channelRef; } + // Returns first newly created channel or null if there are already some channels in the pool. + @Nullable + private ChannelRef createFirstChannel() { + if (!channelRefs.isEmpty()) { + return null; + } + synchronized (this) { + if (channelRefs.isEmpty()) { + return createNewChannel(); + } + } + return null; + } + + // Creates new channel if maxSize is not reached. + // Returns new channel or null. 
+ @Nullable + private ChannelRef tryCreateNewChannel() { + if (channelRefs.size() >= maxSize) { + return null; + } + synchronized (this) { + if (channelRefs.size() < maxSize) { + return createNewChannel(); + } + } + return null; + } + /** * Pick a {@link ChannelRef} (and create a new one if necessary). If notReadyFallbackEnabled is * true in the {@link GcpResiliencyOptions} then instead of a channel in a non-READY state another * channel in the READY state and having fewer than maximum allowed number of active streams will * be provided if available. */ - private ChannelRef pickLeastBusyChannel() { - if (channelRefs.isEmpty()) { - return createNewChannel(); + private ChannelRef pickLeastBusyChannel(boolean forFallback) { + ChannelRef first = createFirstChannel(); + if (first != null) { + return first; } // Pick the least busy channel and the least busy ready and not overloaded channel (this could @@ -839,19 +1150,38 @@ private ChannelRef pickLeastBusyChannel() { if (!fallbackEnabled) { if (channelRefs.size() < maxSize && minStreams >= maxConcurrentStreamsLowWatermark) { - return createNewChannel(); + ChannelRef newChannel = tryCreateNewChannel(); + if (newChannel != null) { + return newChannel; + } } return channelCandidate; } if (channelRefs.size() < maxSize && readyMinStreams >= maxConcurrentStreamsLowWatermark) { - return createNewChannel(); + ChannelRef newChannel = tryCreateNewChannel(); + if (newChannel != null) { + if (!forFallback && readyCandidate == null) { + logger.finest(log("Fallback to newly created channel %d", newChannel.getId())); + fallbacksSucceeded.incrementAndGet(); + } + return newChannel; + } } if (readyCandidate != null) { + if (!forFallback && readyCandidate.getId() != channelCandidate.getId()) { + logger.finest(log( + "Picking fallback channel: %d -> %d", channelCandidate.getId(), readyCandidate.getId())); + fallbacksSucceeded.incrementAndGet(); + } return readyCandidate; } + if (!forFallback) { + logger.finest(log("Failed to find fallback 
for channel %d", channelCandidate.getId())); + fallbacksFailed.incrementAndGet(); + } return channelCandidate; } @@ -886,19 +1216,31 @@ public ClientCall newCall( @Override public ManagedChannel shutdownNow() { + logger.finer(log("Shutdown now started.")); for (ChannelRef channelRef : channelRefs) { if (!channelRef.getChannel().isTerminated()) { channelRef.getChannel().shutdownNow(); } } + if (logMetricService != null && !logMetricService.isTerminated()) { + logMetricService.shutdownNow(); + } + if (!stateNotificationExecutor.isTerminated()) { + stateNotificationExecutor.shutdownNow(); + } return this; } @Override public ManagedChannel shutdown() { + logger.finer(log("Shutdown started.")); for (ChannelRef channelRef : channelRefs) { channelRef.getChannel().shutdown(); } + if (logMetricService != null) { + logMetricService.shutdown(); + } + stateNotificationExecutor.shutdown(); return this; } @@ -915,6 +1257,16 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE } channelRef.getChannel().awaitTermination(awaitTimeNanos, NANOSECONDS); } + long awaitTimeNanos = endTimeNanos - System.nanoTime(); + if (logMetricService != null && awaitTimeNanos > 0) { + //noinspection ResultOfMethodCallIgnored + logMetricService.awaitTermination(awaitTimeNanos, NANOSECONDS); + } + awaitTimeNanos = endTimeNanos - System.nanoTime(); + if (awaitTimeNanos > 0) { + //noinspection ResultOfMethodCallIgnored + stateNotificationExecutor.awaitTermination(awaitTimeNanos, NANOSECONDS); + } return isTerminated(); } @@ -925,7 +1277,10 @@ public boolean isShutdown() { return false; } } - return true; + if (logMetricService != null) { + return logMetricService.isShutdown(); + } + return stateNotificationExecutor.isShutdown(); } @Override @@ -935,12 +1290,18 @@ public boolean isTerminated() { return false; } } - return true; + if (logMetricService != null) { + return logMetricService.isTerminated(); + } + return stateNotificationExecutor.isTerminated(); } /** Get the 
current connectivity state of the channel pool. */ @Override public ConnectivityState getState(boolean requestConnection) { + if (requestConnection && getNumberOfChannels() == 0) { + createFirstChannel(); + } int ready = 0; int idle = 0; int connecting = 0; @@ -992,6 +1353,12 @@ protected void bind(ChannelRef channelRef, List affinityKeys) { if (channelRef == null || affinityKeys == null) { return; } + logger.finest(log( + "Binding %d key(s) to channel %d: [%s]", + affinityKeys.size(), + channelRef.getId(), + String.join(", ", affinityKeys) + )); for (String affinityKey : affinityKeys) { while (affinityKeyToChannelRef.putIfAbsent(affinityKey, channelRef) != null) { unbind(Collections.singletonList(affinityKey)); @@ -1009,6 +1376,9 @@ protected void unbind(List affinityKeys) { ChannelRef channelRef = affinityKeyToChannelRef.remove(affinityKey); if (channelRef != null) { channelRef.affinityCountDecr(); + logger.finest(log("Unbinding key %s from channel %d.", affinityKey, channelRef.getId())); + } else { + logger.finest(log("Unbinding key %s but it wasn't bound.", affinityKey)); } } } @@ -1156,12 +1526,14 @@ protected int getId() { } protected void affinityCountIncr() { - affinityCount.incrementAndGet(); + int count = affinityCount.incrementAndGet(); + maxAffinity.getAndUpdate(currentMax -> Math.max(currentMax, count)); totalAffinityCount.incrementAndGet(); } protected void affinityCountDecr() { - affinityCount.decrementAndGet(); + int count = affinityCount.decrementAndGet(); + minAffinity.getAndUpdate(currentMin -> Math.min(currentMin, count)); totalAffinityCount.decrementAndGet(); } @@ -1226,7 +1598,7 @@ private void detectUnresponsiveConnection( return; } if (deadlineExceededCount.incrementAndGet() >= unresponsiveDropCount - && unresponsiveTimingConditionMet()) { + && msSinceLastResponse() >= unresponsiveMs) { maybeReconnectUnresponsive(); } return; @@ -1238,15 +1610,23 @@ && unresponsiveTimingConditionMet()) { } } - private boolean unresponsiveTimingConditionMet() 
{ - return (System.nanoTime() - lastResponseNanos) / 1000000 >= unresponsiveMs; + private long msSinceLastResponse() { + return (System.nanoTime() - lastResponseNanos) / 1000000; } private synchronized void maybeReconnectUnresponsive() { + final long msSinceLastResponse = msSinceLastResponse(); if (deadlineExceededCount.get() >= unresponsiveDropCount - && unresponsiveTimingConditionMet()) { + && msSinceLastResponse >= unresponsiveMs) { recordUnresponsiveDetection( System.nanoTime() - lastResponseNanos, deadlineExceededCount.get()); + logger.finer(log( + "Channel %d connection is unresponsive for %d ms and %d deadline exceeded calls. " + + "Forcing channel to idle state.", + channelId, + msSinceLastResponse, + deadlineExceededCount.get() + )); delegate.enterIdle(); lastResponseNanos = System.nanoTime(); deadlineExceededCount.set(0); diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelBuilder.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelBuilder.java index 061cc095..425cc52a 100644 --- a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelBuilder.java +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelBuilder.java @@ -63,7 +63,10 @@ public static GcpManagedChannelBuilder forDelegateBuilder(ManagedChannelBuilder< return new GcpManagedChannelBuilder(delegate); } - /** Sets the channel pool size. This will override the pool size configuration in ApiConfig. */ + /** Sets the maximum channel pool size. This will override the pool size configuration in ApiConfig. + * Deprecated. Use maxSize in GcpManagedChannelOptions.GcpChannelPoolOptions. 
+ */ + @Deprecated public GcpManagedChannelBuilder setPoolSize(int poolSize) { this.poolSize = poolSize; return this; diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelOptions.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelOptions.java index 8870bcb5..f6c4c557 100644 --- a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelOptions.java +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpManagedChannelOptions.java @@ -20,7 +20,9 @@ import io.opencensus.metrics.LabelKey; import io.opencensus.metrics.LabelValue; import io.opencensus.metrics.MetricRegistry; + import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.logging.Logger; import javax.annotation.Nullable; @@ -29,19 +31,27 @@ public class GcpManagedChannelOptions { private static final Logger logger = Logger.getLogger(GcpManagedChannelOptions.class.getName()); + @Nullable private final GcpChannelPoolOptions channelPoolOptions; @Nullable private final GcpMetricsOptions metricsOptions; @Nullable private final GcpResiliencyOptions resiliencyOptions; public GcpManagedChannelOptions() { + channelPoolOptions = null; metricsOptions = null; resiliencyOptions = null; } public GcpManagedChannelOptions(Builder builder) { + channelPoolOptions = builder.channelPoolOptions; metricsOptions = builder.metricsOptions; resiliencyOptions = builder.resiliencyOptions; } + @Nullable + public GcpChannelPoolOptions getChannelPoolOptions() { + return channelPoolOptions; + } + @Nullable public GcpMetricsOptions getMetricsOptions() { return metricsOptions; @@ -52,6 +62,16 @@ public GcpResiliencyOptions getResiliencyOptions() { return resiliencyOptions; } + @Override + public String toString() { + return String.format( + "{channelPoolOptions: %s, resiliencyOptions: %s, metricsOptions: %s}", + getChannelPoolOptions(), + getResiliencyOptions(), + getMetricsOptions() + ); + } + /** Creates a new GcpManagedChannelOptions.Builder. 
*/ public static Builder newBuilder() { return new Builder(); @@ -63,12 +83,14 @@ public static Builder newBuilder(GcpManagedChannelOptions options) { } public static class Builder { + private GcpChannelPoolOptions channelPoolOptions; private GcpMetricsOptions metricsOptions; private GcpResiliencyOptions resiliencyOptions; public Builder() {} public Builder(GcpManagedChannelOptions options) { + this.channelPoolOptions = options.getChannelPoolOptions(); this.metricsOptions = options.getMetricsOptions(); this.resiliencyOptions = options.getResiliencyOptions(); } @@ -77,6 +99,17 @@ public GcpManagedChannelOptions build() { return new GcpManagedChannelOptions(this); } + /** + * Sets the channel pool configuration for the {@link GcpManagedChannel}. + * + * @param channelPoolOptions a {@link GcpChannelPoolOptions} to use as a channel pool + * configuration. + */ + public Builder withChannelPoolOptions(GcpChannelPoolOptions channelPoolOptions) { + this.channelPoolOptions = channelPoolOptions; + return this; + } + /** * Sets the metrics configuration for the {@link GcpManagedChannel}. * @@ -127,6 +160,135 @@ public Builder withResiliencyOptions(GcpResiliencyOptions resiliencyOptions) { } } + /** Channel pool configuration for the GCP managed channel. */ + public static class GcpChannelPoolOptions { + // The maximum number of channels in the pool. + private final int maxSize; + // The minimum size of the channel pool. This number of channels will be created and these + // channels will try to always keep connection to the server. + private final int minSize; + // If every channel in the pool has at least this amount of concurrent streams then a new channel will be created + // in the pool unless the pool reached its maximum size. + private final int concurrentStreamsLowWatermark; + // Use round-robin channel selection for affinity binding calls. 
+ private final boolean useRoundRobinOnBind; + + public GcpChannelPoolOptions(Builder builder) { + maxSize = builder.maxSize; + minSize = builder.minSize; + concurrentStreamsLowWatermark = builder.concurrentStreamsLowWatermark; + useRoundRobinOnBind = builder.useRoundRobinOnBind; + } + + public int getMaxSize() { + return maxSize; + } + + public int getMinSize() { + return minSize; + } + + public int getConcurrentStreamsLowWatermark() { + return concurrentStreamsLowWatermark; + } + + /** Creates a new GcpChannelPoolOptions.Builder. */ + public static GcpChannelPoolOptions.Builder newBuilder() { + return new GcpChannelPoolOptions.Builder(); + } + + /** Creates a new GcpChannelPoolOptions.Builder from GcpChannelPoolOptions. */ + public static GcpChannelPoolOptions.Builder newBuilder(GcpChannelPoolOptions options) { + return new GcpChannelPoolOptions.Builder(options); + } + + public boolean isUseRoundRobinOnBind() { + return useRoundRobinOnBind; + } + + @Override + public String toString() { + return String.format( + "{maxSize: %d, minSize: %d, concurrentStreamsLowWatermark: %d, useRoundRobinOnBind: %s}", + getMaxSize(), + getMinSize(), + getConcurrentStreamsLowWatermark(), + isUseRoundRobinOnBind() + ); + } + + public static class Builder { + private int maxSize = GcpManagedChannel.DEFAULT_MAX_CHANNEL; + private int minSize = 0; + private int concurrentStreamsLowWatermark = GcpManagedChannel.DEFAULT_MAX_STREAM; + private boolean useRoundRobinOnBind = false; + + public Builder() {} + + public Builder(GcpChannelPoolOptions options) { + this(); + if (options == null) { + return; + } + this.maxSize = options.getMaxSize(); + this.minSize = options.getMinSize(); + this.concurrentStreamsLowWatermark = options.getConcurrentStreamsLowWatermark(); + this.useRoundRobinOnBind = options.isUseRoundRobinOnBind(); + } + + public GcpChannelPoolOptions build() { + return new GcpChannelPoolOptions(this); + } + + /** + * Sets the maximum size of the channel pool. 
+ * + * @param maxSize maximum number of channels the pool can have. + */ + public Builder setMaxSize(int maxSize) { + Preconditions.checkArgument(maxSize > 0, "Channel pool size must be positive."); + this.maxSize = maxSize; + return this; + } + + /** + * Sets the minimum size of the channel pool. This number of channels will be created and + * these channels will try to always keep connection to the server established. + * + * @param minSize minimum number of channels the pool must have. + */ + public Builder setMinSize(int minSize) { + Preconditions.checkArgument(minSize >= 0, + "Channel pool minimum size must be 0 or positive."); + this.minSize = minSize; + return this; + } + + /** + * Sets the concurrent streams low watermark. + * If every channel in the pool has at least this amount of concurrent streams then a new + * channel will be created in the pool unless the pool reached its maximum size. + * + * @param concurrentStreamsLowWatermark number of streams every channel must reach before adding a new channel + * to the pool. + */ + public Builder setConcurrentStreamsLowWatermark(int concurrentStreamsLowWatermark) { + this.concurrentStreamsLowWatermark = concurrentStreamsLowWatermark; + return this; + } + + /** + * Enables/disables using round-robin channel selection for affinity binding calls. + * + * @param enabled If true, use round-robin channel selection for affinity binding calls. + */ + public Builder setUseRoundRobinOnBind(boolean enabled) { + this.useRoundRobinOnBind = enabled; + return this; + } + } + } + /** Metrics configuration for the GCP managed channel. 
*/ public static class GcpMetricsOptions { private final MetricRegistry metricRegistry; @@ -157,6 +319,27 @@ public String getNamePrefix() { return namePrefix; } + @Override + public String toString() { + Iterator keyIterator = getLabelKeys().iterator(); + Iterator valueIterator = getLabelValues().iterator(); + + final List labels = new ArrayList<>(); + while (keyIterator.hasNext() && valueIterator.hasNext()) { + labels.add( + String.format( + "%s: \"%s\"", keyIterator.next().getKey(), valueIterator.next().getValue() + ) + ); + } + return String.format( + "{namePrefix: \"%s\", labels: [%s], metricRegistry: %s}", + getNamePrefix(), + String.join(", ", labels), + getMetricRegistry() + ); + } + /** Creates a new GcpMetricsOptions.Builder. */ public static Builder newBuilder() { return new Builder(); @@ -269,6 +452,18 @@ public int getUnresponsiveDetectionDroppedCount() { return unresponsiveDetectionDroppedCount; } + @Override + public String toString() { + return String.format( + "{notReadyFallbackEnabled: %s, unresponsiveDetectionEnabled: %s, " + + "unresponsiveDetectionMs: %d, unresponsiveDetectionDroppedCount: %d}", + isNotReadyFallbackEnabled(), + isUnresponsiveDetectionEnabled(), + getUnresponsiveDetectionMs(), + getUnresponsiveDetectionDroppedCount() + ); + } + public static class Builder { private boolean notReadyFallbackEnabled = false; private boolean unresponsiveDetectionEnabled = false; diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpMultiEndpointChannel.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpMultiEndpointChannel.java new file mode 100644 index 00000000..9d9de7e7 --- /dev/null +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpMultiEndpointChannel.java @@ -0,0 +1,427 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.grpc; + +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpChannelPoolOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; +import com.google.cloud.grpc.multiendpoint.MultiEndpoint; +import com.google.cloud.grpc.proto.ApiConfig; +import com.google.common.base.Preconditions; +import io.grpc.CallOptions; +import io.grpc.ClientCall; +import io.grpc.ClientCall.Listener; +import io.grpc.ConnectivityState; +import io.grpc.Grpc; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.opencensus.metrics.LabelKey; +import io.opencensus.metrics.LabelValue; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; + +/** + * The purpose of GcpMultiEndpointChannel is twofold: + * + *
 * <ol>
 *   <li>Fallback to an alternative endpoint (host:port) of a gRPC service when the original
 *       endpoint is completely unavailable.
 *   <li>Be able to route an RPC call to a specific group of endpoints.
 * </ol>
 *
 * <p>A group of endpoints is called a {@link MultiEndpoint} and is essentially a list of endpoints
 * where priority is defined by the position in the list with the first endpoint having top
 * priority. A MultiEndpoint tracks endpoints' availability. When a MultiEndpoint is picked for an
 * RPC call, it picks the top priority endpoint that is currently available. More information on
 * the {@link MultiEndpoint} class.
 *
 * <p>GcpMultiEndpointChannel can have one or more MultiEndpoint identified by its name -- arbitrary
 * string provided in the {@link GcpMultiEndpointOptions} when configuring MultiEndpoints. This name
 * can be used to route an RPC call to this MultiEndpoint by setting the {@link #ME_KEY} key value
 * of the RPC {@link CallOptions}.
 *
 * <p>GcpMultiEndpointChannel receives a list of GcpMultiEndpointOptions for initial configuration.
 * An updated configuration can be provided at any time later using
 * {@link GcpMultiEndpointChannel#setMultiEndpoints(List)}. The first item in the
 * GcpMultiEndpointOptions list defines the default MultiEndpoint that will be used when no
 * MultiEndpoint name is provided with an RPC call.
 *
 * <p>Example:
 *
 * <p>Let's assume we have a service with read and write operations and the following backends:
 *
 * <ul>
 *   <li>service.example.com -- the main set of backends supporting all operations
 *   <li>service-fallback.example.com -- read-write replica supporting all operations
 *   <li>ro-service.example.com -- read-only replica supporting only read operations
 * </ul>
 *
 * <p>Example configuration:
 *
 * <ul>
 *   <li>MultiEndpoint named "default" with endpoints:
 *       <ol>
 *         <li>service.example.com:443
 *         <li>service-fallback.example.com:443
 *       </ol>
 *   <li>MultiEndpoint named "read" with endpoints:
 *       <ol>
 *         <li>ro-service.example.com:443
 *         <li>service-fallback.example.com:443
 *         <li>service.example.com:443
 *       </ol>
 * </ul>
 *
 * <p>With the configuration above GcpMultiEndpointChannel will use the "default" MultiEndpoint by
 * default. It means that RPC calls by default will use the main endpoint and if it is not available
 * then the read-write replica.
 *
 * <p>To offload some read calls to the read-only replica we can specify "read" MultiEndpoint in
 * the CallOptions. Then these calls will use the read-only replica endpoint and if it is not
 * available then the read-write replica and if it is also not available then the main endpoint.
 *
GcpMultiEndpointChannel creates a {@link GcpManagedChannel} channel pool for every unique + * endpoint. For the example above three channel pools will be created. + */ +public class GcpMultiEndpointChannel extends ManagedChannel { + + public static final CallOptions.Key ME_KEY = CallOptions.Key.create("MultiEndpoint"); + private final LabelKey endpointKey = + LabelKey.create("endpoint", "Endpoint address."); + private final Map multiEndpoints = new ConcurrentHashMap<>(); + private MultiEndpoint defaultMultiEndpoint; + private final ApiConfig apiConfig; + private final GcpManagedChannelOptions gcpManagedChannelOptions; + + private final Map pools = new ConcurrentHashMap<>(); + + /** + * Constructor for {@link GcpMultiEndpointChannel}. + * + * @param meOptions list of MultiEndpoint configurations. + * @param apiConfig the ApiConfig object for configuring GcpManagedChannel. + * @param gcpManagedChannelOptions the options for GcpManagedChannel. + */ + public GcpMultiEndpointChannel( + List meOptions, + ApiConfig apiConfig, + GcpManagedChannelOptions gcpManagedChannelOptions) { + this.apiConfig = apiConfig; + this.gcpManagedChannelOptions = gcpManagedChannelOptions; + setMultiEndpoints(meOptions); + } + + private class EndpointStateMonitor implements Runnable { + + private final ManagedChannel channel; + private final String endpoint; + + private EndpointStateMonitor(ManagedChannel channel, String endpoint) { + this.endpoint = endpoint; + this.channel = channel; + run(); + } + + @Override + public void run() { + if (channel == null) { + return; + } + ConnectivityState newState = checkPoolState(channel, endpoint); + if (newState != ConnectivityState.SHUTDOWN) { + channel.notifyWhenStateChanged(newState, this); + } + } + } + + // Checks and returns channel pool state. Also notifies all MultiEndpoints of the pool state. 
+ private ConnectivityState checkPoolState(ManagedChannel channel, String endpoint) { + ConnectivityState state = channel.getState(false); + // Update endpoint state in all multiendpoints. + for (MultiEndpoint me : multiEndpoints.values()) { + me.setEndpointAvailable(endpoint, state.equals(ConnectivityState.READY)); + } + return state; + } + + private GcpManagedChannelOptions prepareGcpManagedChannelConfig( + GcpManagedChannelOptions gcpOptions, String endpoint) { + final GcpMetricsOptions.Builder metricsOptions = GcpMetricsOptions.newBuilder( + gcpOptions.getMetricsOptions() + ); + + final List labelKeys = new ArrayList<>(metricsOptions.build().getLabelKeys()); + final List labelValues = new ArrayList<>(metricsOptions.build().getLabelValues()); + + labelKeys.add(endpointKey); + labelValues.add(LabelValue.create(endpoint)); + + // Make sure the pool will have at least 1 channel always connected. If maximum size > 1 then we + // want at least 2 channels or square root of maximum channels whichever is larger. + // Do not override if minSize is already specified as > 0. + final GcpChannelPoolOptions.Builder poolOptions = GcpChannelPoolOptions.newBuilder( + gcpOptions.getChannelPoolOptions() + ); + if (poolOptions.build().getMinSize() < 1) { + int minSize = Math.min(2, poolOptions.build().getMaxSize()); + minSize = Math.max(minSize, ((int) Math.sqrt(poolOptions.build().getMaxSize()))); + poolOptions.setMinSize(minSize); + } + + return GcpManagedChannelOptions.newBuilder(gcpOptions) + .withChannelPoolOptions(poolOptions.build()) + .withMetricsOptions(metricsOptions.withLabels(labelKeys, labelValues).build()) + .build(); + } + + /** + * Update the list of MultiEndpoint configurations. + * + *

MultiEndpoints are matched with the current ones by name. + *

    + *
  • If a current MultiEndpoint is missing in the updated list, the MultiEndpoint will be + * removed. + *
  • A new MultiEndpoint will be created for every new name in the list. + *
  • For an existing MultiEndpoint only its endpoints will be updated (no recovery timeout + * change). + *
+ * + *

Endpoints are matched by the endpoint address (usually in the form of address:port). + *

    + *
  • If an existing endpoint is not used by any MultiEndpoint in the updated list, then the + * channel poll for this endpoint will be shutdown. + *
  • A channel pool will be created for every new endpoint. + *
  • For an existing endpoint nothing will change (the channel pool will not be re-created, thus + * no channel credentials change, nor channel configurator change). + *
+ */ + public void setMultiEndpoints(List meOptions) { + Preconditions.checkNotNull(meOptions); + Preconditions.checkArgument(!meOptions.isEmpty(), "MultiEndpoints list is empty"); + Set currentMultiEndpoints = new HashSet<>(); + Set currentEndpoints = new HashSet<>(); + + // Must have all multiendpoints before initializing the pools so that all multiendpoints + // can get status update of every pool. + meOptions.forEach(options -> { + currentMultiEndpoints.add(options.getName()); + // Create or update MultiEndpoint + if (multiEndpoints.containsKey(options.getName())) { + multiEndpoints.get(options.getName()).setEndpoints(options.getEndpoints()); + } else { + multiEndpoints.put(options.getName(), + (new MultiEndpoint.Builder(options.getEndpoints())) + .withRecoveryTimeout(options.getRecoveryTimeout()) + .build()); + } + }); + + // TODO: Support the same endpoint in different MultiEndpoint to use different channel + // credentials. + // TODO: Support different endpoints in the same MultiEndpoint to use different channel + // credentials. + meOptions.forEach(options -> { + // Create missing pools + options.getEndpoints().forEach(endpoint -> { + currentEndpoints.add(endpoint); + pools.computeIfAbsent(endpoint, e -> { + ManagedChannelBuilder managedChannelBuilder; + if (options.getChannelCredentials() != null) { + managedChannelBuilder = Grpc.newChannelBuilder(e, options.getChannelCredentials()); + } else { + String serviceAddress; + int port; + int colon = e.lastIndexOf(':'); + if (colon < 0) { + serviceAddress = e; + // Assume https by default. 
+ port = 443; + } else { + serviceAddress = e.substring(0, colon); + port = Integer.parseInt(e.substring(colon + 1)); + } + managedChannelBuilder = ManagedChannelBuilder.forAddress(serviceAddress, port); + } + if (options.getChannelConfigurator() != null) { + managedChannelBuilder = options.getChannelConfigurator().apply(managedChannelBuilder); + } + + GcpManagedChannel channel = new GcpManagedChannel( + managedChannelBuilder, + apiConfig, + // Add endpoint to metric labels. + prepareGcpManagedChannelConfig(gcpManagedChannelOptions, e)); + // Start monitoring the pool state. + new EndpointStateMonitor(channel, e); + return channel; + }); + // Communicate current state to MultiEndpoints. + checkPoolState(pools.get(endpoint), endpoint); + }); + }); + defaultMultiEndpoint = multiEndpoints.get(meOptions.get(0).getName()); + + // Remove obsolete multiendpoints. + multiEndpoints.keySet().removeIf(name -> !currentMultiEndpoints.contains(name)); + + // Shutdown and remove the pools not present in options. + for (String endpoint : pools.keySet()) { + if (!currentEndpoints.contains(endpoint)) { + pools.get(endpoint).shutdown(); + pools.remove(endpoint); + } + } + } + + /** + * Initiates an orderly shutdown in which preexisting calls continue but new calls are immediately + * cancelled. + * + * @return this + * @since 1.0.0 + */ + @Override + public ManagedChannel shutdown() { + pools.values().forEach(GcpManagedChannel::shutdown); + return this; + } + + /** + * Returns whether the channel is shutdown. Shutdown channels immediately cancel any new calls, + * but may still have some calls being processed. + * + * @see #shutdown() + * @see #isTerminated() + * @since 1.0.0 + */ + @Override + public boolean isShutdown() { + return pools.values().stream().allMatch(GcpManagedChannel::isShutdown); + } + + /** + * Returns whether the channel is terminated. Terminated channels have no running calls and + * relevant resources released (like TCP connections). 
+ * + * @see #isShutdown() + * @since 1.0.0 + */ + @Override + public boolean isTerminated() { + return pools.values().stream().allMatch(GcpManagedChannel::isTerminated); + } + + /** + * Initiates a forceful shutdown in which preexisting and new calls are cancelled. Although + * forceful, the shutdown process is still not instantaneous; {@link #isTerminated()} will likely + * return {@code false} immediately after this method returns. + * + * @return this + * @since 1.0.0 + */ + @Override + public ManagedChannel shutdownNow() { + pools.values().forEach(GcpManagedChannel::shutdownNow); + return this; + } + + /** + * Waits for the channel to become terminated, giving up if the timeout is reached. + * + * @return whether the channel is terminated, as would be done by {@link #isTerminated()}. + * @since 1.0.0 + */ + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + long endTimeNanos = System.nanoTime() + unit.toNanos(timeout); + for (GcpManagedChannel gcpManagedChannel : pools.values()) { + if (gcpManagedChannel.isTerminated()) { + continue; + } + long awaitTimeNanos = endTimeNanos - System.nanoTime(); + if (awaitTimeNanos <= 0) { + break; + } + gcpManagedChannel.awaitTermination(awaitTimeNanos, NANOSECONDS); + } + return isTerminated(); + } + + /** + * Check the value of {@link #ME_KEY} key in the {@link CallOptions} and if found use + * the MultiEndpoint with the same name for this call. + * + *

Create a {@link ClientCall} to the remote operation specified by the given {@link + * MethodDescriptor}. The returned {@link ClientCall} does not trigger any remote behavior until + * {@link ClientCall#start(Listener, Metadata)} is invoked. + * + * @param methodDescriptor describes the name and parameter types of the operation to call. + * @param callOptions runtime options to be applied to this call. + * @return a {@link ClientCall} bound to the specified method. + * @since 1.0.0 + */ + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + final String multiEndpointKey = callOptions.getOption(ME_KEY); + MultiEndpoint me = defaultMultiEndpoint; + if (multiEndpointKey != null) { + me = multiEndpoints.getOrDefault(multiEndpointKey, defaultMultiEndpoint); + } + return pools.get(me.getCurrentId()).newCall(methodDescriptor, callOptions); + } + + /** + * The authority of the current endpoint of the default MultiEndpoint. Typically, this is in the + * format {@code host:port}. + * + * To get the authority of the current endpoint of another MultiEndpoint use {@link + * #authorityFor(String)} method. + * + * This may return different values over time because MultiEndpoint may switch between endpoints. + * + * @since 1.0.0 + */ + @Override + public String authority() { + return pools.get(defaultMultiEndpoint.getCurrentId()).authority(); + } + + /** + * The authority of the current endpoint of the specified MultiEndpoint. Typically, this is in the + * format {@code host:port}. + * + * This may return different values over time because MultiEndpoint may switch between endpoints. 
+ */ + public String authorityFor(String multiEndpointName) { + MultiEndpoint multiEndpoint = multiEndpoints.get(multiEndpointName); + if (multiEndpoint == null) { + return null; + } + return pools.get(multiEndpoint.getCurrentId()).authority(); + } +} diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpMultiEndpointOptions.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpMultiEndpointOptions.java new file mode 100644 index 00000000..c1c30bac --- /dev/null +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/GcpMultiEndpointOptions.java @@ -0,0 +1,170 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.grpc; + +import com.google.api.core.ApiFunction; +import com.google.cloud.grpc.multiendpoint.MultiEndpoint; +import com.google.common.base.Preconditions; +import io.grpc.ChannelCredentials; +import io.grpc.ManagedChannelBuilder; +import java.time.Duration; +import java.util.List; + +/** + * {@link MultiEndpoint} configuration for the {@link GcpMultiEndpointChannel}. 
+ */ +public class GcpMultiEndpointOptions { + + private final String name; + private final List endpoints; + private final ApiFunction, ManagedChannelBuilder> channelConfigurator; + private final ChannelCredentials channelCredentials; + private final Duration recoveryTimeout; + + public static String DEFAULT_NAME = "default"; + + public GcpMultiEndpointOptions(Builder builder) { + this.name = builder.name; + this.endpoints = builder.endpoints; + this.channelConfigurator = builder.channelConfigurator; + this.channelCredentials = builder.channelCredentials; + this.recoveryTimeout = builder.recoveryTimeout; + } + + /** + * Creates a new GcpMultiEndpointOptions.Builder. + * + * @param endpoints list of endpoints for the MultiEndpoint. + */ + public static Builder newBuilder(List endpoints) { + return new Builder(endpoints); + } + + /** + * Creates a new GcpMultiEndpointOptions.Builder from GcpMultiEndpointOptions. + */ + public static Builder newBuilder(GcpMultiEndpointOptions options) { + return new Builder(options); + } + + public String getName() { + return name; + } + + public List getEndpoints() { + return endpoints; + } + + public ApiFunction, ManagedChannelBuilder> getChannelConfigurator() { + return channelConfigurator; + } + + public ChannelCredentials getChannelCredentials() { + return channelCredentials; + } + + public Duration getRecoveryTimeout() { + return recoveryTimeout; + } + + public static class Builder { + + private String name = GcpMultiEndpointOptions.DEFAULT_NAME; + private List endpoints; + private ApiFunction, ManagedChannelBuilder> channelConfigurator; + private ChannelCredentials channelCredentials; + private Duration recoveryTimeout = Duration.ZERO; + + public Builder(List endpoints) { + setEndpoints(endpoints); + } + + public Builder(GcpMultiEndpointOptions options) { + this.name = options.getName(); + this.endpoints = options.getEndpoints(); + this.channelConfigurator = options.getChannelConfigurator(); + this.channelCredentials = 
options.getChannelCredentials(); + this.recoveryTimeout = options.getRecoveryTimeout(); + } + + public GcpMultiEndpointOptions build() { + return new GcpMultiEndpointOptions(this); + } + + private void setEndpoints(List endpoints) { + Preconditions.checkNotNull(endpoints); + Preconditions.checkArgument( + !endpoints.isEmpty(), "At least one endpoint must be specified."); + Preconditions.checkArgument( + endpoints.stream().noneMatch(s -> s.trim().isEmpty()), "No empty endpoints allowed."); + this.endpoints = endpoints; + } + + /** + * Sets the name of the MultiEndpoint. + * + * @param name MultiEndpoint name. + */ + public GcpMultiEndpointOptions.Builder withName(String name) { + this.name = name; + return this; + } + + /** + * Sets the endpoints of the MultiEndpoint. + * + * @param endpoints List of endpoints in the form of host:port in descending priority order. + */ + public GcpMultiEndpointOptions.Builder withEndpoints(List endpoints) { + this.setEndpoints(endpoints); + return this; + } + + /** + * Sets the channel configurator for the MultiEndpoint channel pool. + * + * @param channelConfigurator function to perform on the ManagedChannelBuilder in the channel + * pool. + */ + public GcpMultiEndpointOptions.Builder withChannelConfigurator( + ApiFunction, ManagedChannelBuilder> channelConfigurator) { + this.channelConfigurator = channelConfigurator; + return this; + } + + /** + * Sets the channel credentials to use in the MultiEndpoint channel pool. + * + * @param channelCredentials channel credentials. + */ + public GcpMultiEndpointOptions.Builder withChannelCredentials( + ChannelCredentials channelCredentials) { + this.channelCredentials = channelCredentials; + return this; + } + + /** + * Sets the recovery timeout for the MultiEndpoint. See more info in the {@link MultiEndpoint}. + * + * @param recoveryTimeout recovery timeout. 
+ */ + public GcpMultiEndpointOptions.Builder withRecoveryTimeout(Duration recoveryTimeout) { + this.recoveryTimeout = recoveryTimeout; + return this; + } + } +} diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/multiendpoint/Endpoint.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/multiendpoint/Endpoint.java new file mode 100644 index 00000000..fc6d08eb --- /dev/null +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/multiendpoint/Endpoint.java @@ -0,0 +1,78 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.grpc.multiendpoint; + +import com.google.errorprone.annotations.CheckReturnValue; +import java.util.concurrent.ScheduledFuture; + +/** + * Endpoint holds an endpoint's state, priority and a future of upcoming state change. + */ +@CheckReturnValue +final class Endpoint { + + /** + * Holds a state of an endpoint. 
+ */ + public enum EndpointState { + UNAVAILABLE, + AVAILABLE, + RECOVERING, + } + + private final String id; + private EndpointState state; + private int priority; + private ScheduledFuture changeStateFuture; + + public Endpoint(String id, EndpointState state, int priority) { + this.id = id; + this.priority = priority; + this.state = state; + } + + public String getId() { + return id; + } + + public EndpointState getState() { + return state; + } + + public int getPriority() { + return priority; + } + + public void setState(EndpointState state) { + this.state = state; + } + + public void setPriority(int priority) { + this.priority = priority; + } + + public synchronized void setChangeStateFuture(ScheduledFuture future) { + resetStateChangeFuture(); + changeStateFuture = future; + } + + public synchronized void resetStateChangeFuture() { + if (changeStateFuture != null) { + changeStateFuture.cancel(true); + } + } +} diff --git a/grpc-gcp/src/main/java/com/google/cloud/grpc/multiendpoint/MultiEndpoint.java b/grpc-gcp/src/main/java/com/google/cloud/grpc/multiendpoint/MultiEndpoint.java new file mode 100644 index 00000000..18b9abfd --- /dev/null +++ b/grpc-gcp/src/main/java/com/google/cloud/grpc/multiendpoint/MultiEndpoint.java @@ -0,0 +1,204 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.grpc.multiendpoint; + +import static java.util.Comparator.comparingInt; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import com.google.cloud.grpc.multiendpoint.Endpoint.EndpointState; +import com.google.common.base.Preconditions; +import com.google.errorprone.annotations.CheckReturnValue; +import com.google.errorprone.annotations.concurrent.GuardedBy; +import java.time.Duration; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; + +/** + * MultiEndpoint holds a list of endpoints, tracks their availability and defines the current + * endpoint. An endpoint has a priority defined by its position in the list (first item has top + * priority). MultiEndpoint returns top priority endpoint that is available as current. If no + * endpoint is available, MultiEndpoint returns the top priority endpoint. + * + *

 * <p>Sometimes switching between endpoints can be costly, and it is worth waiting for some time
 * after current endpoint becomes unavailable. For this case, use {@link
 * Builder#withRecoveryTimeout} to set the recovery timeout. MultiEndpoint will keep the current
 * endpoint for up to recovery timeout after it became unavailable to give it some time to recover.
 *
 * <p>The list of endpoints can be changed at any time with {@link #setEndpoints} method.
 * MultiEndpoint will preserve endpoints' state and update their priority according to their new
 * positions.
 *
The initial state of endpoint is "unavailable" or "recovering" if using recovery timeout. + */ +@CheckReturnValue +public final class MultiEndpoint { + @GuardedBy("this") + private final Map endpointsMap = new HashMap<>(); + + @GuardedBy("this") + private String currentId; + + private final Duration recoveryTimeout; + + private final ScheduledThreadPoolExecutor executor = new ScheduledThreadPoolExecutor(1); + + private MultiEndpoint(Builder builder) { + this.recoveryTimeout = builder.recoveryTimeout; + this.setEndpoints(builder.endpoints); + } + + /** Builder for MultiEndpoint. */ + public static final class Builder { + private final List endpoints; + private Duration recoveryTimeout = Duration.ZERO; + + public Builder(List endpoints) { + Preconditions.checkNotNull(endpoints); + Preconditions.checkArgument(!endpoints.isEmpty(), "Endpoints list must not be empty."); + this.endpoints = endpoints; + } + + /** + * MultiEndpoint will keep the current endpoint for up to recovery timeout after it became + * unavailable to give it some time to recover. + */ + public Builder withRecoveryTimeout(Duration timeout) { + Preconditions.checkNotNull(timeout); + this.recoveryTimeout = timeout; + return this; + } + + public MultiEndpoint build() { + return new MultiEndpoint(this); + } + } + + /** + * Returns current endpoint id. + * + *

Note that the read is not synchronized and in case of a race condition there is a chance of + * getting an outdated current id. + */ + @SuppressWarnings("GuardedBy") + public String getCurrentId() { + return currentId; + } + + private synchronized void setEndpointStateInternal(String endpointId, EndpointState state) { + Endpoint endpoint = endpointsMap.get(endpointId); + if (endpoint != null) { + endpoint.setState(state); + maybeUpdateCurrentEndpoint(); + } + } + + private boolean isRecoveryEnabled() { + return !recoveryTimeout.isNegative() && !recoveryTimeout.isZero(); + } + + /** Inform MultiEndpoint when an endpoint becomes available or unavailable. */ + public synchronized void setEndpointAvailable(String endpointId, boolean available) { + setEndpointState(endpointId, available ? EndpointState.AVAILABLE : EndpointState.UNAVAILABLE); + } + + private synchronized void setEndpointState(String endpointId, EndpointState state) { + Preconditions.checkNotNull(state); + Endpoint endpoint = endpointsMap.get(endpointId); + if (endpoint == null) { + return; + } + // If we allow some recovery time. + if (EndpointState.UNAVAILABLE.equals(state) && isRecoveryEnabled()) { + endpoint.setState(EndpointState.RECOVERING); + ScheduledFuture future = + executor.schedule( + () -> setEndpointStateInternal(endpointId, EndpointState.UNAVAILABLE), + recoveryTimeout.toMillis(), + MILLISECONDS); + endpoint.setChangeStateFuture(future); + return; + } + endpoint.resetStateChangeFuture(); + endpoint.setState(state); + maybeUpdateCurrentEndpoint(); + } + + /** + * Provide an updated list of endpoints to MultiEndpoint. + * + *

MultiEndpoint will preserve current endpoints' state and update their priority according to + * their new positions. + */ + public synchronized void setEndpoints(List endpoints) { + Preconditions.checkNotNull(endpoints); + Preconditions.checkArgument(!endpoints.isEmpty(), "Endpoints list must not be empty."); + + // Remove obsolete endpoints. + endpointsMap.keySet().retainAll(endpoints); + + // Add new endpoints and update priority. + int priority = 0; + for (String endpointId : endpoints) { + Endpoint existingEndpoint = endpointsMap.get(endpointId); + if (existingEndpoint != null) { + existingEndpoint.setPriority(priority++); + continue; + } + EndpointState newState = + isRecoveryEnabled() ? EndpointState.RECOVERING : EndpointState.UNAVAILABLE; + Endpoint newEndpoint = new Endpoint(endpointId, newState, priority++); + if (isRecoveryEnabled()) { + ScheduledFuture future = + executor.schedule( + () -> setEndpointStateInternal(endpointId, EndpointState.UNAVAILABLE), + recoveryTimeout.toMillis(), + MILLISECONDS); + newEndpoint.setChangeStateFuture(future); + } + endpointsMap.put(endpointId, newEndpoint); + } + + maybeUpdateCurrentEndpoint(); + } + + // Updates currentId to the top-priority available endpoint unless the current endpoint is + // recovering. + private synchronized void maybeUpdateCurrentEndpoint() { + Optional topEndpoint = + endpointsMap.values().stream() + .filter((c) -> c.getState().equals(EndpointState.AVAILABLE)) + .min(comparingInt(Endpoint::getPriority)); + + Endpoint current = endpointsMap.get(currentId); + if (current != null && current.getState().equals(EndpointState.RECOVERING)) { + // Keep recovering endpoint as current unless a higher priority endpoint became available. 
+ if (!topEndpoint.isPresent() || topEndpoint.get().getPriority() >= current.getPriority()) { + return; + } + } + + if (!topEndpoint.isPresent() && current == null) { + topEndpoint = endpointsMap.values().stream().min(comparingInt(Endpoint::getPriority)); + } + + topEndpoint.ifPresent(endpoint -> currentId = endpoint.getId()); + } +} diff --git a/grpc-gcp/src/main/proto/google/grpc/gcp/proto/grpc_gcp.proto b/grpc-gcp/src/main/proto/google/grpc/gcp/proto/grpc_gcp.proto index 1301dd99..81987f19 100644 --- a/grpc-gcp/src/main/proto/google/grpc/gcp/proto/grpc_gcp.proto +++ b/grpc-gcp/src/main/proto/google/grpc/gcp/proto/grpc_gcp.proto @@ -21,14 +21,19 @@ option java_outer_classname = "GcpExtensionProto"; option java_package = "com.google.cloud.grpc.proto"; message ApiConfig { + // Deprecated. Use GcpManagedChannelOptions.GcpChannelPoolOptions class. // The channel pool configurations. - ChannelPoolConfig channel_pool = 2; + ChannelPoolConfig channel_pool = 2 [deprecated = true]; // The method configurations. repeated MethodConfig method = 1001; } + +// Deprecated. Use GcpManagedChannelOptions.GcpChannelPoolOptions class. message ChannelPoolConfig { + option deprecated = true; + // The max number of channels in the pool. uint32 max_size = 1; // The idle timeout (seconds) of channels without bound affinity sessions. 
diff --git a/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelOptionsTest.java b/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelOptionsTest.java index 3a39d86a..c8ddb9bd 100644 --- a/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelOptionsTest.java +++ b/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelOptionsTest.java @@ -16,11 +16,13 @@ package com.google.cloud.grpc; +import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpChannelPoolOptions; import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; import com.google.cloud.grpc.GcpManagedChannelOptions.GcpResiliencyOptions; import io.opencensus.metrics.LabelKey; @@ -34,7 +36,7 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -/** Unit tests for GcpManagedChannel. */ +/** Unit tests for GcpManagedChannelOptionsTest. 
*/ @RunWith(JUnit4.class) public final class GcpManagedChannelOptionsTest { private static final String namePrefix = "name-prefix"; @@ -168,4 +170,25 @@ public void testOptionsReBuild() { assertEquals(unresponsiveMs, resOpts.getUnresponsiveDetectionMs()); assertEquals(unresponsiveDroppedCount, resOpts.getUnresponsiveDetectionDroppedCount()); } + + @Test + public void testPoolOptions() { + final GcpManagedChannelOptions opts = GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions( + GcpChannelPoolOptions.newBuilder() + .setMaxSize(5) + .setMinSize(2) + .setConcurrentStreamsLowWatermark(10) + .setUseRoundRobinOnBind(true) + .build() + ) + .build(); + + GcpChannelPoolOptions channelPoolOptions = opts.getChannelPoolOptions(); + assertThat(channelPoolOptions).isNotNull(); + assertThat(channelPoolOptions.getMaxSize()).isEqualTo(5); + assertThat(channelPoolOptions.getMinSize()).isEqualTo(2); + assertThat(channelPoolOptions.getConcurrentStreamsLowWatermark()).isEqualTo(10); + assertThat(channelPoolOptions.isUseRoundRobinOnBind()).isTrue(); + } } diff --git a/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelTest.java b/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelTest.java index 50c3b3c6..9af0e88e 100644 --- a/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelTest.java +++ b/grpc-gcp/src/test/java/com/google/cloud/grpc/GcpManagedChannelTest.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertNotNull; import com.google.cloud.grpc.GcpManagedChannel.ChannelRef; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpChannelPoolOptions; import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; import com.google.cloud.grpc.GcpManagedChannelOptions.GcpResiliencyOptions; import com.google.cloud.grpc.MetricRegistryTestUtils.FakeMetricRegistry; @@ -49,9 +50,18 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.LinkedList; import java.util.List; +import 
java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -70,6 +80,38 @@ public final class GcpManagedChannelTest { private static final int MAX_CHANNEL = 10; private static final int MAX_STREAM = 100; + private static final Logger testLogger = Logger.getLogger(GcpManagedChannel.class.getName()); + + private final List logRecords = new LinkedList<>(); + + private String lastLogMessage() { + return lastLogMessage(1); + } + + private String lastLogMessage(int nthFromLast) { + return logRecords.get(logRecords.size() - nthFromLast).getMessage(); + } + + private Level lastLogLevel() { + return lastLogLevel(1); + } + + private Level lastLogLevel(int nthFromLast) { + return logRecords.get(logRecords.size() - nthFromLast).getLevel(); + } + + private final Handler testLogHandler = new Handler() { + @Override + public synchronized void publish(LogRecord record) { + logRecords.add(record); + } + + @Override + public void flush() {} + + @Override + public void close() throws SecurityException {} + }; private GcpManagedChannel gcpChannel; private ManagedChannelBuilder builder; @@ -82,6 +124,7 @@ private void resetGcpChannel() { @Before public void setUpChannel() { + testLogger.addHandler(testLogHandler); builder = ManagedChannelBuilder.forAddress(TARGET, 443); gcpChannel = (GcpManagedChannel) GcpManagedChannelBuilder.forDelegateBuilder(builder).build(); } @@ -89,6 +132,9 @@ public void setUpChannel() { @After public void shutdown() { gcpChannel.shutdownNow(); + testLogger.removeHandler(testLogHandler); + testLogger.setLevel(Level.INFO); + 
logRecords.clear(); } @Test @@ -130,17 +176,117 @@ public void testLoadApiConfigString() throws Exception { assertEquals(3, gcpChannel.methodToAffinity.size()); } + @Test + public void testUsesPoolOptions() { + resetGcpChannel(); + GcpChannelPoolOptions poolOptions = GcpChannelPoolOptions.newBuilder() + .setMaxSize(5) + .setMinSize(2) + .setConcurrentStreamsLowWatermark(50) + .build(); + GcpManagedChannelOptions options = GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions(poolOptions) + .build(); + gcpChannel = + (GcpManagedChannel) + GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withOptions(options) + .build(); + assertEquals(2, gcpChannel.channelRefs.size()); + assertEquals(5, gcpChannel.getMaxSize()); + assertEquals(2, gcpChannel.getMinSize()); + assertEquals(50, gcpChannel.getStreamsLowWatermark()); + } + + @Test + public void testPoolOptionsOverrideApiConfig() { + resetGcpChannel(); + final URL resource = GcpManagedChannelTest.class.getClassLoader().getResource(API_FILE); + assertNotNull(resource); + File configFile = new File(resource.getFile()); + GcpChannelPoolOptions poolOptions = GcpChannelPoolOptions.newBuilder() + .setMaxSize(5) + .setConcurrentStreamsLowWatermark(50) + .build(); + GcpManagedChannelOptions options = GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions(poolOptions) + .build(); + gcpChannel = + (GcpManagedChannel) + GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withApiConfigJsonFile(configFile) + .withOptions(options) + .build(); + assertEquals(0, gcpChannel.channelRefs.size()); + assertEquals(5, gcpChannel.getMaxSize()); + assertEquals(50, gcpChannel.getStreamsLowWatermark()); + assertEquals(3, gcpChannel.methodToAffinity.size()); + } + @Test public void testGetChannelRefInitialization() { + // Watch debug messages. 
+ testLogger.setLevel(Level.FINER); + + final int currentIndex = GcpManagedChannel.channelPoolIndex.get(); + final String poolIndex = String.format("pool-%d", currentIndex); + + // Initial log messages count. + int logCount = logRecords.size(); + // Should not have a managedchannel by default. assertEquals(0, gcpChannel.channelRefs.size()); // But once requested it's there. assertEquals(0, gcpChannel.getChannelRef(null).getAffinityCount()); + + assertThat(logRecords.size()).isEqualTo(logCount + 2); + assertThat(lastLogMessage()).isEqualTo(poolIndex + ": Channel 0 created."); + assertThat(lastLogLevel()).isEqualTo(Level.FINER); + assertThat(logRecords.get(logRecords.size() - 2).getMessage()).isEqualTo( + poolIndex + ": Channel 0 state change detected: null -> IDLE"); + assertThat(logRecords.get(logRecords.size() - 2).getLevel()).isEqualTo(Level.FINER); + // The state of this channel is idle. assertEquals(ConnectivityState.IDLE, gcpChannel.getState(false)); assertEquals(1, gcpChannel.channelRefs.size()); } + @Test + public void testGetChannelRefInitializationWithMinSize() throws InterruptedException { + resetGcpChannel(); + GcpChannelPoolOptions poolOptions = GcpChannelPoolOptions.newBuilder() + .setMaxSize(5) + .setMinSize(2) + .build(); + GcpManagedChannelOptions options = GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions(poolOptions) + .build(); + gcpChannel = + (GcpManagedChannel) + GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withOptions(options) + .build(); + // Should have 2 channels since the beginning. + assertThat(gcpChannel.channelRefs.size()).isEqualTo(2); + TimeUnit.MILLISECONDS.sleep(50); + // The connection establishment must have been started on these two channels. 
+ assertThat(gcpChannel.getState(false)) + .isAnyOf( + ConnectivityState.CONNECTING, + ConnectivityState.READY, + ConnectivityState.TRANSIENT_FAILURE); + assertThat(gcpChannel.channelRefs.get(0).getChannel().getState(false)) + .isAnyOf( + ConnectivityState.CONNECTING, + ConnectivityState.READY, + ConnectivityState.TRANSIENT_FAILURE); + assertThat(gcpChannel.channelRefs.get(1).getChannel().getState(false)) + .isAnyOf( + ConnectivityState.CONNECTING, + ConnectivityState.READY, + ConnectivityState.TRANSIENT_FAILURE); + } + @Test public void testGetChannelRefPickUpSmallest() { // All channels have max number of streams @@ -179,6 +325,9 @@ private void assertFallbacksMetric( @Test public void testGetChannelRefWithFallback() { + // Watch debug messages. + testLogger.setLevel(Level.FINEST); + final FakeMetricRegistry fakeRegistry = new FakeMetricRegistry(); final int maxSize = 3; @@ -205,6 +354,9 @@ public void testGetChannelRefWithFallback() { .build()) .build(); + final int currentIndex = GcpManagedChannel.channelPoolIndex.get(); + final String poolIndex = String.format("pool-%d", currentIndex); + // Creates the first channel with 0 id. assertEquals(0, pool.getNumberOfChannels()); ChannelRef chRef = pool.getChannelRef(null); @@ -219,20 +371,35 @@ public void testGetChannelRefWithFallback() { // Let's simulate the non-ready state for the 0 channel. pool.processChannelStateChange(0, ConnectivityState.CONNECTING); + int logCount = logRecords.size(); // Now request for a channel should return a newly created channel because our current channel - // is not ready and we haven't reached the pool's max size. + // is not ready, and we haven't reached the pool's max size. chRef = pool.getChannelRef(null); assertEquals(1, chRef.getId()); assertEquals(2, pool.getNumberOfChannels()); + // This was a fallback from non-ready channel 0 to the newly created channel 1. 
+ assertThat(logRecords.size()).isEqualTo(logCount + 3); + logRecords.forEach(logRecord -> System.out.println(logRecord.getMessage())); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Fallback to newly created channel 1"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + assertFallbacksMetric(fakeRegistry, 1, 0); // Adding one active stream to channel 1. pool.channelRefs.get(1).activeStreamsCountIncr(); + logCount = logRecords.size(); // Having 0 active streams on channel 0 and 1 active streams on channel one with the default // settings would return channel 0 for the next channel request. But having fallback enabled and // channel 0 not ready it should return channel 1 instead. chRef = pool.getChannelRef(null); assertEquals(1, chRef.getId()); assertEquals(2, pool.getNumberOfChannels()); + // This was the second fallback from non-ready channel 0 to the channel 1. + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Picking fallback channel: 0 -> 1"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + assertFallbacksMetric(fakeRegistry, 2, 0); // Now let's have channel 0 still as not ready but bring channel 1 streams to low watermark. for (int i = 0; i < lowWatermark - 1; i++) { @@ -259,6 +426,8 @@ public void testGetChannelRefWithFallback() { chRef = pool.getChannelRef(null); assertEquals(2, chRef.getId()); assertEquals(3, pool.getNumberOfChannels()); + // This was the third fallback from non-ready channel 0 to the channel 2. + assertFallbacksMetric(fakeRegistry, 3, 0); // Let's bring channel 1 to max streams and mark channel 2 as not ready. for (int i = 0; i < MAX_STREAM - lowWatermark; i++) { @@ -269,54 +438,88 @@ public void testGetChannelRefWithFallback() { // Now we have two non-ready channels and one overloaded. 
// Even when fallback enabled there is no good candidate at this time, the next channel request - // should return a channel with lowest streams count regardless of its readiness state. + // should return a channel with the lowest streams count regardless of its readiness state. // In our case it is channel 0. + logCount = logRecords.size(); chRef = pool.getChannelRef(null); assertEquals(0, chRef.getId()); assertEquals(3, pool.getNumberOfChannels()); - - // So far the fallback logic sometimes provided different channels than a pool with disabled - // fallback would provide. But for metrics we consider a fallback only if we have an affinity - // key that was mapped to some channel and after that channel went to a non-ready state we - // temporarily used another channel as a fallback. - // Because of that, metric values for successful and failed fallbacks should be still zero. - assertFallbacksMetric(fakeRegistry, 0, 0); + // This will also count as a failed fallback because we couldn't find a ready and non-overloaded + // channel. + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Failed to find fallback for channel 0"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + assertFallbacksMetric(fakeRegistry, 3, 1); // Let's have an affinity key and bind it to channel 0. final String key = "ABC"; pool.bind(pool.channelRefs.get(0), Collections.singletonList(key)); + logCount = logRecords.size(); // Channel 0 is not ready currently and the fallback enabled should look for a fallback but we // still don't have a good channel because channel 1 is not ready and channel 2 is overloaded. // The getChannelRef should return the original channel 0 and report a failed fallback. 
chRef = pool.getChannelRef(key); assertEquals(0, chRef.getId()); - assertFallbacksMetric(fakeRegistry, 0, 1); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Failed to find fallback for channel 0"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + assertFallbacksMetric(fakeRegistry, 3, 2); // Let's return channel 1 to a ready state. pool.processChannelStateChange(1, ConnectivityState.READY); + logCount = logRecords.size(); // Now we have a fallback candidate. // The getChannelRef should return the channel 1 and report a successful fallback. chRef = pool.getChannelRef(key); assertEquals(1, chRef.getId()); - assertFallbacksMetric(fakeRegistry, 1, 1); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Setting fallback channel: 0 -> 1"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + assertFallbacksMetric(fakeRegistry, 4, 2); + + // Let's briefly bring channel 2 to ready state. + pool.processChannelStateChange(2, ConnectivityState.READY); + logCount = logRecords.size(); + // Now we have a better fallback candidate (fewer streams on channel 2). But this time we + // already used channel 1 as a fallback, and we should stick to it instead of returning the + // original channel. + // The getChannelRef should return the channel 1 and report a successful fallback. + chRef = pool.getChannelRef(key); + assertEquals(1, chRef.getId()); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Using fallback channel: 0 -> 1"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + assertFallbacksMetric(fakeRegistry, 5, 2); + pool.processChannelStateChange(2, ConnectivityState.CONNECTING); // Let's bring channel 1 back to connecting state. 
pool.processChannelStateChange(1, ConnectivityState.CONNECTING); + logCount = logRecords.size(); // Now we don't have a good fallback candidate again. But this time we already used channel 1 // as a fallback and we should stick to it instead of returning the original channel. // The getChannelRef should return the channel 1 and report a failed fallback. chRef = pool.getChannelRef(key); assertEquals(1, chRef.getId()); - assertFallbacksMetric(fakeRegistry, 1, 2); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Failed to find fallback for channel 0"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + assertFallbacksMetric(fakeRegistry, 5, 3); // Finally, we bring both channel 1 and channel 0 to the ready state and we should get the // original channel 0 for the key without any fallbacks happening. pool.processChannelStateChange(1, ConnectivityState.READY); pool.processChannelStateChange(0, ConnectivityState.READY); + logCount = logRecords.size(); chRef = pool.getChannelRef(key); assertEquals(0, chRef.getId()); - assertFallbacksMetric(fakeRegistry, 1, 2); + assertThat(logRecords.size()).isEqualTo(logCount); + assertFallbacksMetric(fakeRegistry, 5, 3); } @Test @@ -333,13 +536,30 @@ public void testGetChannelRefMaxSize() { @Test public void testBindUnbindKey() { + // Watch debug messages. + testLogger.setLevel(Level.FINEST); + + final int currentIndex = GcpManagedChannel.channelPoolIndex.get(); + final String poolIndex = String.format("pool-%d", currentIndex); + // Initialize the channel and bind the key, check the affinity count. ChannelRef cf1 = gcpChannel.new ChannelRef(builder.build(), 1, 0, 5); - ChannelRef cf2 = gcpChannel.new ChannelRef(builder.build(), 1, 0, 4); + ChannelRef cf2 = gcpChannel.new ChannelRef(builder.build(), 2, 0, 4); gcpChannel.channelRefs.add(cf1); gcpChannel.channelRefs.add(cf2); + gcpChannel.bind(cf1, Collections.singletonList("key1")); + + // Initial log messages count. 
+ int logCount = logRecords.size(); + gcpChannel.bind(cf2, Collections.singletonList("key2")); + + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Binding 1 key(s) to channel 2: [key2]"); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + gcpChannel.bind(cf2, Collections.singletonList("key3")); // Binding the same key to the same channel should not increase affinity count. gcpChannel.bind(cf1, Collections.singletonList("key1")); @@ -352,15 +572,25 @@ public void testBindUnbindKey() { assertEquals(1, gcpChannel.channelRefs.get(1).getAffinityCount()); assertEquals(3, gcpChannel.affinityKeyToChannelRef.size()); + logCount = logRecords.size(); + // Unbind the affinity key. gcpChannel.unbind(Collections.singletonList("key1")); assertEquals(1, gcpChannel.channelRefs.get(0).getAffinityCount()); assertEquals(1, gcpChannel.channelRefs.get(1).getAffinityCount()); assertEquals(2, gcpChannel.affinityKeyToChannelRef.size()); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Unbinding key key1 from channel 1."); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); gcpChannel.unbind(Collections.singletonList("key1")); assertEquals(1, gcpChannel.channelRefs.get(0).getAffinityCount()); assertEquals(1, gcpChannel.channelRefs.get(1).getAffinityCount()); assertEquals(2, gcpChannel.affinityKeyToChannelRef.size()); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Unbinding key key1 but it wasn't bound."); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); gcpChannel.unbind(Collections.singletonList("key2")); assertEquals(1, gcpChannel.channelRefs.get(0).getAffinityCount()); assertEquals(0, gcpChannel.channelRefs.get(1).getAffinityCount()); @@ -371,6 +601,27 @@ public void testBindUnbindKey() { assertEquals(0, gcpChannel.affinityKeyToChannelRef.size()); } + @Test + public void 
testUsingKeyWithoutBinding() { + // Initialize the pool with two channels (no key is bound up front in this test). + ChannelRef cf1 = gcpChannel.new ChannelRef(builder.build(), 1, 0, 5); + ChannelRef cf2 = gcpChannel.new ChannelRef(builder.build(), 2, 0, 4); + gcpChannel.channelRefs.add(cf1); + gcpChannel.channelRefs.add(cf2); + + final String key = "non-binded-key"; + ChannelRef channelRef = gcpChannel.getChannelRef(key); + // Should bind on the fly to the least busy channel, which is 2. + assertThat(channelRef.getId()).isEqualTo(2); + + cf1.activeStreamsCountDecr(System.nanoTime(), Status.OK, true); + cf1.activeStreamsCountDecr(System.nanoTime(), Status.OK, true); + channelRef = gcpChannel.getChannelRef(key); + // Even after channel 1 now has fewer active streams (3) the channel 2 is still mapped for the + // same key. + assertThat(channelRef.getId()).isEqualTo(2); + } + @Test public void testGetKeysFromRequest() { String expected = "thisisaname"; @@ -483,6 +734,8 @@ public void testParseEmptyChannelJsonFile() { @Test public void testMetrics() { + // Watch debug messages. + testLogger.setLevel(Level.FINE); final FakeMetricRegistry fakeRegistry = new FakeMetricRegistry(); final String prefix = "some/prefix/"; final List labelKeys = @@ -510,12 +763,24 @@ public void testMetrics() { .build()) .build(); + final int currentIndex = GcpManagedChannel.channelPoolIndex.get(); + final String poolIndex = String.format("pool-%d", currentIndex); + + // Logs metrics options. 
+ assertThat(logRecords.get(logRecords.size() - 2).getLevel()).isEqualTo(Level.FINE); + assertThat(logRecords.get(logRecords.size() - 2).getMessage()).startsWith( + poolIndex + ": Metrics options: {namePrefix: \"some/prefix/\", labels: " + + "[key_a: \"val_a\", key_b: \"val_b\"]," + ); + + assertThat(lastLogLevel()).isEqualTo(Level.INFO); + assertThat(lastLogMessage()).isEqualTo(poolIndex + ": Metrics enabled."); + List expectedLabelKeys = new ArrayList<>(labelKeys); expectedLabelKeys.add( LabelKey.create(GcpMetricsConstants.POOL_INDEX_LABEL, GcpMetricsConstants.POOL_INDEX_DESC)); List expectedLabelValues = new ArrayList<>(labelValues); - int currentIndex = GcpManagedChannel.channelPoolIndex.get(); - expectedLabelValues.add(LabelValue.create(String.format("pool-%d", currentIndex))); + expectedLabelValues.add(LabelValue.create(poolIndex)); try { // Let's fill five channels with some fake streams. @@ -530,12 +795,19 @@ public void testMetrics() { MetricsRecord record = fakeRegistry.pollRecord(); assertThat(record.getMetrics().size()).isEqualTo(25); + // Initial log messages count. 
+ int logCount = logRecords.size(); + List> numChannels = record.getMetrics().get(prefix + GcpMetricsConstants.METRIC_MAX_CHANNELS); assertThat(numChannels.size()).isEqualTo(1); assertThat(numChannels.get(0).value()).isEqualTo(5L); assertThat(numChannels.get(0).keys()).isEqualTo(expectedLabelKeys); assertThat(numChannels.get(0).values()).isEqualTo(expectedLabelValues); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MAX_CHANNELS + " = 5"); List> maxAllowedChannels = record.getMetrics().get(prefix + GcpMetricsConstants.METRIC_MAX_ALLOWED_CHANNELS); @@ -543,6 +815,10 @@ public void testMetrics() { assertThat(maxAllowedChannels.get(0).value()).isEqualTo(MAX_CHANNEL); assertThat(maxAllowedChannels.get(0).keys()).isEqualTo(expectedLabelKeys); assertThat(maxAllowedChannels.get(0).values()).isEqualTo(expectedLabelValues); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MAX_ALLOWED_CHANNELS + " = 10"); List> minActiveStreams = record.getMetrics().get(prefix + GcpMetricsConstants.METRIC_MIN_ACTIVE_STREAMS); @@ -550,6 +826,10 @@ public void testMetrics() { assertThat(minActiveStreams.get(0).value()).isEqualTo(0L); assertThat(minActiveStreams.get(0).keys()).isEqualTo(expectedLabelKeys); assertThat(minActiveStreams.get(0).values()).isEqualTo(expectedLabelValues); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MIN_ACTIVE_STREAMS + " = 0"); List> maxActiveStreams = record.getMetrics().get(prefix + GcpMetricsConstants.METRIC_MAX_ACTIVE_STREAMS); @@ -557,21 +837,247 @@ public void testMetrics() { 
assertThat(maxActiveStreams.get(0).value()).isEqualTo(7L); assertThat(maxActiveStreams.get(0).keys()).isEqualTo(expectedLabelKeys); assertThat(maxActiveStreams.get(0).values()).isEqualTo(expectedLabelValues); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MAX_ACTIVE_STREAMS + " = 7"); List> totalActiveStreams = record.getMetrics().get(prefix + GcpMetricsConstants.METRIC_MAX_TOTAL_ACTIVE_STREAMS); assertThat(totalActiveStreams.size()).isEqualTo(1); - assertThat(totalActiveStreams.get(0).value()) - .isEqualTo(Arrays.stream(streams).asLongStream().sum()); + long totalStreamsExpected = Arrays.stream(streams).asLongStream().sum(); + assertThat(totalActiveStreams.get(0).value()).isEqualTo(totalStreamsExpected); assertThat(totalActiveStreams.get(0).keys()).isEqualTo(expectedLabelKeys); assertThat(totalActiveStreams.get(0).values()).isEqualTo(expectedLabelValues); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MAX_TOTAL_ACTIVE_STREAMS + " = " + + totalStreamsExpected); + } finally { + pool.shutdownNow(); + } + } + + @Test + public void testLogMetrics() throws InterruptedException { + // Watch debug messages. 
+ testLogger.setLevel(Level.FINE); + + final GcpManagedChannel pool = + (GcpManagedChannel) + GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withOptions( + GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions( + GcpChannelPoolOptions.newBuilder() + .setMaxSize(5) + .setConcurrentStreamsLowWatermark(3) + .build()) + .withMetricsOptions( + GcpMetricsOptions.newBuilder() + .withNamePrefix("prefix") + .build()) + .withResiliencyOptions( + GcpResiliencyOptions.newBuilder() + .setNotReadyFallback(true) + .withUnresponsiveConnectionDetection(100, 2) + .build()) + .build()) + .build(); + + ExecutorService executorService = Executors.newSingleThreadExecutor(); + try { + final int currentIndex = GcpManagedChannel.channelPoolIndex.get(); + final String poolIndex = String.format("pool-%d", currentIndex); + + int[] streams = new int[]{3, 2, 5, 7, 1}; + int[] keyCount = new int[]{2, 3, 1, 1, 4}; + int[] okCalls = new int[]{2, 2, 8, 2, 3}; + int[] errCalls = new int[]{1, 1, 2, 2, 1}; + List channels = new ArrayList<>(); + for (int i = 0; i < streams.length; i++) { + FakeManagedChannel channel = new FakeManagedChannel(executorService); + channels.add(channel); + ChannelRef ref = pool.new ChannelRef(channel, i); + pool.channelRefs.add(ref); + + // Simulate channel connecting. + channel.setState(ConnectivityState.CONNECTING); + TimeUnit.MILLISECONDS.sleep(10); + + // For the last one... + if (i == streams.length - 1) { + // This will be a couple of successful fallbacks. + pool.getChannelRef(null); + pool.getChannelRef(null); + // Bring down all other channels. + for (int j = 0; j < i; j++) { + channels.get(j).setState(ConnectivityState.CONNECTING); + } + TimeUnit.MILLISECONDS.sleep(100); + // And this will be a failed fallback (no ready channels). + pool.getChannelRef(null); + + // Simulate unresponsive connection. 
+ long startNanos = System.nanoTime(); + final Status deStatus = Status.fromCode(Code.DEADLINE_EXCEEDED); + ref.activeStreamsCountIncr(); + ref.activeStreamsCountDecr(startNanos, deStatus, false); + ref.activeStreamsCountIncr(); + ref.activeStreamsCountDecr(startNanos, deStatus, false); + + // Simulate unresponsive connection with more dropped calls. + startNanos = System.nanoTime(); + ref.activeStreamsCountIncr(); + ref.activeStreamsCountDecr(startNanos, deStatus, false); + ref.activeStreamsCountIncr(); + ref.activeStreamsCountDecr(startNanos, deStatus, false); + TimeUnit.MILLISECONDS.sleep(110); + ref.activeStreamsCountIncr(); + ref.activeStreamsCountDecr(startNanos, deStatus, false); + } + + channel.setState(ConnectivityState.READY); + + for (int j = 0; j < streams[i]; j++) { + ref.activeStreamsCountIncr(); + } + // Bind affinity keys. + final List keys = new ArrayList<>(); + for (int j = 0; j < keyCount[i]; j++) { + keys.add("key-" + i + "-" + j); + } + pool.bind(ref, keys); + // Simulate successful calls. + for (int j = 0; j < okCalls[i]; j++) { + ref.activeStreamsCountDecr(0, Status.OK, false); + ref.activeStreamsCountIncr(); + } + // Simulate failed calls. 
+ for (int j = 0; j < errCalls[i]; j++) { + ref.activeStreamsCountDecr(0, Status.UNAVAILABLE, false); + ref.activeStreamsCountIncr(); + } + + } + + logRecords.clear(); + + pool.logMetrics(); + + List messages = Arrays.asList(logRecords.stream().map(LogRecord::getMessage).toArray()); + + assertThat(messages).contains(poolIndex + ": Active streams counts: [3, 2, 5, 7, 1]"); + assertThat(messages).contains(poolIndex + ": Affinity counts: [2, 3, 1, 1, 4]"); + + assertThat(messages).contains(poolIndex + ": stat: min_ready_channels = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_ready_channels = 4"); + assertThat(messages).contains(poolIndex + ": stat: max_channels = 5"); + assertThat(messages).contains(poolIndex + ": stat: max_allowed_channels = 5"); + assertThat(messages).contains(poolIndex + ": stat: num_channel_disconnect = 4"); + assertThat(messages).contains(poolIndex + ": stat: num_channel_connect = 5"); + assertThat(messages.stream().filter(o -> o.toString().matches( + poolIndex + ": stat: min_channel_readiness_time = \\d\\d+" + ) + ).count()).isEqualTo(1); + assertThat(messages.stream().filter(o -> o.toString().matches( + poolIndex + ": stat: avg_channel_readiness_time = \\d\\d+" + ) + ).count()).isEqualTo(1); + assertThat(messages.stream().filter(o -> o.toString().matches( + poolIndex + ": stat: max_channel_readiness_time = \\d\\d+" + ) + ).count()).isEqualTo(1); + assertThat(messages).contains(poolIndex + ": stat: min_active_streams_per_channel = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_active_streams_per_channel = 7"); + assertThat(messages).contains(poolIndex + ": stat: min_total_active_streams = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_total_active_streams = 18"); + assertThat(messages).contains(poolIndex + ": stat: min_affinity_per_channel = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_affinity_per_channel = 4"); + assertThat(messages).contains(poolIndex + ": stat: num_affinity = 
11"); + assertThat(messages).contains(poolIndex + ": Ok calls: [2, 2, 8, 2, 3]"); + assertThat(messages).contains(poolIndex + ": Failed calls: [1, 1, 2, 2, 6]"); + assertThat(messages).contains(poolIndex + ": stat: min_calls_per_channel_ok = 2"); + assertThat(messages).contains(poolIndex + ": stat: min_calls_per_channel_err = 1"); + assertThat(messages).contains(poolIndex + ": stat: max_calls_per_channel_ok = 8"); + assertThat(messages).contains(poolIndex + ": stat: max_calls_per_channel_err = 6"); + assertThat(messages).contains(poolIndex + ": stat: num_calls_completed_ok = 17"); + assertThat(messages).contains(poolIndex + ": stat: num_calls_completed_err = 12"); + assertThat(messages).contains(poolIndex + ": stat: num_fallbacks_ok = 2"); + assertThat(messages).contains(poolIndex + ": stat: num_fallbacks_fail = 1"); + assertThat(messages).contains(poolIndex + ": stat: num_unresponsive_detections = 2"); + assertThat(messages.stream().filter(o -> o.toString().matches( + poolIndex + ": stat: min_unresponsive_detection_time = 1\\d\\d" + ) + ).count()).isEqualTo(1); + assertThat(messages.stream().filter(o -> o.toString().matches( + poolIndex + ": stat: max_unresponsive_detection_time = 1\\d\\d" + ) + ).count()).isEqualTo(1); + assertThat(messages).contains(poolIndex + ": stat: min_unresponsive_dropped_calls = 2"); + assertThat(messages).contains(poolIndex + ": stat: max_unresponsive_dropped_calls = 3"); + + assertThat(logRecords.size()).isEqualTo(34); + logRecords.forEach(logRecord -> + assertThat(logRecord.getLevel()).named(logRecord.getMessage()).isEqualTo(Level.FINE) + ); + + logRecords.clear(); + + // Next call should update minimums that was 0 previously (e.g., min_ready_channels, + // min_active_streams_per_channel, min_total_active_streams...). 
+ pool.logMetrics(); + + messages = Arrays.asList(logRecords.stream().map(LogRecord::getMessage).toArray()); + + assertThat(messages).contains(poolIndex + ": Active streams counts: [3, 2, 5, 7, 1]"); + assertThat(messages).contains(poolIndex + ": Affinity counts: [2, 3, 1, 1, 4]"); + + assertThat(messages).contains(poolIndex + ": stat: min_ready_channels = 1"); + assertThat(messages).contains(poolIndex + ": stat: max_ready_channels = 1"); + assertThat(messages).contains(poolIndex + ": stat: max_channels = 5"); + assertThat(messages).contains(poolIndex + ": stat: max_allowed_channels = 5"); + assertThat(messages).contains(poolIndex + ": stat: num_channel_disconnect = 0"); + assertThat(messages).contains(poolIndex + ": stat: num_channel_connect = 0"); + assertThat(messages).contains(poolIndex + ": stat: min_channel_readiness_time = 0"); + assertThat(messages).contains(poolIndex + ": stat: avg_channel_readiness_time = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_channel_readiness_time = 0"); + assertThat(messages).contains(poolIndex + ": stat: min_active_streams_per_channel = 1"); + assertThat(messages).contains(poolIndex + ": stat: max_active_streams_per_channel = 7"); + assertThat(messages).contains(poolIndex + ": stat: min_total_active_streams = 18"); + assertThat(messages).contains(poolIndex + ": stat: max_total_active_streams = 18"); + assertThat(messages).contains(poolIndex + ": stat: min_affinity_per_channel = 1"); + assertThat(messages).contains(poolIndex + ": stat: max_affinity_per_channel = 4"); + assertThat(messages).contains(poolIndex + ": stat: num_affinity = 11"); + assertThat(messages).contains(poolIndex + ": Ok calls: [0, 0, 0, 0, 0]"); + assertThat(messages).contains(poolIndex + ": Failed calls: [0, 0, 0, 0, 0]"); + assertThat(messages).contains(poolIndex + ": stat: min_calls_per_channel_ok = 0"); + assertThat(messages).contains(poolIndex + ": stat: min_calls_per_channel_err = 0"); + assertThat(messages).contains(poolIndex + ": stat: 
max_calls_per_channel_ok = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_calls_per_channel_err = 0"); + assertThat(messages).contains(poolIndex + ": stat: num_calls_completed_ok = 0"); + assertThat(messages).contains(poolIndex + ": stat: num_calls_completed_err = 0"); + assertThat(messages).contains(poolIndex + ": stat: num_fallbacks_ok = 0"); + assertThat(messages).contains(poolIndex + ": stat: num_fallbacks_fail = 0"); + assertThat(messages).contains(poolIndex + ": stat: num_unresponsive_detections = 0"); + assertThat(messages).contains(poolIndex + ": stat: min_unresponsive_detection_time = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_unresponsive_detection_time = 0"); + assertThat(messages).contains(poolIndex + ": stat: min_unresponsive_dropped_calls = 0"); + assertThat(messages).contains(poolIndex + ": stat: max_unresponsive_dropped_calls = 0"); + + assertThat(logRecords.size()).isEqualTo(34); + } finally { pool.shutdownNow(); + executorService.shutdownNow(); } } @Test public void testUnresponsiveDetection() throws InterruptedException { + // Watch debug messages. + testLogger.setLevel(Level.FINER); final FakeMetricRegistry fakeRegistry = new FakeMetricRegistry(); // Creating a pool with unresponsive connection detection for 100 ms, 3 dropped requests. final GcpManagedChannel pool = @@ -587,6 +1093,8 @@ public void testUnresponsiveDetection() throws InterruptedException { GcpMetricsOptions.newBuilder().withMetricRegistry(fakeRegistry).build()) .build()) .build(); + int currentIndex = GcpManagedChannel.channelPoolIndex.get(); + String poolIndex = String.format("pool-%d", currentIndex); final AtomicInteger idleCounter = new AtomicInteger(); ManagedChannel channel = new FakeIdleCountingManagedChannel(idleCounter); ChannelRef chRef = pool.new ChannelRef(channel, 0); @@ -605,23 +1113,52 @@ public void testUnresponsiveDetection() throws InterruptedException { // Reconnected after 3rd deadline exceeded. 
assertEquals(1, idleCounter.get()); + // Initial log messages count. + int logCount = logRecords.size(); + MetricsRecord record = fakeRegistry.pollRecord(); List> metric = record.getMetrics().get(GcpMetricsConstants.METRIC_NUM_UNRESPONSIVE_DETECTIONS); assertThat(metric.size()).isEqualTo(1); assertThat(metric.get(0).value()).isEqualTo(1L); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_NUM_UNRESPONSIVE_DETECTIONS + " = 1"); + metric = record.getMetrics().get(GcpMetricsConstants.METRIC_MIN_UNRESPONSIVE_DROPPED_CALLS); assertThat(metric.size()).isEqualTo(1); assertThat(metric.get(0).value()).isEqualTo(3L); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MIN_UNRESPONSIVE_DROPPED_CALLS + " = 3"); + metric = record.getMetrics().get(GcpMetricsConstants.METRIC_MAX_UNRESPONSIVE_DROPPED_CALLS); assertThat(metric.size()).isEqualTo(1); assertThat(metric.get(0).value()).isEqualTo(3L); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MAX_UNRESPONSIVE_DROPPED_CALLS + " = 3"); + metric = record.getMetrics().get(GcpMetricsConstants.METRIC_MIN_UNRESPONSIVE_DETECTION_TIME); assertThat(metric.size()).isEqualTo(1); assertThat(metric.get(0).value()).isAtLeast(100L); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).matches( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MIN_UNRESPONSIVE_DETECTION_TIME + + " = 1\\d\\d"); + metric = record.getMetrics().get(GcpMetricsConstants.METRIC_MAX_UNRESPONSIVE_DETECTION_TIME); 
assertThat(metric.size()).isEqualTo(1); assertThat(metric.get(0).value()).isAtLeast(100L); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + assertThat(lastLogMessage()).matches( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_MAX_UNRESPONSIVE_DETECTION_TIME + + " = 1\\d\\d"); // Any message from the server must reset the dropped requests count and timestamp. TimeUnit.MILLISECONDS.sleep(105); @@ -661,9 +1198,216 @@ public void testUnresponsiveDetection() throws InterruptedException { assertEquals(1, idleCounter.get()); TimeUnit.MILLISECONDS.sleep(105); - // Any subsequent deadline exceeded after 100ms must trigger the reconnect. + // Any subsequent deadline exceeded after 100ms must trigger the reconnection. chRef.activeStreamsCountDecr(startNanos, deStatus, false); assertEquals(2, idleCounter.get()); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogMessage()).matches( + poolIndex + ": Channel 0 connection is unresponsive for 1\\d\\d ms and 4 deadline " + + "exceeded calls. Forcing channel to idle state."); + assertThat(lastLogLevel()).isEqualTo(Level.FINER); + + // The cumulative num_unresponsive_detections metric must become 2. + metric = record.getMetrics().get(GcpMetricsConstants.METRIC_NUM_UNRESPONSIVE_DETECTIONS); + assertThat(metric.size()).isEqualTo(1); + assertThat(metric.get(0).value()).isEqualTo(2L); + assertThat(logRecords.size()).isEqualTo(++logCount); + // But the log metric count the detections since previous report for num_unresponsive_detections + // in the logs. It is always delta in the logs, not cumulative. + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_NUM_UNRESPONSIVE_DETECTIONS + " = 1"); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + // If we log it again the cumulative metric value must remain unchanged. 
+ metric = record.getMetrics().get(GcpMetricsConstants.METRIC_NUM_UNRESPONSIVE_DETECTIONS); + assertThat(metric.size()).isEqualTo(1); + assertThat(metric.get(0).value()).isEqualTo(2L); + assertThat(logRecords.size()).isEqualTo(++logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINE); + // But in the log it must post 0. + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": stat: " + GcpMetricsConstants.METRIC_NUM_UNRESPONSIVE_DETECTIONS + " = 0"); + } + + @Test + public void testStateNotifications() throws InterruptedException { + final AtomicBoolean immediateCallbackCalled = new AtomicBoolean(); + // Test callback is called when state doesn't match. + gcpChannel.notifyWhenStateChanged(ConnectivityState.SHUTDOWN, () -> + immediateCallbackCalled.set(true)); + + TimeUnit.MILLISECONDS.sleep(1); + + assertThat(immediateCallbackCalled.get()).isTrue(); + + // Subscribe for notification when leaving IDLE state. + final AtomicReference newState = new AtomicReference<>(); + + final Runnable callback = new Runnable() { + @Override + public void run() { + ConnectivityState state = gcpChannel.getState(false); + newState.set(state); + if (state.equals(ConnectivityState.IDLE)) { + gcpChannel.notifyWhenStateChanged(ConnectivityState.IDLE, this); + } + } + }; + + gcpChannel.notifyWhenStateChanged(ConnectivityState.IDLE, callback); + + // Init connection to move out of the IDLE state. 
+ ConnectivityState currentState = gcpChannel.getState(true); + // Make sure it was IDLE; + assertThat(currentState).isEqualTo(ConnectivityState.IDLE); + + TimeUnit.MILLISECONDS.sleep(5); + + assertThat(newState.get()) + .isAnyOf(ConnectivityState.CONNECTING, ConnectivityState.TRANSIENT_FAILURE); + } + + @Test + public void testParallelGetChannelRefWontExceedMaxSize() throws InterruptedException { + resetGcpChannel(); + GcpChannelPoolOptions poolOptions = GcpChannelPoolOptions.newBuilder() + .setMaxSize(2) + .setConcurrentStreamsLowWatermark(0) + .build(); + GcpManagedChannelOptions options = GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions(poolOptions) + .build(); + gcpChannel = + (GcpManagedChannel) + GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withOptions(options) + .build(); + + assertThat(gcpChannel.getNumberOfChannels()).isEqualTo(0); + assertThat(gcpChannel.getStreamsLowWatermark()).isEqualTo(0); + + for (int i = 0; i < gcpChannel.getMaxSize() - 1; i++) { + gcpChannel.getChannelRef(null); + } + + assertThat(gcpChannel.getNumberOfChannels()).isEqualTo(gcpChannel.getMaxSize() - 1); + + Runnable requestChannel = () -> gcpChannel.getChannelRef(null); + + int requestCount = gcpChannel.getMaxSize() * 3; + ExecutorService exec = Executors.newFixedThreadPool(requestCount); + for (int i = 0; i < requestCount; i++) { + exec.execute(requestChannel); + } + exec.shutdown(); + exec.awaitTermination(100, TimeUnit.MILLISECONDS); + + assertThat(gcpChannel.getNumberOfChannels()).isEqualTo(gcpChannel.getMaxSize()); + } + + @Test + public void testParallelGetChannelRefWontExceedMaxSizeFromTheStart() throws InterruptedException { + resetGcpChannel(); + GcpChannelPoolOptions poolOptions = GcpChannelPoolOptions.newBuilder() + .setMaxSize(2) + .setConcurrentStreamsLowWatermark(0) + .build(); + GcpManagedChannelOptions options = GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions(poolOptions) + .build(); + gcpChannel = + (GcpManagedChannel) + 
GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withOptions(options) + .build(); + + assertThat(gcpChannel.getNumberOfChannels()).isEqualTo(0); + assertThat(gcpChannel.getStreamsLowWatermark()).isEqualTo(0); + + Runnable requestChannel = () -> gcpChannel.getChannelRef(null); + + int requestCount = gcpChannel.getMaxSize() * 3; + ExecutorService exec = Executors.newFixedThreadPool(requestCount); + for (int i = 0; i < requestCount; i++) { + exec.execute(requestChannel); + } + exec.shutdown(); + exec.awaitTermination(100, TimeUnit.MILLISECONDS); + + assertThat(gcpChannel.getNumberOfChannels()).isEqualTo(gcpChannel.getMaxSize()); + } + + static class FakeManagedChannel extends ManagedChannel { + private ConnectivityState state = ConnectivityState.IDLE; + private Runnable stateCallback; + private final ExecutorService exec; + + FakeManagedChannel(ExecutorService exec) { + this.exec = exec; + } + + @Override + public void enterIdle() {} + + @Override + public ConnectivityState getState(boolean requestConnection) { + return state; + } + + public void setState(ConnectivityState state) { + if (state.equals(this.state)) { + return; + } + this.state = state; + if (stateCallback != null) { + exec.execute(stateCallback); + stateCallback = null; + } + } + + @Override + public void notifyWhenStateChanged(ConnectivityState source, Runnable callback) { + if (!source.equals(state)) { + exec.execute(callback); + return; + } + stateCallback = callback; + } + + @Override + public ManagedChannel shutdown() { + return null; + } + + @Override + public boolean isShutdown() { + return false; + } + + @Override + public boolean isTerminated() { + return false; + } + + @Override + public ManagedChannel shutdownNow() { + return null; + } + + @Override + public boolean awaitTermination(long timeout, TimeUnit unit) { + return false; + } + + @Override + public ClientCall newCall( + MethodDescriptor methodDescriptor, CallOptions callOptions) { + return null; + } + + @Override + public String 
authority() { + return null; + } } static class FakeIdleCountingManagedChannel extends ManagedChannel { diff --git a/grpc-gcp/src/test/java/com/google/cloud/grpc/SpannerIntegrationTest.java b/grpc-gcp/src/test/java/com/google/cloud/grpc/SpannerIntegrationTest.java index 2b657e45..780c6f58 100644 --- a/grpc-gcp/src/test/java/com/google/cloud/grpc/SpannerIntegrationTest.java +++ b/grpc-gcp/src/test/java/com/google/cloud/grpc/SpannerIntegrationTest.java @@ -16,12 +16,29 @@ package com.google.cloud.grpc; +import static com.google.cloud.grpc.GcpMultiEndpointChannel.ME_KEY; +import static com.google.cloud.spanner.SpannerOptions.CALL_CONTEXT_CONFIGURATOR_KEY; import static com.google.common.base.Preconditions.checkState; import static com.google.common.truth.Truth.assertThat; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import com.google.api.core.ApiFunction; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcTransportChannel; import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.FixedTransportChannelProvider; +import com.google.api.gax.rpc.TransportChannelProvider; import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.grpc.GcpManagedChannel.ChannelRef; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpChannelPoolOptions; +import com.google.cloud.grpc.GcpManagedChannelOptions.GcpMetricsOptions; +import com.google.cloud.grpc.MetricRegistryTestUtils.FakeMetricRegistry; +import com.google.cloud.grpc.MetricRegistryTestUtils.MetricsRecord; +import com.google.cloud.grpc.MetricRegistryTestUtils.PointWithFunction; +import com.google.cloud.grpc.proto.ApiConfig; import com.google.cloud.spanner.Database; import com.google.cloud.spanner.DatabaseAdminClient; import com.google.cloud.spanner.DatabaseClient; @@ -32,13 +49,17 @@ import 
com.google.cloud.spanner.InstanceConfigId; import com.google.cloud.spanner.InstanceId; import com.google.cloud.spanner.InstanceInfo; +import com.google.cloud.spanner.KeySet; import com.google.cloud.spanner.Mutation; import com.google.cloud.spanner.Spanner; import com.google.cloud.spanner.SpannerExceptionFactory; import com.google.cloud.spanner.SpannerOptions; +import com.google.cloud.spanner.SpannerOptions.CallContextConfigurator; import com.google.common.collect.Iterators; import com.google.common.util.concurrent.ListenableFuture; import com.google.protobuf.Empty; +import com.google.protobuf.util.JsonFormat; +import com.google.protobuf.util.JsonFormat.Parser; import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; import com.google.spanner.v1.BatchCreateSessionsRequest; @@ -62,23 +83,39 @@ import com.google.spanner.v1.SpannerGrpc.SpannerFutureStub; import com.google.spanner.v1.SpannerGrpc.SpannerStub; import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionOptions.ReadOnly; +import com.google.spanner.v1.TransactionOptions.ReadWrite; import com.google.spanner.v1.TransactionSelector; +import io.grpc.CallOptions; import io.grpc.ConnectivityState; +import io.grpc.Context; import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; +import io.grpc.MethodDescriptor; import io.grpc.StatusRuntimeException; import io.grpc.auth.MoreCallCredentials; import io.grpc.stub.StreamObserver; +import io.opencensus.metrics.LabelValue; import java.io.File; +import java.io.IOException; +import java.io.Reader; +import java.nio.file.Files; +import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import 
java.util.concurrent.TimeoutException; +import java.util.function.Function; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; import org.junit.After; import org.junit.AfterClass; import org.junit.Assume; @@ -94,6 +131,8 @@ @RunWith(JUnit4.class) public final class SpannerIntegrationTest { + private static final Logger testLogger = Logger.getLogger(GcpManagedChannel.class.getName()); + private final List logRecords = new LinkedList<>(); private static final String GCP_PROJECT_ID = System.getenv("GCP_PROJECT_ID"); private static final String INSTANCE_ID = "grpc-gcp-test-instance"; private static final String DB_NAME = "grpc-gcp-test-db"; @@ -107,9 +146,10 @@ public final class SpannerIntegrationTest { private static final int MAX_CHANNEL = 3; private static final int MAX_STREAM = 2; - private static final ManagedChannelBuilder builder = + private static final ManagedChannelBuilder builder = ManagedChannelBuilder.forAddress(SPANNER_TARGET, 443); private GcpManagedChannel gcpChannel; + private GcpManagedChannel gcpChannelBRR; @BeforeClass public static void beforeClass() { @@ -127,6 +167,31 @@ public static void beforeClass() { initializeTable(databaseClient); } + private String lastLogMessage() { + return lastLogMessage(1); + } + + private String lastLogMessage(int nthFromLast) { + return logRecords.get(logRecords.size() - nthFromLast).getMessage(); + } + + private Level lastLogLevel() { + return logRecords.get(logRecords.size() - 1).getLevel(); + } + + private final Handler testLogHandler = new Handler() { + @Override + public synchronized void publish(LogRecord record) { + logRecords.add(record); + } + + @Override + public void flush() {} + + @Override + public void close() throws SecurityException {} + }; + private static void initializeTable(DatabaseClient databaseClient) { List mutations = Arrays.asList( 
@@ -226,7 +291,7 @@ private SpannerBlockingStub getSpannerBlockingStub() { return stub; } - private static void deleteSession(SpannerGrpc.SpannerBlockingStub stub, Session session) { + private static void deleteSession(SpannerBlockingStub stub, Session session) { if (session != null) { stub.deleteSession(DeleteSessionRequest.newBuilder().setName(session.getName()).build()); } @@ -242,10 +307,35 @@ private SpannerStub getSpannerStub() { /** A wrapper of checking the status of each channelRef in the gcpChannel. */ private void checkChannelRefs(int channels, int streams, int affinities) { - assertEquals(channels, gcpChannel.channelRefs.size()); + checkChannelRefs(gcpChannel, channels, streams, affinities); + } + + private void checkChannelRefs(GcpManagedChannel gcpChannel, int channels, int streams, int affinities) { + assertEquals("Channel pool size mismatch.", channels, gcpChannel.channelRefs.size()); for (int i = 0; i < channels; i++) { - assertEquals(streams, gcpChannel.channelRefs.get(i).getActiveStreamsCount()); - assertEquals(affinities, gcpChannel.channelRefs.get(i).getAffinityCount()); + assertEquals( + String.format("Channel %d streams mismatch.", i), + streams, gcpChannel.channelRefs.get(i).getActiveStreamsCount() + ); + assertEquals( + String.format("Channel %d affinities mismatch.", i), + affinities, + gcpChannel.channelRefs.get(i).getAffinityCount() + ); + } + } + + private void checkChannelRefs(int[] streams, int[] affinities) { + for (int i = 0; i < streams.length; i++) { + assertEquals( + String.format("Channel %d streams mismatch.", i), + streams[i], gcpChannel.channelRefs.get(i).getActiveStreamsCount() + ); + assertEquals( + String.format("Channel %d affinities mismatch.", i), + affinities[i], + gcpChannel.channelRefs.get(i).getAffinityCount() + ); } } @@ -258,7 +348,7 @@ private List createAsyncSessions(SpannerStub stub) throws Exception { // Check CreateSession with multiple channels and streams, CreateSessionRequest req = 
CreateSessionRequest.newBuilder().setDatabase(DATABASE_PATH).build(); for (int i = 0; i < MAX_CHANNEL * MAX_STREAM; i++) { - AsyncResponseObserver resp = new AsyncResponseObserver(); + AsyncResponseObserver resp = new AsyncResponseObserver<>(); stub.createSession(req, resp); resps.add(resp); } @@ -279,7 +369,7 @@ private void deleteAsyncSessions(SpannerStub stub, List respNames) throw AsyncResponseObserver resp = new AsyncResponseObserver<>(); stub.deleteSession(DeleteSessionRequest.newBuilder().setName(respName).build(), resp); // The ChannelRef which is bound with the current affinity key. - GcpManagedChannel.ChannelRef currentChannel = + ChannelRef currentChannel = gcpChannel.affinityKeyToChannelRef.get(respName); // Verify the channel is in use. assertEquals(1, currentChannel.getActiveStreamsCount()); @@ -292,10 +382,12 @@ private void deleteAsyncSessions(SpannerStub stub, List respNames) throw /** Helper Functions for FutureStub. */ private SpannerFutureStub getSpannerFutureStub() { + return getSpannerFutureStub(gcpChannel); + } + + private SpannerFutureStub getSpannerFutureStub(GcpManagedChannel gcpChannel) { GoogleCredentials creds = getCreds(); - SpannerFutureStub stub = - SpannerGrpc.newFutureStub(gcpChannel).withCallCredentials(MoreCallCredentials.from(creds)); - return stub; + return SpannerGrpc.newFutureStub(gcpChannel).withCallCredentials(MoreCallCredentials.from(creds)); } private List createFutureSessions(SpannerFutureStub stub) throws Exception { @@ -327,7 +419,7 @@ private void deleteFutureSessions(SpannerFutureStub stub, List futureNam ListenableFuture future = stub.deleteSession(DeleteSessionRequest.newBuilder().setName(futureName).build()); // The ChannelRef which is bound with the current affinity key. - GcpManagedChannel.ChannelRef currentChannel = + ChannelRef currentChannel = gcpChannel.affinityKeyToChannelRef.get(futureName); // Verify the channel is in use. 
assertEquals(1, currentChannel.getActiveStreamsCount()); @@ -341,7 +433,8 @@ private void deleteFutureSessions(SpannerFutureStub stub, List futureNam @Rule public ExpectedException expectedEx = ExpectedException.none(); @Before - public void setupChannel() { + public void setupChannels() { + testLogger.addHandler(testLogHandler); File configFile = new File(SpannerIntegrationTest.class.getClassLoader().getResource(API_FILE).getFile()); gcpChannel = @@ -349,15 +442,287 @@ public void setupChannel() { GcpManagedChannelBuilder.forDelegateBuilder(builder) .withApiConfigJsonFile(configFile) .build(); + gcpChannelBRR = + (GcpManagedChannel) + GcpManagedChannelBuilder.forDelegateBuilder(builder) + .withApiConfigJsonFile(configFile) + .withOptions(GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions( + GcpChannelPoolOptions.newBuilder() + .setMaxSize(MAX_CHANNEL) + .setConcurrentStreamsLowWatermark(MAX_STREAM) + .setUseRoundRobinOnBind(true) + .build()) + .build()) + .build(); } @After - public void shutdownChannel() { + public void shutdownChannels() { + testLogger.removeHandler(testLogHandler); + testLogger.setLevel(Level.INFO); + logRecords.clear(); gcpChannel.shutdownNow(); + gcpChannelBRR.shutdownNow(); + } + + private long getOkCallsCount( + FakeMetricRegistry fakeRegistry, String endpoint) { + MetricsRecord record = fakeRegistry.pollRecord(); + List> metric = + record.getMetrics().get(GcpMetricsConstants.METRIC_NUM_CALLS_COMPLETED); + for (PointWithFunction m : metric) { + assertThat(m.keys().get(0).getKey()).isEqualTo("result"); + assertThat(m.keys().get(1).getKey()).isEqualTo("endpoint"); + if (!m.values().get(0).equals(LabelValue.create(GcpMetricsConstants.RESULT_SUCCESS))) { + continue; + } + if (!m.values().get(1).equals(LabelValue.create(endpoint))) { + continue; + } + return m.value(); + } + fail("Success calls metric is not found for endpoint: " + endpoint); + return 0; } + // For this test we'll create a Spanner client with gRPC-GCP MultiEndpoint 
feature. + // + // Imagine we have a multi-region Spanner instance with leader in the us-east4 and follower in the + // us-east1 regions. + // + // We will provide two MultiEndpoint configs: "leader" (having leader region endpoint first and + // follower second) and "follower" (having follower region endpoint first and leader second). + // + // Then we'll make sure the Spanner client uses leader MultiEndpoint as a default one and creates + // its sessions there. Then we'll make sure a read request will also use the leader MultiEndpoint + // by default. + // + // Then we'll verify we can use the follower MultiEndpoint when needed by specifying that in + // the Spanner context. + // + // Then we'll update MultiEndpoints configuration by replacing the leader endpoint and renaming + // the follower MultiEndpoint. And make sure the new leader endpoint and the previous follower + // endpoint are still working as expected when using different MultiEndpoints. @Test - public void testCreateAndGetSessionBlocking() throws Exception { + public void testSpannerMultiEndpointClient() throws IOException, InterruptedException { + // Watch debug messages. + testLogger.setLevel(Level.FINEST); + + final FakeMetricRegistry fakeRegistry = new FakeMetricRegistry(); + + File configFile = + new File(SpannerIntegrationTest.class.getClassLoader().getResource(API_FILE).getFile()); + + // Leader-first multi-endpoint endpoints. + final List leaderEndpoints = new ArrayList<>(); + // Follower-first multi-endpoint endpoints. 
+ final List followerEndpoints = new ArrayList<>(); + final String leaderEndpoint = "us-east4.googleapis.com:443"; + final String followerEndpoint = "us-east1.googleapis.com:443"; + leaderEndpoints.add(leaderEndpoint); + leaderEndpoints.add(followerEndpoint); + followerEndpoints.add(followerEndpoint); + followerEndpoints.add(leaderEndpoint); + + ApiFunction, ManagedChannelBuilder> configurator = input -> input.overrideAuthority( + SPANNER_TARGET); + + GcpMultiEndpointOptions leaderOpts = GcpMultiEndpointOptions.newBuilder(leaderEndpoints) + .withName("leader") + .withChannelConfigurator(configurator) + .withRecoveryTimeout(Duration.ofSeconds(3)) + .build(); + + GcpMultiEndpointOptions followerOpts = GcpMultiEndpointOptions.newBuilder(followerEndpoints) + .withName("follower") + .withChannelConfigurator(configurator) + .withRecoveryTimeout(Duration.ofSeconds(3)) + .build(); + + List opts = new ArrayList<>(); + opts.add(leaderOpts); + opts.add(followerOpts); + + Parser parser = JsonFormat.parser(); + ApiConfig.Builder apiConfig = ApiConfig.newBuilder(); + Reader reader = Files.newBufferedReader(configFile.toPath(), UTF_8); + parser.merge(reader, apiConfig); + + GcpMultiEndpointChannel gcpMultiEndpointChannel = new GcpMultiEndpointChannel( + opts, + apiConfig.build(), + GcpManagedChannelOptions.newBuilder() + .withChannelPoolOptions(GcpChannelPoolOptions.newBuilder() + .setConcurrentStreamsLowWatermark(0) + .setMaxSize(3) + .build()) + .withMetricsOptions(GcpMetricsOptions.newBuilder() + .withMetricRegistry(fakeRegistry) + .build()) + .build()); + + final int currentIndex = GcpManagedChannel.channelPoolIndex.get(); + final String followerPoolIndex = String.format("pool-%d", currentIndex); + final String leaderPoolIndex = String.format("pool-%d", currentIndex - 1); + + // Make sure authorities are overridden by channel configurator. 
+ assertThat(gcpMultiEndpointChannel.authority()).isEqualTo(SPANNER_TARGET); + assertThat(gcpMultiEndpointChannel.authorityFor("leader")) + .isEqualTo(SPANNER_TARGET); + assertThat(gcpMultiEndpointChannel.authorityFor("follower")) + .isEqualTo(SPANNER_TARGET); + assertThat(gcpMultiEndpointChannel.authorityFor("no-such-name")).isNull(); + + TimeUnit.MILLISECONDS.sleep(200); + + List logMessages = logRecords.stream() + .map(LogRecord::getMessage).collect(Collectors.toList()); + + // Make sure min channels are created and connections are established right away in both pools. + for (String poolIndex : Arrays.asList(leaderPoolIndex, followerPoolIndex)) { + for (int i = 0; i < 2; i++) { + assertThat(logMessages).contains( + poolIndex + ": Channel " + i + " state change detected: null -> IDLE"); + + assertThat(logMessages).contains( + poolIndex + ": Channel " + i + " state change detected: IDLE -> CONNECTING"); + } + } + + // Make sure endpoint is set as a metric label for each pool. + assertThat(logRecords.stream().filter(logRecord -> + logRecord.getMessage().matches( + leaderPoolIndex + ": Metrics options: \\{namePrefix: \"\", labels: \\[endpoint: " + + "\"" + leaderEndpoint + "\"], metricRegistry: .*" + )).count()).isEqualTo(1); + + assertThat(logRecords.stream().filter(logRecord -> + logRecord.getMessage().matches( + followerPoolIndex + ": Metrics options: \\{namePrefix: \"\", labels: \\[endpoint: " + + "\"" + followerEndpoint + "\"], metricRegistry: .*" + )).count()).isEqualTo(1); + + logRecords.clear(); + + TransportChannelProvider channelProvider = FixedTransportChannelProvider.create( + GrpcTransportChannel.create(gcpMultiEndpointChannel)); + + SpannerOptions.Builder options = SpannerOptions.newBuilder().setProjectId(GCP_PROJECT_ID); + + options.setChannelProvider(channelProvider); + // Match channel pool size. 
+ options.setNumChannels(3); + + Spanner spanner = options.build().getService(); + InstanceId instanceId = InstanceId.of(GCP_PROJECT_ID, INSTANCE_ID); + DatabaseId databaseId = DatabaseId.of(instanceId, DB_NAME); + DatabaseClient databaseClient = spanner.getDatabaseClient(databaseId); + + Runnable readQuery = () -> { + try (com.google.cloud.spanner.ResultSet read = databaseClient.singleUse() + .read("Users", KeySet.all(), Arrays.asList("UserId", "UserName"))) { + int readRows = 0; + while (read.next()) { + readRows++; + assertEquals(USERNAME, read.getCurrentRowAsStruct().getString("UserName")); + } + assertEquals(1, readRows); + } + }; + + // Make sure leader endpoint is used by default. + assertThat(getOkCallsCount(fakeRegistry, leaderEndpoint)).isEqualTo(0); + readQuery.run(); + + // Wait for sessions creation requests to be completed but no more than 10 seconds. + for (int i = 0; i < 20; i++) { + TimeUnit.MILLISECONDS.sleep(500); + if (getOkCallsCount(fakeRegistry, leaderEndpoint) == 4) { + break; + } + } + + // 3 session creation requests + 1 our read request to the leader endpoint. + assertThat(getOkCallsCount(fakeRegistry, leaderEndpoint)).isEqualTo(4); + + // Make sure there were 3 session creation requests in the leader pool only. + assertThat(logRecords.stream().filter(logRecord -> + logRecord.getMessage().matches( + leaderPoolIndex + ": Binding \\d+ key\\(s\\) to channel \\d:.*" + )).count()).isEqualTo(3); + + assertThat(logRecords.stream().filter(logRecord -> + logRecord.getMessage().matches( + followerPoolIndex + ": Binding \\d+ key\\(s\\) to channel \\d:.*" + )).count()).isEqualTo(0); + + // Create context for using follower-first multi-endpoint. 
+ Function contextFor = meName -> Context.current() + .withValue(CALL_CONTEXT_CONFIGURATOR_KEY, + new CallContextConfigurator() { + @Nullable + @Override + public ApiCallContext configure(ApiCallContext context, ReqT request, + MethodDescriptor method) { + return context.merge(GrpcCallContext.createDefault().withCallOptions( + CallOptions.DEFAULT.withOption(ME_KEY, meName))); + } + }); + + assertThat(getOkCallsCount(fakeRegistry, followerEndpoint)).isEqualTo(0); + // Use follower, make sure it is used. + contextFor.apply("follower").run(readQuery); + assertThat(getOkCallsCount(fakeRegistry, followerEndpoint)).isEqualTo(1); + + // Replace leader endpoints. + final String newLeaderEndpoint = "us-west1.googleapis.com:443"; + leaderEndpoints.clear(); + leaderEndpoints.add(newLeaderEndpoint); + leaderEndpoints.add(followerEndpoint); + leaderOpts = GcpMultiEndpointOptions.newBuilder(leaderEndpoints) + .withName("leader") + .withChannelConfigurator(configurator) + .build(); + + followerEndpoints.clear(); + followerEndpoints.add(followerEndpoint); + followerEndpoints.add(newLeaderEndpoint); + + // Rename follower MultiEndpoint. + followerOpts = GcpMultiEndpointOptions.newBuilder(followerEndpoints) + .withName("follower-2") + .withChannelConfigurator(configurator) + .build(); + + opts.clear(); + opts.add(leaderOpts); + opts.add(followerOpts); + + gcpMultiEndpointChannel.setMultiEndpoints(opts); + + // As it takes some time to connect to the new leader endpoint, RPC will fall back to the + // follower until we connect to leader. + assertThat(getOkCallsCount(fakeRegistry, followerEndpoint)).isEqualTo(1); + readQuery.run(); + assertThat(getOkCallsCount(fakeRegistry, followerEndpoint)).isEqualTo(2); + + TimeUnit.MILLISECONDS.sleep(500); + + // Make sure the new leader endpoint is used by default after it is connected. 
+ assertThat(getOkCallsCount(fakeRegistry, newLeaderEndpoint)).isEqualTo(0); + readQuery.run(); + assertThat(getOkCallsCount(fakeRegistry, newLeaderEndpoint)).isEqualTo(1); + + // Make sure that the follower endpoint still works if specified. + assertThat(getOkCallsCount(fakeRegistry, followerEndpoint)).isEqualTo(2); + // Use follower, make sure it is used. + contextFor.apply("follower-2").run(readQuery); + assertThat(getOkCallsCount(fakeRegistry, followerEndpoint)).isEqualTo(3); + } + + @Test + public void testCreateAndGetSessionBlocking() { SpannerBlockingStub stub = getSpannerBlockingStub(); CreateSessionRequest req = CreateSessionRequest.newBuilder().setDatabase(DATABASE_PATH).build(); // The first MAX_CHANNEL requests (without affinity) should be distributed 1 per channel. @@ -380,7 +745,7 @@ public void testCreateAndGetSessionBlocking() throws Exception { } @Test - public void testBatchCreateSessionsBlocking() throws Exception { + public void testBatchCreateSessionsBlocking() { int sessionCount = 10; SpannerBlockingStub stub = getSpannerBlockingStub(); BatchCreateSessionsRequest req = @@ -403,6 +768,130 @@ public void testBatchCreateSessionsBlocking() throws Exception { checkChannelRefs(MAX_CHANNEL, 0, 0); } + @Test + public void testSessionsCreatedUsingRoundRobin() throws Exception { + SpannerFutureStub stub = getSpannerFutureStub(gcpChannelBRR); + List> futures = new ArrayList<>(); + assertEquals(ConnectivityState.IDLE, gcpChannelBRR.getState(false)); + + // Should create one session per channel. + CreateSessionRequest req = CreateSessionRequest.newBuilder().setDatabase(DATABASE_PATH).build(); + for (int i = 0; i < MAX_CHANNEL; i++) { + ListenableFuture future = stub.createSession(req); + futures.add(future); + } + // If round-robin in use as expected, then each channel should have 1 active stream with the CreateSession request. + checkChannelRefs(gcpChannelBRR, MAX_CHANNEL, 1, 0); + + // Collecting futures results. 
+ String lastSession = ""; + for (ListenableFuture future : futures) { + lastSession = future.get().getName(); + } + // Since createSession will bind the key, check the number of keys bound with channels. + // Each channel should have 1 affinity key. + assertEquals(MAX_CHANNEL, gcpChannelBRR.affinityKeyToChannelRef.size()); + checkChannelRefs(gcpChannelBRR, MAX_CHANNEL, 0, 1); + + // Create a different request with the lastSession created. + ListenableFuture responseFuture = + stub.executeSql( + ExecuteSqlRequest.newBuilder() + .setSession(lastSession) + .setSql("select * FROM Users") + .build()); + // The ChannelRef which is bound with the lastSession. + ChannelRef currentChannel = + gcpChannelBRR.affinityKeyToChannelRef.get(lastSession); + // Verify the channel is in use. + assertEquals(1, currentChannel.getActiveStreamsCount()); + + // Create another 1 session per channel sequentially. + // Without the round-robin it won't use the currentChannel as it has more active streams (1) than other channels. + // But with round-robin each channel should get one create session request. + for (int i = 0; i < MAX_CHANNEL; i++) { + ListenableFuture future = stub.createSession(req); + future.get(); + } + ResultSet response = responseFuture.get(); + + // If round-robin in use, then each channel should now have 2 active stream with the CreateSession request. + checkChannelRefs(gcpChannelBRR, MAX_CHANNEL, 0, 2); + } + + @Test + public void testSessionsCreatedWithoutRoundRobin() throws Exception { + // Watch debug messages. + testLogger.setLevel(Level.FINEST); + final int currentIndex = GcpManagedChannel.channelPoolIndex.get() - 1; + final String poolIndex = String.format("pool-%d", currentIndex); + + SpannerFutureStub stub = getSpannerFutureStub(); + List> futures = new ArrayList<>(); + assertEquals(ConnectivityState.IDLE, gcpChannel.getState(false)); + + // Initial log messages count. + int logCount = logRecords.size(); + + // Should create one session per channel. 
+ CreateSessionRequest req = CreateSessionRequest.newBuilder().setDatabase(DATABASE_PATH).build(); + for (int i = 0; i < MAX_CHANNEL; i++) { + ListenableFuture future = stub.createSession(req); + futures.add(future); + assertThat(lastLogMessage(3)).isEqualTo( + poolIndex + ": Channel " + i + " state change detected: null -> IDLE"); + assertThat(lastLogMessage(2)).isEqualTo( + poolIndex + ": Channel " + i + " created."); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Channel " + i + " picked for bind operation."); + logCount += 3; + assertThat(logRecords.size()).isEqualTo(logCount); + assertThat(lastLogLevel()).isEqualTo(Level.FINEST); + } + // Each channel should have 1 active stream with the CreateSession request because we create them concurrently. + checkChannelRefs(gcpChannel, MAX_CHANNEL, 1, 0); + + // Collecting futures results. + String lastSession = ""; + for (ListenableFuture future : futures) { + lastSession = future.get().getName(); + } + // Since createSession will bind the key, check the number of keys bound with channels. + // Each channel should have 1 affinity key. + assertEquals(MAX_CHANNEL, gcpChannel.affinityKeyToChannelRef.size()); + checkChannelRefs(MAX_CHANNEL, 0, 1); + + // Create a different request with the lastSession created. + ListenableFuture responseFuture = + stub.executeSql( + ExecuteSqlRequest.newBuilder() + .setSession(lastSession) + .setSql("select * FROM Users") + .build()); + // The ChannelRef which is bound with the lastSession. + ChannelRef currentChannel = + gcpChannel.affinityKeyToChannelRef.get(lastSession); + // Verify the channel is in use. + assertEquals(1, currentChannel.getActiveStreamsCount()); + + logCount = logRecords.size(); + + // Create another 1 session per channel sequentially. + // Without the round-robin it won't use the currentChannel as it has more active streams (1) than other channels. 
+ for (int i = 0; i < MAX_CHANNEL; i++) { + ListenableFuture future = stub.createSession(req); + assertThat(lastLogMessage()).isEqualTo( + poolIndex + ": Channel 0 picked for bind operation."); + assertThat(logRecords.size()).isEqualTo(++logCount); + future.get(); + logCount++; // For session mapping log message. + } + ResultSet response = responseFuture.get(); + + // Without round-robin the first channel will get all additional 3 sessions. + checkChannelRefs(new int[]{0, 0, 0}, new int[]{4, 1, 1}); + } + @Test public void testListSessionsFuture() throws Exception { SpannerFutureStub stub = getSpannerFutureStub(); @@ -437,7 +926,7 @@ public void testExecuteSqlFuture() throws Exception { .setSql("select * FROM Users") .build()); // The ChannelRef which is bound with the current affinity key. - GcpManagedChannel.ChannelRef currentChannel = + ChannelRef currentChannel = gcpChannel.affinityKeyToChannelRef.get(futureName); // Verify the channel is in use. assertEquals(1, currentChannel.getActiveStreamsCount()); @@ -459,7 +948,7 @@ public void testExecuteStreamingSqlAsync() throws Exception { ExecuteSqlRequest.newBuilder().setSession(respName).setSql("select * FROM Users").build(), resp); // The ChannelRef which is bound with the current affinity key. - GcpManagedChannel.ChannelRef currentChannel = + ChannelRef currentChannel = gcpChannel.affinityKeyToChannelRef.get(respName); // Verify the channel is in use. 
assertEquals(1, currentChannel.getActiveStreamsCount()); @@ -477,7 +966,7 @@ public void testPartitionQueryAsync() throws Exception { for (String respName : respNames) { TransactionOptions options = TransactionOptions.newBuilder() - .setReadOnly(TransactionOptions.ReadOnly.getDefaultInstance()) + .setReadOnly(ReadOnly.getDefaultInstance()) .build(); TransactionSelector selector = TransactionSelector.newBuilder().setBegin(options).build(); AsyncResponseObserver resp = new AsyncResponseObserver<>(); @@ -489,7 +978,7 @@ public void testPartitionQueryAsync() throws Exception { .build(), resp); // The ChannelRef which is bound with the current affinity key. - GcpManagedChannel.ChannelRef currentChannel = + ChannelRef currentChannel = gcpChannel.affinityKeyToChannelRef.get(respName); // Verify the channel is in use. assertEquals(1, currentChannel.getActiveStreamsCount()); @@ -506,7 +995,7 @@ public void testExecuteBatchDmlFuture() throws Exception { for (String futureName : futureNames) { TransactionOptions options = TransactionOptions.newBuilder() - .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance()) + .setReadWrite(ReadWrite.getDefaultInstance()) .build(); TransactionSelector selector = TransactionSelector.newBuilder().setBegin(options).build(); // Will use only one session for the whole batch. @@ -518,7 +1007,7 @@ public void testExecuteBatchDmlFuture() throws Exception { .addStatements(Statement.newBuilder().setSql("select * FROM Users").build()) .build()); // The ChannelRef which is bound with the current affinity key. - GcpManagedChannel.ChannelRef currentChannel = + ChannelRef currentChannel = gcpChannel.affinityKeyToChannelRef.get(futureName); // Verify the channel is in use. 
assertEquals(1, currentChannel.getActiveStreamsCount()); @@ -568,7 +1057,7 @@ private static class AsyncResponseObserver implements StreamObserver threeEndpoints = + new ArrayList<>(ImmutableList.of("first", "second", "third")); + + private final List fourEndpoints = + new ArrayList<>(ImmutableList.of("four", "first", "third", "second")); + + private static final long RECOVERY_MS = 1000; + + @Rule + public ExpectedException expectedEx = ExpectedException.none(); + + private MultiEndpoint initPlain(List endpoints) { + return new MultiEndpoint.Builder(endpoints).build(); + } + + private MultiEndpoint initWithRecovery(List endpoints, long recoveryTimeOut) { + return new MultiEndpoint.Builder(endpoints) + .withRecoveryTimeout(Duration.ofMillis(recoveryTimeOut)) + .build(); + } + + @Test + public void initPlain_raisesErrorWhenEmptyEndpoints() { + expectedEx.expect(IllegalArgumentException.class); + expectedEx.expectMessage("Endpoints list must not be empty."); + initPlain(ImmutableList.of()); + } + + @Test + public void initWithRecovery_raisesErrorWhenEmptyEndpoints() { + expectedEx.expect(IllegalArgumentException.class); + expectedEx.expectMessage("Endpoints list must not be empty."); + initWithRecovery(ImmutableList.of(), RECOVERY_MS); + } + + @Test + public void getCurrent_returnsTopPriorityAvailableEndpointWithoutRecovery() { + MultiEndpoint multiEndpoint = initPlain(threeEndpoints); + + // Returns first after creation. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(0)); + + // Second becomes available. + multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true); + + // Second is the current as the only available. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1)); + + // Third becomes available. + multiEndpoint.setEndpointAvailable(threeEndpoints.get(2), true); + + // Second is still the current because it has higher priority. 
+ assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1)); + + // First becomes available. + multiEndpoint.setEndpointAvailable(threeEndpoints.get(0), true); + + // First becomes the current because it has higher priority. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(0)); + + // Second becomes unavailable. + multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), false); + + // Second becoming unavailable should not affect the current first. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(0)); + + // First becomes unavailable. + multiEndpoint.setEndpointAvailable(threeEndpoints.get(0), false); + + // Third becomes the current as the only remaining available. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(2)); + + // Third becomes unavailable. + multiEndpoint.setEndpointAvailable(threeEndpoints.get(2), false); + + // After all endpoints became unavailable the multiEndpoint sticks to the last used endpoint. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(2)); + } + + @Test + public void getCurrent_returnsTopPriorityAvailableEndpointWithRecovery() + throws InterruptedException { + MultiEndpoint multiEndpoint = initWithRecovery(threeEndpoints, RECOVERY_MS); + + // Returns first after creation. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(0)); + + // Second becomes available. + multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true); + + // First is still the current to allow it to become available within recovery timeout. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(0)); + + // After recovery timeout has passed. + Sleeper.DEFAULT.sleep(RECOVERY_MS + 100); + + // Second becomes current as an available endpoint with top priority. + assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1)); + + // Third becomes available. 
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(2), true);
+
+    // Second is still the current because it has higher priority.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1));
+
+    // Second becomes unavailable.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), false);
+
+    // Second is still current, allowing up to recoveryTimeout to recover.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1));
+
+    // Halfway through recovery timeout the second recovers.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS / 2);
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true);
+
+    // Second is the current.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1));
+
+    // After the initial recovery timeout, the second is still current.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS / 2 + 100);
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1));
+
+    // Second becomes unavailable.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), false);
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // Changes to an available endpoint -- third.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(2));
+
+    // First becomes available.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(0), true);
+
+    // First becomes current immediately.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(0));
+
+    // First becomes unavailable.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(0), false);
+
+    // First is still current, allowing up to recoveryTimeout to recover.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(0));
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // Changes to an available endpoint -- third.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(2));
+
+    // Third becomes unavailable.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(2), false);
+
+    // Third is still current, allowing up to recoveryTimeout to recover.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(2));
+
+    // Halfway through recovery timeout the second becomes available.
+    // Sleeper.defaultSleeper().sleep(Duration.ofMillis(RECOVERY_MS - 100));
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true);
+
+    // Second becomes current immediately.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1));
+
+    // Second becomes unavailable.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), false);
+
+    // Second is still current, allowing up to recoveryTimeout to recover.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1));
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // After all endpoints became unavailable the multiEndpoint sticks to the last used endpoint.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(threeEndpoints.get(1));
+  }
+
+  @Test
+  public void setEndpoints_raisesErrorWhenEmptyEndpoints() {
+    MultiEndpoint multiEndpoint = initPlain(threeEndpoints);
+    expectedEx.expect(IllegalArgumentException.class);
+    multiEndpoint.setEndpoints(ImmutableList.of());
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpoints() {
+    MultiEndpoint multiEndpoint = initPlain(threeEndpoints);
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "first" which is now under index 1 is still current because no other endpoint is available.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(1));
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpointsWithRecovery() {
+    MultiEndpoint multiEndpoint = initWithRecovery(threeEndpoints, RECOVERY_MS);
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "first" which is now under index 1 is still current because no other endpoint is available.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(1));
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpointsPreservingStates() {
+    MultiEndpoint multiEndpoint = initPlain(threeEndpoints);
+
+    // Second is available.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true);
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "second" which is now under index 3 still must remain available.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(3));
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpointsPreservingStatesWithRecovery()
+      throws InterruptedException {
+    MultiEndpoint multiEndpoint = initWithRecovery(threeEndpoints, RECOVERY_MS);
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // Second is available.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true);
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "second" which is now under index 3 still must remain available.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(3));
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpointsSwitchToTopPriorityAvailable() {
+    MultiEndpoint multiEndpoint = initPlain(threeEndpoints);
+
+    // Second and third are available.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true);
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(2), true);
+
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "third" which is now under index 2 must become current, because "second" has lower priority.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(2));
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpointsSwitchToTopPriorityAvailableWithRecovery()
+      throws InterruptedException {
+    MultiEndpoint multiEndpoint = initWithRecovery(threeEndpoints, RECOVERY_MS);
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // Second and third are available.
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(1), true);
+    multiEndpoint.setEndpointAvailable(threeEndpoints.get(2), true);
+
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "third" which is now under index 2 must become current, because "second" has lower priority.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(2));
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpointsRemovesOnlyActiveEndpoint() {
+    List<String> extraEndpoints = new ArrayList<>(threeEndpoints);
+    extraEndpoints.add("extra");
+    MultiEndpoint multiEndpoint = initPlain(extraEndpoints);
+
+    // Extra is available.
+    multiEndpoint.setEndpointAvailable("extra", true);
+
+    // Extra is removed.
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "four" which is under index 0 must become current, because no endpoints are available.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(0));
+  }
+
+  @Test
+  public void setEndpoints_updatesEndpointsRemovesOnlyActiveEndpointWithRecovery()
+      throws InterruptedException {
+    List<String> extraEndpoints = new ArrayList<>(threeEndpoints);
+    extraEndpoints.add("extra");
+    MultiEndpoint multiEndpoint = initWithRecovery(extraEndpoints, RECOVERY_MS);
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // Extra is available.
+    multiEndpoint.setEndpointAvailable("extra", true);
+
+    // Extra is removed.
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "four" which is under index 0 must become current, because no endpoints are available.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(0));
+  }
+
+  @Test
+  public void setEndpoints_recoveringEndpointGetsRemoved() throws InterruptedException {
+    List<String> extraEndpoints = new ArrayList<>(threeEndpoints);
+    extraEndpoints.add("extra");
+    MultiEndpoint multiEndpoint = initWithRecovery(extraEndpoints, RECOVERY_MS);
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // Extra is available.
+    multiEndpoint.setEndpointAvailable("extra", true);
+
+    // Extra is recovering.
+    multiEndpoint.setEndpointAvailable("extra", false);
+
+    // Extra is removed.
+    multiEndpoint.setEndpoints(fourEndpoints);
+
+    // "four" which is under index 0 must become current, because no endpoints are available.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(0));
+
+    // After recovery timeout has passed.
+    Sleeper.DEFAULT.sleep(RECOVERY_MS + 100);
+
+    // "four" is still current.
+    assertThat(multiEndpoint.getCurrentId()).isEqualTo(fourEndpoints.get(0));
+  }
+}