diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java new file mode 100644 index 000000000000..ef37267d724e --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractReadContext.java @@ -0,0 +1,556 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractResultSet.CloseableIterator; +import com.google.cloud.spanner.AbstractResultSet.GrpcResultSet; +import com.google.cloud.spanner.AbstractResultSet.GrpcStreamIterator; +import com.google.cloud.spanner.AbstractResultSet.ResumableStreamIterator; +import com.google.cloud.spanner.Options.QueryOption; +import com.google.cloud.spanner.Options.ReadOption; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.ReadRequest; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionSelector; +import io.opencensus.trace.Span; +import io.opencensus.trace.Tracing; +import java.util.Map; +import java.util.concurrent.Callable; +import java.util.concurrent.atomic.AtomicLong; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** + * Abstract base class for all {@link ReadContext}s + concrete implementations of read-only {@link + * ReadContext}s. + */ +abstract class AbstractReadContext + implements ReadContext, AbstractResultSet.Listener, SessionTransaction { + /** + * A {@code ReadContext} for standalone reads. This can only be used for a single operation, since + * each standalone read may see a different timestamp of Cloud Spanner data. 
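+ * <p>A sketch of typical usage, assuming an existing {@code DatabaseClient} named {@code client} (identifiers below are illustrative): + * <pre>{@code + * try (ResultSet rs = + *     client.singleUse(TimestampBound.strong()) + *         .read("Albums", KeySet.all(), Arrays.asList("AlbumId", "AlbumTitle"))) { + *   while (rs.next()) { + *     // All rows are served at the single timestamp chosen for this read. + *   } + * } + * }</pre>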
+ */ + static class SingleReadContext extends AbstractReadContext { + final TimestampBound bound; + + @GuardedBy("lock") + private boolean used; + + SingleReadContext( + SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) { + super(session, rpc, defaultPrefetchChunks); + this.bound = bound; + } + + @GuardedBy("lock") + @Override + void beforeReadOrQueryLocked() { + super.beforeReadOrQueryLocked(); + checkState(!used, "Cannot use a single-read ReadContext for multiple reads"); + used = true; + } + + @Override + @Nullable + TransactionSelector getTransactionSelector() { + if (bound.getMode() == TimestampBound.Mode.STRONG) { + // Default mode: no need to specify a transaction. + return null; + } + return TransactionSelector.newBuilder() + .setSingleUse(TransactionOptions.newBuilder().setReadOnly(bound.toProto())) + .build(); + } + } + + private static void assertTimestampAvailable(boolean available) { + checkState(available, "Method can only be called after read has returned data or finished"); + } + + static class SingleUseReadOnlyTransaction extends SingleReadContext + implements ReadOnlyTransaction { + @GuardedBy("lock") + private Timestamp timestamp; + + SingleUseReadOnlyTransaction( + SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) { + super(session, bound, rpc, defaultPrefetchChunks); + } + + @Override + public Timestamp getReadTimestamp() { + synchronized (lock) { + assertTimestampAvailable(timestamp != null); + return timestamp; + } + } + + @Override + @Nullable + TransactionSelector getTransactionSelector() { + TransactionOptions.Builder options = TransactionOptions.newBuilder(); + bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true); + return TransactionSelector.newBuilder().setSingleUse(options).build(); + } + + @Override + public void onTransactionMetadata(Transaction transaction) { + synchronized (lock) { + if (!transaction.hasReadTimestamp()) { + throw newSpannerException( + ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field"); + } + try { + timestamp = Timestamp.fromProto(transaction.getReadTimestamp()); + } catch (IllegalArgumentException e) { + throw newSpannerException( + ErrorCode.INTERNAL, "Bad value in transaction.read_timestamp metadata field", e); + } + } + } + } + + static class MultiUseReadOnlyTransaction extends AbstractReadContext + implements ReadOnlyTransaction { + private TimestampBound bound; + private final Object txnLock = new Object(); + + @GuardedBy("txnLock") + private Timestamp timestamp; + + @GuardedBy("txnLock") + private ByteString transactionId; + + MultiUseReadOnlyTransaction( + SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) { + super(session, rpc, defaultPrefetchChunks); + checkArgument( + bound.getMode() != TimestampBound.Mode.MAX_STALENESS + && bound.getMode() != TimestampBound.Mode.MIN_READ_TIMESTAMP, + "Bounded staleness mode %s is not supported for multi-use read-only transactions." 
+ + " Create a single-use read or read-only transaction instead.", + bound.getMode()); + this.bound = bound; + } + + MultiUseReadOnlyTransaction( + SessionImpl session, + ByteString transactionId, + Timestamp timestamp, + SpannerRpc rpc, + int defaultPrefetchChunks) { + super(session, rpc, defaultPrefetchChunks); + this.transactionId = transactionId; + this.timestamp = timestamp; + } + + @Override + void beforeReadOrQuery() { + super.beforeReadOrQuery(); + initTransaction(); + } + + @Override + @Nullable + TransactionSelector getTransactionSelector() { + // No need for synchronization: super.readInternal() is always preceded by a check of + // "transactionId" that provides a happens-before from initialization, and the value is never + // changed afterwards. + @SuppressWarnings("GuardedByChecker") + TransactionSelector selector = TransactionSelector.newBuilder().setId(transactionId).build(); + return selector; + } + + @Override + public Timestamp getReadTimestamp() { + synchronized (txnLock) { + assertTimestampAvailable(timestamp != null); + return timestamp; + } + } + + ByteString getTransactionId() { + synchronized (txnLock) { + return transactionId; + } + } + + void initTransaction() { + SessionImpl.throwIfTransactionsPending(); + + // Since we only support synchronous calls, just block on "txnLock" while the RPC is in + // flight. Note that we use the strategy of sending an explicit BeginTransaction() RPC, + // rather than using the first read in the transaction to begin it implicitly. The chosen + // strategy is sub-optimal in the case of the first read being fast, as it incurs an extra + // RTT, but optimal if the first read is slow. As the client library is now using streaming + // reads, a possible optimization could be to use the first read in the transaction to begin + // it implicitly. 
+ synchronized (txnLock) { + if (transactionId != null) { + return; + } + span.addAnnotation("Creating Transaction"); + try { + TransactionOptions.Builder options = TransactionOptions.newBuilder(); + bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true); + final BeginTransactionRequest request = + BeginTransactionRequest.newBuilder() + .setSession(session.getName()) + .setOptions(options) + .build(); + Transaction transaction = + SpannerImpl.runWithRetries( + new Callable() { + @Override + public Transaction call() throws Exception { + return rpc.beginTransaction(request, session.getOptions()); + } + }); + if (!transaction.hasReadTimestamp()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field"); + } + if (transaction.getId().isEmpty()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "Missing expected transaction.id metadata field"); + } + try { + timestamp = Timestamp.fromProto(transaction.getReadTimestamp()); + } catch (IllegalArgumentException e) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, "Bad value in transaction.read_timestamp metadata field", e); + } + transactionId = transaction.getId(); + span.addAnnotation( + "Transaction Creation Done", TraceUtil.getTransactionAnnotations(transaction)); + } catch (SpannerException e) { + span.addAnnotation("Transaction Creation Failed", TraceUtil.getExceptionAnnotations(e)); + throw e; + } + } + } + } + + final Object lock = new Object(); + final SessionImpl session; + final SpannerRpc rpc; + final Span span; + private final int defaultPrefetchChunks; + + @GuardedBy("lock") + private boolean isValid = true; + + @GuardedBy("lock") + private boolean isClosed = false; + + // A per-transaction sequence number used to identify ExecuteSqlRequests issued by this + // transaction. Required for DML, ignored for queries by the server. + private AtomicLong seqNo = new AtomicLong(); + + // Allow up to 512MB to be buffered (assuming 1MB chunks). In practice, restart tokens are sent + // much more frequently. + private static final int MAX_BUFFERED_CHUNKS = 512; + + AbstractReadContext(SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks) { + this(session, rpc, defaultPrefetchChunks, Tracing.getTracer().getCurrentSpan()); + } + + private AbstractReadContext( + SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks, Span span) { + this.session = session; + this.rpc = rpc; + this.defaultPrefetchChunks = defaultPrefetchChunks; + this.span = span; + } + + long getSeqNo() { + return seqNo.incrementAndGet(); + } + + @Override + public final ResultSet read( + String table, KeySet keys, Iterable columns, ReadOption... options) { + return readInternal(table, null, keys, columns, options); + } + + @Override + public final ResultSet readUsingIndex( + String table, String index, KeySet keys, Iterable columns, ReadOption...
options) { + return readInternal(table, checkNotNull(index), keys, columns, options); + } + + @Nullable + @Override + public final Struct readRow(String table, Key key, Iterable columns) { + try (ResultSet resultSet = read(table, KeySet.singleKey(key), columns)) { + return consumeSingleRow(resultSet); + } + } + + @Nullable + @Override + public final Struct readRowUsingIndex( + String table, String index, Key key, Iterable columns) { + try (ResultSet resultSet = readUsingIndex(table, index, KeySet.singleKey(key), columns)) { + return consumeSingleRow(resultSet); + } + } + + @Override + public final ResultSet executeQuery(Statement statement, QueryOption... options) { + return executeQueryInternal( + statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL, options); + } + + @Override + public final ResultSet analyzeQuery(Statement statement, QueryAnalyzeMode readContextQueryMode) { + switch (readContextQueryMode) { + case PROFILE: + return executeQueryInternal( + statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PROFILE); + case PLAN: + return executeQueryInternal( + statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN); + default: + throw new IllegalStateException( + "Unknown value for QueryAnalyzeMode : " + readContextQueryMode); + } + } + + private ResultSet executeQueryInternal( + Statement statement, + com.google.spanner.v1.ExecuteSqlRequest.QueryMode queryMode, + QueryOption... options) { + Options readOptions = Options.fromQueryOptions(options); + return executeQueryInternalWithOptions( + statement, queryMode, readOptions, null /*partitionToken*/); + } + + ExecuteSqlRequest.Builder getExecuteSqlRequestBuilder(Statement statement, QueryMode queryMode) { + ExecuteSqlRequest.Builder builder = + ExecuteSqlRequest.newBuilder() + .setSql(statement.getSql()) + .setQueryMode(queryMode) + .setSession(session.getName()); + Map stmtParameters = statement.getParameters(); + if (!stmtParameters.isEmpty()) { + com.google.protobuf.Struct.Builder paramsBuilder = builder.getParamsBuilder(); + for (Map.Entry param : stmtParameters.entrySet()) { + paramsBuilder.putFields(param.getKey(), param.getValue().toProto()); + builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); + } + } + TransactionSelector selector = getTransactionSelector(); + if (selector != null) { + builder.setTransaction(selector); + } + builder.setSeqno(getSeqNo()); + return builder; + } + + ExecuteBatchDmlRequest.Builder getExecuteBatchDmlRequestBuilder(Iterable statements) { + ExecuteBatchDmlRequest.Builder builder = + ExecuteBatchDmlRequest.newBuilder().setSession(session.getName()); + int idx = 0; + for (Statement stmt : statements) { + builder.addStatementsBuilder(); + builder.getStatementsBuilder(idx).setSql(stmt.getSql()); + Map stmtParameters = stmt.getParameters(); + if (!stmtParameters.isEmpty()) { + com.google.protobuf.Struct.Builder paramsBuilder = + builder.getStatementsBuilder(idx).getParamsBuilder(); + for (Map.Entry param : stmtParameters.entrySet()) { + paramsBuilder.putFields(param.getKey(), param.getValue().toProto()); + builder + .getStatementsBuilder(idx) + .putParamTypes(param.getKey(), param.getValue().getType().toProto()); + } + } + idx++; + } + + TransactionSelector selector = getTransactionSelector(); + if (selector != null) { + builder.setTransaction(selector); + } + builder.setSeqno(getSeqNo()); + return builder; + } + + ResultSet executeQueryInternalWithOptions( + Statement statement, + com.google.spanner.v1.ExecuteSqlRequest.QueryMode queryMode, + 
Options readOptions, + ByteString partitionToken) { + beforeReadOrQuery(); + final ExecuteSqlRequest.Builder request = getExecuteSqlRequestBuilder(statement, queryMode); + if (partitionToken != null) { + request.setPartitionToken(partitionToken); + } + final int prefetchChunks = + readOptions.hasPrefetchChunks() ? readOptions.prefetchChunks() : defaultPrefetchChunks; + ResumableStreamIterator stream = + new ResumableStreamIterator(MAX_BUFFERED_CHUNKS, SpannerImpl.QUERY, span) { + @Override + CloseableIterator startStream(@Nullable ByteString resumeToken) { + GrpcStreamIterator stream = new GrpcStreamIterator(prefetchChunks); + if (resumeToken != null) { + request.setResumeToken(resumeToken); + } + SpannerRpc.StreamingCall call = + rpc.executeQuery(request.build(), stream.consumer(), session.getOptions()); + call.request(prefetchChunks); + stream.setCall(call); + return stream; + } + }; + return new GrpcResultSet(stream, this); + } + + /** + * Called before any read or query is started to perform state checks and initializations. + * Subclasses should call {@code super.beforeReadOrQuery()} if overriding. + */ + void beforeReadOrQuery() { + synchronized (lock) { + beforeReadOrQueryLocked(); + } + } + + /** Called as part of {@link #beforeReadOrQuery()} under {@link #lock}. */ + @GuardedBy("lock") + void beforeReadOrQueryLocked() { + // Note that transactions are invalidated under some circumstances on the backend, but we + // implement the check more strictly here to encourage coding to contract rather than the + // implementation. + checkState(isValid, "Context has been invalidated by a new operation on the session"); + checkState(!isClosed, "Context has been closed"); + } + + /** Invalidates the context since another context has been created more recently. */ + @Override + public final void invalidate() { + synchronized (lock) { + isValid = false; + } + } + + @Override + public void close() { + span.end(); + synchronized (lock) { + isClosed = true; + } + } + + @Nullable + abstract TransactionSelector getTransactionSelector(); + + @Override + public void onTransactionMetadata(Transaction transaction) {} + + @Override + public void onError(SpannerException e) {} + + @Override + public void onDone() {} + + private ResultSet readInternal( + String table, + @Nullable String index, + KeySet keys, + Iterable columns, + ReadOption... options) { + Options readOptions = Options.fromReadOptions(options); + return readInternalWithOptions( + table, index, keys, columns, readOptions, null /*partitionToken*/); + } + + ResultSet readInternalWithOptions( + String table, + @Nullable String index, + KeySet keys, + Iterable columns, + Options readOptions, + ByteString partitionToken) { + beforeReadOrQuery(); + final ReadRequest.Builder builder = + ReadRequest.newBuilder() + .setSession(session.getName()) + .setTable(checkNotNull(table)) + .addAllColumns(columns); + if (readOptions.hasLimit()) { + builder.setLimit(readOptions.limit()); + } + + keys.appendToProto(builder.getKeySetBuilder()); + if (index != null) { + builder.setIndex(index); + } + TransactionSelector selector = getTransactionSelector(); + if (selector != null) { + builder.setTransaction(selector); + } + if (partitionToken != null) { + builder.setPartitionToken(partitionToken); + } + final int prefetchChunks = + readOptions.hasPrefetchChunks() ? 
readOptions.prefetchChunks() : defaultPrefetchChunks; + ResumableStreamIterator stream = + new ResumableStreamIterator(MAX_BUFFERED_CHUNKS, SpannerImpl.READ, span) { + @Override + CloseableIterator startStream(@Nullable ByteString resumeToken) { + GrpcStreamIterator stream = new GrpcStreamIterator(prefetchChunks); + if (resumeToken != null) { + builder.setResumeToken(resumeToken); + } + SpannerRpc.StreamingCall call = + rpc.read(builder.build(), stream.consumer(), session.getOptions()); + call.request(prefetchChunks); + stream.setCall(call); + return stream; + } + }; + GrpcResultSet resultSet = new GrpcResultSet(stream, this); + return resultSet; + } + + private Struct consumeSingleRow(ResultSet resultSet) { + if (!resultSet.next()) { + return null; + } + Struct row = resultSet.getCurrentRowAsStruct(); + if (resultSet.next()) { + throw newSpannerException(ErrorCode.INTERNAL, "Multiple rows returned for single key"); + } + return row; + } +} diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java new file mode 100644 index 000000000000..7dd1c69077a3 --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java @@ -0,0 +1,1181 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.client.util.BackOff; +import com.google.cloud.ByteArray; +import com.google.cloud.Date; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.common.collect.AbstractIterator; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.util.concurrent.Uninterruptibles; +import com.google.protobuf.ByteString; +import com.google.protobuf.ListValue; +import com.google.protobuf.Value.KindCase; +import com.google.spanner.v1.PartialResultSet; +import com.google.spanner.v1.ResultSetMetadata; +import com.google.spanner.v1.ResultSetStats; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TypeCode; +import io.grpc.Context; +import io.opencensus.common.Scope; +import io.opencensus.trace.AttributeValue; +import io.opencensus.trace.Span; +import io.opencensus.trace.Tracer; +import io.opencensus.trace.Tracing; +import java.io.Serializable; +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** Implementation of {@link ResultSet}. */ +abstract class AbstractResultSet extends AbstractStructReader implements ResultSet { + private static final Tracer tracer = Tracing.getTracer(); + + interface Listener { + /** + * Called when transaction metadata is seen. This method may be invoked at most once. If the + * method is invoked, it will precede {@link #onError(SpannerException)} or {@link #onDone()}. + */ + void onTransactionMetadata(Transaction transaction) throws SpannerException; + + /** Called when the read finishes with an error. */ + void onError(SpannerException e); + + /** Called when the read finishes normally. 
*/ + void onDone(); + } + + @VisibleForTesting + static class GrpcResultSet extends AbstractResultSet> { + private final GrpcValueIterator iterator; + private final Listener listener; + private GrpcStruct currRow; + private SpannerException error; + private ResultSetStats statistics; + private boolean closed; + + GrpcResultSet(CloseableIterator iterator, Listener listener) { + this.iterator = new GrpcValueIterator(iterator); + this.listener = listener; + } + + @Override + protected GrpcStruct currRow() { + checkState(!closed, "ResultSet is closed"); + checkState(currRow != null, "next() call required"); + return currRow; + } + + @Override + public boolean next() throws SpannerException { + if (error != null) { + throw newSpannerException(error); + } + try { + if (currRow == null) { + ResultSetMetadata metadata = iterator.getMetadata(); + if (metadata.hasTransaction()) { + listener.onTransactionMetadata(metadata.getTransaction()); + } + currRow = new GrpcStruct(iterator.type(), new ArrayList<>()); + } + boolean hasNext = currRow.consumeRow(iterator); + if (!hasNext) { + statistics = iterator.getStats(); + } + return hasNext; + } catch (SpannerException e) { + throw yieldError(e); + } + } + + @Override + @Nullable + public ResultSetStats getStats() { + return statistics; + } + + @Override + public void close() { + iterator.close("ResultSet closed"); + closed = true; + } + + @Override + public Type getType() { + checkState(currRow != null, "next() call required"); + return currRow.getType(); + } + + private SpannerException yieldError(SpannerException e) { + close(); + listener.onError(e); + throw e; + } + } + /** + * Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value} messages. + */ + private static class GrpcValueIterator extends AbstractIterator { + private enum StreamValue { + METADATA, + RESULT, + } + + private final CloseableIterator stream; + private ResultSetMetadata metadata; + private Type type; + private PartialResultSet current; + private int pos; + private ResultSetStats statistics; + + GrpcValueIterator(CloseableIterator stream) { + this.stream = stream; + } + + @SuppressWarnings("unchecked") + @Override + protected com.google.protobuf.Value computeNext() { + if (!ensureReady(StreamValue.RESULT)) { + endOfData(); + return null; + } + com.google.protobuf.Value value = current.getValues(pos++); + KindCase kind = value.getKindCase(); + + if (!isMergeable(kind)) { + if (pos == current.getValuesCount() && current.getChunkedValue()) { + throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet."); + } else { + return value; + } + } + if (!current.getChunkedValue() || pos != current.getValuesCount()) { + return value; + } + + Object merged = + kind == KindCase.STRING_VALUE + ? value.getStringValue() + : new ArrayList(value.getListValue().getValuesList()); + while (current.getChunkedValue() && pos == current.getValuesCount()) { + if (!ensureReady(StreamValue.RESULT)) { + throw newSpannerException( + ErrorCode.INTERNAL, "Stream closed in the middle of chunked value"); + } + com.google.protobuf.Value newValue = current.getValues(pos++); + if (newValue.getKindCase() != kind) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Unexpected type in middle of chunked value. 
Expected: " + + kind + + " but got: " + + newValue.getKindCase()); + } + if (kind == KindCase.STRING_VALUE) { + merged = (String) merged + newValue.getStringValue(); + } else { + concatLists( + (List) merged, newValue.getListValue().getValuesList()); + } + } + if (kind == KindCase.STRING_VALUE) { + return com.google.protobuf.Value.newBuilder().setStringValue((String) merged).build(); + } else { + return com.google.protobuf.Value.newBuilder() + .setListValue( + ListValue.newBuilder().addAllValues((List) merged)) + .build(); + } + } + + ResultSetMetadata getMetadata() throws SpannerException { + if (metadata == null) { + if (!ensureReady(StreamValue.METADATA)) { + throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata"); + } + } + return metadata; + } + + /** + * Get the query statistics. Query statistics are delivered with the last PartialResultSet in + * the stream. Any attempt to call this method before the caller has finished consuming the + * results will return null. + */ + @Nullable + ResultSetStats getStats() { + return statistics; + } + + Type type() { + checkState(type != null, "metadata has not been received"); + return type; + } + + private boolean ensureReady(StreamValue requiredValue) throws SpannerException { + while (current == null || pos >= current.getValuesCount()) { + if (!stream.hasNext()) { + return false; + } + current = stream.next(); + pos = 0; + if (type == null) { + // This is the first message on the stream. + if (!current.hasMetadata() || !current.getMetadata().hasRowType()) { + throw newSpannerException(ErrorCode.INTERNAL, "Missing type metadata in first message"); + } + metadata = current.getMetadata(); + com.google.spanner.v1.Type typeProto = + com.google.spanner.v1.Type.newBuilder() + .setCode(TypeCode.STRUCT) + .setStructType(metadata.getRowType()) + .build(); + try { + type = Type.fromProto(typeProto); + } catch (IllegalArgumentException e) { + throw newSpannerException( + ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e); + } + } + if (current.hasStats()) { + statistics = current.getStats(); + } + if (requiredValue == StreamValue.METADATA) { + return true; + } + } + return true; + } + + void close(@Nullable String message) { + stream.close(message); + } + + /** @param a is a mutable list and b will be concatenated into a. 
*/ + private void concatLists(List a, List b) { + if (a.size() == 0 || b.size() == 0) { + a.addAll(b); + return; + } else { + com.google.protobuf.Value last = a.get(a.size() - 1); + com.google.protobuf.Value first = b.get(0); + KindCase lastKind = last.getKindCase(); + KindCase firstKind = first.getKindCase(); + if (isMergeable(lastKind) && lastKind == firstKind) { + com.google.protobuf.Value merged = null; + if (lastKind == KindCase.STRING_VALUE) { + String lastStr = last.getStringValue(); + String firstStr = first.getStringValue(); + merged = + com.google.protobuf.Value.newBuilder().setStringValue(lastStr + firstStr).build(); + } else { // List + List mergedList = new ArrayList<>(); + mergedList.addAll(last.getListValue().getValuesList()); + concatLists(mergedList, first.getListValue().getValuesList()); + merged = + com.google.protobuf.Value.newBuilder() + .setListValue(ListValue.newBuilder().addAllValues(mergedList)) + .build(); + } + a.set(a.size() - 1, merged); + a.addAll(b.subList(1, b.size())); + } else { + a.addAll(b); + } + } + } + + private boolean isMergeable(KindCase kind) { + return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE; + } + } + + static class GrpcStruct extends Struct implements Serializable { + private final Type type; + private final List rowData; + + /** + * Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used as + * a serialization proxy. + */ + private Object writeReplace() { + Builder builder = Struct.newBuilder(); + List structFields = getType().getStructFields(); + for (int i = 0; i < structFields.size(); i++) { + Type.StructField field = structFields.get(i); + String fieldName = field.getName(); + Object value = rowData.get(i); + Type fieldType = field.getType(); + switch (fieldType.getCode()) { + case BOOL: + builder.set(fieldName).to((Boolean) value); + break; + case INT64: + builder.set(fieldName).to((Long) value); + break; + case FLOAT64: + builder.set(fieldName).to((Double) value); + break; + case STRING: + builder.set(fieldName).to((String) value); + break; + case BYTES: + builder.set(fieldName).to((ByteArray) value); + break; + case TIMESTAMP: + builder.set(fieldName).to((Timestamp) value); + break; + case DATE: + builder.set(fieldName).to((Date) value); + break; + case ARRAY: + switch (fieldType.getArrayElementType().getCode()) { + case BOOL: + builder.set(fieldName).toBoolArray((Iterable) value); + break; + case INT64: + builder.set(fieldName).toInt64Array((Iterable) value); + break; + case FLOAT64: + builder.set(fieldName).toFloat64Array((Iterable) value); + break; + case STRING: + builder.set(fieldName).toStringArray((Iterable) value); + break; + case BYTES: + builder.set(fieldName).toBytesArray((Iterable) value); + break; + case TIMESTAMP: + builder.set(fieldName).toTimestampArray((Iterable) value); + break; + case DATE: + builder.set(fieldName).toDateArray((Iterable) value); + break; + case STRUCT: + builder + .set(fieldName) + .toStructArray(fieldType.getArrayElementType(), (Iterable) value); + break; + default: + throw new AssertionError( + "Unhandled array type code: " + fieldType.getArrayElementType()); + } + break; + case STRUCT: + if (value == null) { + builder.set(fieldName).to(fieldType, null); + } else { + builder.set(fieldName).to((Struct) value); + } + break; + default: + throw new AssertionError("Unhandled type code: " + fieldType.getCode()); + } + } + return builder.build(); + } + + GrpcStruct(Type type, List rowData) { + this.type = type; + this.rowData = rowData; + } + + 
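// Each element of rowData holds one column's decoded Java value: e.g. Boolean for BOOL, Long + // for INT64, ByteArray for BYTES, a List for ARRAY, or null for SQL NULL (see decodeValue). +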
@Override + public String toString() { + return this.rowData.toString(); + } + + boolean consumeRow(Iterator iterator) { + rowData.clear(); + if (!iterator.hasNext()) { + return false; + } + for (Type.StructField fieldType : getType().getStructFields()) { + if (!iterator.hasNext()) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Invalid value stream: end of stream reached before row is complete"); + } + com.google.protobuf.Value value = iterator.next(); + rowData.add(decodeValue(fieldType.getType(), value)); + } + return true; + } + + private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) { + if (proto.getKindCase() == KindCase.NULL_VALUE) { + return null; + } + switch (fieldType.getCode()) { + case BOOL: + checkType(fieldType, proto, KindCase.BOOL_VALUE); + return proto.getBoolValue(); + case INT64: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return Long.parseLong(proto.getStringValue()); + case FLOAT64: + return valueProtoToFloat64(proto); + case STRING: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return proto.getStringValue(); + case BYTES: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return ByteArray.fromBase64(proto.getStringValue()); + case TIMESTAMP: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return Timestamp.parseTimestamp(proto.getStringValue()); + case DATE: + checkType(fieldType, proto, KindCase.STRING_VALUE); + return Date.parseDate(proto.getStringValue()); + case ARRAY: + checkType(fieldType, proto, KindCase.LIST_VALUE); + ListValue listValue = proto.getListValue(); + return decodeArrayValue(fieldType.getArrayElementType(), listValue); + case STRUCT: + checkType(fieldType, proto, KindCase.LIST_VALUE); + ListValue structValue = proto.getListValue(); + return decodeStructValue(fieldType, structValue); + default: + throw new AssertionError("Unhandled type code: " + fieldType.getCode()); + } + } + + private static Struct decodeStructValue(Type structType, ListValue structValue) { + List fieldTypes = structType.getStructFields(); + checkArgument( + structValue.getValuesCount() == fieldTypes.size(), + "Size mismatch between type descriptor and actual values."); + List fields = new ArrayList<>(fieldTypes.size()); + List fieldValues = structValue.getValuesList(); + for (int i = 0; i < fieldTypes.size(); ++i) { + fields.add(decodeValue(fieldTypes.get(i).getType(), fieldValues.get(i))); + } + return new GrpcStruct(structType, fields); + } + + private static Object decodeArrayValue(Type elementType, ListValue listValue) { + switch (elementType.getCode()) { + case BOOL: + // Use a view: element conversion is virtually free. + return Lists.transform( + listValue.getValuesList(), + new Function() { + @Override + public Boolean apply(com.google.protobuf.Value input) { + return input.getKindCase() == KindCase.NULL_VALUE ? null : input.getBoolValue(); + } + }); + case INT64: + // For int64/float64 types, use custom containers. These avoid wrapper object + // creation for non-null arrays. + return new Int64Array(listValue); + case FLOAT64: + return new Float64Array(listValue); + case STRING: + return Lists.transform( + listValue.getValuesList(), + new Function() { + @Override + public String apply(com.google.protobuf.Value input) { + return input.getKindCase() == KindCase.NULL_VALUE ? null : input.getStringValue(); + } + }); + case BYTES: + { + // Materialize list: element conversion is expensive and should happen only once. 
+ ArrayList list = new ArrayList<>(listValue.getValuesCount()); + for (com.google.protobuf.Value value : listValue.getValuesList()) { + list.add( + value.getKindCase() == KindCase.NULL_VALUE + ? null + : ByteArray.fromBase64(value.getStringValue())); + } + return list; + } + case TIMESTAMP: + { + // Materialize list: element conversion is expensive and should happen only once. + ArrayList list = new ArrayList<>(listValue.getValuesCount()); + for (com.google.protobuf.Value value : listValue.getValuesList()) { + list.add( + value.getKindCase() == KindCase.NULL_VALUE + ? null + : Timestamp.parseTimestamp(value.getStringValue())); + } + return list; + } + case DATE: + { + // Materialize list: element conversion is expensive and should happen only once. + ArrayList list = new ArrayList<>(listValue.getValuesCount()); + for (com.google.protobuf.Value value : listValue.getValuesList()) { + list.add( + value.getKindCase() == KindCase.NULL_VALUE + ? null + : Date.parseDate(value.getStringValue())); + } + return list; + } + + case STRUCT: + { + ArrayList list = new ArrayList<>(listValue.getValuesCount()); + for (com.google.protobuf.Value value : listValue.getValuesList()) { + if (value.getKindCase() == KindCase.NULL_VALUE) { + list.add(null); + } else { + ListValue structValue = value.getListValue(); + list.add(decodeStructValue(elementType, structValue)); + } + } + return list; + } + default: + throw new AssertionError("Unhandled type code: " + elementType.getCode()); + } + } + + private static void checkType( + Type fieldType, com.google.protobuf.Value proto, KindCase expected) { + if (proto.getKindCase() != expected) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Invalid value for column type " + + fieldType + + " expected " + + expected + + " but was " + + proto.getKindCase()); + } + } + + Struct immutableCopy() { + return new GrpcStruct(type, new ArrayList<>(rowData)); + } + + @Override + public Type getType() { + return type; + } + + @Override + public boolean isNull(int columnIndex) { + return rowData.get(columnIndex) == null; + } + + @Override + protected boolean getBooleanInternal(int columnIndex) { + return (Boolean) rowData.get(columnIndex); + } + + @Override + protected long getLongInternal(int columnIndex) { + return (Long) rowData.get(columnIndex); + } + + @Override + protected double getDoubleInternal(int columnIndex) { + return (Double) rowData.get(columnIndex); + } + + @Override + protected String getStringInternal(int columnIndex) { + return (String) rowData.get(columnIndex); + } + + @Override + protected ByteArray getBytesInternal(int columnIndex) { + return (ByteArray) rowData.get(columnIndex); + } + + @Override + protected Timestamp getTimestampInternal(int columnIndex) { + return (Timestamp) rowData.get(columnIndex); + } + + @Override + protected Date getDateInternal(int columnIndex) { + return (Date) rowData.get(columnIndex); + } + + @Override + protected Struct getStructInternal(int columnIndex) { + return (Struct) rowData.get(columnIndex); + } + + @Override + protected boolean[] getBooleanArrayInternal(int columnIndex) { + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + List values = (List) rowData.get(columnIndex); + boolean[] r = new boolean[values.size()]; + for (int i = 0; i < values.size(); ++i) { + if (values.get(i) == null) { + throw throwNotNull(columnIndex); + } + r[i] = values.get(i); + } + return r; + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. 
+ protected List getBooleanListInternal(int columnIndex) { + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + protected long[] getLongArrayInternal(int columnIndex) { + return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex); + } + + @Override + protected Int64Array getLongListInternal(int columnIndex) { + return (Int64Array) rowData.get(columnIndex); + } + + @Override + protected double[] getDoubleArrayInternal(int columnIndex) { + return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex); + } + + @Override + protected Float64Array getDoubleListInternal(int columnIndex) { + return (Float64Array) rowData.get(columnIndex); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getStringListInternal(int columnIndex) { + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getBytesListInternal(int columnIndex) { + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getTimestampListInternal(int columnIndex) { + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY produces a List. + protected List getDateListInternal(int columnIndex) { + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + + @Override + @SuppressWarnings("unchecked") // We know ARRAY> produces a List. + protected List getStructListInternal(int columnIndex) { + return Collections.unmodifiableList((List) rowData.get(columnIndex)); + } + } + + @VisibleForTesting + interface CloseableIterator extends Iterator { + + /** + * Closes the iterator, freeing any underlying resources. + * + * @param message a message to include in the final RPC status + */ + void close(@Nullable String message); + } + + /** Adapts a streaming read/query call into an iterator over partial result sets. */ + @VisibleForTesting + static class GrpcStreamIterator extends AbstractIterator + implements CloseableIterator { + private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build(); + + private final ConsumerImpl consumer = new ConsumerImpl(); + private final BlockingQueue stream; + + private SpannerRpc.StreamingCall call; + private SpannerException error; + + // Visible for testing. + GrpcStreamIterator(int prefetchChunks) { + // One extra to allow for END_OF_STREAM message. + this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1); + } + + protected final SpannerRpc.ResultStreamConsumer consumer() { + return consumer; + } + + public void setCall(SpannerRpc.StreamingCall call) { + this.call = call; + } + + @Override + public void close(@Nullable String message) { + if (call != null) { + call.cancel(message); + } + } + + @Override + protected final PartialResultSet computeNext() { + PartialResultSet next; + try { + // TODO: Ideally honor io.grpc.Context while blocking here. In practice, + // cancellation/deadline results in an error being delivered to "stream", which + // should mean that we do not block significantly longer afterwards, but it would + // be more robust to use poll() with a timeout. + next = stream.take(); + } catch (InterruptedException e) { + // Treat interrupt as a request to cancel the read. 
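+ // propagateInterrupt() is expected to re-set the thread's interrupt flag and surface the + // cancellation as a SpannerException with ErrorCode.CANCELLED.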
+ throw SpannerExceptionFactory.propagateInterrupt(e); + } + if (next != END_OF_STREAM) { + call.request(1); + return next; + } + + // All done - close() no longer needs to cancel the call. + call = null; + + if (error != null) { + throw SpannerExceptionFactory.newSpannerException(error); + } + + endOfData(); + return null; + } + + private void addToStream(PartialResultSet results) { + // We assume that nothing from the user will interrupt gRPC event threads. + Uninterruptibles.putUninterruptibly(stream, results); + } + + private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer { + @Override + public void onPartialResultSet(PartialResultSet results) { + addToStream(results); + } + + @Override + public void onCompleted() { + addToStream(END_OF_STREAM); + } + + @Override + public void onError(SpannerException e) { + error = e; + addToStream(END_OF_STREAM); + } + + // Visible only for testing. + @VisibleForTesting + void setCall(SpannerRpc.StreamingCall call) { + GrpcStreamIterator.this.setCall(call); + } + } + } + + /** + * Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class keeps + * track of the most recent resume token seen, and will buffer partial result set chunks that do + * not have a resume token until one is seen or buffer space is exceeded, which reduces the chance + * of yielding data to the caller that cannot be resumed. + */ + @VisibleForTesting + abstract static class ResumableStreamIterator extends AbstractIterator + implements CloseableIterator { + private static final Logger logger = Logger.getLogger(ResumableStreamIterator.class.getName()); + private final BackOff backOff = SpannerImpl.newBackOff(); + private final LinkedList buffer = new LinkedList<>(); + private final int maxBufferSize; + private final Span span; + private CloseableIterator stream; + private ByteString resumeToken; + private boolean finished; + /** + * Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we have + * reached the maximum buffer size without seeing a restart token; in this case, we will drain + * the buffer and remain in this state until we see a new restart token. + */ + private boolean safeToRetry = true; + + protected ResumableStreamIterator(int maxBufferSize, String streamName, Span parent) { + checkArgument(maxBufferSize >= 0); + this.maxBufferSize = maxBufferSize; + this.span = tracer.spanBuilderWithExplicitParent(streamName, parent).startSpan(); + } + + abstract CloseableIterator startStream(@Nullable ByteString resumeToken); + + @Override + public void close(@Nullable String message) { + if (stream != null) { + stream.close(message); + } + } + + @Override + protected PartialResultSet computeNext() { + Context context = Context.current(); + while (true) { + // Eagerly start stream before consuming any buffered items. + if (stream == null) { + span.addAnnotation( + "Starting/Resuming stream", + ImmutableMap.of( + "ResumeToken", + AttributeValue.stringAttributeValue( + resumeToken == null ? "null" : resumeToken.toStringUtf8()))); + try (Scope s = tracer.withSpan(span)) { + // When starting a new stream, set the Span as current to make the gRPC Span a child of + // this Span. + stream = checkNotNull(startStream(resumeToken)); + } + } + // Buffer contains items up to a resume token or has reached capacity: flush.
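+ // (A resume token on the last buffered item marks a safe flush point: a retry would resume + // after that token, so everything buffered up to and including it can be yielded safely.)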
+ if (!buffer.isEmpty() + && (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) { + return buffer.pop(); + } + try { + if (stream.hasNext()) { + PartialResultSet next = stream.next(); + boolean hasResumeToken = !next.getResumeToken().isEmpty(); + if (hasResumeToken) { + resumeToken = next.getResumeToken(); + safeToRetry = true; + } + // If the buffer is empty and this chunk has a resume token or we cannot resume safely + // anyway, we can yield it immediately rather than placing it in the buffer to be + // returned on the next iteration. + if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) { + return next; + } + buffer.add(next); + if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) { + // We need to flush without a restart token. Errors encountered until we see + // such a token will fail the read. + safeToRetry = false; + } + } else { + finished = true; + if (buffer.isEmpty()) { + endOfData(); + return null; + } + } + } catch (SpannerException e) { + if (safeToRetry && e.isRetryable()) { + span.addAnnotation( + "Stream broken. Safe to retry", TraceUtil.getExceptionAnnotations(e)); + logger.log(Level.FINE, "Retryable exception, will sleep and retry", e); + // Truncate any items in the buffer before the last retry token. + while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) { + buffer.removeLast(); + } + assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken); + stream = null; + try (Scope s = tracer.withSpan(span)) { + long delay = e.getRetryDelayInMillis(); + if (delay != -1) { + SpannerImpl.backoffSleep(context, delay); + } else { + SpannerImpl.backoffSleep(context, backOff); + } + } + continue; + } + span.addAnnotation("Stream broken. Not safe to retry"); + TraceUtil.endSpanWithFailure(span, e); + throw e; + } catch (RuntimeException e) { + span.addAnnotation("Stream broken. Not safe to retry"); + TraceUtil.endSpanWithFailure(span, e); + throw e; + } + } + } + } + + private static double valueProtoToFloat64(com.google.protobuf.Value proto) { + if (proto.getKindCase() == KindCase.STRING_VALUE) { + switch (proto.getStringValue()) { + case "-Infinity": + return Double.NEGATIVE_INFINITY; + case "Infinity": + return Double.POSITIVE_INFINITY; + case "NaN": + return Double.NaN; + default: + // Fall-through to handling below to produce an error. + } + } + if (proto.getKindCase() != KindCase.NUMBER_VALUE) { + throw newSpannerException( + ErrorCode.INTERNAL, + "Invalid value for column type " + + Type.float64() + + " expected NUMBER_VALUE or STRING_VALUE with value one of" + + " \"Infinity\", \"-Infinity\", or \"NaN\" but was " + + proto.getKindCase() + + (proto.getKindCase() == KindCase.STRING_VALUE + ? " with value \"" + proto.getStringValue() + "\"" + : "")); + } + return proto.getNumberValue(); + } + + private static NullPointerException throwNotNull(int columnIndex) { + throw new NullPointerException( + "Cannot call array getter for column " + columnIndex + " with null elements"); + } + + /** + * Memory-optimized base class for {@code ARRAY} and {@code ARRAY} types. Both of + * these involve conversions from the type yielded by JSON parsing, which are {@code String} and + * {@code BigDecimal} respectively. Rather than construct new wrapper objects for each array + * element, we use primitive arrays and a {@code BitSet} to track nulls. 
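+ * For example, an INT64 array value [1, null, 3] is held as the primitive array {1, 0, 3} plus a + * {@code BitSet} with bit 1 set; a {@code Long} wrapper is created only when {@code get(int)} is + * called for a non-null element.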
+ */ + private abstract static class PrimitiveArray extends AbstractList { + private final A data; + private final BitSet nulls; + private final int size; + + PrimitiveArray(ListValue protoList) { + this.size = protoList.getValuesCount(); + A data = newArray(size); + BitSet nulls = new BitSet(size); + for (int i = 0; i < protoList.getValuesCount(); ++i) { + if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) { + nulls.set(i); + } else { + setProto(data, i, protoList.getValues(i)); + } + } + this.data = data; + this.nulls = nulls; + } + + PrimitiveArray(A data, BitSet nulls, int size) { + this.data = data; + this.nulls = nulls; + this.size = size; + } + + abstract A newArray(int size); + + abstract void setProto(A array, int i, com.google.protobuf.Value protoValue); + + abstract T get(A array, int i); + + @Override + public T get(int index) { + if (index < 0 || index >= size) { + throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size); + } + return nulls.get(index) ? null : get(data, index); + } + + @Override + public int size() { + return size; + } + + A toPrimitiveArray(int columnIndex) { + if (nulls.length() > 0) { + throw throwNotNull(columnIndex); + } + A r = newArray(size); + System.arraycopy(data, 0, r, 0, size); + return r; + } + } + + private static class Int64Array extends PrimitiveArray { + Int64Array(ListValue protoList) { + super(protoList); + } + + Int64Array(long[] data, BitSet nulls) { + super(data, nulls, data.length); + } + + @Override + long[] newArray(int size) { + return new long[size]; + } + + @Override + void setProto(long[] array, int i, com.google.protobuf.Value protoValue) { + array[i] = Long.parseLong(protoValue.getStringValue()); + } + + @Override + Long get(long[] array, int i) { + return array[i]; + } + } + + private static class Float64Array extends PrimitiveArray { + Float64Array(ListValue protoList) { + super(protoList); + } + + Float64Array(double[] data, BitSet nulls) { + super(data, nulls, data.length); + } + + @Override + double[] newArray(int size) { + return new double[size]; + } + + @Override + void setProto(double[] array, int i, com.google.protobuf.Value protoValue) { + array[i] = valueProtoToFloat64(protoValue); + } + + @Override + Double get(double[] array, int i) { + return array[i]; + } + } + + protected abstract GrpcStruct currRow(); + + @Override + public Struct getCurrentRowAsStruct() { + return currRow().immutableCopy(); + } + + @Override + protected boolean getBooleanInternal(int columnIndex) { + return currRow().getBooleanInternal(columnIndex); + } + + @Override + protected long getLongInternal(int columnIndex) { + return currRow().getLongInternal(columnIndex); + } + + @Override + protected double getDoubleInternal(int columnIndex) { + return currRow().getDoubleInternal(columnIndex); + } + + @Override + protected String getStringInternal(int columnIndex) { + return currRow().getStringInternal(columnIndex); + } + + @Override + protected ByteArray getBytesInternal(int columnIndex) { + return currRow().getBytesInternal(columnIndex); + } + + @Override + protected Timestamp getTimestampInternal(int columnIndex) { + return currRow().getTimestampInternal(columnIndex); + } + + @Override + protected Date getDateInternal(int columnIndex) { + return currRow().getDateInternal(columnIndex); + } + + @Override + protected boolean[] getBooleanArrayInternal(int columnIndex) { + return currRow().getBooleanArrayInternal(columnIndex); + } + + @Override + protected List getBooleanListInternal(int columnIndex) { + return 
currRow().getBooleanListInternal(columnIndex); + } + + @Override + protected long[] getLongArrayInternal(int columnIndex) { + return currRow().getLongArrayInternal(columnIndex); + } + + @Override + protected List getLongListInternal(int columnIndex) { + return currRow().getLongListInternal(columnIndex); + } + + @Override + protected double[] getDoubleArrayInternal(int columnIndex) { + return currRow().getDoubleArrayInternal(columnIndex); + } + + @Override + protected List getDoubleListInternal(int columnIndex) { + return currRow().getDoubleListInternal(columnIndex); + } + + @Override + protected List getStringListInternal(int columnIndex) { + return currRow().getStringListInternal(columnIndex); + } + + @Override + protected List getBytesListInternal(int columnIndex) { + return currRow().getBytesListInternal(columnIndex); + } + + @Override + protected List getTimestampListInternal(int columnIndex) { + return currRow().getTimestampListInternal(columnIndex); + } + + @Override + protected List getDateListInternal(int columnIndex) { + return currRow().getDateListInternal(columnIndex); + } + + @Override + protected List getStructListInternal(int columnIndex) { + return currRow().getStructListInternal(columnIndex); + } + + @Override + public boolean isNull(int columnIndex) { + return currRow().isNull(columnIndex); + } +} diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java index 5691144a6a21..8840abc7415a 100644 --- a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/BatchClientImpl.java @@ -18,10 +18,9 @@ import static com.google.common.base.Preconditions.checkNotNull; +import com.google.cloud.spanner.AbstractReadContext.MultiUseReadOnlyTransaction; import com.google.cloud.spanner.Options.QueryOption; import com.google.cloud.spanner.Options.ReadOption; -import com.google.cloud.spanner.SpannerImpl.MultiUseReadOnlyTransaction; -import com.google.cloud.spanner.SpannerImpl.SessionImpl; import com.google.cloud.spanner.spi.v1.SpannerRpc; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClientImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClientImpl.java new file mode 100644 index 000000000000..b419d406fa5f --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/DatabaseAdminClientImpl.java @@ -0,0 +1,187 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationFutureImpl; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.paging.Page; +import com.google.cloud.spanner.Options.ListOption; +import com.google.cloud.spanner.SpannerImpl.PageFetcher; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; +import com.google.common.base.Preconditions; +import com.google.protobuf.Empty; +import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; +import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.Callable; +import javax.annotation.Nullable; + +/** Default implementation of {@link DatabaseAdminClient}. */ +class DatabaseAdminClientImpl implements DatabaseAdminClient { + private final String projectId; + private final SpannerRpc rpc; + + DatabaseAdminClientImpl(String projectId, SpannerRpc rpc) { + this.projectId = projectId; + this.rpc = rpc; + } + + /** Generates a random operation id for long-running database operations. */ + private static String randomOperationId() { + UUID uuid = UUID.randomUUID(); + return ("r" + uuid.toString()).replace("-", "_"); + } + + @Override + public OperationFuture createDatabase( + String instanceId, String databaseId, Iterable statements) throws SpannerException { + // CreateDatabase() is not idempotent, so we're not retrying this request. + String instanceName = getInstanceName(instanceId); + String createStatement = "CREATE DATABASE `" + databaseId + "`"; + OperationFuture + rawOperationFuture = rpc.createDatabase(instanceName, createStatement, statements); + return new OperationFutureImpl( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + new ApiFunction() { + @Override + public Database apply(OperationSnapshot snapshot) { + return Database.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.database.v1.Database.class) + .apply(snapshot), + DatabaseAdminClientImpl.this); + } + }, + ProtoOperationTransformers.MetadataTransformer.create(CreateDatabaseMetadata.class), + new ApiFunction() { + @Override + public Database apply(Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + }); + } + + @Override + public Database getDatabase(String instanceId, String databaseId) throws SpannerException { + final String dbName = getDatabaseName(instanceId, databaseId); + Callable callable = + new Callable() { + @Override + public Database call() throws Exception { + return Database.fromProto(rpc.getDatabase(dbName), DatabaseAdminClientImpl.this); + } + }; + return SpannerImpl.runWithRetries(callable); + } + + @Override + public OperationFuture updateDatabaseDdl( + final String instanceId, + final String databaseId, + final Iterable statements, + @Nullable String operationId) + throws SpannerException { + final String dbName = getDatabaseName(instanceId, databaseId); + final String opId = operationId != null ? 
operationId : randomOperationId(); + OperationFuture rawOperationFuture = + rpc.updateDatabaseDdl(dbName, statements, opId); + return new OperationFutureImpl( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + new ApiFunction() { + @Override + public Void apply(OperationSnapshot snapshot) { + ProtoOperationTransformers.ResponseTransformer.create(Empty.class).apply(snapshot); + return null; + } + }, + ProtoOperationTransformers.MetadataTransformer.create(UpdateDatabaseDdlMetadata.class), + new ApiFunction() { + @Override + public Void apply(Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + }); + } + + @Override + public void dropDatabase(String instanceId, String databaseId) throws SpannerException { + final String dbName = getDatabaseName(instanceId, databaseId); + Callable callable = + new Callable() { + @Override + public Void call() throws Exception { + rpc.dropDatabase(dbName); + return null; + } + }; + SpannerImpl.runWithRetries(callable); + } + + @Override + public List getDatabaseDdl(String instanceId, String databaseId) { + final String dbName = getDatabaseName(instanceId, databaseId); + Callable> callable = + new Callable>() { + @Override + public List call() throws Exception { + return rpc.getDatabaseDdl(dbName); + } + }; + return SpannerImpl.runWithRetries(callable); + } + + @Override + public Page listDatabases(String instanceId, ListOption... options) { + final String instanceName = getInstanceName(instanceId); + final Options listOptions = Options.fromListOptions(options); + Preconditions.checkArgument( + !listOptions.hasFilter(), "Filter option is not supported by listDatabases"); + final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + return rpc.listDatabases(instanceName, pageSize, nextPageToken); + } + + @Override + public Database fromProto(com.google.spanner.admin.database.v1.Database proto) { + return Database.fromProto(proto, DatabaseAdminClientImpl.this); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.nextPageToken = listOptions.pageToken(); + } + return pageFetcher.getNextPage(); + } + + private String getInstanceName(String instanceId) { + return new InstanceId(projectId, instanceId).getName(); + } + + private String getDatabaseName(String instanceId, String databaseId) { + return new DatabaseId(new InstanceId(projectId, instanceId), databaseId).getName(); + } +} diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClientImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClientImpl.java new file mode 100644 index 000000000000..a6e9b8f2841e --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/InstanceAdminClientImpl.java @@ -0,0 +1,206 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
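For reference, a hedged sketch of how the OperationFuture returned by createDatabase above is typically consumed by a caller; the instance ID, database ID, and DDL statement are hypothetical:

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.spanner.Database;
import com.google.cloud.spanner.DatabaseAdminClient;
import com.google.spanner.admin.database.v1.CreateDatabaseMetadata;
import java.util.Collections;
import java.util.concurrent.ExecutionException;

// Sketch: "my-instance" and "my-db" are placeholders.
static Database createExampleDatabase(DatabaseAdminClient admin)
    throws ExecutionException, InterruptedException {
  OperationFuture<Database, CreateDatabaseMetadata> op =
      admin.createDatabase(
          "my-instance",
          "my-db",
          Collections.singletonList(
              "CREATE TABLE Players (Name STRING(MAX)) PRIMARY KEY (Name)"));
  // Block until the long-running operation completes; get() rethrows failures.
  return op.get();
}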
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import com.google.api.core.ApiFunction; +import com.google.api.gax.grpc.ProtoOperationTransformers; +import com.google.api.gax.longrunning.OperationFuture; +import com.google.api.gax.longrunning.OperationFutureImpl; +import com.google.api.gax.longrunning.OperationSnapshot; +import com.google.api.gax.paging.Page; +import com.google.api.pathtemplate.PathTemplate; +import com.google.cloud.spanner.Options.ListOption; +import com.google.cloud.spanner.SpannerImpl.PageFetcher; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; +import com.google.common.base.Preconditions; +import com.google.protobuf.FieldMask; +import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; +import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; +import java.util.concurrent.Callable; + +/** Default implementation of {@link InstanceAdminClient} */ +class InstanceAdminClientImpl implements InstanceAdminClient { + private static final PathTemplate PROJECT_NAME_TEMPLATE = + PathTemplate.create("projects/{project}"); + private final DatabaseAdminClient dbClient; + private final String projectId; + private final SpannerRpc rpc; + + InstanceAdminClientImpl(String projectId, SpannerRpc rpc, DatabaseAdminClient dbClient) { + this.projectId = projectId; + this.rpc = rpc; + this.dbClient = dbClient; + } + + @Override + public InstanceConfig getInstanceConfig(String configId) throws SpannerException { + final String instanceConfigName = new InstanceConfigId(projectId, configId).getName(); + return SpannerImpl.runWithRetries( + new Callable() { + @Override + public InstanceConfig call() { + return InstanceConfig.fromProto( + rpc.getInstanceConfig(instanceConfigName), InstanceAdminClientImpl.this); + } + }); + } + + @Override + public Page listInstanceConfigs(ListOption... options) { + final Options listOptions = Options.fromListOptions(options); + Preconditions.checkArgument( + !listOptions.hasFilter(), "Filter option is not supported by listInstanceConfigs"); + final int pageSize = listOptions.hasPageSize() ? 
listOptions.pageSize() : 0; + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + return rpc.listInstanceConfigs(pageSize, nextPageToken); + } + + @Override + public InstanceConfig fromProto( + com.google.spanner.admin.instance.v1.InstanceConfig proto) { + return InstanceConfig.fromProto(proto, InstanceAdminClientImpl.this); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.nextPageToken = listOptions.pageToken(); + } + return pageFetcher.getNextPage(); + } + + @Override + public OperationFuture createInstance(InstanceInfo instance) + throws SpannerException { + String projectName = PROJECT_NAME_TEMPLATE.instantiate("project", projectId); + OperationFuture + rawOperationFuture = + rpc.createInstance(projectName, instance.getId().getInstance(), instance.toProto()); + + return new OperationFutureImpl( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + new ApiFunction() { + @Override + public Instance apply(OperationSnapshot snapshot) { + return Instance.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.instance.v1.Instance.class) + .apply(snapshot), + InstanceAdminClientImpl.this, + dbClient); + } + }, + ProtoOperationTransformers.MetadataTransformer.create(CreateInstanceMetadata.class), + new ApiFunction() { + @Override + public Instance apply(Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + }); + } + + @Override + public Instance getInstance(String instanceId) throws SpannerException { + final String instanceName = new InstanceId(projectId, instanceId).getName(); + return SpannerImpl.runWithRetries( + new Callable() { + @Override + public Instance call() { + return Instance.fromProto( + rpc.getInstance(instanceName), InstanceAdminClientImpl.this, dbClient); + } + }); + } + + @Override + public Page listInstances(ListOption... options) throws SpannerException { + final Options listOptions = Options.fromListOptions(options); + final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; + final String filter = listOptions.filter(); + PageFetcher pageFetcher = + new PageFetcher() { + @Override + public Paginated getNextPage( + String nextPageToken) { + return rpc.listInstances(pageSize, nextPageToken, filter); + } + + @Override + public Instance fromProto(com.google.spanner.admin.instance.v1.Instance proto) { + return Instance.fromProto(proto, InstanceAdminClientImpl.this, dbClient); + } + }; + if (listOptions.hasPageToken()) { + pageFetcher.nextPageToken = listOptions.pageToken(); + } + return pageFetcher.getNextPage(); + } + + @Override + public void deleteInstance(final String instanceId) throws SpannerException { + SpannerImpl.runWithRetries( + new Callable() { + @Override + public Void call() { + rpc.deleteInstance(new InstanceId(projectId, instanceId).getName()); + return null; + } + }); + } + + @Override + public OperationFuture updateInstance( + InstanceInfo instance, InstanceInfo.InstanceField... fieldsToUpdate) { + FieldMask fieldMask = + fieldsToUpdate.length == 0 + ? 
InstanceInfo.InstanceField.toFieldMask(InstanceInfo.InstanceField.values()) + : InstanceInfo.InstanceField.toFieldMask(fieldsToUpdate); + + OperationFuture + rawOperationFuture = rpc.updateInstance(instance.toProto(), fieldMask); + return new OperationFutureImpl( + rawOperationFuture.getPollingFuture(), + rawOperationFuture.getInitialFuture(), + new ApiFunction() { + @Override + public Instance apply(OperationSnapshot snapshot) { + return Instance.fromProto( + ProtoOperationTransformers.ResponseTransformer.create( + com.google.spanner.admin.instance.v1.Instance.class) + .apply(snapshot), + InstanceAdminClientImpl.this, + dbClient); + } + }, + ProtoOperationTransformers.MetadataTransformer.create(UpdateInstanceMetadata.class), + new ApiFunction() { + @Override + public Instance apply(Exception e) { + throw SpannerExceptionFactory.newSpannerException(e); + } + }); + } + + @Override + public Instance.Builder newInstanceBuilder(InstanceId id) { + return new Instance.Builder(this, dbClient, id); + } +} diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDMLTransaction.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDMLTransaction.java new file mode 100644 index 000000000000..cefd128c499c --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/PartitionedDMLTransaction.java @@ -0,0 +1,106 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import com.google.spanner.v1.TransactionSelector; +import java.util.Map; +import java.util.concurrent.Callable; + +/** Partitioned DML transaction for bulk updates and deletes. 
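A small usage sketch for the updateInstance method above, showing the field-mask behavior: passing explicit fields restricts the update, while passing none masks in every field. The project ID, instance ID, and node count are invented for illustration:

import com.google.api.gax.longrunning.OperationFuture;
import com.google.cloud.spanner.Instance;
import com.google.cloud.spanner.InstanceAdminClient;
import com.google.cloud.spanner.InstanceId;
import com.google.cloud.spanner.InstanceInfo;
import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata;
import java.util.concurrent.ExecutionException;

// Sketch: resizes a hypothetical instance to 3 nodes.
static Instance resizeInstance(InstanceAdminClient admin, String projectId)
    throws ExecutionException, InterruptedException {
  InstanceInfo info =
      InstanceInfo.newBuilder(InstanceId.of(projectId, "my-instance"))
          .setNodeCount(3)
          .build();
  // Only NODE_COUNT is placed in the field mask, so other fields are untouched.
  OperationFuture<Instance, UpdateInstanceMetadata> op =
      admin.updateInstance(info, InstanceInfo.InstanceField.NODE_COUNT);
  return op.get();
}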
*/ +class PartitionedDMLTransaction implements SessionTransaction { + private final ByteString transactionId; + private final SessionImpl session; + private final SpannerRpc rpc; + private volatile boolean isValid = true; + + PartitionedDMLTransaction(SessionImpl session, SpannerRpc rpc) { + this.session = session; + this.rpc = rpc; + this.transactionId = initTransaction(); + } + + private ByteString initTransaction() { + final BeginTransactionRequest request = + BeginTransactionRequest.newBuilder() + .setSession(session.getName()) + .setOptions( + TransactionOptions.newBuilder() + .setPartitionedDml(TransactionOptions.PartitionedDml.getDefaultInstance())) + .build(); + Transaction txn = + SpannerImpl.runWithRetries( + new Callable() { + @Override + public Transaction call() throws Exception { + return rpc.beginTransaction(request, session.getOptions()); + } + }); + if (txn.getId().isEmpty()) { + throw SpannerExceptionFactory.newSpannerException( + ErrorCode.INTERNAL, + "Failed to init transaction, missing transaction id\n" + session.getName()); + } + return txn.getId(); + } + + long executePartitionedUpdate(Statement statement) { + checkState(isValid, "Partitioned DML has been invalidated by a new operation on the session"); + final ExecuteSqlRequest.Builder builder = + ExecuteSqlRequest.newBuilder() + .setSql(statement.getSql()) + .setQueryMode(QueryMode.NORMAL) + .setSession(session.getName()) + .setTransaction(TransactionSelector.newBuilder().setId(transactionId).build()); + Map stmtParameters = statement.getParameters(); + if (!stmtParameters.isEmpty()) { + com.google.protobuf.Struct.Builder paramsBuilder = builder.getParamsBuilder(); + for (Map.Entry param : stmtParameters.entrySet()) { + paramsBuilder.putFields(param.getKey(), param.getValue().toProto()); + builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); + } + } + com.google.spanner.v1.ResultSet resultSet = + SpannerImpl.runWithRetries( + new Callable() { + @Override + public com.google.spanner.v1.ResultSet call() throws Exception { + return rpc.executeQuery(builder.build(), session.getOptions()); + } + }); + if (!resultSet.hasStats()) { + throw new IllegalArgumentException( + "Partitioned DML response missing stats possibly due to non-DML statement as input"); + } + // For partitioned DML, using the row count lower bound. + return resultSet.getStats().getRowCountLowerBound(); + } + + @Override + public void invalidate() { + isValid = false; + } +} diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java new file mode 100644 index 000000000000..a55c45da66bb --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SessionImpl.java @@ -0,0 +1,277 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
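Because the transaction above returns getRowCountLowerBound(), callers should treat the result of a partitioned DML statement as a lower bound rather than an exact count. A minimal caller-side sketch; the table and predicate are hypothetical:

import com.google.cloud.spanner.DatabaseClient;
import com.google.cloud.spanner.Statement;

// Sketch: bulk delete via partitioned DML.
static void purgeInactivePlayers(DatabaseClient dbClient) {
  long deleted =
      dbClient.executePartitionedUpdate(
          Statement.of("DELETE FROM Players WHERE Active = false"));
  // Per the transaction above, this is a lower bound on affected rows.
  System.out.println("Deleted at least " + deleted + " rows");
}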
+ */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractReadContext.MultiUseReadOnlyTransaction; +import com.google.cloud.spanner.AbstractReadContext.SingleReadContext; +import com.google.cloud.spanner.AbstractReadContext.SingleUseReadOnlyTransaction; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.BeginTransactionRequest; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.Transaction; +import com.google.spanner.v1.TransactionOptions; +import io.opencensus.common.Scope; +import io.opencensus.trace.Span; +import io.opencensus.trace.Tracer; +import io.opencensus.trace.Tracing; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.Callable; +import javax.annotation.Nullable; + +/** + * Implementation of {@link Session}. Sessions are managed internally by the client library, and + * users need not be aware of the actual session management, pooling and handling. + */ +class SessionImpl implements Session { + private static final Tracer tracer = Tracing.getTracer(); + + /** Keep track of running transactions on this session per thread. */ + static final ThreadLocal hasPendingTransaction = + new ThreadLocal() { + @Override + protected Boolean initialValue() { + return false; + } + }; + + static void throwIfTransactionsPending() { + if (hasPendingTransaction.get() == Boolean.TRUE) { + throw newSpannerException(ErrorCode.INTERNAL, "Nested transactions are not supported"); + } + } + + /** + * Represents a transaction within a session. "Transaction" here is used in the general sense, + * which covers standalone reads, standalone writes, single-use and multi-use read-only + * transactions, and read-write transactions. The defining characteristic is that a session may + * only have one such transaction active at a time. + */ + static interface SessionTransaction { + /** Invalidates the transaction, generally because a new one has been started on the session. */ + void invalidate(); + } + + private final SpannerRpc gapicRpc; + private final int defaultPrefetchChunks; + private final String name; + private SessionTransaction activeTransaction; + private ByteString readyTransactionId; + private final Map options; + + SessionImpl( + SpannerRpc gapicRpc, + int defaultPrefetchChunks, + String name, + Map options) { + this.gapicRpc = gapicRpc; + this.defaultPrefetchChunks = defaultPrefetchChunks; + this.options = options; + this.name = checkNotNull(name); + } + + @Override + public String getName() { + return name; + } + + Map getOptions() { + return options; + } + + @Override + public long executePartitionedUpdate(Statement stmt) { + setActive(null); + PartitionedDMLTransaction txn = new PartitionedDMLTransaction(this, gapicRpc); + return txn.executePartitionedUpdate(stmt); + } + + @Override + public Timestamp write(Iterable mutations) throws SpannerException { + TransactionRunner runner = readWriteTransaction(); + final Collection finalMutations = + mutations instanceof java.util.Collection + ? 
(Collection) mutations + : Lists.newArrayList(mutations); + runner.run( + new TransactionRunner.TransactionCallable() { + @Override + public Void run(TransactionContext ctx) { + ctx.buffer(finalMutations); + return null; + } + }); + return runner.getCommitTimestamp(); + } + + @Override + public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException { + setActive(null); + List mutationsProto = new ArrayList<>(); + Mutation.toProto(mutations, mutationsProto); + final CommitRequest request = + CommitRequest.newBuilder() + .setSession(name) + .addAllMutations(mutationsProto) + .setSingleUseTransaction( + TransactionOptions.newBuilder() + .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance())) + .build(); + Span span = tracer.spanBuilder(SpannerImpl.COMMIT).startSpan(); + try (Scope s = tracer.withSpan(span)) { + CommitResponse response = + SpannerImpl.runWithRetries( + new Callable() { + @Override + public CommitResponse call() throws Exception { + return SessionImpl.this.gapicRpc.commit(request, options); + } + }); + Timestamp t = Timestamp.fromProto(response.getCommitTimestamp()); + span.end(); + return t; + } catch (IllegalArgumentException e) { + TraceUtil.endSpanWithFailure(span, e); + throw newSpannerException(ErrorCode.INTERNAL, "Could not parse commit timestamp", e); + } catch (RuntimeException e) { + TraceUtil.endSpanWithFailure(span, e); + throw e; + } + } + + @Override + public ReadContext singleUse() { + return singleUse(TimestampBound.strong()); + } + + @Override + public ReadContext singleUse(TimestampBound bound) { + return setActive(new SingleReadContext(this, bound, gapicRpc, defaultPrefetchChunks)); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction() { + return singleUseReadOnlyTransaction(TimestampBound.strong()); + } + + @Override + public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) { + return setActive( + new SingleUseReadOnlyTransaction(this, bound, gapicRpc, defaultPrefetchChunks)); + } + + @Override + public ReadOnlyTransaction readOnlyTransaction() { + return readOnlyTransaction(TimestampBound.strong()); + } + + @Override + public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { + return setActive(new MultiUseReadOnlyTransaction(this, bound, gapicRpc, defaultPrefetchChunks)); + } + + @Override + public TransactionRunner readWriteTransaction() { + return setActive(new TransactionRunnerImpl(this, gapicRpc, defaultPrefetchChunks)); + } + + @Override + public void prepareReadWriteTransaction() { + setActive(null); + readyTransactionId = beginTransaction(); + } + + @Override + public void close() { + Span span = tracer.spanBuilder(SpannerImpl.DELETE_SESSION).startSpan(); + try (Scope s = tracer.withSpan(span)) { + SpannerImpl.runWithRetries( + new Callable() { + @Override + public Void call() throws Exception { + SessionImpl.this.gapicRpc.deleteSession(name, options); + return null; + } + }); + span.end(); + } catch (RuntimeException e) { + TraceUtil.endSpanWithFailure(span, e); + throw e; + } + } + + ByteString beginTransaction() { + Span span = tracer.spanBuilder(SpannerImpl.BEGIN_TRANSACTION).startSpan(); + try (Scope s = tracer.withSpan(span)) { + final BeginTransactionRequest request = + BeginTransactionRequest.newBuilder() + .setSession(name) + .setOptions( + TransactionOptions.newBuilder() + .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance())) + .build(); + Transaction txn = + SpannerImpl.runWithRetries( + new Callable() { + @Override + public Transaction call() 
throws Exception { + return SessionImpl.this.gapicRpc.beginTransaction(request, options); + } + }); + if (txn.getId().isEmpty()) { + throw newSpannerException(ErrorCode.INTERNAL, "Missing id in transaction\n" + getName()); + } + span.end(); + return txn.getId(); + } catch (RuntimeException e) { + TraceUtil.endSpanWithFailure(span, e); + throw e; + } + } + + TransactionContextImpl newTransaction() { + TransactionContextImpl txn = + new TransactionContextImpl(this, readyTransactionId, gapicRpc, defaultPrefetchChunks); + return txn; + } + + T setActive(@Nullable T ctx) { + throwIfTransactionsPending(); + + if (activeTransaction != null) { + activeTransaction.invalidate(); + } + activeTransaction = ctx; + readyTransactionId = null; + return ctx; + } + + @Override + public TransactionManager transactionManager() { + return new TransactionManagerImpl(this); + } +} diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java index c8e3506ca596..a3ef6cebee03 100644 --- a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/SpannerImpl.java @@ -16,72 +16,26 @@ package com.google.cloud.spanner; -import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerBatchUpdateException; import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerExceptionForCancellation; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; -import static com.google.common.base.Preconditions.checkState; import com.google.api.client.util.BackOff; import com.google.api.client.util.ExponentialBackOff; -import com.google.api.core.ApiFunction; -import com.google.api.gax.grpc.ProtoOperationTransformers; -import com.google.api.gax.longrunning.OperationFuture; -import com.google.api.gax.longrunning.OperationFutureImpl; -import com.google.api.gax.longrunning.OperationSnapshot; import com.google.api.gax.paging.Page; -import com.google.api.gax.rpc.ServerStream; -import com.google.api.pathtemplate.PathTemplate; import com.google.cloud.BaseService; -import com.google.cloud.ByteArray; -import com.google.cloud.Date; import com.google.cloud.PageImpl; import com.google.cloud.PageImpl.NextPageFetcher; -import com.google.cloud.Timestamp; -import com.google.cloud.spanner.Options.ListOption; -import com.google.cloud.spanner.Options.QueryOption; -import com.google.cloud.spanner.Options.ReadOption; import com.google.cloud.spanner.spi.v1.SpannerRpc; import com.google.cloud.spanner.spi.v1.SpannerRpc.Paginated; import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Function; import com.google.common.base.Preconditions; import com.google.common.base.Throwables; -import com.google.common.collect.AbstractIterator; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.ListenableFuture; -import com.google.common.util.concurrent.Uninterruptibles; -import com.google.protobuf.Any; -import com.google.protobuf.ByteString; -import com.google.protobuf.Empty; -import com.google.protobuf.FieldMask; -import 
com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.ListValue; -import com.google.protobuf.Message; -import com.google.protobuf.Value.KindCase; -import com.google.spanner.admin.database.v1.CreateDatabaseMetadata; -import com.google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata; -import com.google.spanner.admin.instance.v1.CreateInstanceMetadata; -import com.google.spanner.admin.instance.v1.UpdateInstanceMetadata; -import com.google.spanner.v1.BeginTransactionRequest; -import com.google.spanner.v1.CommitRequest; -import com.google.spanner.v1.CommitResponse; -import com.google.spanner.v1.ExecuteBatchDmlRequest; -import com.google.spanner.v1.ExecuteSqlRequest; -import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; -import com.google.spanner.v1.PartialResultSet; -import com.google.spanner.v1.ReadRequest; -import com.google.spanner.v1.ResultSetMetadata; -import com.google.spanner.v1.ResultSetStats; -import com.google.spanner.v1.RollbackRequest; -import com.google.spanner.v1.Transaction; -import com.google.spanner.v1.TransactionOptions; -import com.google.spanner.v1.TransactionSelector; -import com.google.spanner.v1.TypeCode; import io.grpc.Context; import io.opencensus.common.Scope; import io.opencensus.trace.AttributeValue; @@ -89,27 +43,17 @@ import io.opencensus.trace.Tracer; import io.opencensus.trace.Tracing; import java.io.IOException; -import java.io.Serializable; -import java.util.AbstractList; import java.util.ArrayList; -import java.util.BitSet; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Random; -import java.util.UUID; -import java.util.concurrent.BlockingQueue; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; -import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; import java.util.logging.Level; import java.util.logging.Logger; import javax.annotation.Nullable; @@ -119,36 +63,16 @@ class SpannerImpl extends BaseService implements Spanner { private static final int MIN_BACKOFF_MS = 1000; private static final int MAX_BACKOFF_MS = 32000; - private static final PathTemplate OP_NAME_TEMPLATE = - PathTemplate.create( - "projects/{project}/instances/{instance}/databases/{database}/operations/{operation}"); - private static final PathTemplate PROJECT_NAME_TEMPLATE = - PathTemplate.create("projects/{project}"); private static final Logger logger = Logger.getLogger(SpannerImpl.class.getName()); - private static final Logger txnLogger = Logger.getLogger(TransactionRunner.class.getName()); private static final Tracer tracer = Tracing.getTracer(); - private static final String CREATE_SESSION = "CloudSpannerOperation.CreateSession"; - private static final String DELETE_SESSION = "CloudSpannerOperation.DeleteSession"; - private static final String BEGIN_TRANSACTION = "CloudSpannerOperation.BeginTransaction"; - private static final String COMMIT = "CloudSpannerOperation.Commit"; - private static final String QUERY = "CloudSpannerOperation.ExecuteStreamingQuery"; - private static final String READ = "CloudSpannerOperation.ExecuteStreamingRead"; - - private static final ThreadLocal hasPendingTransaction = - new ThreadLocal() { - @Override - protected Boolean initialValue() { - return false; - } - }; - - 
private static void throwIfTransactionsPending() { - if (hasPendingTransaction.get() == Boolean.TRUE) { - throw newSpannerException(ErrorCode.INTERNAL, "Nested transactions are not supported"); - } - } + static final String CREATE_SESSION = "CloudSpannerOperation.CreateSession"; + static final String DELETE_SESSION = "CloudSpannerOperation.DeleteSession"; + static final String BEGIN_TRANSACTION = "CloudSpannerOperation.BeginTransaction"; + static final String COMMIT = "CloudSpannerOperation.Commit"; + static final String QUERY = "CloudSpannerOperation.ExecuteStreamingQuery"; + static final String READ = "CloudSpannerOperation.ExecuteStreamingRead"; static { TraceUtil.exportSpans(CREATE_SESSION, DELETE_SESSION, BEGIN_TRANSACTION, COMMIT, QUERY, READ); @@ -156,7 +80,6 @@ private static void throwIfTransactionsPending() { private final Random random = new Random(); private final SpannerRpc gapicRpc; - private final int defaultPrefetchChunks; @GuardedBy("this") private final Map dbClients = new HashMap<>(); @@ -167,20 +90,20 @@ private static void throwIfTransactionsPending() { @GuardedBy("this") private boolean spannerIsClosed = false; - SpannerImpl(SpannerRpc gapicRpc, int defaultPrefetchChunks, SpannerOptions options) { + @VisibleForTesting + SpannerImpl(SpannerRpc gapicRpc, SpannerOptions options) { super(options); this.gapicRpc = gapicRpc; - this.defaultPrefetchChunks = defaultPrefetchChunks; this.dbAdminClient = new DatabaseAdminClientImpl(options.getProjectId(), gapicRpc); this.instanceClient = new InstanceAdminClientImpl(options.getProjectId(), gapicRpc, dbAdminClient); } SpannerImpl(SpannerOptions options) { - this(options.getSpannerRpcV1(), options.getPrefetchChunks(), options); + this(options.getSpannerRpcV1(), options); } - private static ExponentialBackOff newBackOff() { + static ExponentialBackOff newBackOff() { return new ExponentialBackOff.Builder() .setInitialIntervalMillis(MIN_BACKOFF_MS) .setMaxIntervalMillis(MAX_BACKOFF_MS) @@ -188,11 +111,11 @@ private static ExponentialBackOff newBackOff() { .build(); } - private static void backoffSleep(Context context, BackOff backoff) throws SpannerException { + static void backoffSleep(Context context, BackOff backoff) throws SpannerException { backoffSleep(context, nextBackOffMillis(backoff)); } - private static long nextBackOffMillis(BackOff backoff) throws SpannerException { + static long nextBackOffMillis(BackOff backoff) throws SpannerException { try { return backoff.nextBackOffMillis(); } catch (IOException e) { @@ -200,7 +123,7 @@ private static long nextBackOffMillis(BackOff backoff) throws SpannerException { } } - private static void backoffSleep(Context context, long backoffMillis) throws SpannerException { + static void backoffSleep(Context context, long backoffMillis) throws SpannerException { tracer .getCurrentSpan() .addAnnotation( @@ -270,7 +193,7 @@ static T runWithRetries(Callable callable) { } } - // TODO(user): change this to return SessionImpl and modify all corresponding references. + /** Create a new session for the specified database. 
*/ Session createSession(final DatabaseId db) throws SpannerException { final Map options = optionMap(SessionOption.channelHint(random.nextLong())); @@ -286,17 +209,19 @@ public com.google.spanner.v1.Session call() throws Exception { } }); span.end(); - return new SessionImpl(session.getName(), options); + return new SessionImpl( + gapicRpc, getOptions().getPrefetchChunks(), session.getName(), options); } catch (RuntimeException e) { TraceUtil.endSpanWithFailure(span, e); throw e; } } + /** Lookup and return the session with the specified session id. */ SessionImpl sessionWithId(String name) { final Map options = SpannerImpl.optionMap(SessionOption.channelHint(random.nextLong())); - return new SessionImpl(name, options); + return new SessionImpl(gapicRpc, getOptions().getPrefetchChunks(), name, options); } @Override @@ -357,7 +282,7 @@ public void close() { * Checks that the current context is still valid, throwing a CANCELLED or DEADLINE_EXCEEDED error * if not. */ - private static void checkContext(Context context) { + static void checkContext(Context context) { if (context.isCancelled()) { throw newSpannerExceptionForCancellation(context, null); } @@ -367,7 +292,7 @@ private static void checkContext(Context context) { * Encapsulates state to be passed to the {@link SpannerRpc} layer for a given session. Currently * used to select the {@link io.grpc.Channel} to be used in issuing the RPCs in a Session. */ - static class SessionOption { + private static class SessionOption { private final SpannerRpc.Option rpcOption; private final Object value; @@ -389,7 +314,7 @@ Object value() { } } - static Map optionMap(SessionOption... options) { + private static Map optionMap(SessionOption... options) { if (options.length == 0) { return Collections.emptyMap(); } @@ -401,18 +326,9 @@ Object value() { return ImmutableMap.copyOf(tmp); } - private static T unpack(Any response, Class clazz) - throws SpannerException { - try { - return response.unpack(clazz); - } catch (InvalidProtocolBufferException e) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INTERNAL, "Error unpacking response", e); - } - } - - private abstract static class PageFetcher implements NextPageFetcher { - private String nextPageToken; + /** Helper class for gRPC calls that can return paginated results. */ + abstract static class PageFetcher implements NextPageFetcher { + String nextPageToken; @Override public Page getNextPage() { @@ -437,832 +353,6 @@ public Paginated call() { abstract S fromProto(T proto); } - private static String randomOperationId() { - UUID uuid = UUID.randomUUID(); - return ("r" + uuid.toString()).replace("-", "_"); - } - - static class DatabaseAdminClientImpl implements DatabaseAdminClient { - - private final String projectId; - private final SpannerRpc rpc; - - DatabaseAdminClientImpl(String projectId, SpannerRpc rpc) { - this.projectId = projectId; - this.rpc = rpc; - } - - @Override - public OperationFuture createDatabase( - String instanceId, String databaseId, Iterable statements) throws SpannerException { - // CreateDatabase() is not idempotent, so we're not retrying this request. 
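The PageFetcher helper introduced above plugs into the gax paging model: each call to getNextPage() re-invokes the fetcher with the stored nextPageToken. A hedged sketch of the caller-side loop it enables; the page size is an arbitrary choice:

import com.google.api.gax.paging.Page;
import com.google.cloud.spanner.Database;
import com.google.cloud.spanner.DatabaseAdminClient;
import com.google.cloud.spanner.Options;

// Sketch: iterate every database in an instance, ten per RPC.
static void listAllDatabases(DatabaseAdminClient admin, String instanceId) {
  Page<Database> page = admin.listDatabases(instanceId, Options.pageSize(10));
  while (page != null) {
    for (Database db : page.getValues()) {
      System.out.println(db.getId());
    }
    // hasNextPage() reflects whether the last response carried a page token.
    page = page.hasNextPage() ? page.getNextPage() : null;
  }
}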
- String instanceName = getInstanceName(instanceId); - String createStatement = "CREATE DATABASE `" + databaseId + "`"; - OperationFuture - rawOperationFuture = rpc.createDatabase(instanceName, createStatement, statements); - return new OperationFutureImpl( - rawOperationFuture.getPollingFuture(), - rawOperationFuture.getInitialFuture(), - new ApiFunction() { - @Override - public Database apply(OperationSnapshot snapshot) { - return Database.fromProto( - ProtoOperationTransformers.ResponseTransformer.create( - com.google.spanner.admin.database.v1.Database.class) - .apply(snapshot), - DatabaseAdminClientImpl.this); - } - }, - ProtoOperationTransformers.MetadataTransformer.create(CreateDatabaseMetadata.class), - new ApiFunction() { - @Override - public Database apply(Exception e) { - throw SpannerExceptionFactory.newSpannerException(e); - } - }); - } - - @Override - public Database getDatabase(String instanceId, String databaseId) throws SpannerException { - final String dbName = getDatabaseName(instanceId, databaseId); - Callable callable = - new Callable() { - @Override - public Database call() throws Exception { - return Database.fromProto(rpc.getDatabase(dbName), DatabaseAdminClientImpl.this); - } - }; - return runWithRetries(callable); - } - - @Override - public OperationFuture updateDatabaseDdl( - final String instanceId, - final String databaseId, - final Iterable statements, - @Nullable String operationId) - throws SpannerException { - final String dbName = getDatabaseName(instanceId, databaseId); - final String opId = operationId != null ? operationId : randomOperationId(); - OperationFuture rawOperationFuture = - rpc.updateDatabaseDdl(dbName, statements, opId); - return new OperationFutureImpl( - rawOperationFuture.getPollingFuture(), - rawOperationFuture.getInitialFuture(), - new ApiFunction() { - @Override - public Void apply(OperationSnapshot snapshot) { - ProtoOperationTransformers.ResponseTransformer.create(Empty.class).apply(snapshot); - return null; - } - }, - ProtoOperationTransformers.MetadataTransformer.create(UpdateDatabaseDdlMetadata.class), - new ApiFunction() { - @Override - public Void apply(Exception e) { - throw SpannerExceptionFactory.newSpannerException(e); - } - }); - } - - @Override - public void dropDatabase(String instanceId, String databaseId) throws SpannerException { - final String dbName = getDatabaseName(instanceId, databaseId); - Callable callable = - new Callable() { - @Override - public Void call() throws Exception { - rpc.dropDatabase(dbName); - return null; - } - }; - runWithRetries(callable); - } - - @Override - public List getDatabaseDdl(String instanceId, String databaseId) { - final String dbName = getDatabaseName(instanceId, databaseId); - Callable> callable = - new Callable>() { - @Override - public List call() throws Exception { - return rpc.getDatabaseDdl(dbName); - } - }; - return runWithRetries(callable); - } - - @Override - public Page listDatabases(String instanceId, ListOption... options) { - final String instanceName = getInstanceName(instanceId); - final Options listOptions = Options.fromListOptions(options); - Preconditions.checkArgument( - !listOptions.hasFilter(), "Filter option is not support by" + "listDatabases"); - final int pageSize = listOptions.hasPageSize() ? 
listOptions.pageSize() : 0; - PageFetcher pageFetcher = - new PageFetcher() { - @Override - public Paginated getNextPage( - String nextPageToken) { - return rpc.listDatabases(instanceName, pageSize, nextPageToken); - } - - @Override - public Database fromProto(com.google.spanner.admin.database.v1.Database proto) { - return Database.fromProto(proto, DatabaseAdminClientImpl.this); - } - }; - if (listOptions.hasPageToken()) { - pageFetcher.nextPageToken = listOptions.pageToken(); - } - return pageFetcher.getNextPage(); - } - - private String getInstanceName(String instanceId) { - return new InstanceId(projectId, instanceId).getName(); - } - - private String getDatabaseName(String instanceId, String databaseId) { - return new DatabaseId(new InstanceId(projectId, instanceId), databaseId).getName(); - } - } - - static class InstanceAdminClientImpl implements InstanceAdminClient { - final DatabaseAdminClient dbClient; - final String projectId; - final SpannerRpc rpc; - - InstanceAdminClientImpl(String projectId, SpannerRpc rpc, DatabaseAdminClient dbClient) { - this.projectId = projectId; - this.rpc = rpc; - this.dbClient = dbClient; - } - - @Override - public InstanceConfig getInstanceConfig(String configId) throws SpannerException { - final String instanceConfigName = new InstanceConfigId(projectId, configId).getName(); - return runWithRetries( - new Callable() { - @Override - public InstanceConfig call() { - return InstanceConfig.fromProto( - rpc.getInstanceConfig(instanceConfigName), InstanceAdminClientImpl.this); - } - }); - } - - @Override - public Page listInstanceConfigs(ListOption... options) { - final Options listOptions = Options.fromListOptions(options); - Preconditions.checkArgument( - !listOptions.hasFilter(), "Filter option is not supported by listInstanceConfigs"); - final int pageSize = listOptions.hasPageSize() ? 
listOptions.pageSize() : 0; - PageFetcher pageFetcher = - new PageFetcher() { - @Override - public Paginated getNextPage( - String nextPageToken) { - return rpc.listInstanceConfigs(pageSize, nextPageToken); - } - - @Override - public InstanceConfig fromProto( - com.google.spanner.admin.instance.v1.InstanceConfig proto) { - return InstanceConfig.fromProto(proto, InstanceAdminClientImpl.this); - } - }; - if (listOptions.hasPageToken()) { - pageFetcher.nextPageToken = listOptions.pageToken(); - } - return pageFetcher.getNextPage(); - } - - @Override - public OperationFuture createInstance(InstanceInfo instance) - throws SpannerException { - String projectName = PROJECT_NAME_TEMPLATE.instantiate("project", projectId); - OperationFuture - rawOperationFuture = - rpc.createInstance(projectName, instance.getId().getInstance(), instance.toProto()); - - return new OperationFutureImpl( - rawOperationFuture.getPollingFuture(), - rawOperationFuture.getInitialFuture(), - new ApiFunction() { - @Override - public Instance apply(OperationSnapshot snapshot) { - return Instance.fromProto( - ProtoOperationTransformers.ResponseTransformer.create( - com.google.spanner.admin.instance.v1.Instance.class) - .apply(snapshot), - InstanceAdminClientImpl.this, - dbClient); - } - }, - ProtoOperationTransformers.MetadataTransformer.create(CreateInstanceMetadata.class), - new ApiFunction() { - @Override - public Instance apply(Exception e) { - throw SpannerExceptionFactory.newSpannerException(e); - } - }); - } - - @Override - public Instance getInstance(String instanceId) throws SpannerException { - final String instanceName = new InstanceId(projectId, instanceId).getName(); - return runWithRetries( - new Callable() { - @Override - public Instance call() { - return Instance.fromProto( - rpc.getInstance(instanceName), InstanceAdminClientImpl.this, dbClient); - } - }); - } - - @Override - public Page listInstances(ListOption... options) throws SpannerException { - final Options listOptions = Options.fromListOptions(options); - final int pageSize = listOptions.hasPageSize() ? listOptions.pageSize() : 0; - final String filter = listOptions.filter(); - PageFetcher pageFetcher = - new PageFetcher() { - @Override - public Paginated getNextPage( - String nextPageToken) { - return rpc.listInstances(pageSize, nextPageToken, filter); - } - - @Override - public Instance fromProto(com.google.spanner.admin.instance.v1.Instance proto) { - return Instance.fromProto(proto, InstanceAdminClientImpl.this, dbClient); - } - }; - if (listOptions.hasPageToken()) { - pageFetcher.nextPageToken = listOptions.pageToken(); - } - return pageFetcher.getNextPage(); - } - - @Override - public void deleteInstance(final String instanceId) throws SpannerException { - runWithRetries( - new Callable() { - @Override - public Void call() { - rpc.deleteInstance(new InstanceId(projectId, instanceId).getName()); - return null; - } - }); - } - - @Override - public OperationFuture updateInstance( - InstanceInfo instance, InstanceInfo.InstanceField... fieldsToUpdate) { - FieldMask fieldMask = - fieldsToUpdate.length == 0 - ? 
InstanceInfo.InstanceField.toFieldMask(InstanceInfo.InstanceField.values()) - : InstanceInfo.InstanceField.toFieldMask(fieldsToUpdate); - - OperationFuture - rawOperationFuture = rpc.updateInstance(instance.toProto(), fieldMask); - return new OperationFutureImpl( - rawOperationFuture.getPollingFuture(), - rawOperationFuture.getInitialFuture(), - new ApiFunction() { - @Override - public Instance apply(OperationSnapshot snapshot) { - return Instance.fromProto( - ProtoOperationTransformers.ResponseTransformer.create( - com.google.spanner.admin.instance.v1.Instance.class) - .apply(snapshot), - InstanceAdminClientImpl.this, - dbClient); - } - }, - ProtoOperationTransformers.MetadataTransformer.create(UpdateInstanceMetadata.class), - new ApiFunction() { - @Override - public Instance apply(Exception e) { - throw SpannerExceptionFactory.newSpannerException(e); - } - }); - } - - @Override - public Instance.Builder newInstanceBuilder(InstanceId id) { - return new Instance.Builder(this, dbClient, id); - } - } - - class SessionImpl implements Session { - private final String name; - private SessionTransaction activeTransaction; - private ByteString readyTransactionId; - private final Map options; - - SessionImpl(String name, Map options) { - this.options = options; - this.name = checkNotNull(name); - } - - @Override - public String getName() { - return name; - } - - Map getOptions() { - return options; - } - - @Override - public long executePartitionedUpdate(Statement stmt) { - setActive(null); - PartitionedDMLTransaction txn = new PartitionedDMLTransaction(this, gapicRpc); - return txn.executePartitionedUpdate(stmt); - } - - @Override - public Timestamp write(Iterable mutations) throws SpannerException { - TransactionRunner runner = readWriteTransaction(); - final Collection finalMutations = - mutations instanceof java.util.Collection - ? 
(Collection) mutations - : Lists.newArrayList(mutations); - runner.run( - new TransactionRunner.TransactionCallable() { - @Override - public Void run(TransactionContext ctx) { - ctx.buffer(finalMutations); - return null; - } - }); - return runner.getCommitTimestamp(); - } - - @Override - public Timestamp writeAtLeastOnce(Iterable mutations) throws SpannerException { - setActive(null); - List mutationsProto = new ArrayList<>(); - Mutation.toProto(mutations, mutationsProto); - final CommitRequest request = - CommitRequest.newBuilder() - .setSession(name) - .addAllMutations(mutationsProto) - .setSingleUseTransaction( - TransactionOptions.newBuilder() - .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance())) - .build(); - Span span = tracer.spanBuilder(COMMIT).startSpan(); - try (Scope s = tracer.withSpan(span)) { - CommitResponse response = - runWithRetries( - new Callable() { - @Override - public CommitResponse call() throws Exception { - return gapicRpc.commit(request, options); - } - }); - Timestamp t = Timestamp.fromProto(response.getCommitTimestamp()); - span.end(); - return t; - } catch (IllegalArgumentException e) { - TraceUtil.endSpanWithFailure(span, e); - throw newSpannerException(ErrorCode.INTERNAL, "Could not parse commit timestamp", e); - } catch (RuntimeException e) { - TraceUtil.endSpanWithFailure(span, e); - throw e; - } - } - - @Override - public ReadContext singleUse() { - return singleUse(TimestampBound.strong()); - } - - @Override - public ReadContext singleUse(TimestampBound bound) { - return setActive(new SingleReadContext(this, bound, gapicRpc, defaultPrefetchChunks)); - } - - @Override - public ReadOnlyTransaction singleUseReadOnlyTransaction() { - return singleUseReadOnlyTransaction(TimestampBound.strong()); - } - - @Override - public ReadOnlyTransaction singleUseReadOnlyTransaction(TimestampBound bound) { - return setActive( - new SingleUseReadOnlyTransaction(this, bound, gapicRpc, defaultPrefetchChunks)); - } - - @Override - public ReadOnlyTransaction readOnlyTransaction() { - return readOnlyTransaction(TimestampBound.strong()); - } - - @Override - public ReadOnlyTransaction readOnlyTransaction(TimestampBound bound) { - return setActive( - new MultiUseReadOnlyTransaction(this, bound, gapicRpc, defaultPrefetchChunks)); - } - - @Override - public TransactionRunner readWriteTransaction() { - return setActive(new TransactionRunnerImpl(this, gapicRpc, defaultPrefetchChunks)); - } - - @Override - public void prepareReadWriteTransaction() { - setActive(null); - readyTransactionId = beginTransaction(); - } - - @Override - public void close() { - Span span = tracer.spanBuilder(DELETE_SESSION).startSpan(); - try (Scope s = tracer.withSpan(span)) { - runWithRetries( - new Callable() { - @Override - public Void call() throws Exception { - gapicRpc.deleteSession(name, options); - return null; - } - }); - span.end(); - } catch (RuntimeException e) { - TraceUtil.endSpanWithFailure(span, e); - throw e; - } - } - - ByteString beginTransaction() { - Span span = tracer.spanBuilder(BEGIN_TRANSACTION).startSpan(); - try (Scope s = tracer.withSpan(span)) { - final BeginTransactionRequest request = - BeginTransactionRequest.newBuilder() - .setSession(name) - .setOptions( - TransactionOptions.newBuilder() - .setReadWrite(TransactionOptions.ReadWrite.getDefaultInstance())) - .build(); - Transaction txn = - runWithRetries( - new Callable() { - @Override - public Transaction call() throws Exception { - return gapicRpc.beginTransaction(request, options); - } - }); - if 
(txn.getId().isEmpty()) { - throw newSpannerException(ErrorCode.INTERNAL, "Missing id in transaction\n" + getName()); - } - span.end(); - return txn.getId(); - } catch (RuntimeException e) { - TraceUtil.endSpanWithFailure(span, e); - throw e; - } - } - - TransactionContextImpl newTransaction() { - TransactionContextImpl txn = - new TransactionContextImpl(this, readyTransactionId, gapicRpc, defaultPrefetchChunks); - return txn; - } - - T setActive(@Nullable T ctx) { - throwIfTransactionsPending(); - - if (activeTransaction != null) { - activeTransaction.invalidate(); - } - activeTransaction = ctx; - readyTransactionId = null; - return ctx; - } - - @Override - public TransactionManager transactionManager() { - return new TransactionManagerImpl(this); - } - } - - /** - * Represents a transaction within a session. "Transaction" here is used in the general sense, - * which covers standalone reads, standalone writes, single-use and multi-use read-only - * transactions, and read-write transactions. The defining characteristic is that a session may - * only have one such transaction active at a time. - */ - static interface SessionTransaction { - /** Invalidates the transaction, generally because a new one has been started on the session. */ - void invalidate(); - } - - abstract static class AbstractReadContext - implements ReadContext, AbstractResultSet.Listener, SessionTransaction { - final Object lock = new Object(); - final SessionImpl session; - final SpannerRpc rpc; - final int defaultPrefetchChunks; - final Span span; - - @GuardedBy("lock") - private boolean isValid = true; - - @GuardedBy("lock") - private boolean isClosed = false; - - // A per-transaction sequence number used to identify this ExecuteSqlRequests. Required for DML, - // ignored for query by the server. - private AtomicLong seqNo = new AtomicLong(); - - // Allow up to 512MB to be buffered (assuming 1MB chunks). In practice, restart tokens are sent - // much more frequently. - private static final int MAX_BUFFERED_CHUNKS = 512; - - private AbstractReadContext(SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks) { - this(session, rpc, defaultPrefetchChunks, Tracing.getTracer().getCurrentSpan()); - } - - private AbstractReadContext( - SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks, Span span) { - this.session = session; - this.rpc = rpc; - this.defaultPrefetchChunks = defaultPrefetchChunks; - this.span = span; - } - - long getSeqNo() { - return seqNo.incrementAndGet(); - } - - @Override - public final ResultSet read( - String table, KeySet keys, Iterable columns, ReadOption... options) { - return readInternal(table, null, keys, columns, options); - } - - @Override - public final ResultSet readUsingIndex( - String table, String index, KeySet keys, Iterable columns, ReadOption... options) { - return readInternal(table, checkNotNull(index), keys, columns, options); - } - - @Nullable - @Override - public final Struct readRow(String table, Key key, Iterable columns) { - try (ResultSet resultSet = read(table, KeySet.singleKey(key), columns)) { - return consumeSingleRow(resultSet); - } - } - - @Nullable - @Override - public final Struct readRowUsingIndex( - String table, String index, Key key, Iterable columns) { - try (ResultSet resultSet = readUsingIndex(table, index, KeySet.singleKey(key), columns)) { - return consumeSingleRow(resultSet); - } - } - - @Override - public final ResultSet executeQuery(Statement statement, QueryOption... 
options) { - return executeQueryInternal( - statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL, options); - } - - @Override - public final ResultSet analyzeQuery( - Statement statement, QueryAnalyzeMode readContextQueryMode) { - switch (readContextQueryMode) { - case PROFILE: - return executeQueryInternal( - statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PROFILE); - case PLAN: - return executeQueryInternal( - statement, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.PLAN); - default: - throw new IllegalStateException( - "Unknown value for QueryAnalyzeMode : " + readContextQueryMode); - } - } - - private ResultSet executeQueryInternal( - Statement statement, - com.google.spanner.v1.ExecuteSqlRequest.QueryMode queryMode, - QueryOption... options) { - Options readOptions = Options.fromQueryOptions(options); - return executeQueryInternalWithOptions( - statement, queryMode, readOptions, null /*partitionToken*/); - } - - ExecuteSqlRequest.Builder getExecuteSqlRequestBuilder( - Statement statement, QueryMode queryMode) { - ExecuteSqlRequest.Builder builder = - ExecuteSqlRequest.newBuilder() - .setSql(statement.getSql()) - .setQueryMode(queryMode) - .setSession(session.name); - Map stmtParameters = statement.getParameters(); - if (!stmtParameters.isEmpty()) { - com.google.protobuf.Struct.Builder paramsBuilder = builder.getParamsBuilder(); - for (Map.Entry param : stmtParameters.entrySet()) { - paramsBuilder.putFields(param.getKey(), param.getValue().toProto()); - builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); - } - } - TransactionSelector selector = getTransactionSelector(); - if (selector != null) { - builder.setTransaction(selector); - } - builder.setSeqno(getSeqNo()); - return builder; - } - - ExecuteBatchDmlRequest.Builder getExecuteBatchDmlRequestBuilder( - Iterable statements) { - ExecuteBatchDmlRequest.Builder builder = - ExecuteBatchDmlRequest.newBuilder().setSession(session.name); - int idx = 0; - for (Statement stmt : statements) { - builder.addStatementsBuilder(); - builder.getStatementsBuilder(idx).setSql(stmt.getSql()); - Map stmtParameters = stmt.getParameters(); - if (!stmtParameters.isEmpty()) { - com.google.protobuf.Struct.Builder paramsBuilder = - builder.getStatementsBuilder(idx).getParamsBuilder(); - for (Map.Entry param : stmtParameters.entrySet()) { - paramsBuilder.putFields(param.getKey(), param.getValue().toProto()); - builder - .getStatementsBuilder(idx) - .putParamTypes(param.getKey(), param.getValue().getType().toProto()); - } - } - idx++; - } - - TransactionSelector selector = getTransactionSelector(); - if (selector != null) { - builder.setTransaction(selector); - } - builder.setSeqno(getSeqNo()); - return builder; - } - - ResultSet executeQueryInternalWithOptions( - Statement statement, - com.google.spanner.v1.ExecuteSqlRequest.QueryMode queryMode, - Options readOptions, - ByteString partitionToken) { - beforeReadOrQuery(); - final ExecuteSqlRequest.Builder request = getExecuteSqlRequestBuilder(statement, queryMode); - if (partitionToken != null) { - request.setPartitionToken(partitionToken); - } - final int prefetchChunks = - readOptions.hasPrefetchChunks() ? 
readOptions.prefetchChunks() : defaultPrefetchChunks; - ResumableStreamIterator stream = - new ResumableStreamIterator(MAX_BUFFERED_CHUNKS, QUERY, span) { - @Override - CloseableIterator startStream(@Nullable ByteString resumeToken) { - GrpcStreamIterator stream = new GrpcStreamIterator(prefetchChunks); - if (resumeToken != null) { - request.setResumeToken(resumeToken); - } - SpannerRpc.StreamingCall call = - rpc.executeQuery(request.build(), stream.consumer(), session.options); - call.request(prefetchChunks); - stream.setCall(call); - return stream; - } - }; - return new GrpcResultSet(stream, this, queryMode); - } - - /** - * Called before any read or query is started to perform state checks and initializations. - * Subclasses should call {@code super.beforeReadOrQuery()} if overriding. - */ - void beforeReadOrQuery() { - synchronized (lock) { - beforeReadOrQueryLocked(); - } - } - - /** Called as part of {@link #beforeReadOrQuery()} under {@link #lock}. */ - @GuardedBy("lock") - void beforeReadOrQueryLocked() { - // Note that transactions are invalidated under some circumstances on the backend, but we - // implement the check more strictly here to encourage coding to contract rather than the - // implementation. - checkState(isValid, "Context has been invalidated by a new operation on the session"); - checkState(!isClosed, "Context has been closed"); - } - - /** Invalidates the context since another context has been created more recently. */ - @Override - public final void invalidate() { - synchronized (lock) { - isValid = false; - } - } - - @Override - public void close() { - span.end(); - synchronized (lock) { - isClosed = true; - } - } - - @Nullable - abstract TransactionSelector getTransactionSelector(); - - @Override - public void onTransactionMetadata(Transaction transaction) {} - - @Override - public void onError(SpannerException e) {} - - @Override - public void onDone() {} - - private ResultSet readInternal( - String table, - @Nullable String index, - KeySet keys, - Iterable columns, - ReadOption... options) { - Options readOptions = Options.fromReadOptions(options); - return readInternalWithOptions( - table, index, keys, columns, readOptions, null /*partitionToken*/); - } - - ResultSet readInternalWithOptions( - String table, - @Nullable String index, - KeySet keys, - Iterable columns, - Options readOptions, - ByteString partitionToken) { - beforeReadOrQuery(); - final ReadRequest.Builder builder = - ReadRequest.newBuilder() - .setSession(session.name) - .setTable(checkNotNull(table)) - .addAllColumns(columns); - if (readOptions.hasLimit()) { - builder.setLimit(readOptions.limit()); - } - - keys.appendToProto(builder.getKeySetBuilder()); - if (index != null) { - builder.setIndex(index); - } - TransactionSelector selector = getTransactionSelector(); - if (selector != null) { - builder.setTransaction(selector); - } - if (partitionToken != null) { - builder.setPartitionToken(partitionToken); - } - final int prefetchChunks = - readOptions.hasPrefetchChunks() ? 
readOptions.prefetchChunks() : defaultPrefetchChunks; - ResumableStreamIterator stream = - new ResumableStreamIterator(MAX_BUFFERED_CHUNKS, READ, span) { - @Override - CloseableIterator startStream(@Nullable ByteString resumeToken) { - GrpcStreamIterator stream = new GrpcStreamIterator(prefetchChunks); - if (resumeToken != null) { - builder.setResumeToken(resumeToken); - } - SpannerRpc.StreamingCall call = - rpc.read(builder.build(), stream.consumer(), session.options); - call.request(prefetchChunks); - stream.setCall(call); - return stream; - } - }; - GrpcResultSet resultSet = - new GrpcResultSet(stream, this, com.google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL); - return resultSet; - } - - private Struct consumeSingleRow(ResultSet resultSet) { - if (!resultSet.next()) { - return null; - } - Struct row = resultSet.getCurrentRowAsStruct(); - if (resultSet.next()) { - throw newSpannerException(ErrorCode.INTERNAL, "Multiple rows returned for single key"); - } - return row; - } - } - private enum DirectExecutor implements Executor { INSTANCE; @@ -1271,1825 +361,4 @@ public void execute(Runnable command) { command.run(); } } - - @VisibleForTesting - static class TransactionRunnerImpl implements SessionTransaction, TransactionRunner { - private boolean blockNestedTxn = true; - - /** Allow for testing of backoff logic */ - static class Sleeper { - void backoffSleep(Context context, long backoffMillis) { - SpannerImpl.backoffSleep(context, backoffMillis); - } - } - - private final SessionImpl session; - private final Sleeper sleeper; - private final Span span; - private TransactionContextImpl txn; - private volatile boolean isValid = true; - - public TransactionRunner allowNestedTransaction() { - blockNestedTxn = false; - return this; - } - - TransactionRunnerImpl( - SessionImpl session, SpannerRpc rpc, Sleeper sleeper, int defaultPrefetchChunks) { - this.session = session; - this.sleeper = sleeper; - this.span = Tracing.getTracer().getCurrentSpan(); - this.txn = session.newTransaction(); - } - - TransactionRunnerImpl(SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks) { - this(session, rpc, new Sleeper(), defaultPrefetchChunks); - } - - @Nullable - @Override - public T run(TransactionCallable callable) { - try (Scope s = tracer.withSpan(span)) { - if (blockNestedTxn) { - hasPendingTransaction.set(Boolean.TRUE); - } - - return runInternal(callable); - } catch (RuntimeException e) { - TraceUtil.endSpanWithFailure(span, e); - throw e; - } finally { - // Remove threadLocal rather than set to FALSE to avoid a possible memory leak. - // We also do this unconditionally in case a user has modified the flag when the transaction - // was running. - hasPendingTransaction.remove(); - } - } - - private T runInternal(TransactionCallable callable) { - BackOff backoff = newBackOff(); - final Context context = Context.current(); - int attempt = 0; - // TODO: Change this to use TransactionManager. - while (true) { - checkState( - isValid, "TransactionRunner has been invalidated by a new operation on the session"); - checkContext(context); - attempt++; - // TODO(user): When using streaming reads, consider using the first read to begin - // the txn. 
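
The retry loop opened above (its body continues just below) is the core of TransactionRunner: an ABORTED outcome, whether raised from the user's callable or from commit(), triggers a backoff and a fresh attempt, while any other failure rolls back and propagates. A minimal sketch of that shape with tracing and session bookkeeping stripped out; runOnce and the sleep-based backoff are illustrative stand-ins, not the library's API:

    // Hedged sketch of the abort-and-retry shape used by runInternal.
    // runOnce() stands in for "ensure a fresh transaction, run the user
    // callable, commit"; AbortedException is the real Spanner exception type.
    static <T> T runWithAbortRetries(java.util.concurrent.Callable<T> runOnce)
        throws Exception {
      long backoffMillis = 10L; // illustrative initial delay
      while (true) {
        try {
          return runOnce.call(); // one full attempt, including commit
        } catch (AbortedException e) {
          // Prefer the server-suggested delay; fall back to exponential
          // backoff, then replay the whole unit of work on a new transaction.
          long hint = e.getRetryDelayInMillis(); // -1 when no hint was sent
          Thread.sleep(hint >= 0 ? hint : backoffMillis);
          backoffMillis = Math.min(backoffMillis * 2, 32_000L);
        }
      }
    }

The server's hint takes priority because it reflects current lock contention; the doubling fallback applies only when no hint arrives.
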
- span.addAnnotation( - "Starting Transaction Attempt", - ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); - txn.ensureTxn(); - - T result; - boolean shouldRollback = true; - try { - result = callable.run(txn); - shouldRollback = false; - } catch (Exception e) { - txnLogger.log(Level.FINE, "User-provided TransactionCallable raised exception", e); - if (txn.isAborted() || (e instanceof AbortedException)) { - span.addAnnotation( - "Transaction Attempt Aborted in user operation. Retrying", - ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); - shouldRollback = false; - backoff(context, backoff); - continue; - } - SpannerException toThrow; - if (e instanceof SpannerException) { - toThrow = (SpannerException) e; - } else { - toThrow = newSpannerException(ErrorCode.UNKNOWN, e.getMessage(), e); - } - span.addAnnotation( - "Transaction Attempt Failed in user operation", - ImmutableMap.builder() - .putAll(TraceUtil.getExceptionAnnotations(toThrow)) - .put("Attempt", AttributeValue.longAttributeValue(attempt)) - .build()); - throw toThrow; - } finally { - if (shouldRollback) { - txn.rollback(); - } - } - - try { - txn.commit(); - span.addAnnotation( - "Transaction Attempt Succeeded", - ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); - return result; - } catch (AbortedException e) { - txnLogger.log(Level.FINE, "Commit aborted", e); - span.addAnnotation( - "Transaction Attempt Aborted in Commit. Retrying", - ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); - backoff(context, backoff); - } catch (SpannerException e) { - span.addAnnotation( - "Transaction Attempt Failed in Commit", - ImmutableMap.builder() - .putAll(TraceUtil.getExceptionAnnotations(e)) - .put("Attempt", AttributeValue.longAttributeValue(attempt)) - .build()); - throw e; - } - } - } - - @Override - public Timestamp getCommitTimestamp() { - return txn.commitTimestamp(); - } - - @Override - public void invalidate() { - isValid = false; - } - - private void backoff(Context context, BackOff backoff) { - long delay = txn.getRetryDelayInMillis(backoff); - txn = session.newTransaction(); - span.addAnnotation( - "Backing off", ImmutableMap.of("Delay", AttributeValue.longAttributeValue(delay))); - sleeper.backoffSleep(context, delay); - } - } - - static class PartitionedDMLTransaction implements SessionTransaction { - private final ByteString transactionId; - private final SessionImpl session; - private final SpannerRpc rpc; - private volatile boolean isValid = true; - - PartitionedDMLTransaction(SessionImpl session, SpannerRpc rpc) { - this.session = session; - this.rpc = rpc; - this.transactionId = initTransaction(); - } - - ByteString initTransaction() { - final BeginTransactionRequest request = - BeginTransactionRequest.newBuilder() - .setSession(session.getName()) - .setOptions( - TransactionOptions.newBuilder() - .setPartitionedDml(TransactionOptions.PartitionedDml.getDefaultInstance())) - .build(); - Transaction txn = - runWithRetries( - new Callable() { - @Override - public Transaction call() throws Exception { - return rpc.beginTransaction(request, session.options); - } - }); - if (txn.getId().isEmpty()) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INTERNAL, - "Failed to init transaction, missing transaction id\n" + session.getName()); - } - return txn.getId(); - } - - public long executePartitionedUpdate(Statement statement) { - checkState(isValid, "Partitioned DML has been invalidated by a new operation on the 
session"); - final ExecuteSqlRequest.Builder builder = - ExecuteSqlRequest.newBuilder() - .setSql(statement.getSql()) - .setQueryMode(QueryMode.NORMAL) - .setSession(session.name) - .setTransaction(TransactionSelector.newBuilder().setId(transactionId).build()); - Map stmtParameters = statement.getParameters(); - if (!stmtParameters.isEmpty()) { - com.google.protobuf.Struct.Builder paramsBuilder = builder.getParamsBuilder(); - for (Map.Entry param : stmtParameters.entrySet()) { - paramsBuilder.putFields(param.getKey(), param.getValue().toProto()); - builder.putParamTypes(param.getKey(), param.getValue().getType().toProto()); - } - } - com.google.spanner.v1.ResultSet resultSet = - runWithRetries( - new Callable() { - @Override - public com.google.spanner.v1.ResultSet call() throws Exception { - return rpc.executeQuery(builder.build(), session.options); - } - }); - if (!resultSet.hasStats()) { - throw new IllegalArgumentException( - "Partitioned DML response missing stats possibly due to non-DML statement as input"); - } - // For partitioned DML, using the row count lower bound. - return resultSet.getStats().getRowCountLowerBound(); - } - - @Override - public void invalidate() { - isValid = false; - } - } - - @VisibleForTesting - static class TransactionContextImpl extends AbstractReadContext implements TransactionContext { - @GuardedBy("lock") - private List mutations = new ArrayList<>(); - - @GuardedBy("lock") - private boolean aborted; - - /** Default to -1 to indicate not available. */ - @GuardedBy("lock") - private long retryDelayInMillis = -1L; - - private ByteString transactionId; - private Timestamp commitTimestamp; - - TransactionContextImpl( - SessionImpl session, - @Nullable ByteString transactionId, - SpannerRpc rpc, - int defaultPrefetchChunks) { - super(session, rpc, defaultPrefetchChunks); - this.transactionId = transactionId; - } - - void ensureTxn() { - if (transactionId == null) { - span.addAnnotation("Creating Transaction"); - try { - transactionId = session.beginTransaction(); - span.addAnnotation( - "Transaction Creation Done", - ImmutableMap.of( - "Id", AttributeValue.stringAttributeValue(transactionId.toStringUtf8()))); - txnLogger.log( - Level.FINER, - "Started transaction {0}", - txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null); - } catch (SpannerException e) { - span.addAnnotation("Transaction Creation Failed", TraceUtil.getExceptionAnnotations(e)); - throw e; - } - } else { - span.addAnnotation( - "Transaction Initialized", - ImmutableMap.of( - "Id", AttributeValue.stringAttributeValue(transactionId.toStringUtf8()))); - txnLogger.log( - Level.FINER, - "Using prepared transaction {0}", - txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null); - } - } - - void commit() { - span.addAnnotation("Starting Commit"); - CommitRequest.Builder builder = - CommitRequest.newBuilder().setSession(session.getName()).setTransactionId(transactionId); - synchronized (lock) { - if (!mutations.isEmpty()) { - List mutationsProto = new ArrayList<>(); - Mutation.toProto(mutations, mutationsProto); - builder.addAllMutations(mutationsProto); - } - // Ensure that no call to buffer mutations that would be lost can succeed. 
-        mutations = null;
-      }
-      final CommitRequest commitRequest = builder.build();
-      Span opSpan = tracer.spanBuilderWithExplicitParent(COMMIT, span).startSpan();
-      try (Scope s = tracer.withSpan(opSpan)) {
-        CommitResponse commitResponse =
-            runWithRetries(
-                new Callable<CommitResponse>() {
-                  @Override
-                  public CommitResponse call() throws Exception {
-                    return rpc.commit(commitRequest, session.options);
-                  }
-                });
-
-        if (!commitResponse.hasCommitTimestamp()) {
-          throw newSpannerException(
-              ErrorCode.INTERNAL, "Missing commitTimestamp:\n" + session.getName());
-        }
-        commitTimestamp = Timestamp.fromProto(commitResponse.getCommitTimestamp());
-        opSpan.end();
-      } catch (RuntimeException e) {
-        span.addAnnotation("Commit Failed", TraceUtil.getExceptionAnnotations(e));
-        TraceUtil.endSpanWithFailure(opSpan, e);
-        throw e;
-      }
-      span.addAnnotation("Commit Done");
-    }
-
-    Timestamp commitTimestamp() {
-      checkState(commitTimestamp != null, "run() has not yet returned normally");
-      return commitTimestamp;
-    }
-
-    boolean isAborted() {
-      synchronized (lock) {
-        return aborted;
-      }
-    }
-
-    /** Return the delay in milliseconds between requests to Cloud Spanner. */
-    long getRetryDelayInMillis(BackOff backoff) {
-      long delay = nextBackOffMillis(backoff);
-      synchronized (lock) {
-        if (retryDelayInMillis >= 0) {
-          return retryDelayInMillis;
-        }
-      }
-      return delay;
-    }
-
-    void rollback() {
-      // We're exiting early due to a user exception, but the transaction is still active.
-      // Send a rollback for the transaction to release any locks held.
-      // TODO(user): Make this an async fire-and-forget request.
-      try {
-        // Note that we're not retrying this request since we don't particularly care about the
-        // response. Normally, the next thing that will happen is that we will make a fresh
-        // transaction attempt, which should implicitly abort this one.
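
Returning to executePartitionedUpdate above: partitioned DML executes and commits each partition independently, so the service can only report getRowCountLowerBound(), never an exact total. A usage sketch, where client is an assumed DatabaseClient:

    // Usage sketch of partitioned DML. The count is a lower bound: at least
    // this many rows were modified, possibly more.
    long deleted = client.executePartitionedUpdate(
        Statement.of("DELETE FROM Sessions WHERE ExpiredAt < CURRENT_TIMESTAMP()"));
    System.out.println("Deleted at least " + deleted + " rows");
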
- span.addAnnotation("Starting Rollback"); - rpc.rollback( - RollbackRequest.newBuilder() - .setSession(session.getName()) - .setTransactionId(transactionId) - .build(), - session.options); - span.addAnnotation("Rollback Done"); - } catch (SpannerException e) { - txnLogger.log(Level.FINE, "Exception during rollback", e); - span.addAnnotation("Rollback Failed", TraceUtil.getExceptionAnnotations(e)); - } - } - - @Nullable - @Override - TransactionSelector getTransactionSelector() { - return TransactionSelector.newBuilder().setId(transactionId).build(); - } - - @Override - public void onError(SpannerException e) { - if (e.getErrorCode() == ErrorCode.ABORTED) { - long delay = -1L; - if (e instanceof AbortedException) { - delay = ((AbortedException) e).getRetryDelayInMillis(); - } - if (delay == -1L) { - txnLogger.log(Level.FINE, "Retry duration is missing from the exception.", e); - } - - synchronized (lock) { - retryDelayInMillis = delay; - aborted = true; - } - } - } - - @Override - public void buffer(Mutation mutation) { - synchronized (lock) { - checkNotNull(mutations, "Context is closed"); - mutations.add(checkNotNull(mutation)); - } - } - - @Override - public void buffer(Iterable mutations) { - synchronized (lock) { - checkNotNull(this.mutations, "Context is closed"); - for (Mutation mutation : mutations) { - this.mutations.add(checkNotNull(mutation)); - } - } - } - - @Override - public long executeUpdate(Statement statement) { - beforeReadOrQuery(); - final ExecuteSqlRequest.Builder builder = - getExecuteSqlRequestBuilder(statement, QueryMode.NORMAL); - com.google.spanner.v1.ResultSet resultSet = - runWithRetries( - new Callable() { - @Override - public com.google.spanner.v1.ResultSet call() throws Exception { - return rpc.executeQuery(builder.build(), session.options); - } - }); - if (!resultSet.hasStats()) { - throw new IllegalArgumentException( - "DML response missing stats possibly due to non-DML statement as input"); - } - // For standard DML, using the exact row count. - return resultSet.getStats().getRowCountExact(); - } - - @Override - public long[] batchUpdate(Iterable statements) { - beforeReadOrQuery(); - final ExecuteBatchDmlRequest.Builder builder = getExecuteBatchDmlRequestBuilder(statements); - com.google.spanner.v1.ExecuteBatchDmlResponse response = - runWithRetries( - new Callable() { - @Override - public com.google.spanner.v1.ExecuteBatchDmlResponse call() throws Exception { - return rpc.executeBatchDml(builder.build(), session.options); - } - }); - long[] results = new long[response.getResultSetsCount()]; - for (int i = 0; i < response.getResultSetsCount(); ++i) { - results[i] = response.getResultSets(i).getStats().getRowCountExact(); - } - - if (response.getStatus().getCode() != 0) { - throw newSpannerBatchUpdateException( - ErrorCode.fromRpcStatus(response.getStatus()), - response.getStatus().getMessage(), - results); - } - return results; - } - } - - /** - * A {@code ReadContext} for standalone reads. This can only be used for a single operation, since - * each standalone read may see a different timestamp of Cloud Spanner data. 
- */ - private static class SingleReadContext extends AbstractReadContext { - final TimestampBound bound; - - @GuardedBy("lock") - private boolean used; - - private SingleReadContext( - SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) { - super(session, rpc, defaultPrefetchChunks); - this.bound = bound; - } - - @GuardedBy("lock") - @Override - void beforeReadOrQueryLocked() { - super.beforeReadOrQueryLocked(); - checkState(!used, "Cannot use a single-read ReadContext for multiple reads"); - used = true; - } - - @Override - @Nullable - TransactionSelector getTransactionSelector() { - if (bound.getMode() == TimestampBound.Mode.STRONG) { - // Default mode: no need to specify a transaction. - return null; - } - return TransactionSelector.newBuilder() - .setSingleUse(TransactionOptions.newBuilder().setReadOnly(bound.toProto())) - .build(); - } - } - - private static void assertTimestampAvailable(boolean available) { - checkState(available, "Method can only be called after read has returned data or finished"); - } - - private class SingleUseReadOnlyTransaction extends SingleReadContext - implements ReadOnlyTransaction { - @GuardedBy("lock") - private Timestamp timestamp; - - private SingleUseReadOnlyTransaction( - SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) { - super(session, bound, rpc, defaultPrefetchChunks); - } - - @Override - public Timestamp getReadTimestamp() { - synchronized (lock) { - assertTimestampAvailable(timestamp != null); - return timestamp; - } - } - - @Override - @Nullable - TransactionSelector getTransactionSelector() { - TransactionOptions.Builder options = TransactionOptions.newBuilder(); - bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true); - return TransactionSelector.newBuilder().setSingleUse(options).build(); - } - - @Override - public void onTransactionMetadata(Transaction transaction) { - synchronized (lock) { - if (!transaction.hasReadTimestamp()) { - throw newSpannerException( - ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field"); - } - try { - timestamp = Timestamp.fromProto(transaction.getReadTimestamp()); - } catch (IllegalArgumentException e) { - throw newSpannerException( - ErrorCode.INTERNAL, "Bad value in transaction.read_timestamp metadata field", e); - } - } - } - } - - static class MultiUseReadOnlyTransaction extends AbstractReadContext - implements ReadOnlyTransaction { - private TimestampBound bound; - private final Object txnLock = new Object(); - - @GuardedBy("txnLock") - private Timestamp timestamp; - - @GuardedBy("txnLock") - private ByteString transactionId; - - MultiUseReadOnlyTransaction( - SessionImpl session, TimestampBound bound, SpannerRpc rpc, int defaultPrefetchChunks) { - super(session, rpc, defaultPrefetchChunks); - checkArgument( - bound.getMode() != TimestampBound.Mode.MAX_STALENESS - && bound.getMode() != TimestampBound.Mode.MIN_READ_TIMESTAMP, - "Bounded staleness mode %s is not supported for multi-use read-only transactions." 
- + " Create a single-use read or read-only transaction instead.", - bound.getMode()); - this.bound = bound; - } - - MultiUseReadOnlyTransaction( - SessionImpl session, - ByteString transactionId, - Timestamp timestamp, - SpannerRpc rpc, - int defaultPrefetchChunks) { - super(session, rpc, defaultPrefetchChunks); - this.transactionId = transactionId; - this.timestamp = timestamp; - } - - @Override - void beforeReadOrQuery() { - super.beforeReadOrQuery(); - initTransaction(); - } - - @Override - @Nullable - TransactionSelector getTransactionSelector() { - // No need for synchronization: super.readInternal() is always preceded by a check of - // "transactionId" that provides a happens-before from initialization, and the value is never - // changed afterwards. - @SuppressWarnings("GuardedByChecker") - TransactionSelector selector = TransactionSelector.newBuilder().setId(transactionId).build(); - return selector; - } - - @Override - public Timestamp getReadTimestamp() { - synchronized (txnLock) { - assertTimestampAvailable(timestamp != null); - return timestamp; - } - } - - ByteString getTransactionId() { - synchronized (txnLock) { - return transactionId; - } - } - - void initTransaction() { - throwIfTransactionsPending(); - - // Since we only support synchronous calls, just block on "txnLock" while the RPC is in - // flight. Note that we use the strategy of sending an explicit BeginTransaction() RPC, - // rather than using the first read in the transaction to begin it implicitly. The chosen - // strategy is sub-optimal in the case of the first read being fast, as it incurs an extra - // RTT, but optimal if the first read is slow. Since we don't know how fast the read will be, - // and we are using non-streaming reads (so we don't see the metadata until the entire read - // has finished), using BeginTransaction() is the safest path. - // TODO(user): Fix comment / begin transaction on first read; we now use streaming reads. 
- synchronized (txnLock) { - if (transactionId != null) { - return; - } - span.addAnnotation("Creating Transaction"); - try { - TransactionOptions.Builder options = TransactionOptions.newBuilder(); - bound.applyToBuilder(options.getReadOnlyBuilder()).setReturnReadTimestamp(true); - final BeginTransactionRequest request = - BeginTransactionRequest.newBuilder() - .setSession(session.getName()) - .setOptions(options) - .build(); - Transaction transaction = - runWithRetries( - new Callable() { - @Override - public Transaction call() throws Exception { - return rpc.beginTransaction(request, session.options); - } - }); - if (!transaction.hasReadTimestamp()) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INTERNAL, "Missing expected transaction.read_timestamp metadata field"); - } - if (transaction.getId().isEmpty()) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INTERNAL, "Missing expected transaction.id metadata field"); - } - try { - timestamp = Timestamp.fromProto(transaction.getReadTimestamp()); - } catch (IllegalArgumentException e) { - throw SpannerExceptionFactory.newSpannerException( - ErrorCode.INTERNAL, "Bad value in transaction.read_timestamp metadata field", e); - } - transactionId = transaction.getId(); - span.addAnnotation( - "Transaction Creation Done", TraceUtil.getTransactionAnnotations(transaction)); - } catch (SpannerException e) { - span.addAnnotation("Transaction Creation Failed", TraceUtil.getExceptionAnnotations(e)); - throw e; - } - } - } - } - - @VisibleForTesting - abstract static class AbstractResultSet extends AbstractStructReader implements ResultSet { - interface Listener { - /** - * Called when transaction metadata is seen. This method may be invoked at most once. If the - * method is invoked, it will precede {@link #onError(SpannerException)} or {@link #onDone()}. - */ - void onTransactionMetadata(Transaction transaction) throws SpannerException; - - /** Called when the read finishes with an error. */ - void onError(SpannerException e); - - /** Called when the read finishes normally. 
*/ - void onDone(); - } - - protected abstract GrpcStruct currRow(); - - @Override - public Struct getCurrentRowAsStruct() { - return currRow().immutableCopy(); - } - - @Override - protected boolean getBooleanInternal(int columnIndex) { - return currRow().getBooleanInternal(columnIndex); - } - - @Override - protected long getLongInternal(int columnIndex) { - return currRow().getLongInternal(columnIndex); - } - - @Override - protected double getDoubleInternal(int columnIndex) { - return currRow().getDoubleInternal(columnIndex); - } - - @Override - protected String getStringInternal(int columnIndex) { - return currRow().getStringInternal(columnIndex); - } - - @Override - protected ByteArray getBytesInternal(int columnIndex) { - return currRow().getBytesInternal(columnIndex); - } - - @Override - protected Timestamp getTimestampInternal(int columnIndex) { - return currRow().getTimestampInternal(columnIndex); - } - - @Override - protected Date getDateInternal(int columnIndex) { - return currRow().getDateInternal(columnIndex); - } - - @Override - protected boolean[] getBooleanArrayInternal(int columnIndex) { - return currRow().getBooleanArrayInternal(columnIndex); - } - - @Override - protected List getBooleanListInternal(int columnIndex) { - return currRow().getBooleanListInternal(columnIndex); - } - - @Override - protected long[] getLongArrayInternal(int columnIndex) { - return currRow().getLongArrayInternal(columnIndex); - } - - @Override - protected List getLongListInternal(int columnIndex) { - return currRow().getLongListInternal(columnIndex); - } - - @Override - protected double[] getDoubleArrayInternal(int columnIndex) { - return currRow().getDoubleArrayInternal(columnIndex); - } - - @Override - protected List getDoubleListInternal(int columnIndex) { - return currRow().getDoubleListInternal(columnIndex); - } - - @Override - protected List getStringListInternal(int columnIndex) { - return currRow().getStringListInternal(columnIndex); - } - - @Override - protected List getBytesListInternal(int columnIndex) { - return currRow().getBytesListInternal(columnIndex); - } - - @Override - protected List getTimestampListInternal(int columnIndex) { - return currRow().getTimestampListInternal(columnIndex); - } - - @Override - protected List getDateListInternal(int columnIndex) { - return currRow().getDateListInternal(columnIndex); - } - - @Override - protected List getStructListInternal(int columnIndex) { - return currRow().getStructListInternal(columnIndex); - } - - @Override - public boolean isNull(int columnIndex) { - return currRow().isNull(columnIndex); - } - } - - @VisibleForTesting - static class GrpcResultSet extends AbstractResultSet> { - private final GrpcValueIterator iterator; - private final Listener listener; - private final QueryMode queryMode; - private GrpcStruct currRow; - private SpannerException error; - private ResultSetStats statistics; - private boolean closed; - - GrpcResultSet( - CloseableIterator iterator, Listener listener, QueryMode queryMode) { - this.iterator = new GrpcValueIterator(iterator); - this.listener = listener; - this.queryMode = queryMode; - } - - @Override - protected GrpcStruct currRow() { - checkState(!closed, "ResultSet is closed"); - checkState(currRow != null, "next() call required"); - return currRow; - } - - @Override - public boolean next() throws SpannerException { - if (error != null) { - throw newSpannerException(error); - } - try { - if (currRow == null) { - ResultSetMetadata metadata = iterator.getMetadata(); - if (metadata.hasTransaction()) { - 
listener.onTransactionMetadata(metadata.getTransaction()); - } - currRow = new GrpcStruct(iterator.type(), new ArrayList<>()); - } - boolean hasNext = currRow.consumeRow(iterator); - if (!hasNext) { - statistics = iterator.getStats(); - } - return hasNext; - } catch (SpannerException e) { - throw yieldError(e); - } - } - - @Override - @Nullable - public ResultSetStats getStats() { - return statistics; - } - - @Override - public void close() { - iterator.close("ResultSet closed"); - closed = true; - } - - @Override - public Type getType() { - checkState(currRow != null, "next() call required"); - return currRow.getType(); - } - - private SpannerException yieldError(SpannerException e) { - close(); - listener.onError(e); - throw e; - } - } - - private static class GrpcStruct extends Struct implements Serializable { - - protected final Type type; - protected final List rowData; - - /** - * Builds an immutable version of this struct using {@link Struct#newBuilder()} which is used as - * a serialization proxy. - */ - private Object writeReplace() { - Builder builder = Struct.newBuilder(); - List structFields = getType().getStructFields(); - for (int i = 0; i < structFields.size(); i++) { - Type.StructField field = structFields.get(i); - String fieldName = field.getName(); - Object value = rowData.get(i); - Type fieldType = field.getType(); - switch (fieldType.getCode()) { - case BOOL: - builder.set(fieldName).to((Boolean) value); - break; - case INT64: - builder.set(fieldName).to((Long) value); - break; - case FLOAT64: - builder.set(fieldName).to((Double) value); - break; - case STRING: - builder.set(fieldName).to((String) value); - break; - case BYTES: - builder.set(fieldName).to((ByteArray) value); - break; - case TIMESTAMP: - builder.set(fieldName).to((Timestamp) value); - break; - case DATE: - builder.set(fieldName).to((Date) value); - break; - case ARRAY: - switch (fieldType.getArrayElementType().getCode()) { - case BOOL: - builder.set(fieldName).toBoolArray((Iterable) value); - break; - case INT64: - builder.set(fieldName).toInt64Array((Iterable) value); - break; - case FLOAT64: - builder.set(fieldName).toFloat64Array((Iterable) value); - break; - case STRING: - builder.set(fieldName).toStringArray((Iterable) value); - break; - case BYTES: - builder.set(fieldName).toBytesArray((Iterable) value); - break; - case TIMESTAMP: - builder.set(fieldName).toTimestampArray((Iterable) value); - break; - case DATE: - builder.set(fieldName).toDateArray((Iterable) value); - break; - case STRUCT: - builder - .set(fieldName) - .toStructArray(fieldType.getArrayElementType(), (Iterable) value); - break; - default: - throw new AssertionError( - "Unhandled array type code: " + fieldType.getArrayElementType()); - } - break; - case STRUCT: - if (value == null) { - builder.set(fieldName).to(fieldType, null); - } else { - builder.set(fieldName).to((Struct) value); - } - break; - default: - throw new AssertionError("Unhandled type code: " + fieldType.getCode()); - } - } - return builder.build(); - } - - GrpcStruct(Type type, List rowData) { - this.type = type; - this.rowData = rowData; - } - - @Override - public String toString() { - return this.rowData.toString(); - } - - boolean consumeRow(Iterator iterator) { - rowData.clear(); - if (!iterator.hasNext()) { - return false; - } - for (Type.StructField fieldType : getType().getStructFields()) { - if (!iterator.hasNext()) { - throw newSpannerException( - ErrorCode.INTERNAL, - "Invalid value stream: end of stream reached before row is complete"); - } - 
com.google.protobuf.Value value = iterator.next(); - rowData.add(decodeValue(fieldType.getType(), value)); - } - return true; - } - - private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) { - if (proto.getKindCase() == KindCase.NULL_VALUE) { - return null; - } - switch (fieldType.getCode()) { - case BOOL: - checkType(fieldType, proto, KindCase.BOOL_VALUE); - return proto.getBoolValue(); - case INT64: - checkType(fieldType, proto, KindCase.STRING_VALUE); - return Long.parseLong(proto.getStringValue()); - case FLOAT64: - return valueProtoToFloat64(proto); - case STRING: - checkType(fieldType, proto, KindCase.STRING_VALUE); - return proto.getStringValue(); - case BYTES: - checkType(fieldType, proto, KindCase.STRING_VALUE); - return ByteArray.fromBase64(proto.getStringValue()); - case TIMESTAMP: - checkType(fieldType, proto, KindCase.STRING_VALUE); - return Timestamp.parseTimestamp(proto.getStringValue()); - case DATE: - checkType(fieldType, proto, KindCase.STRING_VALUE); - return Date.parseDate(proto.getStringValue()); - case ARRAY: - checkType(fieldType, proto, KindCase.LIST_VALUE); - ListValue listValue = proto.getListValue(); - return decodeArrayValue(fieldType.getArrayElementType(), listValue); - case STRUCT: - checkType(fieldType, proto, KindCase.LIST_VALUE); - ListValue structValue = proto.getListValue(); - return decodeStructValue(fieldType, structValue); - default: - throw new AssertionError("Unhandled type code: " + fieldType.getCode()); - } - } - - private static Struct decodeStructValue(Type structType, ListValue structValue) { - List fieldTypes = structType.getStructFields(); - checkArgument( - structValue.getValuesCount() == fieldTypes.size(), - "Size mismatch between type descriptor and actual values."); - List fields = new ArrayList<>(fieldTypes.size()); - List fieldValues = structValue.getValuesList(); - for (int i = 0; i < fieldTypes.size(); ++i) { - fields.add(decodeValue(fieldTypes.get(i).getType(), fieldValues.get(i))); - } - return new GrpcStruct(structType, fields); - } - - private static Object decodeArrayValue(Type elementType, ListValue listValue) { - switch (elementType.getCode()) { - case BOOL: - // Use a view: element conversion is virtually free. - return Lists.transform( - listValue.getValuesList(), - new Function() { - @Override - public Boolean apply(com.google.protobuf.Value input) { - return input.getKindCase() == KindCase.NULL_VALUE ? null : input.getBoolValue(); - } - }); - case INT64: - // For int64/float64 types, use custom containers. These avoid wrapper object - // creation for non-null arrays. - return new Int64Array(listValue); - case FLOAT64: - return new Float64Array(listValue); - case STRING: - return Lists.transform( - listValue.getValuesList(), - new Function() { - @Override - public String apply(com.google.protobuf.Value input) { - return input.getKindCase() == KindCase.NULL_VALUE ? null : input.getStringValue(); - } - }); - case BYTES: - { - // Materialize list: element conversion is expensive and should happen only once. - ArrayList list = new ArrayList<>(listValue.getValuesCount()); - for (com.google.protobuf.Value value : listValue.getValuesList()) { - list.add( - value.getKindCase() == KindCase.NULL_VALUE - ? null - : ByteArray.fromBase64(value.getStringValue())); - } - return list; - } - case TIMESTAMP: - { - // Materialize list: element conversion is expensive and should happen only once. 
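
One non-obvious wire convention that decodeValue handles above: INT64 arrives as a STRING_VALUE, like TIMESTAMP and DATE, because protobuf's Value carries numbers as doubles and a double cannot represent every 64-bit integer. A self-contained sketch of that round trip:

    // Sketch: Cloud Spanner encodes INT64 as a decimal string in a proto Value.
    com.google.protobuf.Value wire =
        com.google.protobuf.Value.newBuilder()
            .setStringValue("9223372036854775807") // Long.MAX_VALUE; not exact as a double
            .build();
    long decoded = Long.parseLong(wire.getStringValue());
    assert decoded == Long.MAX_VALUE;
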
- ArrayList list = new ArrayList<>(listValue.getValuesCount()); - for (com.google.protobuf.Value value : listValue.getValuesList()) { - list.add( - value.getKindCase() == KindCase.NULL_VALUE - ? null - : Timestamp.parseTimestamp(value.getStringValue())); - } - return list; - } - case DATE: - { - // Materialize list: element conversion is expensive and should happen only once. - ArrayList list = new ArrayList<>(listValue.getValuesCount()); - for (com.google.protobuf.Value value : listValue.getValuesList()) { - list.add( - value.getKindCase() == KindCase.NULL_VALUE - ? null - : Date.parseDate(value.getStringValue())); - } - return list; - } - - case STRUCT: - { - ArrayList list = new ArrayList<>(listValue.getValuesCount()); - for (com.google.protobuf.Value value : listValue.getValuesList()) { - if (value.getKindCase() == KindCase.NULL_VALUE) { - list.add(null); - } else { - ListValue structValue = value.getListValue(); - list.add(decodeStructValue(elementType, structValue)); - } - } - return list; - } - default: - throw new AssertionError("Unhandled type code: " + elementType.getCode()); - } - } - - private static void checkType( - Type fieldType, com.google.protobuf.Value proto, KindCase expected) { - if (proto.getKindCase() != expected) { - throw newSpannerException( - ErrorCode.INTERNAL, - "Invalid value for column type " - + fieldType - + " expected " - + expected - + " but was " - + proto.getKindCase()); - } - } - - Struct immutableCopy() { - return new GrpcStruct(type, new ArrayList<>(rowData)); - } - - @Override - public Type getType() { - return type; - } - - @Override - public boolean isNull(int columnIndex) { - return rowData.get(columnIndex) == null; - } - - @Override - protected boolean getBooleanInternal(int columnIndex) { - return (Boolean) rowData.get(columnIndex); - } - - @Override - protected long getLongInternal(int columnIndex) { - return (Long) rowData.get(columnIndex); - } - - @Override - protected double getDoubleInternal(int columnIndex) { - return (Double) rowData.get(columnIndex); - } - - @Override - protected String getStringInternal(int columnIndex) { - return (String) rowData.get(columnIndex); - } - - @Override - protected ByteArray getBytesInternal(int columnIndex) { - return (ByteArray) rowData.get(columnIndex); - } - - @Override - protected Timestamp getTimestampInternal(int columnIndex) { - return (Timestamp) rowData.get(columnIndex); - } - - @Override - protected Date getDateInternal(int columnIndex) { - return (Date) rowData.get(columnIndex); - } - - @Override - protected Struct getStructInternal(int columnIndex) { - return (Struct) rowData.get(columnIndex); - } - - @Override - protected boolean[] getBooleanArrayInternal(int columnIndex) { - @SuppressWarnings("unchecked") // We know ARRAY produces a List. - List values = (List) rowData.get(columnIndex); - boolean[] r = new boolean[values.size()]; - for (int i = 0; i < values.size(); ++i) { - if (values.get(i) == null) { - throw throwNotNull(columnIndex); - } - r[i] = values.get(i); - } - return r; - } - - @Override - @SuppressWarnings("unchecked") // We know ARRAY produces a List. 
- protected List getBooleanListInternal(int columnIndex) { - return Collections.unmodifiableList((List) rowData.get(columnIndex)); - } - - @Override - protected long[] getLongArrayInternal(int columnIndex) { - return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex); - } - - @Override - protected Int64Array getLongListInternal(int columnIndex) { - return (Int64Array) rowData.get(columnIndex); - } - - @Override - protected double[] getDoubleArrayInternal(int columnIndex) { - return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex); - } - - @Override - protected Float64Array getDoubleListInternal(int columnIndex) { - return (Float64Array) rowData.get(columnIndex); - } - - @Override - @SuppressWarnings("unchecked") // We know ARRAY produces a List. - protected List getStringListInternal(int columnIndex) { - return Collections.unmodifiableList((List) rowData.get(columnIndex)); - } - - @Override - @SuppressWarnings("unchecked") // We know ARRAY produces a List. - protected List getBytesListInternal(int columnIndex) { - return Collections.unmodifiableList((List) rowData.get(columnIndex)); - } - - @Override - @SuppressWarnings("unchecked") // We know ARRAY produces a List. - protected List getTimestampListInternal(int columnIndex) { - return Collections.unmodifiableList((List) rowData.get(columnIndex)); - } - - @Override - @SuppressWarnings("unchecked") // We know ARRAY produces a List. - protected List getDateListInternal(int columnIndex) { - return Collections.unmodifiableList((List) rowData.get(columnIndex)); - } - - @Override - @SuppressWarnings("unchecked") // We know ARRAY> produces a List. - protected List getStructListInternal(int columnIndex) { - return Collections.unmodifiableList((List) rowData.get(columnIndex)); - } - } - - @VisibleForTesting - interface CloseableIterator extends Iterator { - - /** - * Closes the iterator, freeing any underlying resources. - * - * @param message a message to include in the final RPC status - */ - void close(@Nullable String message); - } - - private static final class CloseableServerStreamIterator implements CloseableIterator { - - private final ServerStream stream; - private final Iterator iterator; - - public CloseableServerStreamIterator(ServerStream stream) { - this.stream = stream; - this.iterator = stream.iterator(); - } - - @Override - public boolean hasNext() { - try { - return iterator.hasNext(); - } catch (Exception e) { - throw SpannerExceptionFactory.newSpannerException(e); - } - } - - @Override - public T next() { - try { - return iterator.next(); - } catch (Exception e) { - throw SpannerExceptionFactory.newSpannerException(e); - } - } - - @Override - public void remove() { - throw new UnsupportedOperationException("Not supported: remove."); - } - - @Override - public void close(@Nullable String message) { - try { - stream.cancel(); - } catch (Exception e) { - throw SpannerExceptionFactory.newSpannerException(e); - } - } - } - - /** Adapts a streaming read/query call into an iterator over partial result sets. */ - @VisibleForTesting - static class GrpcStreamIterator extends AbstractIterator - implements CloseableIterator { - private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build(); - - private final ConsumerImpl consumer = new ConsumerImpl(); - private final BlockingQueue stream; - - private SpannerRpc.StreamingCall call; - private SpannerException error; - - // Visible for testing. - GrpcStreamIterator(int prefetchChunks) { - // One extra to allow for END_OF_STREAM message. 
- this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1); - } - - protected final SpannerRpc.ResultStreamConsumer consumer() { - return consumer; - } - - public void setCall(SpannerRpc.StreamingCall call) { - this.call = call; - } - - @Override - public void close(@Nullable String message) { - if (call != null) { - call.cancel(message); - } - } - - @Override - protected final PartialResultSet computeNext() { - PartialResultSet next; - try { - // TODO: Ideally honor io.grpc.Context while blocking here. In practice, - // cancellation/deadline results in an error being delivered to "stream", which - // should mean that we do not block significantly longer afterwards, but it would - // be more robust to use poll() with a timeout. - next = stream.take(); - } catch (InterruptedException e) { - // Treat interrupt as a request to cancel the read. - throw SpannerExceptionFactory.propagateInterrupt(e); - } - if (next != END_OF_STREAM) { - call.request(1); - return next; - } - - // All done - close() no longer needs to cancel the call. - call = null; - - if (error != null) { - throw SpannerExceptionFactory.newSpannerException(error); - } - - endOfData(); - return null; - } - - private void addToStream(PartialResultSet results) { - // We assume that nothing from the user will interrupt gRPC event threads. - Uninterruptibles.putUninterruptibly(stream, results); - } - - private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer { - @Override - public void onPartialResultSet(PartialResultSet results) { - addToStream(results); - } - - @Override - public void onCompleted() { - addToStream(END_OF_STREAM); - } - - @Override - public void onError(SpannerException e) { - error = e; - addToStream(END_OF_STREAM); - } - - // Visible only for testing. - @VisibleForTesting - void setCall(SpannerRpc.StreamingCall call) { - GrpcStreamIterator.this.setCall(call); - } - } - } - - /** - * Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class keeps - * track of the most recent resume token seen, and will buffer partial result set chunks that do - * not have a resume token until one is seen or buffer space is exceeded, which reduces the chance - * of yielding data to the caller that cannot be resumed. - */ - @VisibleForTesting - abstract static class ResumableStreamIterator extends AbstractIterator - implements CloseableIterator { - private final BackOff backOff = newBackOff(); - private final LinkedList buffer = new LinkedList<>(); - private final int maxBufferSize; - private final Span span; - private CloseableIterator stream; - private ByteString resumeToken; - private boolean finished; - /** - * Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we have - * reached the maximum buffer size without seeing a restart token; in this case, we will drain - * the buffer and remain in this state until we see a new restart token. 
- */ - private boolean safeToRetry = true; - - protected ResumableStreamIterator(int maxBufferSize, String streamName, Span parent) { - checkArgument(maxBufferSize >= 0); - this.maxBufferSize = maxBufferSize; - this.span = tracer.spanBuilderWithExplicitParent(streamName, parent).startSpan(); - } - - abstract CloseableIterator startStream(@Nullable ByteString resumeToken); - - @Override - public void close(@Nullable String message) { - if (stream != null) { - stream.close(message); - } - } - - @Override - protected PartialResultSet computeNext() { - Context context = Context.current(); - while (true) { - // Eagerly start stream before consuming any buffered items. - if (stream == null) { - span.addAnnotation( - "Starting/Resuming stream", - ImmutableMap.of( - "ResumeToken", - AttributeValue.stringAttributeValue( - resumeToken == null ? "null" : resumeToken.toStringUtf8()))); - try (Scope s = tracer.withSpan(span)) { - // When start a new stream set the Span as current to make the gRPC Span a child of - // this Span. - stream = checkNotNull(startStream(resumeToken)); - } - } - // Buffer contains items up to a resume token or has reached capacity: flush. - if (!buffer.isEmpty() - && (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) { - return buffer.pop(); - } - try { - if (stream.hasNext()) { - PartialResultSet next = stream.next(); - boolean hasResumeToken = !next.getResumeToken().isEmpty(); - if (hasResumeToken) { - resumeToken = next.getResumeToken(); - safeToRetry = true; - } - // If the buffer is empty and this chunk has a resume token or we cannot resume safely - // anyway, we can yield it immediately rather than placing it in the buffer to be - // returned on the next iteration. - if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) { - return next; - } - buffer.add(next); - if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) { - // We need to flush without a restart token. Errors encountered until we see - // such a token will fail the read. - safeToRetry = false; - } - } else { - finished = true; - if (buffer.isEmpty()) { - endOfData(); - return null; - } - } - } catch (SpannerException e) { - if (safeToRetry && e.isRetryable()) { - span.addAnnotation( - "Stream broken. Safe to retry", TraceUtil.getExceptionAnnotations(e)); - logger.log(Level.FINE, "Retryable exception, will sleep and retry", e); - // Truncate any items in the buffer before the last retry token. - while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) { - buffer.removeLast(); - } - assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken); - stream = null; - try (Scope s = tracer.withSpan(span)) { - long delay = e.getRetryDelayInMillis(); - if (delay != -1) { - backoffSleep(context, delay); - } else { - backoffSleep(context, backOff); - } - } - continue; - } - span.addAnnotation("Stream broken. Not safe to retry"); - TraceUtil.endSpanWithFailure(span, e); - throw e; - } catch (RuntimeException e) { - span.addAnnotation("Stream broken. Not safe to retry"); - TraceUtil.endSpanWithFailure(span, e); - throw e; - } - } - } - } - - /** - * Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value} messages. 
- */ - private static class GrpcValueIterator extends AbstractIterator { - private enum StreamValue { - METADATA, - RESULT, - } - - private final CloseableIterator stream; - private ResultSetMetadata metadata; - private Type type; - private PartialResultSet current; - private int pos; - private ResultSetStats statistics; - - GrpcValueIterator(CloseableIterator stream) { - this.stream = stream; - } - - @SuppressWarnings("unchecked") - @Override - protected com.google.protobuf.Value computeNext() { - if (!ensureReady(StreamValue.RESULT)) { - endOfData(); - return null; - } - com.google.protobuf.Value value = current.getValues(pos++); - KindCase kind = value.getKindCase(); - - if (!isMergeable(kind)) { - if (pos == current.getValuesCount() && current.getChunkedValue()) { - throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet."); - } else { - return value; - } - } - if (!current.getChunkedValue() || pos != current.getValuesCount()) { - return value; - } - - Object merged = - kind == KindCase.STRING_VALUE - ? value.getStringValue() - : new ArrayList(value.getListValue().getValuesList()); - while (current.getChunkedValue() && pos == current.getValuesCount()) { - if (!ensureReady(StreamValue.RESULT)) { - throw newSpannerException( - ErrorCode.INTERNAL, "Stream closed in the middle of chunked value"); - } - com.google.protobuf.Value newValue = current.getValues(pos++); - if (newValue.getKindCase() != kind) { - throw newSpannerException( - ErrorCode.INTERNAL, - "Unexpected type in middle of chunked value. Expected: " - + kind - + " but got: " - + newValue.getKindCase()); - } - if (kind == KindCase.STRING_VALUE) { - merged = (String) merged + newValue.getStringValue(); - } else { - concatLists( - (List) merged, newValue.getListValue().getValuesList()); - } - } - if (kind == KindCase.STRING_VALUE) { - return com.google.protobuf.Value.newBuilder().setStringValue((String) merged).build(); - } else { - return com.google.protobuf.Value.newBuilder() - .setListValue( - ListValue.newBuilder().addAllValues((List) merged)) - .build(); - } - } - - ResultSetMetadata getMetadata() throws SpannerException { - if (metadata == null) { - if (!ensureReady(StreamValue.METADATA)) { - throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata"); - } - } - return metadata; - } - - /* - * Get the query statistics. Query statistics are delivered with the last PartialResultSet - * in the stream. Any attempt to call this method before the caller has finished consuming the - * results will return null. - */ - @Nullable - ResultSetStats getStats() { - return statistics; - } - - Type type() { - checkState(type != null, "metadata has not been received"); - return type; - } - - private boolean ensureReady(StreamValue requiredValue) throws SpannerException { - while (current == null || pos >= current.getValuesCount()) { - if (!stream.hasNext()) { - return false; - } - current = stream.next(); - pos = 0; - if (type == null) { - // This is the first message on the stream. 
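
computeNext() above reassembles values that were split across PartialResultSet messages: when chunked_value is set, the last value of one message continues in the first value of the next, and only STRING and LIST kinds are mergeable. A self-contained sketch of the string case:

    // Sketch of the merge rule for a chunked STRING value: chunked_value on
    // the first message means its last value continues in the next message.
    PartialResultSet first =
        PartialResultSet.newBuilder()
            .addValues(com.google.protobuf.Value.newBuilder().setStringValue("Hello, "))
            .setChunkedValue(true)
            .build();
    PartialResultSet second =
        PartialResultSet.newBuilder()
            .addValues(com.google.protobuf.Value.newBuilder().setStringValue("World"))
            .build();
    // GrpcValueIterator yields the concatenation as a single Value.
    String merged =
        first.getValues(0).getStringValue() + second.getValues(0).getStringValue();
    assert merged.equals("Hello, World");
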
- if (!current.hasMetadata() || !current.getMetadata().hasRowType()) { - throw newSpannerException(ErrorCode.INTERNAL, "Missing type metadata in first message"); - } - metadata = current.getMetadata(); - com.google.spanner.v1.Type typeProto = - com.google.spanner.v1.Type.newBuilder() - .setCode(TypeCode.STRUCT) - .setStructType(metadata.getRowType()) - .build(); - try { - type = Type.fromProto(typeProto); - } catch (IllegalArgumentException e) { - throw newSpannerException( - ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e); - } - } - if (current.hasStats()) { - statistics = current.getStats(); - } - if (requiredValue == StreamValue.METADATA) { - return true; - } - } - return true; - } - - void close(@Nullable String message) { - stream.close(message); - } - - /* - * @param a is a mutable list and b will be concatenated into a. - */ - private void concatLists(List a, List b) { - if (a.size() == 0 || b.size() == 0) { - a.addAll(b); - return; - } else { - com.google.protobuf.Value last = a.get(a.size() - 1); - com.google.protobuf.Value first = b.get(0); - KindCase lastKind = last.getKindCase(); - KindCase firstKind = first.getKindCase(); - if (isMergeable(lastKind) && lastKind == firstKind) { - com.google.protobuf.Value merged = null; - if (lastKind == KindCase.STRING_VALUE) { - String lastStr = last.getStringValue(); - String firstStr = first.getStringValue(); - merged = - com.google.protobuf.Value.newBuilder().setStringValue(lastStr + firstStr).build(); - } else { // List - List mergedList = new ArrayList<>(); - mergedList.addAll(last.getListValue().getValuesList()); - concatLists(mergedList, first.getListValue().getValuesList()); - merged = - com.google.protobuf.Value.newBuilder() - .setListValue(ListValue.newBuilder().addAllValues(mergedList)) - .build(); - } - a.set(a.size() - 1, merged); - a.addAll(b.subList(1, b.size())); - } else { - a.addAll(b); - } - } - } - - private boolean isMergeable(KindCase kind) { - return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE; - } - } - - private static double valueProtoToFloat64(com.google.protobuf.Value proto) { - if (proto.getKindCase() == KindCase.STRING_VALUE) { - switch (proto.getStringValue()) { - case "-Infinity": - return Double.NEGATIVE_INFINITY; - case "Infinity": - return Double.POSITIVE_INFINITY; - case "NaN": - return Double.NaN; - default: - // Fall-through to handling below to produce an error. - } - } - if (proto.getKindCase() != KindCase.NUMBER_VALUE) { - throw newSpannerException( - ErrorCode.INTERNAL, - "Invalid value for column type " - + Type.float64() - + " expected NUMBER_VALUE or STRING_VALUE with value one of" - + " \"Infinity\", \"-Infinity\", or \"NaN\" but was " - + proto.getKindCase() - + (proto.getKindCase() == KindCase.STRING_VALUE - ? " with value \"" + proto.getStringValue() + "\"" - : "")); - } - return proto.getNumberValue(); - } - - private static NullPointerException throwNotNull(int columnIndex) { - throw new NullPointerException( - "Cannot call array getter for column " + columnIndex + " with null elements"); - } - - /** - * Memory-optimized base class for {@code ARRAY} and {@code ARRAY} types. Both of - * these involve conversions from the type yielded by JSON parsing, which are {@code String} and - * {@code BigDecimal} respectively. Rather than construct new wrapper objects for each array - * element, we use primitive arrays and a {@code BitSet} to track nulls. 
- */ - private abstract static class PrimitiveArray extends AbstractList { - private final A data; - private final BitSet nulls; - private final int size; - - PrimitiveArray(ListValue protoList) { - this.size = protoList.getValuesCount(); - A data = newArray(size); - BitSet nulls = new BitSet(size); - for (int i = 0; i < protoList.getValuesCount(); ++i) { - if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) { - nulls.set(i); - } else { - setProto(data, i, protoList.getValues(i)); - } - } - this.data = data; - this.nulls = nulls; - } - - PrimitiveArray(A data, BitSet nulls, int size) { - this.data = data; - this.nulls = nulls; - this.size = size; - } - - abstract A newArray(int size); - - abstract void setProto(A array, int i, com.google.protobuf.Value protoValue); - - abstract T get(A array, int i); - - @Override - public T get(int index) { - if (index < 0 || index >= size) { - throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size); - } - return nulls.get(index) ? null : get(data, index); - } - - @Override - public int size() { - return size; - } - - A toPrimitiveArray(int columnIndex) { - if (nulls.length() > 0) { - throw throwNotNull(columnIndex); - } - A r = newArray(size); - System.arraycopy(data, 0, r, 0, size); - return r; - } - } - - private static class Int64Array extends PrimitiveArray { - Int64Array(ListValue protoList) { - super(protoList); - } - - Int64Array(long[] data, BitSet nulls) { - super(data, nulls, data.length); - } - - @Override - long[] newArray(int size) { - return new long[size]; - } - - @Override - void setProto(long[] array, int i, com.google.protobuf.Value protoValue) { - array[i] = Long.parseLong(protoValue.getStringValue()); - } - - @Override - Long get(long[] array, int i) { - return array[i]; - } - } - - private static class Float64Array extends PrimitiveArray { - Float64Array(ListValue protoList) { - super(protoList); - } - - Float64Array(double[] data, BitSet nulls) { - super(data, nulls, data.length); - } - - @Override - double[] newArray(int size) { - return new double[size]; - } - - @Override - void setProto(double[] array, int i, com.google.protobuf.Value protoValue) { - array[i] = valueProtoToFloat64(protoValue); - } - - @Override - Double get(double[] array, int i) { - return array[i]; - } - } } diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManagerImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManagerImpl.java index 5706789cb894..508fbf5ed5ef 100644 --- a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManagerImpl.java +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionManagerImpl.java @@ -17,8 +17,8 @@ package com.google.cloud.spanner; import com.google.cloud.Timestamp; -import com.google.cloud.spanner.SpannerImpl.SessionImpl; -import com.google.cloud.spanner.SpannerImpl.SessionTransaction; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; import com.google.common.base.Preconditions; import io.opencensus.common.Scope; import io.opencensus.trace.Span; @@ -32,7 +32,7 @@ final class TransactionManagerImpl implements TransactionManager, SessionTransac private final SessionImpl session; private final Span span; - private SpannerImpl.TransactionContextImpl txn; + private TransactionContextImpl txn; private TransactionState 
txnState; TransactionManagerImpl(SessionImpl session) { diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java new file mode 100644 index 000000000000..afe90d893f64 --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java @@ -0,0 +1,431 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerBatchUpdateException; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.client.util.BackOff; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.TransactionSelector; +import io.grpc.Context; +import io.opencensus.common.Scope; +import io.opencensus.trace.AttributeValue; +import io.opencensus.trace.Span; +import io.opencensus.trace.Tracer; +import io.opencensus.trace.Tracing; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Default implementation of {@link TransactionRunner}. */ +class TransactionRunnerImpl implements SessionTransaction, TransactionRunner { + private static final Tracer tracer = Tracing.getTracer(); + private static final Logger txnLogger = Logger.getLogger(TransactionRunner.class.getName()); + + @VisibleForTesting + static class TransactionContextImpl extends AbstractReadContext implements TransactionContext { + @GuardedBy("lock") + private List mutations = new ArrayList<>(); + + @GuardedBy("lock") + private boolean aborted; + + /** Default to -1 to indicate not available. 
diff --git a/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java new file mode 100644 index 000000000000..afe90d893f64 --- /dev/null +++ b/google-cloud-clients/google-cloud-spanner/src/main/java/com/google/cloud/spanner/TransactionRunnerImpl.java @@ -0,0 +1,431 @@ +/* + * Copyright 2019 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.spanner; + +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerBatchUpdateException; +import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException; +import static com.google.common.base.Preconditions.checkNotNull; +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.client.util.BackOff; +import com.google.cloud.Timestamp; +import com.google.cloud.spanner.SessionImpl.SessionTransaction; +import com.google.cloud.spanner.spi.v1.SpannerRpc; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableMap; +import com.google.protobuf.ByteString; +import com.google.spanner.v1.CommitRequest; +import com.google.spanner.v1.CommitResponse; +import com.google.spanner.v1.ExecuteBatchDmlRequest; +import com.google.spanner.v1.ExecuteSqlRequest; +import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; +import com.google.spanner.v1.RollbackRequest; +import com.google.spanner.v1.TransactionSelector; +import io.grpc.Context; +import io.opencensus.common.Scope; +import io.opencensus.trace.AttributeValue; +import io.opencensus.trace.Span; +import io.opencensus.trace.Tracer; +import io.opencensus.trace.Tracing; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; +import javax.annotation.concurrent.GuardedBy; + +/** Default implementation of {@link TransactionRunner}. */ +class TransactionRunnerImpl implements SessionTransaction, TransactionRunner { + private static final Tracer tracer = Tracing.getTracer(); + private static final Logger txnLogger = Logger.getLogger(TransactionRunner.class.getName()); + + @VisibleForTesting + static class TransactionContextImpl extends AbstractReadContext implements TransactionContext { + @GuardedBy("lock") + private List<Mutation> mutations = new ArrayList<>(); + + @GuardedBy("lock") + private boolean aborted; + + /** Default to -1 to indicate not available. */ + @GuardedBy("lock") + private long retryDelayInMillis = -1L; + + private ByteString transactionId; + private Timestamp commitTimestamp; + + TransactionContextImpl( + SessionImpl session, + @Nullable ByteString transactionId, + SpannerRpc rpc, + int defaultPrefetchChunks) { + super(session, rpc, defaultPrefetchChunks); + this.transactionId = transactionId; + } + + void ensureTxn() { + if (transactionId == null) { + span.addAnnotation("Creating Transaction"); + try { + transactionId = session.beginTransaction(); + span.addAnnotation( + "Transaction Creation Done", + ImmutableMap.of( + "Id", AttributeValue.stringAttributeValue(transactionId.toStringUtf8()))); + txnLogger.log( + Level.FINER, + "Started transaction {0}", + txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null); + } catch (SpannerException e) { + span.addAnnotation("Transaction Creation Failed", TraceUtil.getExceptionAnnotations(e)); + throw e; + } + } else { + span.addAnnotation( + "Transaction Initialized", + ImmutableMap.of( + "Id", AttributeValue.stringAttributeValue(transactionId.toStringUtf8()))); + txnLogger.log( + Level.FINER, + "Using prepared transaction {0}", + txnLogger.isLoggable(Level.FINER) ? transactionId.asReadOnlyByteBuffer() : null); + } + } + + void commit() { + span.addAnnotation("Starting Commit"); + CommitRequest.Builder builder = + CommitRequest.newBuilder().setSession(session.getName()).setTransactionId(transactionId); + synchronized (lock) { + if (!mutations.isEmpty()) { + List<com.google.spanner.v1.Mutation> mutationsProto = new ArrayList<>(); + Mutation.toProto(mutations, mutationsProto); + builder.addAllMutations(mutationsProto); + } + // Ensure that no call to buffer mutations that would be lost can succeed. + mutations = null; + } + final CommitRequest commitRequest = builder.build(); + Span opSpan = tracer.spanBuilderWithExplicitParent(SpannerImpl.COMMIT, span).startSpan(); + try (Scope s = tracer.withSpan(opSpan)) { + CommitResponse commitResponse = + SpannerImpl.runWithRetries( + new Callable<CommitResponse>() { + @Override + public CommitResponse call() throws Exception { + return rpc.commit(commitRequest, session.getOptions()); + } + }); + + if (!commitResponse.hasCommitTimestamp()) { + throw newSpannerException( + ErrorCode.INTERNAL, "Missing commitTimestamp:\n" + session.getName()); + } + commitTimestamp = Timestamp.fromProto(commitResponse.getCommitTimestamp()); + opSpan.end(); + } catch (RuntimeException e) { + span.addAnnotation("Commit Failed", TraceUtil.getExceptionAnnotations(e)); + TraceUtil.endSpanWithFailure(opSpan, e); + throw e; + } + span.addAnnotation("Commit Done"); + } + + Timestamp commitTimestamp() { + checkState(commitTimestamp != null, "run() has not yet returned normally"); + return commitTimestamp; + } + + boolean isAborted() { + synchronized (lock) { + return aborted; + } + } + + /** Return the delay in milliseconds between requests to Cloud Spanner. */ + long getRetryDelayInMillis(BackOff backoff) { + long delay = SpannerImpl.nextBackOffMillis(backoff); + synchronized (lock) { + if (retryDelayInMillis >= 0) { + return retryDelayInMillis; + } + } + return delay; + } + + void rollback() { + // We're exiting early due to a user exception, but the transaction is still active. + // Send a rollback for the transaction to release any locks held. + // TODO(user): Make this an async fire-and-forget request. + try { + // Note that we're not retrying this request since we don't particularly care about the + // response.
Normally, the next thing that will happen is that we will make a fresh + // transaction attempt, which should implicitly abort this one. + span.addAnnotation("Starting Rollback"); + rpc.rollback( + RollbackRequest.newBuilder() + .setSession(session.getName()) + .setTransactionId(transactionId) + .build(), + session.getOptions()); + span.addAnnotation("Rollback Done"); + } catch (SpannerException e) { + txnLogger.log(Level.FINE, "Exception during rollback", e); + span.addAnnotation("Rollback Failed", TraceUtil.getExceptionAnnotations(e)); + } + } + + @Nullable + @Override + TransactionSelector getTransactionSelector() { + return TransactionSelector.newBuilder().setId(transactionId).build(); + } + + @Override + public void onError(SpannerException e) { + if (e.getErrorCode() == ErrorCode.ABORTED) { + long delay = -1L; + if (e instanceof AbortedException) { + delay = ((AbortedException) e).getRetryDelayInMillis(); + } + if (delay == -1L) { + txnLogger.log(Level.FINE, "Retry duration is missing from the exception.", e); + } + + synchronized (lock) { + retryDelayInMillis = delay; + aborted = true; + } + } + } + + @Override + public void buffer(Mutation mutation) { + synchronized (lock) { + checkNotNull(mutations, "Context is closed"); + mutations.add(checkNotNull(mutation)); + } + } + + @Override + public void buffer(Iterable<Mutation> mutations) { + synchronized (lock) { + checkNotNull(this.mutations, "Context is closed"); + for (Mutation mutation : mutations) { + this.mutations.add(checkNotNull(mutation)); + } + } + } + + @Override + public long executeUpdate(Statement statement) { + beforeReadOrQuery(); + final ExecuteSqlRequest.Builder builder = + getExecuteSqlRequestBuilder(statement, QueryMode.NORMAL); + com.google.spanner.v1.ResultSet resultSet = + SpannerImpl.runWithRetries( + new Callable<com.google.spanner.v1.ResultSet>() { + @Override + public com.google.spanner.v1.ResultSet call() throws Exception { + return rpc.executeQuery(builder.build(), session.getOptions()); + } + }); + if (!resultSet.hasStats()) { + throw new IllegalArgumentException( + "DML response missing stats possibly due to non-DML statement as input"); + } + // For standard DML, using the exact row count.
+ return resultSet.getStats().getRowCountExact(); + } + + @Override + public long[] batchUpdate(Iterable<Statement> statements) { + beforeReadOrQuery(); + final ExecuteBatchDmlRequest.Builder builder = getExecuteBatchDmlRequestBuilder(statements); + com.google.spanner.v1.ExecuteBatchDmlResponse response = + SpannerImpl.runWithRetries( + new Callable<com.google.spanner.v1.ExecuteBatchDmlResponse>() { + @Override + public com.google.spanner.v1.ExecuteBatchDmlResponse call() throws Exception { + return rpc.executeBatchDml(builder.build(), session.getOptions()); + } + }); + long[] results = new long[response.getResultSetsCount()]; + for (int i = 0; i < response.getResultSetsCount(); ++i) { + results[i] = response.getResultSets(i).getStats().getRowCountExact(); + } + + if (response.getStatus().getCode() != 0) { + throw newSpannerBatchUpdateException( + ErrorCode.fromRpcStatus(response.getStatus()), + response.getStatus().getMessage(), + results); + } + return results; + } + } + + private boolean blockNestedTxn = true; + + /** Allow for testing of backoff logic */ + static class Sleeper { + void backoffSleep(Context context, long backoffMillis) { + SpannerImpl.backoffSleep(context, backoffMillis); + } + } + + private final SessionImpl session; + private final TransactionRunnerImpl.Sleeper sleeper; + private final Span span; + private TransactionContextImpl txn; + private volatile boolean isValid = true; + + @Override + public TransactionRunner allowNestedTransaction() { + blockNestedTxn = false; + return this; + } + + TransactionRunnerImpl( + SessionImpl session, + SpannerRpc rpc, + TransactionRunnerImpl.Sleeper sleeper, + int defaultPrefetchChunks) { + this.session = session; + this.sleeper = sleeper; + this.span = Tracing.getTracer().getCurrentSpan(); + this.txn = session.newTransaction(); + } + + TransactionRunnerImpl(SessionImpl session, SpannerRpc rpc, int defaultPrefetchChunks) { + this(session, rpc, new Sleeper(), defaultPrefetchChunks); + } + + @Nullable + @Override + public <T> T run(TransactionCallable<T> callable) { + try (Scope s = tracer.withSpan(span)) { + if (blockNestedTxn) { + SessionImpl.hasPendingTransaction.set(Boolean.TRUE); + } + + return runInternal(callable); + } catch (RuntimeException e) { + TraceUtil.endSpanWithFailure(span, e); + throw e; + } finally { + // Remove threadLocal rather than set to FALSE to avoid a possible memory leak. + // We also do this unconditionally in case a user has modified the flag when the transaction + // was running. + SessionImpl.hasPendingTransaction.remove(); + } + } + + private <T> T runInternal(TransactionCallable<T> callable) { + BackOff backoff = SpannerImpl.newBackOff(); + final Context context = Context.current(); + int attempt = 0; + // TODO: Change this to use TransactionManager. + while (true) { + checkState( + isValid, "TransactionRunner has been invalidated by a new operation on the session"); + SpannerImpl.checkContext(context); + attempt++; + // TODO(user): When using streaming reads, consider using the first read to begin + // the txn. + span.addAnnotation( + "Starting Transaction Attempt", + ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); + txn.ensureTxn(); + + T result; + boolean shouldRollback = true; + try { + result = callable.run(txn); + shouldRollback = false; + } catch (Exception e) { + txnLogger.log(Level.FINE, "User-provided TransactionCallable raised exception", e); + if (txn.isAborted() || (e instanceof AbortedException)) { + span.addAnnotation( + "Transaction Attempt Aborted in user operation. Retrying", + ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); + shouldRollback = false; + backoff(context, backoff); + continue; + } + SpannerException toThrow; + if (e instanceof SpannerException) { + toThrow = (SpannerException) e; + } else { + toThrow = newSpannerException(ErrorCode.UNKNOWN, e.getMessage(), e); + } + span.addAnnotation( + "Transaction Attempt Failed in user operation", + ImmutableMap.<String, AttributeValue>builder() + .putAll(TraceUtil.getExceptionAnnotations(toThrow)) + .put("Attempt", AttributeValue.longAttributeValue(attempt)) + .build()); + throw toThrow; + } finally { + if (shouldRollback) { + txn.rollback(); + } + } + + try { + txn.commit(); + span.addAnnotation( + "Transaction Attempt Succeeded", + ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); + return result; + } catch (AbortedException e) { + txnLogger.log(Level.FINE, "Commit aborted", e); + span.addAnnotation( + "Transaction Attempt Aborted in Commit. Retrying", + ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); + backoff(context, backoff); + } catch (SpannerException e) { + span.addAnnotation( + "Transaction Attempt Failed in Commit", + ImmutableMap.<String, AttributeValue>builder() + .putAll(TraceUtil.getExceptionAnnotations(e)) + .put("Attempt", AttributeValue.longAttributeValue(attempt)) + .build()); + throw e; + } + } + } + + @Override + public Timestamp getCommitTimestamp() { + return txn.commitTimestamp(); + } + + @Override + public void invalidate() { + isValid = false; + } + + private void backoff(Context context, BackOff backoff) { + long delay = txn.getRetryDelayInMillis(backoff); + txn = session.newTransaction(); + span.addAnnotation( + "Backing off", ImmutableMap.of("Delay", AttributeValue.longAttributeValue(delay))); + sleeper.backoffSleep(context, delay); + } +}
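The runner above owns ensureTxn/commit/rollback plus the abort-backoff-retry loop, so a caller only supplies a TransactionCallable that must tolerate being invoked more than once. A usage sketch of the public entry point (the table, column, and method names in the sketch are illustrative, and real code should bind parameters rather than concatenate SQL):

import com.google.cloud.spanner.DatabaseClient;
import com.google.cloud.spanner.Statement;
import com.google.cloud.spanner.TransactionContext;
import com.google.cloud.spanner.TransactionRunner.TransactionCallable;

final class TransactionRunnerExample {
  /** Sketch: the runner re-invokes the callable after ABORTED, so keep it side-effect free. */
  static long deleteSinger(DatabaseClient client, final long singerId) {
    return client
        .readWriteTransaction()
        .run(
            new TransactionCallable<Long>() {
              @Override
              public Long run(TransactionContext txn) {
                // executeUpdate() returns the exact row count for standard DML.
                return txn.executeUpdate(
                    Statement.of("DELETE FROM Singers WHERE SingerId = " + singerId));
              }
            });
  }
}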
Retrying", + ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); + shouldRollback = false; + backoff(context, backoff); + continue; + } + SpannerException toThrow; + if (e instanceof SpannerException) { + toThrow = (SpannerException) e; + } else { + toThrow = newSpannerException(ErrorCode.UNKNOWN, e.getMessage(), e); + } + span.addAnnotation( + "Transaction Attempt Failed in user operation", + ImmutableMap.builder() + .putAll(TraceUtil.getExceptionAnnotations(toThrow)) + .put("Attempt", AttributeValue.longAttributeValue(attempt)) + .build()); + throw toThrow; + } finally { + if (shouldRollback) { + txn.rollback(); + } + } + + try { + txn.commit(); + span.addAnnotation( + "Transaction Attempt Succeeded", + ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); + return result; + } catch (AbortedException e) { + txnLogger.log(Level.FINE, "Commit aborted", e); + span.addAnnotation( + "Transaction Attempt Aborted in Commit. Retrying", + ImmutableMap.of("Attempt", AttributeValue.longAttributeValue(attempt))); + backoff(context, backoff); + } catch (SpannerException e) { + span.addAnnotation( + "Transaction Attempt Failed in Commit", + ImmutableMap.builder() + .putAll(TraceUtil.getExceptionAnnotations(e)) + .put("Attempt", AttributeValue.longAttributeValue(attempt)) + .build()); + throw e; + } + } + } + + @Override + public Timestamp getCommitTimestamp() { + return txn.commitTimestamp(); + } + + @Override + public void invalidate() { + isValid = false; + } + + private void backoff(Context context, BackOff backoff) { + long delay = txn.getRetryDelayInMillis(backoff); + txn = session.newTransaction(); + span.addAnnotation( + "Backing off", ImmutableMap.of("Delay", AttributeValue.longAttributeValue(delay))); + sleeper.backoffSleep(context, delay); + } +} diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchClientImplTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchClientImplTest.java index 395e11cbea7b..e691d1839b77 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchClientImplTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/BatchClientImplTest.java @@ -59,15 +59,15 @@ public final class BatchClientImplTest { public void setUp() { initMocks(this); DatabaseId db = DatabaseId.of(DB_NAME); - SpannerImpl spanner = new SpannerImpl(gapicRpc, 1, spannerOptions); + when(spannerOptions.getPrefetchChunks()).thenReturn(1); + SpannerImpl spanner = new SpannerImpl(gapicRpc, spannerOptions); client = new BatchClientImpl(db, spanner); } @Test public void testBatchReadOnlyTxnWithBound() throws Exception { Session sessionProto = Session.newBuilder().setName(SESSION_NAME).build(); - when(gapicRpc.createSession( - eq(DB_NAME), (Map) anyMap(), optionsCaptor.capture())) + when(gapicRpc.createSession(eq(DB_NAME), anyMap(), optionsCaptor.capture())) .thenReturn(sessionProto); com.google.protobuf.Timestamp timestamp = Timestamps.parse(TIMESTAMP); Transaction txnMetadata = diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientImplTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientImplTest.java index 8baa98d7ac32..7b5ae42a3d26 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientImplTest.java +++ 
b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/DatabaseAdminClientImplTest.java @@ -40,7 +40,7 @@ import org.junit.runners.JUnit4; import org.mockito.Mock; -/** Unit tests for {@link com.google.cloud.spanner.SpannerImpl.DatabaseAdminClientImpl}. */ +/** Unit tests for {@link com.google.cloud.spanner.DatabaseAdminClientImpl}. */ @RunWith(JUnit4.class) public class DatabaseAdminClientImplTest { private static final String PROJECT_ID = "my-project"; @@ -52,12 +52,12 @@ public class DatabaseAdminClientImplTest { "projects/my-project/instances/my-instance/databases/my-db2"; @Mock SpannerRpc rpc; - SpannerImpl.DatabaseAdminClientImpl client; + DatabaseAdminClientImpl client; @Before public void setUp() { initMocks(this); - client = new SpannerImpl.DatabaseAdminClientImpl(PROJECT_ID, rpc); + client = new DatabaseAdminClientImpl(PROJECT_ID, rpc); } private Database getDatabaseProto() { diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java index 6b88e72e44c1..95698ab5762b 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/GrpcResultSetTest.java @@ -23,6 +23,8 @@ import com.google.cloud.ByteArray; import com.google.cloud.Date; import com.google.cloud.Timestamp; +import com.google.cloud.spanner.AbstractResultSet.GrpcResultSet; +import com.google.cloud.spanner.AbstractResultSet.GrpcStreamIterator; import com.google.cloud.spanner.spi.v1.SpannerRpc; import com.google.common.base.Function; import com.google.common.collect.ImmutableList; @@ -51,11 +53,11 @@ public class GrpcResultSetTest { @Rule public ExpectedException expectedException = ExpectedException.none(); - private SpannerImpl.GrpcResultSet resultSet; + private GrpcResultSet resultSet; private SpannerRpc.ResultStreamConsumer consumer; - private SpannerImpl.GrpcStreamIterator stream; + private GrpcStreamIterator stream; - private static class NoOpListener implements SpannerImpl.AbstractResultSet.Listener { + private static class NoOpListener implements AbstractResultSet.Listener { @Override public void onTransactionMetadata(Transaction transaction) throws SpannerException {} @@ -68,7 +70,7 @@ public void onDone() {} @Before public void setUp() { - stream = new SpannerImpl.GrpcStreamIterator(10); + stream = new GrpcStreamIterator(10); stream.setCall( new SpannerRpc.StreamingCall() { @Override @@ -78,11 +80,11 @@ public void cancel(@Nullable String message) {} public void request(int numMessages) {} }); consumer = stream.consumer(); - resultSet = new SpannerImpl.GrpcResultSet(stream, new NoOpListener(), QueryMode.NORMAL); + resultSet = new GrpcResultSet(stream, new NoOpListener()); } - public SpannerImpl.GrpcResultSet resultSetWithMode(QueryMode queryMode) { - return new SpannerImpl.GrpcResultSet(stream, new NoOpListener(), queryMode); + public GrpcResultSet resultSetWithMode(QueryMode queryMode) { + return new GrpcResultSet(stream, new NoOpListener()); } @Test @@ -631,7 +633,7 @@ public com.google.protobuf.Value apply(@Nullable Value input) { private void verifySerialization( Function<Value, com.google.protobuf.Value> protoFn, Value...
values) { - resultSet = new SpannerImpl.GrpcResultSet(stream, new NoOpListener(), QueryMode.NORMAL); + resultSet = new GrpcResultSet(stream, new NoOpListener()); PartialResultSet.Builder builder = PartialResultSet.newBuilder(); List<Type> types = new ArrayList<>(); for (Value value : values) { diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java index 7fb451f508c5..1a2fba154e21 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/InstanceAdminClientImplTest.java @@ -37,7 +37,7 @@ import org.junit.runners.JUnit4; import org.mockito.Mock; -/** Unit tests for {@link com.google.cloud.spanner.SpannerImpl.InstanceAdminClientImpl}. */ +/** Unit tests for {@link com.google.cloud.spanner.InstanceAdminClientImpl}. */ @RunWith(JUnit4.class) public class InstanceAdminClientImplTest { private static final String PROJECT_ID = "my-project"; @@ -50,12 +50,12 @@ public class InstanceAdminClientImplTest { @Mock SpannerRpc rpc; @Mock DatabaseAdminClient dbClient; - SpannerImpl.InstanceAdminClientImpl client; + InstanceAdminClientImpl client; @Before public void setUp() { initMocks(this); - client = new SpannerImpl.InstanceAdminClientImpl(PROJECT_ID, rpc, dbClient); + client = new InstanceAdminClientImpl(PROJECT_ID, rpc, dbClient); } @Test diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java index ca52fbc298b8..c6e0840d36ea 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ReadFormatTestRunner.java @@ -19,10 +19,11 @@ import static com.google.common.truth.Truth.assertThat; import com.google.cloud.ByteArray; +import com.google.cloud.spanner.AbstractResultSet.GrpcResultSet; +import com.google.cloud.spanner.AbstractResultSet.GrpcStreamIterator; import com.google.cloud.spanner.spi.v1.SpannerRpc; import com.google.common.io.Resources; import com.google.protobuf.util.JsonFormat; -import com.google.spanner.v1.ExecuteSqlRequest.QueryMode; import com.google.spanner.v1.PartialResultSet; import com.google.spanner.v1.Transaction; import java.nio.charset.StandardCharsets; @@ -42,7 +43,7 @@ /** Test runner that runs tests specified in json file */ public class ReadFormatTestRunner extends ParentRunner<JSONObject> { - private static class NoOpListener implements SpannerImpl.AbstractResultSet.Listener { + private static class NoOpListener implements AbstractResultSet.Listener { @Override public void onTransactionMetadata(Transaction transaction) throws SpannerException {} @@ -99,9 +100,9 @@ protected List<JSONObject> getChildren() { } private class TestCaseRunner { - private SpannerImpl.GrpcResultSet resultSet; + private GrpcResultSet resultSet; private SpannerRpc.ResultStreamConsumer consumer; - private SpannerImpl.GrpcStreamIterator stream; + private GrpcStreamIterator stream; private JSONObject testCase; TestCaseRunner(JSONObject testCase) { @@ -109,7 +110,7 @@ private class TestCaseRunner { } private void run() throws Exception { - stream = new SpannerImpl.GrpcStreamIterator(10);
+ stream = new GrpcStreamIterator(10); stream.setCall( new SpannerRpc.StreamingCall() { @Override @@ -119,7 +120,7 @@ public void cancel(@Nullable String message) {} public void request(int numMessages) {} }); consumer = stream.consumer(); - resultSet = new SpannerImpl.GrpcResultSet(stream, new NoOpListener(), QueryMode.NORMAL); + resultSet = new GrpcResultSet(stream, new NoOpListener()); JSONArray chunks = testCase.getJSONArray("chunks"); JSONObject expectedResult = testCase.getJSONObject("result"); @@ -132,8 +133,7 @@ public void request(int numMessages) {} assertResultSet(resultSet, expectedResult.getJSONArray("value")); } - private void assertResultSet(SpannerImpl.GrpcResultSet actual, JSONArray expected) - throws Exception { + private void assertResultSet(GrpcResultSet actual, JSONArray expected) throws Exception { int i = 0; while (actual.next()) { Struct actualRow = actual.getCurrentRowAsStruct(); diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java index 112ed1103cf9..3621f529f0d8 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/ResumableStreamIteratorTest.java @@ -19,6 +19,8 @@ import static com.google.cloud.spanner.SpannerMatchers.isSpannerException; import static com.google.common.truth.Truth.assertThat; +import com.google.cloud.spanner.AbstractResultSet.CloseableIterator; +import com.google.cloud.spanner.AbstractResultSet.ResumableStreamIterator; import com.google.common.collect.AbstractIterator; import com.google.common.collect.Lists; import com.google.protobuf.ByteString; @@ -41,7 +43,7 @@ @RunWith(JUnit4.class) public class ResumableStreamIteratorTest { interface Starter { - SpannerImpl.CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken); + CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken); } interface ResultSetStream { @@ -64,7 +66,7 @@ static class NonRetryableException extends SpannerException { } static class ResultSetIterator extends AbstractIterator<PartialResultSet> - implements SpannerImpl.CloseableIterator<PartialResultSet> { + implements CloseableIterator<PartialResultSet> { final ResultSetStream stream; ResultSetIterator(ResultSetStream stream) { @@ -89,7 +91,7 @@ public void close(@Nullable String message) { @Rule public ExpectedException expectedException = ExpectedException.none(); Starter starter = Mockito.mock(Starter.class); - SpannerImpl.ResumableStreamIterator iterator; + ResumableStreamIterator iterator; @Before public void setUp() { @@ -98,10 +100,9 @@ public void setUp() { private void initWithLimit(int maxBufferSize) { iterator = - new SpannerImpl.ResumableStreamIterator(maxBufferSize, "", null) { + new ResumableStreamIterator(maxBufferSize, "", null) { @Override - SpannerImpl.CloseableIterator<PartialResultSet> startStream( - @Nullable ByteString resumeToken) { + CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken) { return starter.startStream(resumeToken); } };
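ResumableStreamIteratorTest above exercises the resume-token bookkeeping that AbstractResultSet.ResumableStreamIterator performs: remember the last resume token the server sent and, on a retryable failure, reopen the stream from that token instead of from the start. A condensed sketch of that pattern, with a simple restart budget standing in for the real class's buffer cap and backoff (ResumingIterator is an illustrative name, not the library class):

import com.google.common.collect.AbstractIterator;
import com.google.protobuf.ByteString;
import com.google.spanner.v1.PartialResultSet;
import java.util.Iterator;

/** Illustrative sketch: resume a broken PartialResultSet stream from the last token. */
abstract class ResumingIterator extends AbstractIterator<PartialResultSet> {
  private static final int MAX_RESTARTS = 3; // stand-in for the real retry/backoff policy

  private Iterator<PartialResultSet> stream;
  private ByteString resumeToken; // null until the server sends one
  private int restarts;

  /** Opens a stream positioned after {@code resumeToken}; null means from the beginning. */
  abstract Iterator<PartialResultSet> startStream(ByteString resumeToken);

  @Override
  protected PartialResultSet computeNext() {
    while (true) {
      try {
        if (stream == null) {
          stream = startStream(resumeToken);
        }
        if (!stream.hasNext()) {
          return endOfData();
        }
        PartialResultSet next = stream.next();
        if (!next.getResumeToken().isEmpty()) {
          // Remember the most recent restart point the server handed us.
          resumeToken = next.getResumeToken();
        }
        return next;
      } catch (RuntimeException e) {
        // The real iterator only retries retryable gRPC codes and sleeps between attempts.
        if (++restarts > MAX_RESTARTS) {
          throw e;
        }
        stream = null; // reopen from resumeToken on the next pass
      }
    }
  }
}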
diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java index 169d2177228b..1a34e3c0efee 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SessionImplTest.java @@ -55,7 +55,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -/** Unit tests for {@link com.google.cloud.spanner.SpannerImpl.SessionImpl}. */ +/** Unit tests for {@link com.google.cloud.spanner.SessionImpl}. */ @RunWith(JUnit4.class) public class SessionImplTest { @Rule public ExpectedException expectedException = ExpectedException.none(); @@ -69,7 +69,8 @@ public class SessionImplTest { @Before public void setUp() { MockitoAnnotations.initMocks(this); - SpannerImpl spanner = new SpannerImpl(rpc, 1, spannerOptions); + Mockito.when(spannerOptions.getPrefetchChunks()).thenReturn(1); + SpannerImpl spanner = new SpannerImpl(rpc, spannerOptions); String dbName = "projects/p1/instances/i1/databases/d1"; String sessionName = dbName + "/sessions/s1"; DatabaseId db = DatabaseId.of(dbName); diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerImplTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerImplTest.java index 93c20dfb19b3..58da14429569 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerImplTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/SpannerImplTest.java @@ -20,6 +20,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; +import static org.mockito.Mockito.when; import com.google.cloud.grpc.GrpcTransportOptions; import com.google.cloud.spanner.spi.v1.SpannerRpc; @@ -49,7 +50,8 @@ public class SpannerImplTest { @Before public void setUp() { MockitoAnnotations.initMocks(this); - impl = new SpannerImpl(rpc, 1, spannerOptions); + when(spannerOptions.getPrefetchChunks()).thenReturn(1); + impl = new SpannerImpl(rpc, spannerOptions); } @Test @@ -99,7 +101,7 @@ public void getDbclientAgainGivesSame() { @Test public void getDbclientAfterCloseThrows() { - SpannerImpl imp = new SpannerImpl(rpc, 1, spannerOptions); + SpannerImpl imp = new SpannerImpl(rpc, spannerOptions); Map<String, String> labels = new HashMap<>(); labels.put("env", "dev"); Mockito.when(spannerOptions.getSessionLabels()).thenReturn(labels); diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java index ba7dee751664..910d0dd91421 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionManagerImplTest.java @@ -24,8 +24,8 @@ import static org.mockito.MockitoAnnotations.initMocks; import com.google.cloud.Timestamp; -import com.google.cloud.spanner.SpannerImpl.SessionImpl; import com.google.cloud.spanner.TransactionManager.TransactionState; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -41,7 +41,7 @@ public class TransactionManagerImplTest { @Rule public ExpectedException exception = ExpectedException.none(); @Mock private SessionImpl session; - @Mock SpannerImpl.TransactionContextImpl txn; + @Mock TransactionContextImpl txn; private TransactionManagerImpl manager; @Before @@ -117,7 +117,7 @@
public void resetAfterAbortSucceeds() { } catch (AbortedException e) { assertThat(manager.getState()).isEqualTo(TransactionState.ABORTED); } - txn = Mockito.mock(SpannerImpl.TransactionContextImpl.class); + txn = Mockito.mock(TransactionContextImpl.class); when(session.newTransaction()).thenReturn(txn); assertThat(manager.resetForRetry()).isEqualTo(txn); assertThat(manager.getState()).isEqualTo(TransactionState.STARTED); diff --git a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java index d3cf17e6109e..89cf206c2c9b 100644 --- a/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java +++ b/google-cloud-clients/google-cloud-spanner/src/test/java/com/google/cloud/spanner/TransactionRunnerImplTest.java @@ -18,15 +18,16 @@ import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.fail; -import static org.mockito.Mockito.any; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import com.google.api.client.util.BackOff; import com.google.cloud.spanner.TransactionRunner.TransactionCallable; +import com.google.cloud.spanner.TransactionRunnerImpl.TransactionContextImpl; import com.google.cloud.spanner.spi.v1.SpannerRpc; import io.grpc.Context; import io.grpc.Status; @@ -43,10 +44,10 @@ @RunWith(JUnit4.class) public class TransactionRunnerImplTest { @Mock private SpannerRpc rpc; - @Mock private SpannerImpl.SessionImpl session; - @Mock private SpannerImpl.TransactionRunnerImpl.Sleeper sleeper; - @Mock private SpannerImpl.TransactionContextImpl txn; - private SpannerImpl.TransactionRunnerImpl transactionRunner; + @Mock private SessionImpl session; + @Mock private TransactionRunnerImpl.Sleeper sleeper; + @Mock private TransactionContextImpl txn; + private TransactionRunnerImpl transactionRunner; private boolean firstRun; @Before @@ -54,7 +55,7 @@ public void setUp() throws Exception { MockitoAnnotations.initMocks(this); firstRun = true; when(session.newTransaction()).thenReturn(txn); - transactionRunner = new SpannerImpl.TransactionRunnerImpl(session, rpc, sleeper, 1); + transactionRunner = new TransactionRunnerImpl(session, rpc, sleeper, 1); } @Test