|
18 | 18 |
|
19 | 19 | import com.google.api.core.ApiFuture; |
20 | 20 | import com.google.api.core.SettableApiFuture; |
| 21 | +import com.google.cloud.BaseServiceException; |
21 | 22 | import com.google.cloud.storage.BlobDescriptor.ZeroCopySupport.DisposableByteString; |
22 | 23 | import com.google.cloud.storage.ResponseContentLifecycleHandle.ChildRef; |
23 | 24 | import com.google.cloud.storage.RetryContext.OnFailure; |
24 | 25 | import com.google.cloud.storage.RetryContext.OnSuccess; |
| 26 | +import com.google.cloud.storage.UnbufferedReadableByteChannelSession.UnbufferedReadableByteChannel; |
| 27 | +import com.google.common.base.Preconditions; |
25 | 28 | import com.google.protobuf.ByteString; |
26 | 29 | import com.google.storage.v2.ReadRange; |
27 | 30 | import java.io.Closeable; |
28 | 31 | import java.io.IOException; |
| 32 | +import java.io.InterruptedIOException; |
| 33 | +import java.nio.ByteBuffer; |
| 34 | +import java.nio.channels.ClosedChannelException; |
| 35 | +import java.nio.channels.ReadableByteChannel; |
29 | 36 | import java.util.ArrayList; |
30 | 37 | import java.util.Collections; |
31 | 38 | import java.util.List; |
| 39 | +import java.util.concurrent.ArrayBlockingQueue; |
| 40 | +import java.util.concurrent.BlockingQueue; |
32 | 41 | import java.util.concurrent.atomic.AtomicLong; |
| 42 | +import org.checkerframework.checker.nullness.qual.Nullable; |
33 | 43 |
|
34 | 44 | abstract class BlobDescriptorStreamRead implements AutoCloseable, Closeable { |
35 | 45 |
|
@@ -116,6 +126,10 @@ static ZeroCopyByteStringAccumulatingRead createZeroCopyByteStringAccumulatingRe |
116 | 126 | return new ZeroCopyByteStringAccumulatingRead(readId, rangeSpec, retryContext, complete); |
117 | 127 | } |
118 | 128 |
|
| 129 | + static StreamingRead streamingRead(long readId, RangeSpec rangeSpec, RetryContext retryContext) { |
| 130 | + return new StreamingRead(readId, rangeSpec, retryContext, false); |
| 131 | + } |
| 132 | + |
119 | 133 | /** Base class of a read that will accumulate before completing by resolving a future */ |
120 | 134 | abstract static class AccumulatingRead<Result> extends BlobDescriptorStreamRead { |
121 | 135 | protected final List<ChildRef> childRefs; |
@@ -182,20 +196,227 @@ public void close() throws IOException { |
182 | 196 |
|
183 | 197 | /** |
184 | 198 | * Base class of a read that will be processed in a streaming manner (e.g. {@link |
185 | | - * java.nio.channels.ReadableByteChannel}) |
| 199 | + * ReadableByteChannel}) |
186 | 200 | */ |
187 | | - abstract static class StreamingRead extends BlobDescriptorStreamRead { |
188 | | - private StreamingRead(long readId, RangeSpec range, RetryContext retryContext) { |
189 | | - super(readId, range, retryContext); |
| 201 | + static class StreamingRead extends BlobDescriptorStreamRead |
| 202 | + implements UnbufferedReadableByteChannel { |
| 203 | + private final SettableApiFuture<Void> failFuture; |
| 204 | + private final BlockingQueue<Closeable> queue; |
| 205 | + |
| 206 | + private boolean complete; |
| 207 | + @Nullable private ChildRefHelper leftovers; |
| 208 | + |
| 209 | + private StreamingRead( |
| 210 | + long readId, RangeSpec rangeSpec, RetryContext retryContext, boolean closed) { |
| 211 | + this( |
| 212 | + readId, |
| 213 | + rangeSpec, |
| 214 | + new AtomicLong(rangeSpec.begin()), |
| 215 | + retryContext, |
| 216 | + closed, |
| 217 | + SettableApiFuture.create(), |
| 218 | + new ArrayBlockingQueue<>(2), |
| 219 | + false, |
| 220 | + null); |
190 | 221 | } |
191 | 222 |
|
192 | 223 | private StreamingRead( |
193 | | - long readId, |
| 224 | + long newReadId, |
194 | 225 | RangeSpec rangeSpec, |
195 | 226 | AtomicLong readOffset, |
196 | 227 | RetryContext retryContext, |
197 | | - boolean closed) { |
198 | | - super(readId, rangeSpec, readOffset, retryContext, closed); |
| 228 | + boolean closed, |
| 229 | + SettableApiFuture<Void> failFuture, |
| 230 | + BlockingQueue<Closeable> queue, |
| 231 | + boolean complete, |
| 232 | + @Nullable ChildRefHelper leftovers) { |
| 233 | + super(newReadId, rangeSpec, readOffset, retryContext, closed); |
| 234 | + this.failFuture = failFuture; |
| 235 | + this.queue = queue; |
| 236 | + this.complete = complete; |
| 237 | + this.leftovers = leftovers; |
| 238 | + } |
| 239 | + |
| 240 | + @Override |
| 241 | + boolean acceptingBytes() { |
| 242 | + return !closed && !tombstoned; |
| 243 | + } |
| 244 | + |
| 245 | + @Override |
| 246 | + void accept(ChildRef childRef) throws IOException { |
| 247 | + retryContext.reset(); |
| 248 | + int size = childRef.byteString().size(); |
| 249 | + offer(childRef); |
| 250 | + readOffset.addAndGet(size); |
| 251 | + } |
| 252 | + |
| 253 | + @Override |
| 254 | + void eof() throws IOException { |
| 255 | + retryContext.reset(); |
| 256 | + offer(EofMarker.INSTANCE); |
| 257 | + } |
| 258 | + |
| 259 | + @Override |
| 260 | + ApiFuture<?> fail(Throwable t) { |
| 261 | + try { |
| 262 | + offer(new SmuggledFailure(t)); |
| 263 | + failFuture.set(null); |
| 264 | + } catch (InterruptedIOException e) { |
| 265 | + Thread.currentThread().interrupt(); |
| 266 | + failFuture.setException(e); |
| 267 | + } |
| 268 | + return failFuture; |
| 269 | + } |
| 270 | + |
| 271 | + @Override |
| 272 | + StreamingRead withNewReadId(long newReadId) { |
| 273 | + tombstoned = true; |
| 274 | + return new StreamingRead( |
| 275 | + newReadId, |
| 276 | + rangeSpec, |
| 277 | + readOffset, |
| 278 | + retryContext, |
| 279 | + closed, |
| 280 | + failFuture, |
| 281 | + queue, |
| 282 | + complete, |
| 283 | + leftovers); |
| 284 | + } |
| 285 | + |
| 286 | + @Override |
| 287 | + public void close() throws IOException { |
| 288 | + if (!closed) { |
| 289 | + retryContext.reset(); |
| 290 | + closed = true; |
| 291 | + if (leftovers != null) { |
| 292 | + leftovers.ref.close(); |
| 293 | + } |
| 294 | + GrpcUtils.closeAll(queue); |
| 295 | + } |
| 296 | + } |
| 297 | + |
| 298 | + @Override |
| 299 | + public boolean isOpen() { |
| 300 | + return !closed; |
| 301 | + } |
| 302 | + |
| 303 | + @Override |
| 304 | + public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { |
| 305 | + if (closed) { |
| 306 | + throw new ClosedChannelException(); |
| 307 | + } |
| 308 | + if (complete) { |
| 309 | + close(); |
| 310 | + return -1; |
| 311 | + } |
| 312 | + |
| 313 | + long read = 0; |
| 314 | + long dstsRemaining = Buffers.totalRemaining(dsts, offset, length); |
| 315 | + if (leftovers != null) { |
| 316 | + read += leftovers.copy(dsts, offset, length); |
| 317 | + if (!leftovers.hasRemaining()) { |
| 318 | + leftovers.ref.close(); |
| 319 | + leftovers = null; |
| 320 | + } |
| 321 | + } |
| 322 | + |
| 323 | + java.lang.Object poll; |
| 324 | + while (read < dstsRemaining && (poll = queue.poll()) != null) { |
| 325 | + if (poll instanceof ChildRef) { |
| 326 | + ChildRefHelper ref = new ChildRefHelper((ChildRef) poll); |
| 327 | + read += ref.copy(dsts, offset, length); |
| 328 | + if (ref.hasRemaining()) { |
| 329 | + leftovers = ref; |
| 330 | + break; |
| 331 | + } else { |
| 332 | + ref.ref.close(); |
| 333 | + } |
| 334 | + } else if (poll == EofMarker.INSTANCE) { |
| 335 | + complete = true; |
| 336 | + if (read == 0) { |
| 337 | + close(); |
| 338 | + return -1; |
| 339 | + } |
| 340 | + break; |
| 341 | + } else if (poll instanceof SmuggledFailure) { |
| 342 | + SmuggledFailure throwable = (SmuggledFailure) poll; |
| 343 | + BaseServiceException coalesce = StorageException.coalesce(throwable.getSmuggled()); |
| 344 | + throw new IOException(coalesce); |
| 345 | + } else { |
| 346 | + //noinspection DataFlowIssue |
| 347 | + Preconditions.checkState( |
| 348 | + false, "unhandled queue element type %s", poll.getClass().getName()); |
| 349 | + } |
| 350 | + } |
| 351 | + |
| 352 | + return read; |
| 353 | + } |
| 354 | + |
| 355 | + private void offer(Closeable offer) throws InterruptedIOException { |
| 356 | + try { |
| 357 | + queue.put(offer); |
| 358 | + } catch (InterruptedException e) { |
| 359 | + Thread.currentThread().interrupt(); |
| 360 | + throw new InterruptedIOException(); |
| 361 | + } |
| 362 | + } |
| 363 | + |
| 364 | + /** |
| 365 | +     * The queue that items are added to is a queue of {@link Closeable}. This class smuggles a |
| 366 | +     * Throwable inside a no-op Closeable so that the throwable can be placed in the queue. |
| 367 | + * |
| 368 | + * <p>Refer to {@link #fail(Throwable)} to see where this class is instantiated. |
| 369 | + */ |
| 370 | + static final class SmuggledFailure implements Closeable { |
| 371 | + private final Throwable smuggled; |
| 372 | + |
| 373 | + private SmuggledFailure(Throwable smuggled) { |
| 374 | + this.smuggled = smuggled; |
| 375 | + } |
| 376 | + |
| 377 | + Throwable getSmuggled() { |
| 378 | + return smuggled; |
| 379 | + } |
| 380 | + |
| 381 | + @Override |
| 382 | + public void close() throws IOException {} |
| 383 | + } |
| 384 | + |
| 385 | + static final class ChildRefHelper { |
| 386 | + private final ChildRef ref; |
| 387 | + |
| 388 | + private final List<ByteBuffer> buffers; |
| 389 | + |
| 390 | + private ChildRefHelper(ChildRef ref) { |
| 391 | + this.ref = ref; |
| 392 | + this.buffers = ref.byteString().asReadOnlyByteBufferList(); |
| 393 | + } |
| 394 | + |
| 395 | + long copy(ByteBuffer[] dsts, int offset, int length) { |
| 396 | + long copied = 0; |
| 397 | + for (ByteBuffer b : buffers) { |
| 398 | + long copiedBytes = Buffers.copy(b, dsts, offset, length); |
| 399 | + copied += copiedBytes; |
| 400 | + if (b.hasRemaining()) break; |
| 401 | + } |
| 402 | + return copied; |
| 403 | + } |
| 404 | + |
| 405 | + boolean hasRemaining() { |
| 406 | + for (ByteBuffer b : buffers) { |
| 407 | + if (b.hasRemaining()) return true; |
| 408 | + } |
| 409 | + return false; |
| 410 | + } |
| 411 | + } |
| 412 | + |
| 413 | + private static final class EofMarker implements Closeable { |
| 414 | + private static final EofMarker INSTANCE = new EofMarker(); |
| 415 | + |
| 416 | + private EofMarker() {} |
| 417 | + |
| 418 | + @Override |
| 419 | + public void close() {} |
199 | 420 | } |
200 | 421 | } |
201 | 422 |
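To illustrate the contract the new `read(ByteBuffer[], int, int)` implementation exposes, here is a minimal consumer sketch. It assumes the channel is handed out as a `java.nio.channels.ScatteringByteChannel` (the scatter-read signature `StreamingRead` overrides); the `drain` helper and the 64 KiB buffer size are illustrative, not part of this change. The loop relies on three behaviors visible in the diff above: `read` returns `0` while the producer has not yet enqueued bytes, returns `-1` after the `EofMarker` is consumed (at which point the channel has already closed itself), and throws an `IOException` wrapping the coalesced failure when a `SmuggledFailure` is polled.

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ScatteringByteChannel;

final class StreamingReadConsumerSketch {

  /** Illustrative only: drains a scatter-read channel such as StreamingRead into a byte array. */
  static byte[] drain(ScatteringByteChannel channel) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ByteBuffer[] dsts = {ByteBuffer.allocate(64 * 1024)};
    try (ScatteringByteChannel c = channel) {
      long read;
      // -1 signals that the EOF marker was consumed and the channel closed itself.
      while ((read = c.read(dsts, 0, dsts.length)) != -1) {
        if (read == 0) {
          continue; // producer has not enqueued bytes yet; a real consumer would block or back off
        }
        dsts[0].flip();
        byte[] chunk = new byte[dsts[0].remaining()];
        dsts[0].get(chunk);
        out.write(chunk, 0, chunk.length);
        dsts[0].clear();
      }
    } // closing an already-closed StreamingRead is a no-op (close() checks the closed flag)
    return out.toByteArray();
  }
}
```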
|
|