Java Class com.amazonaws.services.s3.model.UploadPartRequest Example Source Code

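UploadPartRequest describes one part of an S3 multipart upload in the AWS SDK for Java (v1): the bucket, key, and upload ID identify the upload, while the part number, part size, and a data source (a File plus offset, or an InputStream) describe the part itself. The examples below show how different projects build and submit these requests. As a frame of reference, here is a minimal single-threaded sketch of the full lifecycle; the bucket name, key, and file are placeholders:

AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
        new InitiateMultipartUploadRequest("my-bucket", "my-key"));
List<PartETag> partETags = new ArrayList<>();
File file = new File("payload.bin");
long partSize = 5 * 1024 * 1024; // the S3 minimum for every part except the last
long offset = 0;
for (int partNumber = 1; offset < file.length(); partNumber++) {
    long size = Math.min(partSize, file.length() - offset);
    partETags.add(s3.uploadPart(new UploadPartRequest()
            .withBucketName("my-bucket")
            .withKey("my-key")
            .withUploadId(init.getUploadId())
            .withPartNumber(partNumber)
            .withFileOffset(offset)
            .withFile(file)
            .withPartSize(size))
            .getPartETag());
    offset += size;
}
s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
        "my-bucket", "my-key", init.getUploadId(), partETags));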
Project: s3-channels    File: S3AppendableObjectChannel.java
protected void startWorker(UploadPartRequest req, int retries) {
    int id = req.getPartNumber();

    CompletableFuture<Void> f = CompletableFuture
            .supplyAsync(() -> s3.uploadPart(req), executor)
            .handle((res, error) -> {
                workers.remove(id);
                if (res != null) {
                    done.add(res);
                }
                if (error != null && isOpen()) {
                    if (retries < failedPartUploadRetries) {
                        startWorker(req, retries + 1);
                    } else {
                        this.error = new IllegalStateException("Could not upload part " + id + " after "
                                + retries + " retries. Aborting upload", error.getCause());
                        cancel();
                    }
                }
                return null;
            });

    workers.put(id, f);
}
Project: s3-channels    File: S3AppendableObjectChannelTest.java
@Test
void testFailedUploadPart() throws Exception {
    final AmazonS3 mocked = mock(AmazonS3.class);
    s3channel = (S3AppendableObjectChannel) defaultBuilder("id")
            .failedPartUploadRetries(3)
            .amazonS3(mocked)
            .build();
    when(mocked.uploadPart(any())).thenThrow(new TestException());

    s3channel.skip(MIN_PART_SIZE).write(ByteBuffer.allocate(123));
    while (s3channel.getCancellation() == null) {
        Thread.sleep(25);
    }
    s3channel.getCancellation().get();
    assertFalse(s3channel.getCancellation().isCompletedExceptionally());
    assertFalse(s3channel.isOpen());

    // coverage: exercise startWorker after the channel has been closed
    s3channel.startWorker(new UploadPartRequest().withPartNumber(1), 0);

    assertThrows(IllegalStateException.class, () -> s3channel.write(ByteBuffer.allocate(1)));

    verify(mocked, times(1)).abortMultipartUpload(any());
}
Project: hadoop    File: S3AFastOutputStream.java
public void uploadPartAsync(ByteArrayInputStream inputStream,
    int partSize) {
  final int currentPartNumber = partETagsFutures.size() + 1;
  final UploadPartRequest request =
      new UploadPartRequest().withBucketName(bucket).withKey(key)
          .withUploadId(uploadId).withInputStream(inputStream)
          .withPartNumber(currentPartNumber).withPartSize(partSize);
  request.setGeneralProgressListener(progressListener);
  ListenableFuture<PartETag> partETagFuture =
      executorService.submit(new Callable<PartETag>() {
        @Override
        public PartETag call() throws Exception {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
                uploadId);
          }
          return client.uploadPart(request).getPartETag();
        }
      });
  partETagsFutures.add(partETagFuture);
}
Project: ibm-cos-sdk-java    File: UploadCallable.java
/**
 * Submits a callable for each part to upload to our thread pool and records its corresponding Future.
 */
private void uploadPartsInParallel(UploadPartRequestFactory requestFactory,
        String uploadId) {

    Map<Integer,PartSummary> partNumbers = identifyExistingPartsForResume(uploadId);

    while (requestFactory.hasMoreRequests()) {
        if (threadPool.isShutdown()) throw new CancellationException("TransferManager has been shutdown");
        UploadPartRequest request = requestFactory.getNextUploadPartRequest();
        if (partNumbers.containsKey(request.getPartNumber())) {
            PartSummary summary = partNumbers.get(request.getPartNumber());
            eTagsToSkip.add(new PartETag(request.getPartNumber(), summary.getETag()));
            transferProgress.updateProgress(summary.getSize());
            continue;
        }
        futures.add(threadPool.submit(new UploadPartCallable(s3, request)));
    }
}
Project: ibm-cos-sdk-java    File: S3CryptoModuleEO.java
@Override
final long computeLastPartSize(UploadPartRequest request) {
    long plaintextLength;
    if (request.getFile() != null) {
        if (request.getPartSize() > 0)
            plaintextLength = request.getPartSize();
        else
            plaintextLength = request.getFile().length();
    } else if (request.getInputStream() != null) {
        plaintextLength = request.getPartSize();
    } else {
        return -1;
    }
    long cipherBlockSize = contentCryptoScheme.getBlockSizeInBytes();
    long offset = cipherBlockSize - (plaintextLength % cipherBlockSize);
    return plaintextLength + offset;
}
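To make the arithmetic concrete: with AES the cipher block size is 16 bytes, so a 100-byte last part grows to 112 bytes of ciphertext (offset = 16 - 100 % 16 = 12), and a part whose length is already a multiple of 16 still gains a full 16-byte padding block. A small illustrative snippet, assuming a 16-byte block size:

long cipherBlockSize = 16;                                      // AES block size in bytes
long padded = 100 + (cipherBlockSize - 100 % cipherBlockSize);  // 112
long exact  = 96 + (cipherBlockSize - 96 % cipherBlockSize);    // also 112: a full padding block is added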
Project: aliyun-oss-hadoop-fs    File: S3AFastOutputStream.java
Project: big-c    File: S3AFastOutputStream.java
Project: hadoop-2.6.0-cdh5.4.3    File: S3AFastOutputStream.java
(These three forks carry a byte-for-byte copy of the hadoop uploadPartAsync method shown above; the duplicates are omitted here.)
Project: spring-cloud-aws    File: SimpleStorageResource.java
@Override
public UploadPartResult call() throws Exception {
    try {
        return this.amazonS3.uploadPart(new UploadPartRequest().withBucketName(this.bucketName).
                withKey(this.key).
                withUploadId(this.uploadId).
                withInputStream(new ByteArrayInputStream(this.content)).
                withPartNumber(this.partNumber).
                withLastPart(this.last).
                withPartSize(this.contentLength));
    } finally {
        // Release the memory, as the callable may still live inside the
        // CompletionService, which would cause excessive memory usage
        this.content = null;
    }
}
Project: elasticsearch_my    File: DefaultS3OutputStream.java
protected PartETag doUploadMultipart(S3BlobStore blobStore, String bucketName, String blobName, String uploadId, InputStream is,
        int length, boolean lastPart) throws AmazonS3Exception {
    UploadPartRequest request = new UploadPartRequest()
            .withBucketName(bucketName)
            .withKey(blobName)
            .withUploadId(uploadId)
            .withPartNumber(multipartChunks)
            .withInputStream(is)
            .withPartSize(length)
            .withLastPart(lastPart);

    UploadPartResult response = blobStore.client().uploadPart(request);
    return response.getPartETag();
}
Project: connectors    File: AmazonS3ClientMock.java
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: s3-channels    File: S3AppendableObjectChannel.java
protected UploadPartRequest createRequest(int id, ByteBuffer buffer) {
    buffer.rewind();
    return new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(id)
            .withPartSize(buffer.limit())
            .withInputStream(new ByteArrayInputStream(buffer.array(), 0, buffer.limit()));
}
Project: ibm-cos-sdk-java    File: UploadPartRequestFactory.java
public synchronized UploadPartRequest getNextUploadPartRequest() {
    long partSize = Math.min(optimalPartSize, remainingBytes);
    boolean isLastPart = (remainingBytes - partSize <= 0);

    UploadPartRequest req = null;
    if (wrappedStream != null) {
        req = new UploadPartRequest()
            .withBucketName(bucketName)
            .withKey(key)
            .withUploadId(uploadId)
            .withInputStream(new InputSubstream(wrappedStream, 0, partSize, isLastPart))
            .withPartNumber(partNumber++)
            .withPartSize(partSize);
    } else {
        req = new UploadPartRequest()
            .withBucketName(bucketName)
            .withKey(key)
            .withUploadId(uploadId)
            .withFile(file)
            .withFileOffset(offset)
            .withPartNumber(partNumber++)
            .withPartSize(partSize);
    }
    req.withRequesterPays(origReq.isRequesterPays());
    TransferManager.appendMultipartUserAgent(req);

    if (sseCustomerKey != null) req.setSSECustomerKey(sseCustomerKey);

    offset += partSize;
    remainingBytes -= partSize;

    req.setLastPart(isLastPart);

    req.withGeneralProgressListener(origReq.getGeneralProgressListener())
       .withRequestMetricCollector(origReq.getRequestMetricCollector());
    req.getRequestClientOptions().setReadLimit(origReq.getReadLimit());
    return req;
}
Project: ibm-cos-sdk-java    File: UploadCallable.java
/**
 * Uploads all parts in the request in serial in this thread, then completes
 * the upload and returns the result.
 */
private UploadResult uploadPartsInSeries(UploadPartRequestFactory requestFactory) {

    final List<PartETag> partETags = new ArrayList<PartETag>();

    while (requestFactory.hasMoreRequests()) {
        if (threadPool.isShutdown()) throw new CancellationException("TransferManager has been shutdown");
        UploadPartRequest uploadPartRequest = requestFactory.getNextUploadPartRequest();
        // Mark the stream in case we need to reset it
        InputStream inputStream = uploadPartRequest.getInputStream();
        if (inputStream != null && inputStream.markSupported()) {
            if (uploadPartRequest.getPartSize() >= Integer.MAX_VALUE) {
                inputStream.mark(Integer.MAX_VALUE);
            } else {
                inputStream.mark((int)uploadPartRequest.getPartSize());
            }
        }
        partETags.add(s3.uploadPart(uploadPartRequest).getPartETag());
    }

    CompleteMultipartUploadRequest req =
        new CompleteMultipartUploadRequest(
                origReq.getBucketName(), origReq.getKey(), multipartUploadId, partETags)
            .withRequesterPays(origReq.isRequesterPays())
            .withGeneralProgressListener(origReq.getGeneralProgressListener())
            .withRequestMetricCollector(origReq.getRequestMetricCollector());
    CompleteMultipartUploadResult res = s3.completeMultipartUpload(req);

    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(res.getBucketName());
    uploadResult.setKey(res.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());
    return uploadResult;
}
Project: ibm-cos-sdk-java    File: AWSS3V4Signer.java
/**
 * Determine whether to use aws-chunked for signing
 */
private boolean useChunkEncoding(SignableRequest<?> request) {
    // If chunked encoding is explicitly disabled through client options return right here.
    // Chunked encoding only makes sense to do when the payload is signed
    if (!isPayloadSigningEnabled(request) || isChunkedEncodingDisabled(request)) {
        return false;
    }
    // Chunked encoding only applies to the two request types that stream a payload.
    return request.getOriginalRequestObject() instanceof PutObjectRequest
            || request.getOriginalRequestObject() instanceof UploadPartRequest;
}
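Both client options consulted by the first check are set when the client is built. A hedged sketch using the v1 client builder (all other settings left at their defaults):

AmazonS3 s3 = AmazonS3ClientBuilder.standard()
        .withPayloadSigningEnabled(true)      // sign the payload, so aws-chunked is worth using
        .withChunkedEncodingDisabled(false)   // keep chunked encoding available
        .build();

With these options, useChunkEncoding returns true only for PutObjectRequest and UploadPartRequest, the two request types that carry a payload.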
Project: ibm-cos-sdk-java    File: S3CryptoModuleBase.java
protected final CipherLiteInputStream newMultipartS3CipherInputStream(
        UploadPartRequest req, CipherLite cipherLite) {
    final File fileOrig = req.getFile();
    final InputStream isOrig = req.getInputStream();
    InputStream isCurr = null;
    try {
        if (fileOrig == null) {
            if (isOrig == null) {
                throw new IllegalArgumentException(
                    "A File or InputStream must be specified when uploading part");
            }
            isCurr = isOrig;
        } else {
            isCurr = new ResettableInputStream(fileOrig);
        }
        isCurr = new InputSubstream(isCurr,
                req.getFileOffset(),
                req.getPartSize(),
                req.isLastPart());
        return cipherLite.markSupported()
             ? new CipherLiteInputStream(isCurr, cipherLite,
                   DEFAULT_BUFFER_SIZE,
                   IS_MULTI_PART, req.isLastPart())
             : new RenewableCipherLiteInputStream(isCurr, cipherLite,
                    DEFAULT_BUFFER_SIZE,
                    IS_MULTI_PART, req.isLastPart());
    } catch (Exception e) {
        cleanupDataSource(req, fileOrig, isOrig, isCurr, log);
        throw failure(e,"Unable to create cipher input stream");
    }
}
Project: ibm-cos-sdk-java    File: UploadObjectObserver.java
/**
 * Notified from {@link MultiFileOutputStream#fos()} when a part ready for
 * upload has been successfully created on disk. By default, this method
 * performs the following:
 * <ol>
 * <li>calls {@link #newUploadPartRequest(PartCreationEvent, File)} to
 * create an upload-part request for the newly created ciphertext file</li>
 * <li>calls {@link #appendUserAgent(AmazonWebServiceRequest, String)} to
 * append the necessary user agent string to the request</li>
 * <li>and finally submits a concurrent task that calls
 * {@link #uploadPart(UploadPartRequest)} to perform the upload</li>
 * </ol>
 * <p>
 * To enable parallel uploads, implementations of this method should never
 * block.
 * 
 * @param event
 *            to represent the completion of a ciphertext file creation
 *            which is ready for multipart upload to S3.
 */
public void onPartCreate(PartCreationEvent event) {
    final File part = event.getPart();
    final UploadPartRequest reqUploadPart =
        newUploadPartRequest(event, part);
    final OnFileDelete fileDeleteObserver = event.getFileDeleteObserver();
    appendUserAgent(reqUploadPart, AmazonS3EncryptionClient.USER_AGENT);
    futures.add(es.submit(new Callable<UploadPartResult>() {
        @Override public UploadPartResult call() {
            // Upload the ciphertext directly via the non-encrypting
            // s3 client
            try {
                return uploadPart(reqUploadPart);
            } finally {
                // clean up part already uploaded 
                if (!part.delete()) {
                    LogFactory.getLog(getClass()).debug(
                            "Ignoring failure to delete file " + part
                                    + " which has already been uploaded");
                } else {
                    if (fileDeleteObserver != null)
                        fileDeleteObserver.onFileDelete(null);
                }
            }
        }
    }));
}
Project: ibm-cos-sdk-java    File: UploadObjectObserver.java
/**
 * Creates and returns an upload-part request corresponding to a ciphertext
 * file upon a part-creation event.
 * 
 * @param event
 *            the part-creation event of the ciphertext file.
 * @param part
 *            the created ciphertext file corresponding to the upload part
 */
protected UploadPartRequest newUploadPartRequest(PartCreationEvent event,
        final File part) {
    final UploadPartRequest reqUploadPart = new UploadPartRequest()
        .withBucketName(req.getBucketName())
        .withFile(part)
        .withKey(req.getKey())
        .withPartNumber(event.getPartNumber())
        .withPartSize(part.length())
        .withLastPart(event.isLastPart())
        .withUploadId(uploadId)
        .withObjectMetadata(req.getUploadPartMetadata())
        ;
    return reqUploadPart;
}
Project: nifi-minifi    File: S3OutputStream.java
public void uploadPart(ByteArrayInputStream inputStream, int partSize) {
  int currentPartNumber = partETags.size() + 1;
  UploadPartRequest request = new UploadPartRequest()
                                        .withBucketName(bucket)
                                        .withKey(key)
                                        .withUploadId(uploadId)
                                        .withInputStream(inputStream)
                                        .withPartNumber(currentPartNumber)
                                        .withPartSize(partSize)
                                        .withGeneralProgressListener(progressListener);
  log.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
  partETags.add(s3.uploadPart(request).getPartETag());
}
Project: stocator    File: COSAPIClient.java
/**
 * Upload part of a multi-partition file.
 * <i>Important: this call does not close any input stream in the request.</i>
 * @param request request
 * @return the result of the operation
 * @throws AmazonClientException on problems
 */
public UploadPartResult uploadPart(UploadPartRequest request)
    throws AmazonClientException {
  return mClient.uploadPart(request);
}
Project: stocator    File: COSAPIClient.java
/**
 * Create and initialize a part request of a multipart upload.
 * Exactly one of: {@code uploadStream} or {@code sourceFile}
 * must be specified.
 * @param uploadId ID of ongoing upload
 * @param partNumber current part number of the upload
 * @param size amount of data
 * @param uploadStream source of data to upload
 * @param sourceFile optional source file
 * @return the request
 */
UploadPartRequest newUploadPartRequest(String uploadId,
    int partNumber, int size, InputStream uploadStream, File sourceFile) {
  Preconditions.checkNotNull(uploadId);
  // exactly one source must be set; xor verifies this
  Preconditions.checkArgument((uploadStream != null) ^ (sourceFile != null),
      "Data source");
  Preconditions.checkArgument(size > 0, "Invalid partition size %s", size);
  Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000,
      "partNumber must be between 1 and 10000 inclusive, but is %s",
      partNumber);

  LOG.debug("Creating part upload request for {} #{} size {}",
      uploadId, partNumber, size);
  UploadPartRequest request = new UploadPartRequest()
      .withBucketName(mBucket)
      .withKey(key)
      .withUploadId(uploadId)
      .withPartNumber(partNumber)
      .withPartSize(size);
  if (uploadStream != null) {
    // there's an upload stream. Bind to it.
    request.setInputStream(uploadStream);
  } else {
    request.setFile(sourceFile);
  }
  return request;
}
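A hedged usage sketch of this helper, uploading a single in-memory part; partBytes, uploadId, and partETags are hypothetical names for the caller's state, and uploadPart is the wrapper shown above:

byte[] partBytes = new byte[5 * 1024 * 1024];       // hypothetical 5 MB buffer
UploadPartRequest req = newUploadPartRequest(
    uploadId, 1, partBytes.length,
    new ByteArrayInputStream(partBytes), null);     // stream XOR file
partETags.add(uploadPart(req).getPartETag());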
Project: stocator    File: COSBlockOutputStream.java
/**
 * Upload a block of data asynchronously. This takes the block's data,
 * queues the part upload on the executor, and closes the data source and
 * the block once the upload completes.
 *
 * @param block block to upload
 * @throws IOException upload failure
 */
private void uploadBlockAsync(final COSDataBlocks.DataBlock block) throws IOException {
  LOG.debug("Queueing upload of {}", block);
  final int size = block.dataSize();
  final COSDataBlocks.BlockUploadData uploadData = block.startUpload();
  final int currentPartNumber = partETagsFutures.size() + 1;
  final UploadPartRequest request = writeOperationHelper.newUploadPartRequest(uploadId,
      currentPartNumber, size,
      uploadData.getUploadStream(), uploadData.getFile());

  ListenableFuture<PartETag> partETagFuture = executorService.submit(new Callable<PartETag>() {
    @Override
    public PartETag call() throws Exception {
      // this is the queued upload operation
      LOG.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
      // do the upload
      PartETag partETag;
      try {
        partETag = fs.uploadPart(request).getPartETag();
        LOG.debug("Completed upload of {} to part {}", block, partETag.getETag());
      } finally {
        // close the stream and block
        closeAll(LOG, uploadData, block);
      }
      return partETag;
    }
  });
  partETagsFutures.add(partETagFuture);
}
Project: bender    File: S3TransporterTest.java
private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = spy(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  return mockClient;
}
Project: bender    File: S3TransporterTest.java
@Test
public void testGzFilename() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename.gz");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
}
Project: bender    File: S3TransporterTest.java
@Test
public void testContextBasedFilename()
    throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath/", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  TestContext context = new TestContext();
  context.setAwsRequestId("request_id");
  transport.sendBatch(buffer, partitions, context);
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("basepath/request_id.bz2", argument.getValue().getKey());
}
Project: cloudkeeper    File: S3ConnectionImpl.java
@Override
public CompletableFuture<UploadPartResult> uploadPart(String bucketName, String key, String uploadId,
        int partNumber, InputStream inputStream, int length) {
    UploadPartRequest request = new UploadPartRequest()
        .withBucketName(bucketName)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartNumber(partNumber)
        .withInputStream(inputStream)
        .withPartSize(length);
    return CompletableFuture.supplyAsync(() -> s3Client.uploadPart(request), executorService);
}
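Since each part comes back as a CompletableFuture, a caller can fan out several parts and join them before completing the upload. A minimal sketch, assuming connection is the S3ConnectionImpl above and that the streams and lengths are supplied by the caller:

CompletableFuture<UploadPartResult> part1 =
        connection.uploadPart(bucketName, key, uploadId, 1, stream1, length1);
CompletableFuture<UploadPartResult> part2 =
        connection.uploadPart(bucketName, key, uploadId, 2, stream2, length2);
List<PartETag> partETags = Stream.of(part1, part2)
        .map(CompletableFuture::join)            // block until both parts finish
        .map(UploadPartResult::getPartETag)
        .collect(Collectors.toList());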
Project: kafka-connect-storage-cloud    File: S3OutputStream.java
(This uploadPart method is byte-for-byte identical to the nifi-minifi S3OutputStream example above; the duplicate is omitted here.)
Project: kafka-connect-storage-cloud    File: S3SinkTaskTest.java
@Test
public void testWriteRecordsSpanningMultiplePartsWithRetry() throws Exception {
  localProps.put(S3SinkConnectorConfig.FLUSH_SIZE_CONFIG, "10000");
  localProps.put(S3SinkConnectorConfig.S3_PART_RETRIES_CONFIG, "3");
  setUp();

  List<SinkRecord> sinkRecords = createRecords(11000);
  int totalBytes = calcByteSize(sinkRecords);
  final int parts = totalBytes / connectorConfig.getPartSize();

  // From time to time fail S3 upload part method
  final AtomicInteger count = new AtomicInteger();
  PowerMockito.doAnswer(new Answer<UploadPartResult>() {
    @Override
    public UploadPartResult answer(InvocationOnMock invocationOnMock) throws Throwable {
      if (count.getAndIncrement() % parts == 0) {
        throw new SdkClientException("Boom!");
      } else {
        return (UploadPartResult) invocationOnMock.callRealMethod();
      }
    }
  }).when(s3).uploadPart(Mockito.isA(UploadPartRequest.class));

  replayAll();

  task = new S3SinkTask();
  task.initialize(context);
  task.start(properties);
  verifyAll();

  task.put(sinkRecords);
  task.close(context.assignment());
  task.stop();

  long[] validOffsets = {0, 10000};
  verify(sinkRecords, validOffsets);
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherCallableTest.java
@Test
public void uploadsArtifactToS3() throws IOException {
    // when
    publisher.invoke(workspace, null);

    // then
    final InOrder inOrder = inOrder(clientFactory, awsClients, s3Client);
    inOrder.verify(clientFactory).getAwsClient(ACCESS_KEY, SECRET_KEY, PROXY_HOST, PROXY_PORT, REGION, PLUGIN_VERSION);
    inOrder.verify(awsClients).getCodePipelineClient();
    inOrder.verify(awsClients).getS3Client(credentialsProviderCaptor.capture());
    inOrder.verify(s3Client).initiateMultipartUpload(initiateMultipartUploadRequestCaptor.capture());
    inOrder.verify(s3Client).uploadPart(uploadPartRequestCaptor.capture());

    final com.amazonaws.auth.AWSSessionCredentials credentials
        = (com.amazonaws.auth.AWSSessionCredentials) credentialsProviderCaptor.getValue().getCredentials();
    assertEquals(JOB_ACCESS_KEY, credentials.getAWSAccessKeyId());
    assertEquals(JOB_SECRET_KEY, credentials.getAWSSecretKey());
    assertEquals(JOB_SESSION_TOKEN, credentials.getSessionToken());

    verify(codePipelineClient).getJobDetails(getJobDetailsRequestCaptor.capture());
    assertEquals(JOB_ID, getJobDetailsRequestCaptor.getValue().getJobId());

    final InitiateMultipartUploadRequest initRequest = initiateMultipartUploadRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, initRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, initRequest.getKey());

    final UploadPartRequest uploadRequest = uploadPartRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, uploadRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, uploadRequest.getKey());
    assertEquals(UPLOAD_ID, uploadRequest.getUploadId());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherToolsTest.java
@Before
public void setUp() {
    MockitoAnnotations.initMocks(this);

    when(mockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
            .thenReturn(mockUploadResult);
    when(mockS3Client.uploadPart(any(UploadPartRequest.class))).thenReturn(mockPartRequest);
    when(mockUploadResult.getUploadId()).thenReturn("123");
    when(mockArtifact.getLocation()).thenReturn(mockLocation);
    when(mockLocation.getS3Location()).thenReturn(s3ArtifactLocation);
    when(s3ArtifactLocation.getBucketName()).thenReturn("Bucket");
    when(s3ArtifactLocation.getObjectKey()).thenReturn("Key");

    outContent = TestUtils.setOutputStream();
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherToolsTest.java
@Test
public void uploadFileSuccess() throws IOException {
    TestUtils.initializeTestingFolders();

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            null, // No custom encryption key
            mockS3Client,
            null); // Listener

    final InOrder inOrder = inOrder(mockS3Client);
    inOrder.verify(mockS3Client, times(1)).initiateMultipartUpload(initiateCaptor.capture());
    // Total size is less than 5MB, should only be one upload
    inOrder.verify(mockS3Client, times(1)).uploadPart(any(UploadPartRequest.class));
    inOrder.verify(mockS3Client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Project: vs.msc.ws14    File: S3DataOutputStream.java
private void uploadPartAndFlushBuffer() throws IOException {

    boolean operationSuccessful = false;

    if (this.uploadId == null) {
        this.uploadId = initiateMultipartUpload();
    }

    try {
        if (this.partNumber >= MAX_PART_NUMBER) {
            throw new IOException("Cannot upload any more data: maximum part number reached");
        }

        final InputStream inputStream = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final UploadPartRequest request = new UploadPartRequest();
        request.setBucketName(this.bucket);
        request.setKey(this.object);
        request.setInputStream(inputStream);
        request.setUploadId(this.uploadId);
        request.setPartSize(this.bytesWritten);
        request.setPartNumber(this.partNumber++);

        final UploadPartResult result = this.s3Client.uploadPart(request);
        this.partETags.add(result.getPartETag());

        this.bytesWritten = 0;
        operationSuccessful = true;
    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
Project: Singularity    File: SingularityS3Uploader.java
private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata);
  if (maybeStorageClass.isPresent()) {
    initRequest.setStorageClass(maybeStorageClass.get());
  }
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

  long contentLength = file.length();
  long partSize = configuration.getUploadPartSize();

  try {
    long filePosition = 0;
    for (int i = 1; filePosition < contentLength; i++) {
      partSize = Math.min(partSize, (contentLength - filePosition));
      UploadPartRequest uploadRequest = new UploadPartRequest()
          .withBucketName(bucketName)
          .withKey(key)
          .withUploadId(initResponse.getUploadId())
          .withPartNumber(i)
          .withFileOffset(filePosition)
          .withFile(file)
          .withPartSize(partSize);
      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
      filePosition += partSize;
    }

    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, key, initResponse.getUploadId(), partETags);
    s3Client.completeMultipartUpload(completeRequest);
  } catch (Exception e) {
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId()));
    Throwables.propagate(e);
  }
}
Project: s3distcp    File: MultipartUploadOutputStream.java
public PartETag call() throws Exception {
    InputStream is = new ProgressableResettableBufferedFileInputStream(
            this.partFile, MultipartUploadOutputStream.this.progressable);

    UploadPartRequest request = new UploadPartRequest()
            .withBucketName(MultipartUploadOutputStream.this.bucketName)
            .withKey(MultipartUploadOutputStream.this.key)
            .withUploadId(MultipartUploadOutputStream.this.uploadId)
            .withInputStream(is)
            .withPartNumber(this.partNumber)
            .withPartSize(this.partFile.length())
            .withMD5Digest(this.md5sum);

    UploadPartResult result;
    try {
        MultipartUploadOutputStream.LOG.info(String.format(
                "S3 uploadPart bucket:%s key:%s part:%d size:%d",
                MultipartUploadOutputStream.this.bucketName,
                MultipartUploadOutputStream.this.key,
                this.partNumber, this.partFile.length()));
        result = MultipartUploadOutputStream.this.s3.uploadPart(request);
    } finally {
        try {
            is.close();
        } finally {
            this.partFile.delete();
        }
    }

    return result.getPartETag();
}
Project: elasticsearch_my    File: AmazonS3Wrapper.java
@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
    return delegate.uploadPart(request);
}
Project: syndesis    File: AmazonS3ClientMock.java
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
(The same UnsupportedOperationException stub is repeated twice more in syndesis and twice more in connectors; those verbatim duplicates are omitted here.)