Java class com.amazonaws.services.s3.model.UploadPartResult: example source code

Project: spring-cloud-aws    File: SimpleStorageResource.java
@Override
public UploadPartResult call() throws Exception {
    try {
        return this.amazonS3.uploadPart(new UploadPartRequest().withBucketName(this.bucketName).
                withKey(this.key).
                withUploadId(this.uploadId).
                withInputStream(new ByteArrayInputStream(this.content)).
                withPartNumber(this.partNumber).
                withLastPart(this.last).
                withPartSize(this.contentLength));
    } finally {
        // Release the memory, as the callable may still live inside the CompletionService,
        // which would cause excessive memory usage
        this.content = null;
    }
}
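For context, a hedged sketch of how callables like the one above are typically driven: each part is submitted to a CompletionService and the resulting ETags are collected afterwards (compare getMultiPartsUploadResults further down). The method name and thread-pool size below are illustrative, not taken from spring-cloud-aws.

// Sketch only (not spring-cloud-aws code): drive part-upload callables
// through a CompletionService and collect the resulting part ETags.
static List<PartETag> uploadAllParts(List<Callable<UploadPartResult>> partCallables)
        throws InterruptedException, ExecutionException {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    CompletionService<UploadPartResult> completionService =
            new ExecutorCompletionService<>(executor);
    partCallables.forEach(completionService::submit);
    List<PartETag> partETags = new ArrayList<>();
    try {
        for (int i = 0; i < partCallables.size(); i++) {
            // take() returns futures in completion order; get() surfaces upload failures
            partETags.add(completionService.take().get().getPartETag());
        }
    } finally {
        executor.shutdown();
    }
    return partETags;
}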
Project: elasticsearch_my    File: DefaultS3OutputStream.java
protected PartETag doUploadMultipart(S3BlobStore blobStore, String bucketName, String blobName, String uploadId, InputStream is,
        int length, boolean lastPart) throws AmazonS3Exception {
    UploadPartRequest request = new UploadPartRequest()
            .withBucketName(bucketName)
            .withKey(blobName)
            .withUploadId(uploadId)
            .withPartNumber(multipartChunks)
            .withInputStream(is)
            .withPartSize(length)
            .withLastPart(lastPart);

    UploadPartResult response = blobStore.client().uploadPart(request);
    return response.getPartETag();
}
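Once every doUploadMultipart call has returned its PartETag, the upload is finalized with a CompleteMultipartUploadRequest. A minimal sketch of that final step, reusing bucketName, blobName, and uploadId from above; the collected partETags list is assumed, not shown in the snippet.

// Sketch: finish the multipart upload once all PartETags are collected.
CompleteMultipartUploadRequest complete = new CompleteMultipartUploadRequest(
        bucketName, blobName, uploadId, partETags);
CompleteMultipartUploadResult completed =
        blobStore.client().completeMultipartUpload(complete);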
Project: connectors    File: AmazonS3ClientMock.java
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: ibm-cos-sdk-java    File: AmazonS3Client.java
/**
 * Used for performance testing purposes only.  Hence package private.
 * This method is subject to removal anytime without notice.
 */
CompleteMultipartUploadResult uploadObject(final UploadObjectRequest req)
        throws IOException, InterruptedException, ExecutionException {
    // Set up the pipeline for concurrent encrypt and upload
    // Set up a thread pool for this pipeline
    ExecutorService es = req.getExecutorService();
    final boolean defaultExecutorService = es == null;
    if (es == null)
        es = Executors.newFixedThreadPool(clientConfiguration.getMaxConnections());
    UploadObjectObserver observer = req.getUploadObjectObserver();
    if (observer == null)
        observer = new UploadObjectObserver();
    // initialize the observer
    observer.init(req, this, this, es);
    // Initiate upload
    observer.onUploadInitiation(req);
    final List<PartETag> partETags = new ArrayList<PartETag>();
    MultiFileOutputStream mfos = req.getMultiFileOutputStream();
    if (mfos == null)
        mfos = new MultiFileOutputStream();
    try {
        // initialize the multi-file output stream
        mfos.init(observer, req.getPartSize(), req.getDiskLimit());
        // Kicks off the encryption-upload pipeline;
        // Note mfos is automatically closed upon method completion.
        putLocalObject(req, mfos);
        // block till all parts have been uploaded
        for (Future<UploadPartResult> future: observer.getFutures()) {
            UploadPartResult partResult = future.get();
            partETags.add(new PartETag(partResult.getPartNumber(), partResult.getETag()));
        }
    } finally {
        if (defaultExecutorService)
            es.shutdownNow();   // shut down the locally created thread pool
        mfos.cleanup();       // delete left-over temp files
    }
    // Complete upload
    return observer.onCompletion(partETags);
}
Project: ibm-cos-sdk-java    File: UploadObjectObserver.java
/**
 * Notified from {@link MultiFileOutputStream#fos()} when a part ready for
 * upload has been successfully created on disk. By default, this method
 * performs the following:
 * <ol>
 * <li>calls {@link #newUploadPartRequest(PartCreationEvent, File)} to
 * create an upload-part request for the newly created ciphertext file</li>
 * <li>calls {@link #appendUserAgent(AmazonWebServiceRequest, String)} to
 * append the necessary user agent string to the request</li>
 * <li>and finally submits a concurrent task, which calls the method
 * {@link #uploadPart(UploadPartRequest)}, to be performed</li>
 * </ol>
 * <p>
 * To enable parallel uploads, implementations of this method should never
 * block.
 * 
 * @param event
 *            to represent the completion of a ciphertext file creation
 *            which is ready for multipart upload to S3.
 */
public void onPartCreate(PartCreationEvent event) {
    final File part = event.getPart();
    final UploadPartRequest reqUploadPart =
        newUploadPartRequest(event, part);
    final OnFileDelete fileDeleteObserver = event.getFileDeleteObserver();
    appendUserAgent(reqUploadPart, AmazonS3EncryptionClient.USER_AGENT);
    futures.add(es.submit(new Callable<UploadPartResult>() {
        @Override public UploadPartResult call() {
            // Upload the ciphertext directly via the non-encrypting
            // s3 client
            try {
                return uploadPart(reqUploadPart);
            } finally {
                // clean up part already uploaded 
                if (!part.delete()) {
                    LogFactory.getLog(getClass()).debug(
                            "Ignoring failure to delete file " + part
                                    + " which has already been uploaded");
                } else {
                    if (fileDeleteObserver != null)
                        fileDeleteObserver.onFileDelete(null);
                }
            }
        }
    }));
}
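Because onPartCreate is an overridable hook, a subclass can observe each ciphertext part before the default submission logic runs. A hypothetical example; the class name is illustrative, not from the SDK:

// Hypothetical observer that logs each ciphertext part before the
// default implementation submits it for upload.
public class LoggingUploadObjectObserver extends UploadObjectObserver {
    @Override
    public void onPartCreate(PartCreationEvent event) {
        LogFactory.getLog(getClass()).info(
                "ciphertext part ready on disk: " + event.getPart());
        super.onPartCreate(event); // submit the upload task as usual
    }
}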
Project: stocator    File: COSAPIClient.java
/**
 * Upload part of a multi-partition file.
 * <i>Important: this call does not close any input stream in the request.</i>
 * @param request request
 * @return the result of the operation
 * @throws AmazonClientException on problems
 */
public UploadPartResult uploadPart(UploadPartRequest request)
    throws AmazonClientException {
  return mClient.uploadPart(request);
}
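Since uploadPart leaves the request's input stream open, the caller is expected to close it, for example with try-with-resources. A sketch under that assumption; cosClient, partFile, partETags, and the upload coordinates are illustrative names:

// Illustrative caller: the stream is closed here, not by uploadPart().
try (InputStream in = new FileInputStream(partFile)) {
    UploadPartResult result = cosClient.uploadPart(new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withInputStream(in)
            .withPartSize(partFile.length()));
    partETags.add(result.getPartETag());
}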
Project: bender    File: S3TransporterTest.java
private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = spy(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  return mockClient;
}
Project: cloudkeeper    File: S3ConnectionImpl.java
@Override
public CompletableFuture<UploadPartResult> uploadPart(String bucketName, String key, String uploadId,
        int partNumber, InputStream inputStream, int length) {
    UploadPartRequest request = new UploadPartRequest()
        .withBucketName(bucketName)
        .withKey(key)
        .withUploadId(uploadId)
        .withPartNumber(partNumber)
        .withInputStream(inputStream)
        .withPartSize(length);
    return CompletableFuture.supplyAsync(() -> s3Client.uploadPart(request), executorService);
}
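A caller can then combine several of these futures into the PartETag list needed to complete the upload. A hedged sketch, assuming futures holds one CompletableFuture per part, in part order:

// Sketch: gather PartETags once every part future has completed.
CompletableFuture<List<PartETag>> partETags =
        CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
                .thenApply(ignored -> futures.stream()
                        .map(CompletableFuture::join)
                        .map(UploadPartResult::getPartETag)
                        .collect(Collectors.toList()));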
Project: kafka-connect-storage-cloud    File: S3SinkTaskTest.java
@Test
public void testWriteRecordsSpanningMultiplePartsWithRetry() throws Exception {
  localProps.put(S3SinkConnectorConfig.FLUSH_SIZE_CONFIG, "10000");
  localProps.put(S3SinkConnectorConfig.S3_PART_RETRIES_CONFIG, "3");
  setUp();

  List<SinkRecord> sinkRecords = createRecords(11000);
  int totalBytes = calcByteSize(sinkRecords);
  final int parts = totalBytes / connectorConfig.getPartSize();

  // Periodically fail the S3 uploadPart call (every 'parts' invocations) to exercise the retry logic
  final AtomicInteger count = new AtomicInteger();
  PowerMockito.doAnswer(new Answer<UploadPartResult>() {
    @Override
    public UploadPartResult answer(InvocationOnMock invocationOnMock) throws Throwable {
      if (count.getAndIncrement() % parts == 0) {
        throw new SdkClientException("Boom!");
      } else {
        return (UploadPartResult) invocationOnMock.callRealMethod();
      }
    }
  }).when(s3).uploadPart(Mockito.isA(UploadPartRequest.class));

  replayAll();

  task = new S3SinkTask();
  task.initialize(context);
  task.start(properties);
  verifyAll();

  task.put(sinkRecords);
  task.close(context.assignment());
  task.stop();

  long[] validOffsets = {0, 10000};
  verify(sinkRecords, validOffsets);
}
Project: spring-cloud-aws    File: SimpleStorageResource.java
private List<PartETag> getMultiPartsUploadResults() throws ExecutionException, InterruptedException {
    List<PartETag> result = new ArrayList<>(this.partNumberCounter);
    for (int i = 0; i < this.partNumberCounter; i++) {
        Future<UploadPartResult> uploadPartResultFuture = this.completionService.take();
        result.add(uploadPartResultFuture.get().getPartETag());
    }
    return result;
}
Project: vs.msc.ws14    File: S3DataOutputStream.java
private void uploadPartAndFlushBuffer() throws IOException {
    boolean operationSuccessful = false;

    if (this.uploadId == null) {
        this.uploadId = initiateMultipartUpload();
    }

    try {
        if (this.partNumber >= MAX_PART_NUMBER) {
            throw new IOException("Cannot upload any more data: maximum part number reached");
        }

        final InputStream inputStream = new InternalUploadInputStream(this.buf, this.bytesWritten);
        final UploadPartRequest request = new UploadPartRequest();
        request.setBucketName(this.bucket);
        request.setKey(this.object);
        request.setInputStream(inputStream);
        request.setUploadId(this.uploadId);
        request.setPartSize(this.bytesWritten);
        request.setPartNumber(this.partNumber++);

        final UploadPartResult result = this.s3Client.uploadPart(request);
        this.partETags.add(result.getPartETag());

        this.bytesWritten = 0;
        operationSuccessful = true;
    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
Project: s3distcp    File: MultipartUploadOutputStream.java
public PartETag call() throws Exception {
    InputStream is = new ProgressableResettableBufferedFileInputStream(
            this.partFile, MultipartUploadOutputStream.this.progressable);

    UploadPartRequest request = new UploadPartRequest()
            .withBucketName(MultipartUploadOutputStream.this.bucketName)
            .withKey(MultipartUploadOutputStream.this.key)
            .withUploadId(MultipartUploadOutputStream.this.uploadId)
            .withInputStream(is)
            .withPartNumber(this.partNumber)
            .withPartSize(this.partFile.length())
            .withMD5Digest(this.md5sum);

    UploadPartResult result;
    try {
        String message = String.format("S3 uploadPart bucket:%s key:%s part:%d size:%d",
                MultipartUploadOutputStream.this.bucketName, MultipartUploadOutputStream.this.key,
                this.partNumber, this.partFile.length());

        MultipartUploadOutputStream.LOG.info(message);
        result = MultipartUploadOutputStream.this.s3.uploadPart(request);
    } finally {
        try {
            is.close();
        } finally {
            this.partFile.delete();
        }
    }

    return result.getPartETag();
}
Project: aws    File: S3MultiPartUpload.java
static List<PartETag> GetETags(List<UploadPartResult> responses) {
    List<PartETag> etags = new ArrayList<PartETag>();
    for (UploadPartResult response : responses) {
        etags.add(new PartETag(response.getPartNumber(), response.getETag()));
    }
    return etags;
}
Project: elasticsearch_my    File: AmazonS3Wrapper.java
@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
    return delegate.uploadPart(request);
}
Project: syndesis    File: AmazonS3ClientMock.java
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: ibm-cos-sdk-java    File: AmazonS3Client.java
private UploadPartResult doUploadPart(final String bucketName,
        final String key, final String uploadId, final int partNumber,
        final long partSize, Request<UploadPartRequest> request,
        InputStream inputStream,
        MD5DigestCalculatingInputStream md5DigestStream,
        final ProgressListener listener) {
    try {
        request.setContent(inputStream);
        ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
        final String etag = metadata.getETag();

        if (md5DigestStream != null
                && !skipMd5CheckStrategy.skipClientSideValidationPerUploadPartResponse(metadata)) {
            byte[] clientSideHash = md5DigestStream.getMd5Digest();
            byte[] serverSideHash = BinaryUtils.fromHex(etag);

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                final String info = "bucketName: " + bucketName + ", key: "
                        + key + ", uploadId: " + uploadId
                        + ", partNumber: " + partNumber + ", partSize: "
                        + partSize;
                throw new SdkClientException(
                     "Unable to verify integrity of data upload.  "
                    + "Client calculated content hash (contentMD5: "
                    + Base16.encodeAsString(clientSideHash)
                    + " in hex) didn't match hash (etag: "
                    + etag
                    + " in hex) calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3. "
                    + "(" + info + ")");
            }
        }
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        UploadPartResult result = new UploadPartResult();
        result.setETag(etag);
        result.setPartNumber(partNumber);
        result.setSSEAlgorithm(metadata.getSSEAlgorithm());
        result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
        result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
        result.setRequesterCharged(metadata.isRequesterCharged());
        return result;
    } catch (Throwable t) {
        publishProgress(listener, ProgressEventType.TRANSFER_PART_FAILED_EVENT);
        // Leaving this here in case anyone is depending on it, but it's
        // inconsistent with other methods which only generate one of
        // COMPLETED_EVENT_CODE or FAILED_EVENT_CODE.
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        throw failure(t);
    }
}
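The check above can be reproduced by hand for a single part, whose ETag is the hex MD5 of the part's bytes (this does not hold for the final ETag of a completed multipart object). A minimal sketch:

// Sketch: verify a single part's ETag against a client-side MD5.
static boolean etagMatchesMd5(byte[] partBytes, String etag) throws NoSuchAlgorithmException {
    byte[] digest = MessageDigest.getInstance("MD5").digest(partBytes);
    StringBuilder hex = new StringBuilder(digest.length * 2);
    for (byte b : digest) {
        hex.append(String.format("%02x", b));
    }
    // S3 sometimes returns the ETag wrapped in quotes
    return hex.toString().equalsIgnoreCase(etag.replace("\"", ""));
}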
Project: ibm-cos-sdk-java    File: S3CryptoModuleBase.java
/**
 * {@inheritDoc}
 *
 * <p>
 * <b>NOTE:</b> Because the encryption process requires context from
 * previous blocks, parts uploaded with the AmazonS3EncryptionClient (as
 * opposed to the normal AmazonS3Client) must be uploaded serially, and in
 * order. Otherwise, the previous encryption context isn't available to use
 * when encrypting the current part.
 */
@Override
public UploadPartResult uploadPartSecurely(UploadPartRequest req) {
    appendUserAgent(req, USER_AGENT);
    final int blockSize = contentCryptoScheme.getBlockSizeInBytes();
    final boolean isLastPart = req.isLastPart();
    final String uploadId = req.getUploadId();
    final long partSize = req.getPartSize();
    final boolean partSizeMultipleOfCipherBlockSize = 0 == (partSize % blockSize);
    if (!isLastPart && !partSizeMultipleOfCipherBlockSize) {
        throw new SdkClientException(
            "Invalid part size: part sizes for encrypted multipart uploads must be multiples "
                + "of the cipher block size ("
                + blockSize
                + ") with the exception of the last part.");
    }
    final T uploadContext = multipartUploadContexts.get(uploadId);
    if (uploadContext == null) {
        throw new SdkClientException(
            "No client-side information available on upload ID " + uploadId);
    }
    final UploadPartResult result;
    // Checks the parts are uploaded in series
    uploadContext.beginPartUpload(req.getPartNumber());
    CipherLite cipherLite = cipherLiteForNextPart(uploadContext);
    final File fileOrig = req.getFile();
    final InputStream isOrig = req.getInputStream();
    SdkFilterInputStream isCurr = null;
    try {
        CipherLiteInputStream clis = newMultipartS3CipherInputStream(req, cipherLite);
        isCurr = clis; // so the clis will be closed (in the finally block below) upon
                       // unexpected failure, should we have opened a file underneath
        isCurr = wrapForMultipart(clis, partSize);
        req.setInputStream(isCurr);
        // Treat all encryption requests as input stream upload requests,
        // not as file upload requests.
        req.setFile(null);
        req.setFileOffset(0);
        // The last part of the multipart upload will contain an extra
        // 16-byte mac
        if (isLastPart) {
            // We only change the size of the last part
            long lastPartSize = computeLastPartSize(req);
            if (lastPartSize > -1)
                req.setPartSize(lastPartSize);
            if (uploadContext.hasFinalPartBeenSeen()) {
                throw new SdkClientException(
                    "This part was specified as the last part in a multipart upload, but a previous part was already marked as the last part.  "
                  + "Only the last part of the upload should be marked as the last part.");
            }
        }

        result = s3.uploadPart(req);
    } finally {
        cleanupDataSource(req, fileOrig, isOrig, isCurr, log);
        uploadContext.endPartUpload();
    }
    if (isLastPart)
        uploadContext.setHasFinalPartBeenSeen(true);
    updateUploadContext(uploadContext, isCurr);
    return result;
}
Project: ibm-cos-sdk-java    File: AmazonS3EncryptionClient.java
@Override
public UploadPartResult uploadPart(UploadPartRequest req)
        throws SdkClientException, AmazonServiceException {
    return AmazonS3EncryptionClient.super.uploadPart(req);
}
Project: ibm-cos-sdk-java    File: UploadObjectObserver.java
/**
 * Uploads the ciphertext via the non-encrypting s3 client.
 * @param reqUploadPart part upload request
 * @return the result of the part upload when there is no exception
 */
protected UploadPartResult uploadPart(UploadPartRequest reqUploadPart) {
    // Upload the ciphertext directly via the non-encrypting
    // s3 client
    return s3direct.uploadPart(reqUploadPart);
}
Project: ibm-cos-sdk-java    File: UploadObjectObserver.java
public List<Future<UploadPartResult>> getFutures() {
    return futures;
}
Project: S3Decorators    File: S3Decorator.java
@Override
public UploadPartResult uploadPart(UploadPartRequest request) {
  return call(() -> getDelegate().uploadPart(request));
}
Project: bender    File: S3TransporterTest.java
@Test
public void testCompressedBuffer() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Capture the InputStream into a ByteArrayOutputStream before the Transport thread closes the
   * InputStream and makes it unavailable for reading.
   */
  ByteArrayOutputStream captured = new ByteArrayOutputStream();
  Answer answer = new Answer() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      UploadPartRequest req = invocation.getArgumentAt(0, UploadPartRequest.class);
      // OutputStream has no write(InputStream) overload; copy the part bytes manually
      byte[] buf = new byte[4096];
      InputStream in = req.getInputStream();
      for (int n = in.read(buf); n != -1; n = in.read(buf)) {
        captured.write(buf, 0, n);
      }
      return new UploadPartResult();
    }
  };

  Mockito.doAnswer(answer).when(mockClient).uploadPart(any(UploadPartRequest.class));

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, true, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);

  buffer.close();
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("bucket", argument.getValue().getBucketName());
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
  assertEquals(1, argument.getValue().getPartNumber());
  assertEquals(40, argument.getValue().getPartSize());
  assertEquals("123", argument.getValue().getUploadId());

  /*
   * Convert the actual InputStream from the client into a ByteArrayOutputStream which can be read
   * and verified.
   */
  byte[] actualBytes = captured.toByteArray();
  byte[] expectedBytes =
      {66, 90, 104, 57, 49, 65, 89, 38, 83, 89, 118, -10, -77, -27, 0, 0, 0, -63, 0, 0, 16, 1, 0,
          -96, 0, 48, -52, 12, -62, 12, 46, -28, -118, 112, -95, 32, -19, -19, 103, -54};

  assertArrayEquals(expectedBytes, actualBytes);
}
Project: bender    File: S3TransporterTest.java
@Test
public void testCompressed() throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = getMockClient();

  /*
   * Capture the InputStream into a ByteArrayOutputStream before the Transport thread closes the
   * InputStream and makes it unavailable for reading.
   */
  ByteArrayOutputStream captured = new ByteArrayOutputStream();
  Answer answer = new Answer() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      UploadPartRequest req = invocation.getArgumentAt(0, UploadPartRequest.class);
      // OutputStream has no write(InputStream) overload; copy the part bytes manually
      byte[] buf = new byte[4096];
      InputStream in = req.getInputStream();
      for (int n = in.read(buf); n != -1; n = in.read(buf)) {
        captured.write(buf, 0, n);
      }
      return new UploadPartResult();
    }
  };

  Mockito.doAnswer(answer).when(mockClient).uploadPart(any(UploadPartRequest.class));

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", true, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");
  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);

  buffer.close();
  transport.sendBatch(buffer, partitions, new TestContext());
  verify(mockClient).uploadPart(argument.capture());

  /*
   * Check results
   */
  assertEquals("bucket", argument.getValue().getBucketName());
  assertEquals("basepath/a_filename.bz2", argument.getValue().getKey());
  assertEquals(1, argument.getValue().getPartNumber());
  assertEquals(40, argument.getValue().getPartSize());
  assertEquals("123", argument.getValue().getUploadId());

  /*
   * Convert the actual InputStream from the client into a ByteArrayOutputStream which can be read
   * and verified.
   */
  byte[] actualBytes = captured.toByteArray();
  byte[] expectedBytes =
      {66, 90, 104, 57, 49, 65, 89, 38, 83, 89, 118, -10, -77, -27, 0, 0, 0, -63, 0, 0, 16, 1, 0,
          -96, 0, 48, -52, 12, -62, 12, 46, -28, -118, 112, -95, 32, -19, -19, 103, -54};

  assertArrayEquals(expectedBytes, actualBytes);
}
Project: bender    File: S3TransporterTest.java
@Test(expected = TransportException.class)
public void testAmazonClientException()
    throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = mock(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doThrow(new AmazonClientException("expected")).when(mockClient)
      .uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");

  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  try {
    transport.sendBatch(buffer, partitions, new TestContext());
  } catch (Exception e) {
    assertEquals(AmazonClientException.class, e.getCause().getClass());
    throw e;
  }
}
Project: Camel    File: AmazonS3ClientMock.java
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: apex-malhar    File: S3OutputModuleMockTest.java
@Test
public void testS3OutputModule() throws Exception
{
  InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
  result.setUploadId(uploadId);

  PutObjectResult objResult = new PutObjectResult();
  objResult.setETag("SuccessFullyUploaded");

  UploadPartResult partResult = new UploadPartResult();
  partResult.setPartNumber(1);
  partResult.setETag("SuccessFullyPartUploaded");

  MockitoAnnotations.initMocks(this);
  when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(result);
  when(client.putObject(any(PutObjectRequest.class))).thenReturn(objResult);
  when(client.uploadPart(any(UploadPartRequest.class))).thenReturn(partResult);
  when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn(completeMultiPart());

  Application app = new S3OutputModuleMockTest.Application();
  Configuration conf = new Configuration();
  conf.set("dt.operator.HDFSInputModule.prop.files", inputDir);
  conf.set("dt.operator.HDFSInputModule.prop.blockSize", "10");
  conf.set("dt.operator.HDFSInputModule.prop.blocksThreshold", "1");
  conf.set("dt.attr.CHECKPOINT_WINDOW_COUNT","20");

  conf.set("dt.operator.S3OutputModule.prop.accessKey", "accessKey");
  conf.set("dt.operator.S3OutputModule.prop.secretAccessKey", "secretKey");
  conf.set("dt.operator.S3OutputModule.prop.bucketName", "bucketKey");
  conf.set("dt.operator.S3OutputModule.prop.outputDirectoryPath", outputDir);

  Path outDir = new Path("file://" + new File(outputDir).getAbsolutePath());
  final Path outputFilePath =  new Path(outDir.toString() + File.separator + FILE);
  final FileSystem fs = FileSystem.newInstance(outDir.toUri(), new Configuration());
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return fs.exists(outputFilePath);
    }
  });
  lc.run(10000);

  Assert.assertTrue("output file exist", fs.exists(outputFilePath));
}
Project: presto    File: MockAmazonS3.java
@Override
public UploadPartResult uploadPart(UploadPartRequest request)
        throws AmazonClientException
{
    return null;
}
Project: s3proxy    File: AwsSdkTest.java
@Test
public void testBigMultipartUpload() throws Exception {
    String key = "multipart-upload";
    long partSize = context.getBlobStore().getMinimumMultipartPartSize();
    long size = partSize + 1;
    ByteSource byteSource = TestUtils.randomByteSource().slice(0, size);

    InitiateMultipartUploadRequest initRequest =
            new InitiateMultipartUploadRequest(containerName, key);
    InitiateMultipartUploadResult initResponse =
            client.initiateMultipartUpload(initRequest);
    String uploadId = initResponse.getUploadId();

    ByteSource byteSource1 = byteSource.slice(0, partSize);
    UploadPartRequest uploadRequest1 = new UploadPartRequest()
            .withBucketName(containerName)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(1)
            .withInputStream(byteSource1.openStream())
            .withPartSize(byteSource1.size());
    uploadRequest1.getRequestClientOptions().setReadLimit(
            (int) byteSource1.size());
    UploadPartResult uploadPartResult1 = client.uploadPart(uploadRequest1);

    ByteSource byteSource2 = byteSource.slice(partSize, size - partSize);
    UploadPartRequest uploadRequest2 = new UploadPartRequest()
            .withBucketName(containerName)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(2)
            .withInputStream(byteSource2.openStream())
            .withPartSize(byteSource2.size());
    uploadRequest2.getRequestClientOptions().setReadLimit(
            (int) byteSource2.size());
    UploadPartResult uploadPartResult2 = client.uploadPart(uploadRequest2);

    CompleteMultipartUploadRequest completeRequest =
            new CompleteMultipartUploadRequest(
                    containerName, key, uploadId,
                    ImmutableList.of(
                            uploadPartResult1.getPartETag(),
                            uploadPartResult2.getPartETag()));
    client.completeMultipartUpload(completeRequest);

    S3Object object = client.getObject(containerName, key);
    assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
            size);
    try (InputStream actual = object.getObjectContent();
            InputStream expected = byteSource.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
Project: Scribengin    File: AmazonS3Mock.java
@Override
public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
  // TODO Auto-generated method stub
  return null;
}
Project: aws    File: S3MultiPartUpload.java
public void actionPerformed(ActionEvent ae) {
    JFileChooser fileChooser = new JFileChooser();
    int showOpenDialog = fileChooser.showOpenDialog(frame);
    if (showOpenDialog != JFileChooser.APPROVE_OPTION) return;

    createAmazonS3Bucket();

    File fileToUpload = fileChooser.getSelectedFile();
    initiateRequest = new InitiateMultipartUploadRequest(AWSResources.S3_BUCKET_NAME,fileToUpload.getName());
    initResult = AWSResources.S3.initiateMultipartUpload(initiateRequest);
    uploadId = initResult.getUploadId();
    try {

        long objectSize = fileToUpload.length();

        long partSize = 5 * (long)Math.pow(2.0, 20.0); //5MB
        long bytePosition = 0;
        List<UploadPartResult> uploadResponses = new ArrayList<UploadPartResult>();
        for (int i = 1; bytePosition < objectSize; i++)
        {
            System.out.print("Uploading:" + i +"\n");
            partSize=Math.min(partSize, (objectSize - bytePosition));
            UploadPartRequest uploadRequest = new UploadPartRequest().withBucketName(AWSResources.S3_BUCKET_NAME)
                    .withKey(fileToUpload.getName())
                    .withFile(fileToUpload)
                    .withPartSize(partSize)
                    .withUploadId(uploadId)
                    .withFileOffset(bytePosition)
                    .withPartNumber(i);
            //uploadRequest.setProgressListener(new ProgressListener(fileToUpload, i, partSize));

            uploadResponses.add(AWSResources.S3.uploadPart(uploadRequest));
            bytePosition += partSize;   
        }

        CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(AWSResources.S3_BUCKET_NAME,fileToUpload.getName(),uploadId,GetETags(uploadResponses));
        CompleteMultipartUploadResult completeUploadResult = AWSResources.S3.completeMultipartUpload(completeRequest);
        System.out.println(completeUploadResult.getETag());
    } catch (AmazonS3Exception ex) {
        System.out.println(ex.getErrorMessage());
    }
}
Project: ibm-cos-sdk-java    File: CryptoModuleDispatcher.java
/**
 * {@inheritDoc}
 *
 * <p>
 * <b>NOTE:</b> Because the encryption process requires context from block
 * N-1 in order to encrypt block N, parts uploaded with the
 * AmazonS3EncryptionClient (as opposed to the normal AmazonS3Client) must
 * be uploaded serially, and in order. Otherwise, the previous encryption
 * context isn't available to use when encrypting the current part.
 */
@Override
public UploadPartResult uploadPartSecurely(UploadPartRequest req)
    throws SdkClientException, AmazonServiceException {
    return defaultCryptoMode == EncryptionOnly
         ? eo.uploadPartSecurely(req)
         : ae.uploadPartSecurely(req)
         ;
}
Project: ibm-cos-sdk-java    File: AmazonS3EncryptionClient.java
/**
 * {@inheritDoc}
 *
 * <p>
 * <b>NOTE:</b> Because the encryption process requires context from block
 * N-1 in order to encrypt block N, parts uploaded with the
 * AmazonS3EncryptionClient (as opposed to the normal AmazonS3Client) must
 * be uploaded serially, and in order. Otherwise, the previous encryption
 * context isn't available to use when encrypting the current part.
 */
@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest)
        throws SdkClientException, AmazonServiceException {
    return crypto.uploadPartSecurely(uploadPartRequest);
}
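Given that ordering constraint, a hypothetical caller uploads parts strictly one at a time, in part-number order, marking only the final part as last. All names below (encryptionClient, partFiles, bucket, key, uploadId) are illustrative:

// Sketch: encrypted parts must be uploaded serially and in order.
List<PartETag> partETags = new ArrayList<>();
for (int partNumber = 1; partNumber <= partFiles.size(); partNumber++) {
    File partFile = partFiles.get(partNumber - 1);
    UploadPartResult result = encryptionClient.uploadPart(new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFile(partFile)
            .withPartSize(partFile.length())
            .withLastPart(partNumber == partFiles.size()));
    partETags.add(result.getPartETag());
}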
Project: cloudkeeper    File: S3Connection.java
/**
 * Uploads a new part of a multi-part upload.
 *
 * @return Future that will be completed with an upload-part result on success, and an
 *     {@link com.amazonaws.AmazonClientException} in case of transmission failure. The future may also be completed
 *     with another runtime time exception; however, this indicates a logical bug (programming error).
 * @see AmazonS3#uploadPart(com.amazonaws.services.s3.model.UploadPartRequest)
 */
CompletableFuture<UploadPartResult> uploadPart(String bucketName, String key, String uploadId, int partNumber,
    InputStream inputStream, int length);
Project: ibm-cos-sdk-java    File: S3Direct.java
public abstract UploadPartResult uploadPart(UploadPartRequest req);
Project: ibm-cos-sdk-java    File: S3CryptoModule.java
public abstract UploadPartResult uploadPartSecurely(UploadPartRequest req);
Project: ibm-cos-sdk-java    File: S3DirectSpi.java
public UploadPartResult uploadPart(UploadPartRequest req);