Source code examples for the Java class com.amazonaws.services.s3.model.InitiateMultipartUploadResult
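
Before the project snippets, a minimal self-contained sketch (not taken from any of the projects listed here) of how InitiateMultipartUploadResult is typically consumed: the uploadId it carries must be passed to every subsequent uploadPart, completeMultipartUpload, or abortMultipartUpload call. The bucket name, key, payload, and default-client construction below are illustrative assumptions.

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.Collections;

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.*;

public class InitiateMultipartUploadResultSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

        // The result object is the only place the upload ID is handed out.
        InitiateMultipartUploadResult init = s3.initiateMultipartUpload(
                new InitiateMultipartUploadRequest("my-bucket", "my-key"));
        String uploadId = init.getUploadId();

        // Upload a single (last) part from an in-memory buffer.
        byte[] data = "hello multipart".getBytes(StandardCharsets.UTF_8);
        UploadPartResult part = s3.uploadPart(new UploadPartRequest()
                .withBucketName("my-bucket")
                .withKey("my-key")
                .withUploadId(uploadId)
                .withPartNumber(1)
                .withInputStream(new ByteArrayInputStream(data))
                .withPartSize(data.length)
                .withLastPart(true));

        // Complete the upload with the collected part ETags.
        s3.completeMultipartUpload(new CompleteMultipartUploadRequest(
                "my-bucket", "my-key", uploadId, Collections.singletonList(part.getPartETag())));
    }
}

The same result object also exposes getBucketName(), getKey(), getAbortDate(), and getAbortRuleId(), which several of the snippets below rely on.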

Project: s3-channels    File: S3WritableObjectChannelBuilderTest.java
@Test
void testGetters() {
    assertEquals(S3WritableObjectChannel.MIN_PART_SIZE, builder.getPartSize());
    assertEquals(123, builder.partSize(123).getPartSize());
    assertEquals(amazonS3, builder.amazonS3());
    assertEquals(executorService, builder.executorService());
    assertEquals("upldId", builder.uploadId());
    assertEquals("bucket", builder.bucket());
    assertEquals("key", builder.key());
    assertEquals(2, builder.failedPartUploadRetries());
    assertFalse(builder.hasDelayedHeader());
    assertFalse(builder.closeExecutorOnChannelClose());
    assertTrue(builder.defaultCachedThreadPoolExecutor().closeExecutorOnChannelClose());

    InitiateMultipartUploadResult r = new InitiateMultipartUploadResult();
    r.setBucketName("bucket1");
    r.setKey("key1");
    r.setUploadId("upldId1");
    builder.initiateMultipartUploadResult(r);
    assertEquals("upldId1", builder.uploadId());
    assertEquals("bucket1", builder.bucket());
    assertEquals("key1", builder.key());

    builder.executorService().shutdown();
}
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Tests if not yet completed / aborted multipart uploads are listed.
 *
 * @throws Exception not expected
 */
@Test
public void shouldListMultipartUploads() throws Exception {
  s3Client.createBucket(BUCKET_NAME);

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(empty()));

  final InitiateMultipartUploadResult initiateMultipartUploadResult = s3Client
      .initiateMultipartUpload(new InitiateMultipartUploadRequest(BUCKET_NAME, UPLOAD_FILE_NAME));
  final String uploadId = initiateMultipartUploadResult.getUploadId();

  final MultipartUploadListing listing =
      s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME));
  assertThat(listing.getMultipartUploads(), is(not(empty())));
  assertThat(listing.getBucketName(), equalTo(BUCKET_NAME));
  assertThat(listing.getMultipartUploads(), hasSize(1));
  final MultipartUpload upload = listing.getMultipartUploads().get(0);
  assertThat(upload.getUploadId(), equalTo(uploadId));
  assertThat(upload.getKey(), equalTo(UPLOAD_FILE_NAME));
}
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Tests if a multipart upload can be aborted.
 *
 * @throws Exception not expected
 */
@Test
public void shouldAbortMultipartUpload() throws Exception {
  s3Client.createBucket(BUCKET_NAME);

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(empty()));

  final InitiateMultipartUploadResult initiateMultipartUploadResult = s3Client
      .initiateMultipartUpload(new InitiateMultipartUploadRequest(BUCKET_NAME, UPLOAD_FILE_NAME));
  final String uploadId = initiateMultipartUploadResult.getUploadId();

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(not(empty())));

  s3Client.abortMultipartUpload(
      new AbortMultipartUploadRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadId));

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(empty()));
}
Project: apex-malhar    File: S3InitiateFileUploadOperator.java
/**
 * For the input file, initiate the upload and emit the UploadFileMetadata through the fileMetadataOutput,
 * uploadMetadataOutput ports.
 * @param tuple given tuple
 */
protected void processTuple(AbstractFileSplitter.FileMetadata tuple)
{
  if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
    return;
  }
  String keyName = getKeyName(tuple.getFilePath());
  String uploadId = "";
  if (tuple.getNumberOfBlocks() > 1) {
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    initRequest.setObjectMetadata(createObjectMetadata());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    uploadId = initResponse.getUploadId();
  }
  UploadFileMetadata uploadFileMetadata = new UploadFileMetadata(tuple, uploadId, keyName);
  fileMetadataOutput.emit(uploadFileMetadata);
  uploadMetadataOutput.emit(uploadFileMetadata);
  currentWindowRecoveryState.add(uploadFileMetadata);
}
Project: apex-malhar    File: S3InitiateFileUploadOperatorTest.java
@Test
public void testInitiateUpload()
{
  InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
  result.setUploadId(uploadId);

  MockitoAnnotations.initMocks(this);
  when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(result);
  when(fileMetadata.getFilePath()).thenReturn("/tmp/file1.txt");
  when(fileMetadata.getNumberOfBlocks()).thenReturn(4);

  S3InitiateFileUploadTest operator = new S3InitiateFileUploadTest();
  operator.setBucketName("testbucket");
  operator.setup(context);

  CollectorTestSink<S3InitiateFileUploadOperator.UploadFileMetadata> fileSink = new CollectorTestSink<>();
  CollectorTestSink<Object> tmp = (CollectorTestSink)fileSink;
  operator.fileMetadataOutput.setSink(tmp);
  operator.beginWindow(0);
  operator.processTuple(fileMetadata);
  operator.endWindow();

  S3InitiateFileUploadOperator.UploadFileMetadata emitted = (S3InitiateFileUploadOperator.UploadFileMetadata)tmp.collectedTuples.get(0);
  Assert.assertEquals("Upload ID :", uploadId, emitted.getUploadId());
}
Project: s3proxy    File: AwsSdkTest.java
@Test
public void testAtomicMpuAbort() throws Exception {
    String key = "testAtomicMpuAbort";
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, key, BYTE_SOURCE.openStream(),
            metadata);

    InitiateMultipartUploadRequest initRequest =
            new InitiateMultipartUploadRequest(containerName, key);
    InitiateMultipartUploadResult initResponse =
            client.initiateMultipartUpload(initRequest);
    String uploadId = initResponse.getUploadId();

    client.abortMultipartUpload(new AbortMultipartUploadRequest(
                containerName, key, uploadId));

    S3Object object = client.getObject(containerName, key);
    assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
            BYTE_SOURCE.size());
    try (InputStream actual = object.getObjectContent();
            InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
Project: s3proxy    File: AwsSdkTest.java
@Test
public void testPartNumberMarker() throws Exception {
    String blobName = "foo";
    InitiateMultipartUploadResult result = client.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(containerName, blobName));
    ListPartsRequest request = new ListPartsRequest(containerName,
            blobName, result.getUploadId());

    client.listParts(request.withPartNumberMarker(0));

    try {
        client.listParts(request.withPartNumberMarker(1));
        Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
    } catch (AmazonS3Exception e) {
        assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
    }
}
Project: vs.msc.ws14    File: S3DataOutputStream.java
private String initiateMultipartUpload() throws IOException {

    boolean operationSuccessful = false;
    final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
    if (this.useRRS) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    } else {
        request.setStorageClass(StorageClass.Standard);
    }

    try {
        final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
        operationSuccessful = true;
        return result.getUploadId();
    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
Project: s3-channels    File: S3WritableObjectChannelBuilder.java
/**
 * Retrieves bucket, key and uploadId from {@link InitiateMultipartUploadResult}
 */
public S3WritableObjectChannelBuilder initiateMultipartUploadResult(InitiateMultipartUploadResult result) {
    bucket(result.getBucketName());
    key(result.getKey());
    uploadId(result.getUploadId());
    return this;
}
Project: ibm-cos-sdk-java    File: CryptoModuleDispatcher.java
@Override
public InitiateMultipartUploadResult initiateMultipartUploadSecurely(
        InitiateMultipartUploadRequest req)
                throws SdkClientException, AmazonServiceException {
    return defaultCryptoMode == EncryptionOnly 
         ? eo.initiateMultipartUploadSecurely(req)
         : ae.initiateMultipartUploadSecurely(req)
         ;
}
Project: ibm-cos-sdk-java    File: AmazonS3EncryptionClient.java
/** 
 * {@inheritDoc}
 * <p>
 * Use {@link EncryptedInitiateMultipartUploadRequest} to specify materialsDescription for the EncryptionMaterials to be used for this request.
 * AmazonS3EncryptionClient would use {@link EncryptionMaterialsProvider#getEncryptionMaterials(java.util.Map)} to retrieve encryption materials
 * corresponding to the materialsDescription specified in the current request.
 * </p>
 */
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(
        InitiateMultipartUploadRequest req) {
    boolean isCreateEncryptionMaterial = true;
    if (req instanceof EncryptedInitiateMultipartUploadRequest) {
        EncryptedInitiateMultipartUploadRequest cryptoReq = 
                (EncryptedInitiateMultipartUploadRequest) req;
        isCreateEncryptionMaterial = cryptoReq.isCreateEncryptionMaterial();
    }
    return isCreateEncryptionMaterial
         ? crypto.initiateMultipartUploadSecurely(req)
         : super.initiateMultipartUpload(req)
         ;
}
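
The Javadoc above points at EncryptedInitiateMultipartUploadRequest as the way to pass a per-request materials description. A hedged caller-side sketch of that pattern follows (assuming the usual com.amazonaws.services.s3.model and java.util imports; the description keys, bucket/key names, and the pre-built encryptionClient are illustrative assumptions, not taken from ibm-cos-sdk-java):

// Illustrative only: route per-request encryption materials through
// initiateMultipartUpload on an AmazonS3EncryptionClient.
static String initiateEncryptedUpload(AmazonS3EncryptionClient encryptionClient) {
    Map<String, String> materialsDescription = new HashMap<>();
    materialsDescription.put("tenant", "tenant-42"); // hypothetical description entry

    EncryptedInitiateMultipartUploadRequest req =
            new EncryptedInitiateMultipartUploadRequest("my-bucket", "my-key");
    req.setMaterialsDescription(materialsDescription);

    // The client's EncryptionMaterialsProvider.getEncryptionMaterials(Map) is expected
    // to resolve materials matching this description, as the Javadoc above describes.
    InitiateMultipartUploadResult result = encryptionClient.initiateMultipartUpload(req);
    return result.getUploadId();
}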
Project: bender    File: S3TransporterTest.java
private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = spy(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  return mockClient;
}
Project: cloudkeeper    File: S3ConnectionImpl.java
@Override
public CompletableFuture<InitiateMultipartUploadResult> initiateMultipartUpload(String bucketName, String key) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, key);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    if (serverSideEncrypted) {
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(objectMetadata);
    }
    return CompletableFuture.supplyAsync(() -> s3Client.initiateMultipartUpload(request), executorService);
}
Project: omakase    File: S3Client.java
public String initiateMultipartUpload(S3Upload upload, String originalFilename) {
    try {
        runtimeCredentialsProvider.setAwsCredentials(upload.getAwsCredentials());
        amazonS3.setRegion(Region.getRegion(Regions.fromName(upload.getRegion())));
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.addUserMetadata("original-filename", originalFilename);
        InitiateMultipartUploadResult result = amazonS3.initiateMultipartUpload(new InitiateMultipartUploadRequest(upload.getBucket(), upload.getKey(), objectMetadata));
        return result.getUploadId();
    } catch (AmazonClientException e) {
        throw new OmakaseRuntimeException(e);
    }
}
Project: s3-stream-uploader    File: MultipartUploadState.java
MultipartUploadState(String bucketName, String prefix, String filename, long fileSizeBytes, InitiateMultipartUploadResult initResult) {
    this.initResult = initResult;
    this.fileSizeBytes = fileSizeBytes;
    this.bucketName = bucketName;
    this.prefix = prefix;
    this.filename = filename;
}
Project: Singularity    File: SingularityS3Uploader.java
private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata);
  if (maybeStorageClass.isPresent()) {
    initRequest.setStorageClass(maybeStorageClass.get());
  }
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

  long contentLength = file.length();
  long partSize = configuration.getUploadPartSize();

  try {
    long filePosition = 0;
    for (int i = 1; filePosition < contentLength; i++) {
      partSize = Math.min(partSize, (contentLength - filePosition));
      UploadPartRequest uploadRequest = new UploadPartRequest()
          .withBucketName(bucketName)
          .withKey(key)
          .withUploadId(initResponse.getUploadId())
          .withPartNumber(i)
          .withFileOffset(filePosition)
          .withFile(file)
          .withPartSize(partSize);
      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
      filePosition += partSize;
    }

    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, key, initResponse.getUploadId(), partETags);
    s3Client.completeMultipartUpload(completeRequest);
  } catch (Exception e) {
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId()));
    Throwables.propagate(e);
  }
}
Project: s3distcp    File: MultipartUploadOutputStream.java
public MultipartUploadOutputStream(AmazonS3 s3, ThreadPoolExecutor threadPool, Progressable progressable, String bucketName, String key, ObjectMetadata metadata, long partSize, File[] tempDirs)
{
    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10L, TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap = new HashMap<>();
    exceptionToPolicyMap.put(Exception.class, basePolicy);

    RetryPolicy methodPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);

    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
    methodNameToPolicyMap.put("completeMultipartUpload", methodPolicy);

    this.s3 = (AmazonS3) RetryProxy.create(AmazonS3.class, s3, methodNameToPolicyMap);
    InitiateMultipartUploadResult result = this.s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(bucketName, key).withObjectMetadata(metadata));

    this.threadPool = threadPool;
    this.progressable = progressable;
    this.futures = new ArrayList<>();

    this.tempDirs = tempDirs;
    this.bucketName = bucketName;
    this.key = key;
    this.uploadId = result.getUploadId();
    this.partSize = partSize;

    setTempFileAndOutput();
}
Project: elasticsearch_my    File: AmazonS3Wrapper.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
    return delegate.initiateMultipartUpload(request);
}
Project: syndesis    File: AmazonS3ClientMock.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initiateMultipartUploadRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: connectors    File: AmazonS3ClientMock.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initiateMultipartUploadRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: s3-channels    File: AbstractS3WritableObjectChannelSuite.java
protected InitiateMultipartUploadResult initMultiPart() {
    defaultAmazonS3().deleteObject(testBucket, this.key);
    return defaultAmazonS3().initiateMultipartUpload(new InitiateMultipartUploadRequest(testBucket, key));
}
Project: ibm-cos-sdk-java    File: AmazonS3Client.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(
        InitiateMultipartUploadRequest initiateMultipartUploadRequest)
        throws SdkClientException, AmazonServiceException {
    initiateMultipartUploadRequest = beforeClientExecution(initiateMultipartUploadRequest);
    rejectNull(initiateMultipartUploadRequest,
        "The request parameter must be specified when initiating a multipart upload");

    rejectNull(initiateMultipartUploadRequest.getBucketName(),
        "The bucket name parameter must be specified when initiating a multipart upload");
    rejectNull(initiateMultipartUploadRequest.getKey(),
        "The key parameter must be specified when initiating a multipart upload");

    Request<InitiateMultipartUploadRequest> request = createRequest(initiateMultipartUploadRequest.getBucketName(), initiateMultipartUploadRequest.getKey(), initiateMultipartUploadRequest, HttpMethodName.POST);
    request.addParameter("uploads", null);

    if (initiateMultipartUploadRequest.getStorageClass() != null)
        request.addHeader(Headers.STORAGE_CLASS, initiateMultipartUploadRequest.getStorageClass().toString());

    if (initiateMultipartUploadRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, initiateMultipartUploadRequest.getRedirectLocation());
    }

    if ( initiateMultipartUploadRequest.getAccessControlList() != null ) {
        addAclHeaders(request, initiateMultipartUploadRequest.getAccessControlList());
    } else if ( initiateMultipartUploadRequest.getCannedACL() != null ) {
        request.addHeader(Headers.S3_CANNED_ACL, initiateMultipartUploadRequest.getCannedACL().toString());
    }

    if (initiateMultipartUploadRequest.objectMetadata != null) {
        populateRequestMetadata(request, initiateMultipartUploadRequest.objectMetadata);
    }

    populateRequesterPaysHeader(request, initiateMultipartUploadRequest.isRequesterPays());

    // Populate the SSE-C parameters to the request header
    populateSSE_C(request, initiateMultipartUploadRequest.getSSECustomerKey());

    // Populate the SSE AWS KMS parameters to the request header
    populateSSE_KMS(request,
            initiateMultipartUploadRequest.getSSEAwsKeyManagementParams());

    // Be careful that we don't send the object's total size as the content
    // length for the InitiateMultipartUpload request.
    setZeroContentLength(request);
    // Set the request content to be empty (but not null) to force the runtime to pass
    // any query params in the query string and not the request body, to keep S3 happy.
    request.setContent(new ByteArrayInputStream(new byte[0]));

    @SuppressWarnings("unchecked")
    ResponseHeaderHandlerChain<InitiateMultipartUploadResult> responseHandler = new ResponseHeaderHandlerChain<InitiateMultipartUploadResult>(
            // xml payload unmarshaller
            new Unmarshallers.InitiateMultipartUploadResultUnmarshaller(),
            // header handlers
            new ServerSideEncryptionHeaderHandler<InitiateMultipartUploadResult>(),
            new S3RequesterChargedHeaderHandler<InitiateMultipartUploadResult>(),
            new InitiateMultipartUploadHeaderHandler());
    return invoke(request, responseHandler,
            initiateMultipartUploadRequest.getBucketName(), initiateMultipartUploadRequest.getKey());
}
Project: ibm-cos-sdk-java    File: InitiateMultipartUploadHeaderHandler.java
@Override
public void handle(InitiateMultipartUploadResult result, HttpResponse response) {
    result.setAbortDate(ServiceUtils.parseRfc822Date(response.getHeaders().get(Headers.ABORT_DATE)));
    result.setAbortRuleId(response.getHeaders().get(Headers.ABORT_RULE_ID));
}
Project: ibm-cos-sdk-java    File: S3Direct.java
public abstract InitiateMultipartUploadResult initiateMultipartUpload(
        InitiateMultipartUploadRequest req);
Project: ibm-cos-sdk-java    File: S3CryptoModule.java
public abstract InitiateMultipartUploadResult initiateMultipartUploadSecurely(
        InitiateMultipartUploadRequest req);
Project: ibm-cos-sdk-java    File: S3DirectSpi.java
public InitiateMultipartUploadResult initiateMultipartUpload(
        InitiateMultipartUploadRequest req);
Project: ibm-cos-sdk-java    File: AmazonS3EncryptionClient.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(
        InitiateMultipartUploadRequest req) {
    return AmazonS3EncryptionClient.super.initiateMultipartUpload(req);
}
Project: S3Decorators    File: S3Decorator.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) {
  return call(() -> getDelegate().initiateMultipartUpload(request));
}
Project: backuprotator    File: AWSHandler.java
private CompleteMultipartUploadResult copyMultipartFile(Bucket srcBucket, Bucket targetBucket, String fileName,
        long size) {
    // Create a list to hold the copy responses
    List<CopyPartResult> copyResponses = new ArrayList<CopyPartResult>();

    // Step 2: Initialize
    InitiateMultipartUploadRequest initiateRequest = 
            new InitiateMultipartUploadRequest(targetBucket.getName(), targetBucket.getPath() + SEPARATOR + fileName);

    InitiateMultipartUploadResult initResult = 
            s3Client.initiateMultipartUpload(initiateRequest);

     // Step 4. Copy parts.
    long partSize = 5 * (long)Math.pow(2.0, 20.0); // 5 MB
    long bytePosition = 0;
    for (int i = 1; bytePosition < size; i++)
    {
        // Step 5. Save copy response.
        CopyPartRequest copyRequest = new CopyPartRequest()
           .withDestinationBucketName(targetBucket.getName())
           .withDestinationKey(targetBucket.getPath() + SEPARATOR + fileName)
           .withSourceBucketName(srcBucket.getName())
           .withSourceKey(srcBucket.getPath() + SEPARATOR + fileName)
           .withUploadId(initResult.getUploadId())
           .withFirstByte(bytePosition)
           .withLastByte(bytePosition + partSize -1 >= size ? size - 1 : bytePosition + partSize - 1) 
           .withPartNumber(i);

        copyResponses.add(s3Client.copyPart(copyRequest));
        bytePosition += partSize;
    }
    CompleteMultipartUploadRequest completeRequest =
            new CompleteMultipartUploadRequest(
                    targetBucket.getName(),
                    targetBucket.getPath() + SEPARATOR + fileName,
                    initResult.getUploadId(),
                    GetETags(copyResponses));
    // Step 7. Complete copy operation.
    CompleteMultipartUploadResult completeUploadResponse =
        s3Client.completeMultipartUpload(completeRequest);
    return completeUploadResponse;
}
Project: bender    File: S3TransporterTest.java
@Test(expected = TransportException.class)
public void testAmazonClientException()
    throws TransportException, IllegalStateException, IOException {
  /*
   * Create mock client, requests, and replies
   */
  AmazonS3Client mockClient = mock(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doThrow(new AmazonClientException("expected")).when(mockClient)
      .uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  /*
   * Fill buffer with mock data
   */
  S3TransportBuffer buffer = new S3TransportBuffer(1000, false, new S3TransportSerializer());
  InternalEvent mockIevent = mock(InternalEvent.class);
  doReturn("foo").when(mockIevent).getSerialized();

  /*
   * Create transport
   */
  Map<String, MultiPartUpload> multiPartUploads = new HashMap<String, MultiPartUpload>(0);
  S3Transport transport =
      new S3Transport(mockClient, "bucket", "basepath", false, multiPartUploads);

  /*
   * Do actual test
   */
  buffer.add(mockIevent);
  LinkedHashMap<String, String> partitions = new LinkedHashMap<String, String>();
  partitions.put(S3Transport.FILENAME_KEY, "a_filename");

  ArgumentCaptor<UploadPartRequest> argument = ArgumentCaptor.forClass(UploadPartRequest.class);
  try {
    transport.sendBatch(buffer, partitions, new TestContext());
  } catch (Exception e) {
    assertEquals(e.getCause().getClass(), AmazonClientException.class);
    throw e;
  }
}
Project: cloudkeeper    File: S3BufferedOutputStream.java
private CompletableFuture<String> getUploadIdFuture() {
    if (uploadIdFuture == null) {
        uploadIdFuture = s3Connection.initiateMultipartUpload(bucketName, key)
            .thenApplyAsync(InitiateMultipartUploadResult::getUploadId, executorService);
        partETagFutures = new ArrayList<>();
    }
    return uploadIdFuture;
}
Project: Camel    File: AmazonS3ClientMock.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initiateMultipartUploadRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: apex-malhar    File: S3OutputModuleMockTest.java
@Test
public void testS3OutputModule() throws Exception
{
  InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
  result.setUploadId(uploadId);

  PutObjectResult objResult = new PutObjectResult();
  objResult.setETag("SuccessFullyUploaded");

  UploadPartResult partResult = new UploadPartResult();
  partResult.setPartNumber(1);
  partResult.setETag("SuccessFullyPartUploaded");

  MockitoAnnotations.initMocks(this);
  when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(result);
  when(client.putObject(any(PutObjectRequest.class))).thenReturn(objResult);
  when(client.uploadPart(any(UploadPartRequest.class))).thenReturn(partResult);
  when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn(completeMultiPart());

  Application app = new S3OutputModuleMockTest.Application();
  Configuration conf = new Configuration();
  conf.set("dt.operator.HDFSInputModule.prop.files", inputDir);
  conf.set("dt.operator.HDFSInputModule.prop.blockSize", "10");
  conf.set("dt.operator.HDFSInputModule.prop.blocksThreshold", "1");
  conf.set("dt.attr.CHECKPOINT_WINDOW_COUNT","20");

  conf.set("dt.operator.S3OutputModule.prop.accessKey", "accessKey");
  conf.set("dt.operator.S3OutputModule.prop.secretAccessKey", "secretKey");
  conf.set("dt.operator.S3OutputModule.prop.bucketName", "bucketKey");
  conf.set("dt.operator.S3OutputModule.prop.outputDirectoryPath", outputDir);

  Path outDir = new Path("file://" + new File(outputDir).getAbsolutePath());
  final Path outputFilePath =  new Path(outDir.toString() + File.separator + FILE);
  final FileSystem fs = FileSystem.newInstance(outDir.toUri(), new Configuration());
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return fs.exists(outputFilePath);
    }
  });
  lc.run(10000);

  Assert.assertTrue("output file exist", fs.exists(outputFilePath));
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherTools.java
public static void uploadFile(
        final File file,
        final Artifact artifact,
        final CompressionType compressionType,
        final EncryptionKey encryptionKey,
        final AmazonS3 amazonS3,
        final BuildListener listener) throws IOException {

    LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file);

    final String bucketName = artifact.getLocation().getS3Location().getBucketName();
    final String objectKey  = artifact.getLocation().getS3Location().getObjectKey();
    final List<PartETag> partETags = new ArrayList<>();

    final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName,
            objectKey,
            createObjectMetadata(compressionType))
        .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey));

    final InitiateMultipartUploadResult initiateMultipartUploadResult
            = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);

    final long contentLength = file.length();
    long filePosition = 0;
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB

    for (int i = 1; filePosition < contentLength; i++) {
        partSize = Math.min(partSize, (contentLength - filePosition));

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(objectKey)
                .withUploadId(initiateMultipartUploadResult.getUploadId())
                .withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);

        partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag());

        filePosition += partSize;
    }

    final CompleteMultipartUploadRequest completeMultipartUpload
            = new CompleteMultipartUploadRequest(
                bucketName,
                objectKey,
                initiateMultipartUploadResult.getUploadId(),
                partETags);

    amazonS3.completeMultipartUpload(completeMultipartUpload);

    LoggingHelper.log(listener, "Upload successful");
}
Project: presto    File: MockAmazonS3.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request)
        throws AmazonClientException
{
    return null;
}
Project: datacollector    File: SupportBundleManager.java
/**
 * Instead of providing support bundle directly to user, upload it to StreamSets backend services.
 */
public void uploadNewBundle(List<String> generators) throws IOException {
  boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
  String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
  String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
  String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
  int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

  if(!enabled) {
    throw new IOException("Uploading support bundles was disabled by administrator.");
  }

  AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));
  AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
  s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
  s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

  // Object Metadata
  ObjectMetadata metadata = new ObjectMetadata();
  for(Map.Entry<Object, Object> entry: getMetadata().entrySet()) {
    metadata.addUserMetadata((String)entry.getKey(), (String)entry.getValue());
  }

  // Generate bundle
  SupportBundle bundle = generateNewBundle(generators);

  // Uploading part by part
  LOG.info("Initiating multi-part support bundle upload");
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, bundle.getBundleKey());
  initRequest.setObjectMetadata(metadata);
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

  try {
    byte[] buffer = new byte[bufferSize];
    int partId = 1;
    int size = -1;
    while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
      LOG.debug("Uploading part {} of size {}", partId, size);
      UploadPartRequest uploadRequest = new UploadPartRequest()
        .withBucketName(bucket)
        .withKey(bundle.getBundleKey())
        .withUploadId(initResponse.getUploadId())
        .withPartNumber(partId++)
        .withInputStream(new ByteArrayInputStream(buffer))
        .withPartSize(size);

      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
      bucket,
      bundle.getBundleKey(),
      initResponse.getUploadId(),
      partETags
    );

    s3Client.completeMultipartUpload(compRequest);
    LOG.info("Support bundle upload finished");
  } catch (Exception e) {
    LOG.error("Support bundle upload failed", e);
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(
      bucket,
      bundle.getBundleKey(),
      initResponse.getUploadId())
    );

    throw new IOException("Can't upload support bundle", e);
  } finally {
    // Close the client
    s3Client.shutdown();
  }
}