Example source code for the Java class com.amazonaws.services.s3.model.InitiateMultipartUploadRequest
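
All of the snippets below follow the same basic pattern: build an InitiateMultipartUploadRequest for a bucket and key, call initiateMultipartUpload to obtain an upload ID, then upload parts and either complete or abort the upload. Here is a minimal, self-contained sketch of that pattern; the bucket name, object key, file path, and client construction are placeholders only, and the real projects below wire these pieces up in their own ways.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch {
  public static void main(String[] args) {
    // Placeholder bucket, key, and local file; replace with real values.
    String bucket = "example-bucket";
    String key = "example-key";
    File file = new File("/tmp/example.bin");

    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

    // 1. Initiate the multipart upload and keep the upload ID.
    InitiateMultipartUploadRequest initRequest =
        new InitiateMultipartUploadRequest(bucket, key, new ObjectMetadata());
    String uploadId = s3.initiateMultipartUpload(initRequest).getUploadId();

    List<PartETag> partETags = new ArrayList<>();
    try {
      long partSize = 5L * 1024 * 1024; // S3 minimum part size, except for the last part
      long filePosition = 0;
      for (int partNumber = 1; filePosition < file.length(); partNumber++) {
        long size = Math.min(partSize, file.length() - filePosition);
        // 2. Upload each part and collect the returned ETags.
        UploadPartRequest uploadRequest = new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(partNumber)
            .withFileOffset(filePosition)
            .withFile(file)
            .withPartSize(size);
        partETags.add(s3.uploadPart(uploadRequest).getPartETag());
        filePosition += size;
      }
      // 3. Complete the upload with the collected part ETags.
      s3.completeMultipartUpload(
          new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
    } catch (Exception e) {
      // On failure, abort so S3 does not keep the incomplete parts around.
      s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
      throw new RuntimeException(e);
    }
  }
}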

Project: hadoop    File: S3AFastOutputStream.java   (the same method appears verbatim in the aliyun-oss-hadoop-fs, big-c, and hadoop-2.6.0-cdh5.4.3 projects; only one copy is shown)
private MultiPartUpload initiateMultiPartUpload() throws IOException {
  final ObjectMetadata om = createDefaultMetadata();
  final InitiateMultipartUploadRequest initiateMPURequest =
      new InitiateMultipartUploadRequest(bucket, key, om);
  initiateMPURequest.setCannedACL(cannedACL);
  try {
    return new MultiPartUpload(
        client.initiateMultipartUpload(initiateMPURequest).getUploadId());
  } catch (AmazonServiceException ase) {
    throw new IOException("Unable to initiate MultiPartUpload (server side)" +
        ": " + ase, ase);
  } catch (AmazonClientException ace) {
    throw new IOException("Unable to initiate MultiPartUpload (client side)" +
        ": " + ace, ace);
  }
}
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Tests if not yet completed / aborted multipart uploads are listed.
 *
 * @throws Exception not expected
 */
@Test
public void shouldListMultipartUploads() throws Exception {
  s3Client.createBucket(BUCKET_NAME);

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(empty()));

  final InitiateMultipartUploadResult initiateMultipartUploadResult = s3Client
      .initiateMultipartUpload(new InitiateMultipartUploadRequest(BUCKET_NAME, UPLOAD_FILE_NAME));
  final String uploadId = initiateMultipartUploadResult.getUploadId();

  final MultipartUploadListing listing =
      s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME));
  assertThat(listing.getMultipartUploads(), is(not(empty())));
  assertThat(listing.getBucketName(), equalTo(BUCKET_NAME));
  assertThat(listing.getMultipartUploads(), hasSize(1));
  final MultipartUpload upload = listing.getMultipartUploads().get(0);
  assertThat(upload.getUploadId(), equalTo(uploadId));
  assertThat(upload.getKey(), equalTo(UPLOAD_FILE_NAME));
}
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Tests if a multipart upload can be aborted.
 *
 * @throws Exception not expected
 */
@Test
public void shouldAbortMultipartUpload() throws Exception {
  s3Client.createBucket(BUCKET_NAME);

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(empty()));

  final InitiateMultipartUploadResult initiateMultipartUploadResult = s3Client
      .initiateMultipartUpload(new InitiateMultipartUploadRequest(BUCKET_NAME, UPLOAD_FILE_NAME));
  final String uploadId = initiateMultipartUploadResult.getUploadId();

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(not(empty())));

  s3Client.abortMultipartUpload(
      new AbortMultipartUploadRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadId));

  assertThat(s3Client.listMultipartUploads(new ListMultipartUploadsRequest(BUCKET_NAME))
      .getMultipartUploads(), is(empty()));
}
Project: apex-malhar    File: S3InitiateFileUploadOperator.java
/**
 * For the input file, initiates the upload and emits the UploadFileMetadata through the
 * fileMetadataOutput and uploadMetadataOutput ports.
 * @param tuple given tuple
 */
protected void processTuple(AbstractFileSplitter.FileMetadata tuple)
{
  if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
    return;
  }
  String keyName = getKeyName(tuple.getFilePath());
  String uploadId = "";
  if (tuple.getNumberOfBlocks() > 1) {
    InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, keyName);
    initRequest.setObjectMetadata(createObjectMetadata());
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    uploadId = initResponse.getUploadId();
  }
  UploadFileMetadata uploadFileMetadata = new UploadFileMetadata(tuple, uploadId, keyName);
  fileMetadataOutput.emit(uploadFileMetadata);
  uploadMetadataOutput.emit(uploadFileMetadata);
  currentWindowRecoveryState.add(uploadFileMetadata);
}
Project: apex-malhar    File: S3InitiateFileUploadOperatorTest.java
@Test
public void testInitiateUpload()
{
  InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
  result.setUploadId(uploadId);

  MockitoAnnotations.initMocks(this);
  when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(result);
  when(fileMetadata.getFilePath()).thenReturn("/tmp/file1.txt");
  when(fileMetadata.getNumberOfBlocks()).thenReturn(4);

  S3InitiateFileUploadTest operator = new S3InitiateFileUploadTest();
  operator.setBucketName("testbucket");
  operator.setup(context);

  CollectorTestSink<S3InitiateFileUploadOperator.UploadFileMetadata> fileSink = new CollectorTestSink<>();
  CollectorTestSink<Object> tmp = (CollectorTestSink)fileSink;
  operator.fileMetadataOutput.setSink(tmp);
  operator.beginWindow(0);
  operator.processTuple(fileMetadata);
  operator.endWindow();

  S3InitiateFileUploadOperator.UploadFileMetadata emitted = (S3InitiateFileUploadOperator.UploadFileMetadata)tmp.collectedTuples.get(0);
  Assert.assertEquals("Upload ID :", uploadId, emitted.getUploadId());
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherCallableTest.java
@Test
public void canUploadMultipleOutputArtifacts() throws IOException {
    // given
    jenkinsOutputs.clear();
    jenkinsOutputs.add(new OutputArtifact(TEST_FILE, "dummyArtifact"));
    jenkinsOutputs.add(new OutputArtifact("Dir1", "dummyArtifact1"));
    jenkinsOutputs.add(new OutputArtifact("Dir2", "dummyArtifact2"));

    outputArtifacts.clear();
    outputArtifacts.add(outputArtifact);
    outputArtifacts.add(outputArtifact1);
    outputArtifacts.add(outputArtifact2);

    // when
    publisher.invoke(workspace, null);

    // then
    verify(s3Client, times(3)).initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));
}
Project: s3proxy    File: AwsSdkTest.java
@Test
public void testAtomicMpuAbort() throws Exception {
    String key = "testAtomicMpuAbort";
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(BYTE_SOURCE.size());
    client.putObject(containerName, key, BYTE_SOURCE.openStream(),
            metadata);

    InitiateMultipartUploadRequest initRequest =
            new InitiateMultipartUploadRequest(containerName, key);
    InitiateMultipartUploadResult initResponse =
            client.initiateMultipartUpload(initRequest);
    String uploadId = initResponse.getUploadId();

    client.abortMultipartUpload(new AbortMultipartUploadRequest(
                containerName, key, uploadId));

    S3Object object = client.getObject(containerName, key);
    assertThat(object.getObjectMetadata().getContentLength()).isEqualTo(
            BYTE_SOURCE.size());
    try (InputStream actual = object.getObjectContent();
            InputStream expected = BYTE_SOURCE.openStream()) {
        assertThat(actual).hasContentEqualTo(expected);
    }
}
Project: s3proxy    File: AwsSdkTest.java
@Test
public void testPartNumberMarker() throws Exception {
    String blobName = "foo";
    InitiateMultipartUploadResult result = client.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(containerName, blobName));
    ListPartsRequest request = new ListPartsRequest(containerName,
            blobName, result.getUploadId());

    client.listParts(request.withPartNumberMarker(0));

    try {
        client.listParts(request.withPartNumberMarker(1));
        Fail.failBecauseExceptionWasNotThrown(AmazonS3Exception.class);
    } catch (AmazonS3Exception e) {
        assertThat(e.getErrorCode()).isEqualTo("NotImplemented");
    }
}
Project: vs.msc.ws14    File: S3DataOutputStream.java
private String initiateMultipartUpload() throws IOException {

        boolean operationSuccessful = false;
        final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
        if (this.useRRS) {
            request.setStorageClass(StorageClass.ReducedRedundancy);
        } else {
            request.setStorageClass(StorageClass.Standard);
        }

        try {

            final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
            operationSuccessful = true;
            return result.getUploadId();

        } catch (AmazonServiceException e) {
            throw new IOException(StringUtils.stringifyException(e));
        } finally {
            if (!operationSuccessful) {
                abortUpload();
            }
        }
    }
Project: elasticsearch_my    File: DefaultS3OutputStream.java
protected String doInitialize(S3BlobStore blobStore, String bucketName, String blobName, boolean serverSideEncryption) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, blobName)
            .withCannedACL(blobStore.getCannedACL())
            .withStorageClass(blobStore.getStorageClass());

    if (serverSideEncryption) {
        ObjectMetadata md = new ObjectMetadata();
        md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(md);
    }

    return blobStore.client().initiateMultipartUpload(request).getUploadId();
}
Project: ibm-cos-sdk-java    File: AmazonS3Client.java
/**
 * Creates and returns a multi-part upload initiation request from the given upload-object
 * request.
 */
protected final InitiateMultipartUploadRequest newInitiateMultipartUploadRequest(
        UploadObjectRequest req) {
    return new InitiateMultipartUploadRequest(
            req.getBucketName(), req.getKey(), req.getMetadata())
        .withRedirectLocation(req.getRedirectLocation())
        .withSSEAwsKeyManagementParams(req.getSSEAwsKeyManagementParams())
        .withSSECustomerKey(req.getSSECustomerKey())
        .withStorageClass(req.getStorageClass())
        .withAccessControlList(req.getAccessControlList())
        .withCannedACL(req.getCannedAcl())
        .withGeneralProgressListener(req.getGeneralProgressListener())
        .withRequestMetricCollector(req.getRequestMetricCollector())
        ;
}
Project: ibm-cos-sdk-java    File: UploadCallable.java
/**
 * Initiates a multipart upload and returns the upload id
 * @param isUsingEncryption
 */
private String initiateMultipartUpload(PutObjectRequest origReq, boolean isUsingEncryption) {

    InitiateMultipartUploadRequest req = null;
    if (isUsingEncryption && origReq instanceof EncryptedPutObjectRequest) {
        req = new EncryptedInitiateMultipartUploadRequest(
                origReq.getBucketName(), origReq.getKey()).withCannedACL(
                origReq.getCannedAcl()).withObjectMetadata(origReq.getMetadata());
        ((EncryptedInitiateMultipartUploadRequest) req)
                .setMaterialsDescription(((EncryptedPutObjectRequest) origReq).getMaterialsDescription());
    } else {
        req = new InitiateMultipartUploadRequest(origReq.getBucketName(), origReq.getKey())
            .withCannedACL(origReq.getCannedAcl())
            .withObjectMetadata(origReq.getMetadata());
    }

    TransferManager.appendMultipartUserAgent(req);

    req.withAccessControlList(origReq.getAccessControlList())
       .withRequesterPays(origReq.isRequesterPays())
       .withStorageClass(origReq.getStorageClass())
       .withRedirectLocation(origReq.getRedirectLocation())
       .withSSECustomerKey(origReq.getSSECustomerKey())
       .withSSEAwsKeyManagementParams(origReq.getSSEAwsKeyManagementParams())
       .withGeneralProgressListener(origReq.getGeneralProgressListener())
       .withRequestMetricCollector(origReq.getRequestMetricCollector())
       ;

    String uploadId = s3.initiateMultipartUpload(req).getUploadId();
    log.debug("Initiated new multipart upload: " + uploadId);

    return uploadId;
}
Project: ibm-cos-sdk-java    File: CryptoModuleDispatcher.java
@Override
public InitiateMultipartUploadResult initiateMultipartUploadSecurely(
        InitiateMultipartUploadRequest req)
                throws SdkClientException, AmazonServiceException {
    return defaultCryptoMode == EncryptionOnly 
         ? eo.initiateMultipartUploadSecurely(req)
         : ae.initiateMultipartUploadSecurely(req)
         ;
}
Project: ibm-cos-sdk-java    File: S3CryptoModuleEO.java
@Override
final MultipartUploadCbcContext newUploadContext(
        InitiateMultipartUploadRequest req,
        ContentCryptoMaterial cekMaterial) {
    MultipartUploadCbcContext encryptedUploadContext = new MultipartUploadCbcContext(
            req.getBucketName(), req.getKey(), cekMaterial);
    byte[] iv = cekMaterial.getCipherLite().getIV();
    encryptedUploadContext.setNextInitializationVector(iv);
    return encryptedUploadContext; 
}
Project: ibm-cos-sdk-java    File: AmazonS3EncryptionClient.java
/** 
 * {@inheritDoc}
 * <p>
 * Use {@link EncryptedInitiateMultipartUploadRequest} to specify materialsDescription for the EncryptionMaterials to be used for this request.
 * AmazonS3EncryptionClient would use {@link EncryptionMaterialsProvider#getEncryptionMaterials(java.util.Map)} to retrieve encryption materials
 * corresponding to the materialsDescription specified in the current request.
 * </p>
 */
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(
        InitiateMultipartUploadRequest req) {
    boolean isCreateEncryptionMaterial = true;
    if (req instanceof EncryptedInitiateMultipartUploadRequest) {
        EncryptedInitiateMultipartUploadRequest cryptoReq = 
                (EncryptedInitiateMultipartUploadRequest) req;
        isCreateEncryptionMaterial = cryptoReq.isCreateEncryptionMaterial();
    }
    return isCreateEncryptionMaterial
         ? crypto.initiateMultipartUploadSecurely(req)
         : super.initiateMultipartUpload(req)
         ;
}
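The javadoc above points callers at EncryptedInitiateMultipartUploadRequest for supplying a materialsDescription. The following is a minimal caller-side sketch, assuming a hypothetical encryptionClient (an AmazonS3EncryptionClient configured elsewhere) and placeholder bucket, key, and description entries:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.EncryptedInitiateMultipartUploadRequest;

import java.util.HashMap;
import java.util.Map;

class EncryptedMpuSketch {
  // encryptionClient is assumed to be an AmazonS3EncryptionClient backed by an
  // EncryptionMaterialsProvider; the bucket, key, and description entries are placeholders.
  static String initiateEncrypted(AmazonS3 encryptionClient) {
    Map<String, String> materialsDescription = new HashMap<>();
    materialsDescription.put("customer-id", "example-customer");

    EncryptedInitiateMultipartUploadRequest request =
        new EncryptedInitiateMultipartUploadRequest("example-bucket", "example-key")
            .withMaterialsDescription(materialsDescription);

    // The provider is consulted with this description to pick matching encryption materials.
    return encryptionClient.initiateMultipartUpload(request).getUploadId();
  }
}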
Project: ibm-cos-sdk-java    File: UploadObjectObserver.java
protected InitiateMultipartUploadRequest newInitiateMultipartUploadRequest(
        UploadObjectRequest req) {
    return new EncryptedInitiateMultipartUploadRequest(
            req.getBucketName(), req.getKey(), req.getMetadata())
        .withMaterialsDescription(req.getMaterialsDescription())
        .withRedirectLocation(req.getRedirectLocation())
        .withSSEAwsKeyManagementParams(req.getSSEAwsKeyManagementParams())
        .withSSECustomerKey(req.getSSECustomerKey())
        .withStorageClass(req.getStorageClass())
        .withAccessControlList(req.getAccessControlList())
        .withCannedACL(req.getCannedAcl())
        .withGeneralProgressListener(req.getGeneralProgressListener())
        .withRequestMetricCollector(req.getRequestMetricCollector())
        ;
}
Project: nifi-minifi    File: S3OutputStream.java
private MultipartUpload newMultipartUpload() throws IOException {
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, key, new ObjectMetadata());
  try {
    return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId());
  } catch (AmazonClientException e) {
    throw new IOException("Unable to initiate MultipartUpload: " + e, e);
  }
}
Project: stocator    File: COSAPIClient.java
/**
 * Start the multipart upload process.
 * @return the upload ID of the new multipart upload
 * @throws IOException IO problem
 */
String initiateMultiPartUpload() throws IOException {
  LOG.debug("Initiating Multipart upload");
  final InitiateMultipartUploadRequest initiateMPURequest =
      new InitiateMultipartUploadRequest(mBucket,
          key,
          newObjectMetadata(-1));
  try {
    return mClient.initiateMultipartUpload(initiateMPURequest)
        .getUploadId();
  } catch (AmazonClientException ace) {
    throw translateException("initiate MultiPartUpload", key, ace);
  }
}
Project: bender    File: S3TransporterTest.java
private AmazonS3Client getMockClient() {
  AmazonS3Client mockClient = spy(AmazonS3Client.class);
  UploadPartResult uploadResult = new UploadPartResult();
  uploadResult.setETag("foo");
  doReturn(uploadResult).when(mockClient).uploadPart(any(UploadPartRequest.class));

  InitiateMultipartUploadResult initUploadResult = new InitiateMultipartUploadResult();
  initUploadResult.setUploadId("123");
  doReturn(initUploadResult).when(mockClient)
      .initiateMultipartUpload(any(InitiateMultipartUploadRequest.class));

  return mockClient;
}
Project: cloudkeeper    File: S3ConnectionImpl.java
@Override
public CompletableFuture<InitiateMultipartUploadResult> initiateMultipartUpload(String bucketName, String key) {
    InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(bucketName, key);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    if (serverSideEncrypted) {
        objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
        request.setObjectMetadata(objectMetadata);
    }
    return CompletableFuture.supplyAsync(() -> s3Client.initiateMultipartUpload(request), executorService);
}
Project: kafka-connect-storage-cloud    File: S3OutputStream.java
private MultipartUpload newMultipartUpload() throws IOException {
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
      bucket,
      key,
      newObjectMetadata()
  ).withCannedACL(cannedAcl);

  try {
    return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId());
  } catch (AmazonClientException e) {
    // TODO: elaborate on the exception interpretation. If this is an AmazonServiceException,
    // there's more info to be extracted.
    throw new IOException("Unable to initiate MultipartUpload: " + e, e);
  }
}
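The TODO in the catch block above notes that an AmazonServiceException carries more detail than a generic AmazonClientException. As an illustration only (not part of kafka-connect-storage-cloud), the handler could be split like this, assuming the same bucket, key, cannedAcl, newObjectMetadata() and MultipartUpload members as newMultipartUpload() above:

// Hypothetical variant of newMultipartUpload() that surfaces service-side details.
private MultipartUpload newMultipartUploadVerbose() throws IOException {
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(
      bucket,
      key,
      newObjectMetadata()
  ).withCannedACL(cannedAcl);

  try {
    return new MultipartUpload(s3.initiateMultipartUpload(initRequest).getUploadId());
  } catch (AmazonServiceException e) {
    // The service responded with an error: report the HTTP status, S3 error code and request ID.
    throw new IOException("Unable to initiate MultipartUpload: status=" + e.getStatusCode()
        + ", errorCode=" + e.getErrorCode() + ", requestId=" + e.getRequestId(), e);
  } catch (AmazonClientException e) {
    // No service response at all (e.g. a client-side networking failure).
    throw new IOException("Unable to initiate MultipartUpload: " + e, e);
  }
}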
Project: omakase    File: S3Client.java
public String initiateMultipartUpload(S3Upload upload, String originalFilename) {
    try {
        runtimeCredentialsProvider.setAwsCredentials(upload.getAwsCredentials());
        amazonS3.setRegion(Region.getRegion(Regions.fromName(upload.getRegion())));
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.addUserMetadata("original-filename", originalFilename);
        InitiateMultipartUploadResult result = amazonS3.initiateMultipartUpload(new InitiateMultipartUploadRequest(upload.getBucket(), upload.getKey(), objectMetadata));
        return result.getUploadId();
    } catch (AmazonClientException e) {
        throw new OmakaseRuntimeException(e);
    }
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherCallableTest.java
@Test
public void uploadsArtifactToS3() throws IOException {
    // when
    publisher.invoke(workspace, null);

    // then
    final InOrder inOrder = inOrder(clientFactory, awsClients, s3Client);
    inOrder.verify(clientFactory).getAwsClient(ACCESS_KEY, SECRET_KEY, PROXY_HOST, PROXY_PORT, REGION, PLUGIN_VERSION);
    inOrder.verify(awsClients).getCodePipelineClient();
    inOrder.verify(awsClients).getS3Client(credentialsProviderCaptor.capture());
    inOrder.verify(s3Client).initiateMultipartUpload(initiateMultipartUploadRequestCaptor.capture());
    inOrder.verify(s3Client).uploadPart(uploadPartRequestCaptor.capture());

    final com.amazonaws.auth.AWSSessionCredentials credentials
        = (com.amazonaws.auth.AWSSessionCredentials) credentialsProviderCaptor.getValue().getCredentials();
    assertEquals(JOB_ACCESS_KEY, credentials.getAWSAccessKeyId());
    assertEquals(JOB_SECRET_KEY, credentials.getAWSSecretKey());
    assertEquals(JOB_SESSION_TOKEN, credentials.getSessionToken());

    verify(codePipelineClient).getJobDetails(getJobDetailsRequestCaptor.capture());
    assertEquals(JOB_ID, getJobDetailsRequestCaptor.getValue().getJobId());

    final InitiateMultipartUploadRequest initRequest = initiateMultipartUploadRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, initRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, initRequest.getKey());

    final UploadPartRequest uploadRequest = uploadPartRequestCaptor.getValue();
    assertEquals(S3_BUCKET_NAME, uploadRequest.getBucketName());
    assertEquals(S3_OBJECT_KEY, uploadRequest.getKey());
    assertEquals(UPLOAD_ID, uploadRequest.getUploadId());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherToolsTest.java
@Before
public void setUp() {
    MockitoAnnotations.initMocks(this);

    when(mockS3Client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class)))
            .thenReturn(mockUploadResult);
    when(mockS3Client.uploadPart(any(UploadPartRequest.class))).thenReturn(mockPartRequest);
    when(mockUploadResult.getUploadId()).thenReturn("123");
    when(mockArtifact.getLocation()).thenReturn(mockLocation);
    when(mockLocation.getS3Location()).thenReturn(s3ArtifactLocation);
    when(s3ArtifactLocation.getBucketName()).thenReturn("Bucket");
    when(s3ArtifactLocation.getObjectKey()).thenReturn("Key");

    outContent = TestUtils.setOutputStream();
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherToolsTest.java
@Test
public void uploadFileSuccess() throws IOException {
    TestUtils.initializeTestingFolders();

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            null, // No custom encryption key
            mockS3Client,
            null); // Listener

    final InOrder inOrder = inOrder(mockS3Client);
    inOrder.verify(mockS3Client, times(1)).initiateMultipartUpload(initiateCaptor.capture());
    // Total size is less than 5MB, should only be one upload
    inOrder.verify(mockS3Client, times(1)).uploadPart(any(UploadPartRequest.class));
    inOrder.verify(mockS3Client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherToolsTest.java
@Test
public void uploadWithCustomKmsEncryptionKey() throws IOException {
    TestUtils.initializeTestingFolders();

    when(mockEncryptionKey.getId()).thenReturn("KMS-KEY-ARN");
    when(mockEncryptionKey.getType()).thenReturn(EncryptionKeyType.KMS.toString());

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            mockEncryptionKey,
            mockS3Client,
            null); // Listener

    verify(mockS3Client).initiateMultipartUpload(initiateCaptor.capture());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertEquals("KMS-KEY-ARN", encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherToolsTest.java
@Test
public void uploadWithUnknownEncryptionKeyType() throws IOException {
    TestUtils.initializeTestingFolders();

    when(mockEncryptionKey.getId()).thenReturn("KMS-KEY-ARN");
    when(mockEncryptionKey.getType()).thenReturn("Custom");

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            mockEncryptionKey,
            mockS3Client,
            null); // Listener

    verify(mockS3Client).initiateMultipartUpload(initiateCaptor.capture());

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Project: Singularity    File: SingularityS3Uploader.java
private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata);
  if (maybeStorageClass.isPresent()) {
    initRequest.setStorageClass(maybeStorageClass.get());
  }
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

  long contentLength = file.length();
  long partSize = configuration.getUploadPartSize();

  try {
    long filePosition = 0;
    for (int i = 1; filePosition < contentLength; i++) {
      partSize = Math.min(partSize, (contentLength - filePosition));
      UploadPartRequest uploadRequest = new UploadPartRequest()
          .withBucketName(bucketName)
          .withKey(key)
          .withUploadId(initResponse.getUploadId())
          .withPartNumber(i)
          .withFileOffset(filePosition)
          .withFile(file)
          .withPartSize(partSize);
      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
      filePosition += partSize;
    }

    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, key, initResponse.getUploadId(), partETags);
    s3Client.completeMultipartUpload(completeRequest);
  } catch (Exception e) {
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId()));
    Throwables.propagate(e);
  }
}
Project: s3distcp    File: MultipartUploadOutputStream.java
public MultipartUploadOutputStream(AmazonS3 s3, ThreadPoolExecutor threadPool, Progressable progressable,
    String bucketName, String key, ObjectMetadata metadata, long partSize, File[] tempDirs) {
  RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10L, TimeUnit.SECONDS);
  Map exceptionToPolicyMap = new HashMap();
  exceptionToPolicyMap.put(Exception.class, basePolicy);

  RetryPolicy methodPolicy = RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);

  Map methodNameToPolicyMap = new HashMap();
  methodNameToPolicyMap.put("completeMultipartUpload", methodPolicy);

  this.s3 = ((AmazonS3) RetryProxy.create(AmazonS3.class, s3, methodNameToPolicyMap));
  InitiateMultipartUploadResult result = this.s3.initiateMultipartUpload(
      new InitiateMultipartUploadRequest(bucketName, key).withObjectMetadata(metadata));

  this.threadPool = threadPool;
  this.progressable = progressable;
  this.futures = new ArrayList();

  this.tempDirs = tempDirs;
  this.bucketName = bucketName;
  this.key = key;
  this.uploadId = result.getUploadId();
  this.partSize = partSize;

  setTempFileAndOutput();
}
Project: elasticsearch_my    File: AmazonS3Wrapper.java
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
    return delegate.initiateMultipartUpload(request);
}
Project: syndesis    File: AmazonS3ClientMock.java   (this stub override appears three times in syndesis and three times in the connectors project; only one copy is shown)
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initiateMultipartUploadRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: s3-channels    File: AbstractS3WritableObjectChannelSuite.java
protected InitiateMultipartUploadResult initMultiPart() {
    defaultAmazonS3().deleteObject(testBucket, this.key);
    return defaultAmazonS3().initiateMultipartUpload(new InitiateMultipartUploadRequest(testBucket, key));
}