Example source code for the Java class com.amazonaws.services.s3.model.CompleteMultipartUploadRequest
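
The snippets on this page all construct the same request type in slightly different surroundings. For orientation, here is a minimal, self-contained sketch of the multipart-upload lifecycle that ends in a CompleteMultipartUploadRequest. The class name MultipartUploadSketch and the fixed 5 MB part size are illustrative assumptions, not taken from any of the projects below.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadResult;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch {

    // 5 MB is the smallest part size S3 accepts for every part except the last one.
    private static final long PART_SIZE = 5L * 1024 * 1024;

    /**
     * Uploads a file part by part and finishes with a CompleteMultipartUploadRequest.
     * If anything fails, the upload is aborted so no orphaned parts are left behind.
     */
    public static CompleteMultipartUploadResult upload(AmazonS3 s3, String bucket, String key, File file) {
        InitiateMultipartUploadResult init =
                s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key));
        String uploadId = init.getUploadId();
        List<PartETag> partETags = new ArrayList<>();
        try {
            long contentLength = file.length();
            long filePosition = 0;
            for (int partNumber = 1; filePosition < contentLength; partNumber++) {
                long partSize = Math.min(PART_SIZE, contentLength - filePosition);
                UploadPartRequest uploadPartRequest = new UploadPartRequest()
                        .withBucketName(bucket)
                        .withKey(key)
                        .withUploadId(uploadId)
                        .withPartNumber(partNumber)
                        .withFileOffset(filePosition)
                        .withFile(file)
                        .withPartSize(partSize);
                // Collect every part's ETag; S3 needs the full list to assemble the final object.
                partETags.add(s3.uploadPart(uploadPartRequest).getPartETag());
                filePosition += partSize;
            }
            // The complete request ties bucket, key, upload id and part ETags together.
            return s3.completeMultipartUpload(
                    new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
        } catch (RuntimeException e) {
            s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
            throw e;
        }
    }
}

Aborting on failure is the same pattern used by the Singularity and datacollector snippets further down the page.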

Project: ibm-cos-sdk-java    File: S3CryptoModuleBase.java
@Override
public CompleteMultipartUploadResult completeMultipartUploadSecurely(
        CompleteMultipartUploadRequest req) {
    appendUserAgent(req, USER_AGENT);
    String uploadId = req.getUploadId();
    final T uploadContext = multipartUploadContexts.get(uploadId);

    if (uploadContext != null && !uploadContext.hasFinalPartBeenSeen()) {
        throw new SdkClientException(
            "Unable to complete an encrypted multipart upload without being told which part was the last.  "
            + "Without knowing which part was the last, the encrypted data in Amazon S3 is incomplete and corrupt.");
    }
    CompleteMultipartUploadResult result = s3.completeMultipartUpload(req);

    // In InstructionFile mode, we want to write the instruction file only
    // after the whole upload has completed correctly.
    if (uploadContext != null
    &&  cryptoConfig.getStorageMode() == InstructionFile) {
        // Put the instruction file into S3
        s3.putObject(createInstructionPutRequest(
                uploadContext.getBucketName(), uploadContext.getKey(),
                uploadContext.getContentCryptoMaterial()));
    }
    multipartUploadContexts.remove(uploadId);
    return result;
}
Project: s3-channels    File: S3AppendableObjectChannel.java
@Override
@SuppressWarnings("unchecked")
public void close() {
    if (!isOpen()) {
        return;
    }
    closed = true;
    uploadPendingParts();
    CompletableFuture<Void>[] futures;
    synchronized (workers) {
        futures = workers.values().toArray(new CompletableFuture[workers.size()]);
    }
    CompletableFuture<Void> complete = CompletableFuture
            .allOf(futures)
            .thenApplyAsync((x) -> {
                s3.completeMultipartUpload(new CompleteMultipartUploadRequest()
                        .withBucketName(bucket)
                        .withKey(key)
                        .withUploadId(uploadId)
                        .withPartETags(done));
                return null;
            }, executor);

    try {
        complete.get();
    } catch (Exception e) {
        cancel();
        throw ExceptionUtils.mapExecutionException(e);
    } finally {
        tryCloseExecutor();
    }
}
Project: hadoop    File: S3AFastOutputStream.java
public void complete(List<PartETag> partETags) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Completing multi-part upload for key '{}', id '{}'", key,
        uploadId);
  }
  final CompleteMultipartUploadRequest completeRequest =
      new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
  client.completeMultipartUpload(completeRequest);

}
Project: ibm-cos-sdk-java    File: CompleteMultipartUpload.java
@Override
public UploadResult call() throws Exception {
    CompleteMultipartUploadResult res;

    try {
        CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest(
                origReq.getBucketName(), origReq.getKey(), uploadId,
                collectPartETags())
                .withRequesterPays(origReq.isRequesterPays())
                .withGeneralProgressListener(origReq.getGeneralProgressListener())
                .withRequestMetricCollector(origReq.getRequestMetricCollector());
        res = s3.completeMultipartUpload(req);
    } catch (Exception e) {
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }

    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(origReq.getBucketName());
    uploadResult.setKey(origReq.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());

    monitor.uploadComplete();

    return uploadResult;
}
Project: ibm-cos-sdk-java    File: UploadCallable.java
/**
 * Uploads all parts in the request in serial in this thread, then completes
 * the upload and returns the result.
 */
private UploadResult uploadPartsInSeries(UploadPartRequestFactory requestFactory) {

    final List<PartETag> partETags = new ArrayList<PartETag>();

    while (requestFactory.hasMoreRequests()) {
        if (threadPool.isShutdown()) throw new CancellationException("TransferManager has been shutdown");
        UploadPartRequest uploadPartRequest = requestFactory.getNextUploadPartRequest();
        // Mark the stream in case we need to reset it
        InputStream inputStream = uploadPartRequest.getInputStream();
        if (inputStream != null && inputStream.markSupported()) {
            if (uploadPartRequest.getPartSize() >= Integer.MAX_VALUE) {
                inputStream.mark(Integer.MAX_VALUE);
            } else {
                inputStream.mark((int)uploadPartRequest.getPartSize());
            }
        }
        partETags.add(s3.uploadPart(uploadPartRequest).getPartETag());
    }

    CompleteMultipartUploadRequest req =
        new CompleteMultipartUploadRequest(
            origReq.getBucketName(), origReq.getKey(), multipartUploadId, partETags)
            .withRequesterPays(origReq.isRequesterPays())
            .withGeneralProgressListener(origReq.getGeneralProgressListener())
            .withRequestMetricCollector(origReq.getRequestMetricCollector());
    CompleteMultipartUploadResult res = s3.completeMultipartUpload(req);

    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(res.getBucketName());
    uploadResult.setKey(res.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());
    return uploadResult;
}
Project: ibm-cos-sdk-java    File: CompleteMultipartCopy.java
@Override
public CopyResult call() throws Exception {
    CompleteMultipartUploadResult res;

    try {
        CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest(
                origReq.getDestinationBucketName(), origReq.getDestinationKey(), uploadId,
                collectPartETags())
                .withRequesterPays(origReq.isRequesterPays())
                .withGeneralProgressListener(origReq.getGeneralProgressListener())
                .withRequestMetricCollector(origReq.getRequestMetricCollector());
        res = s3.completeMultipartUpload(req);
    } catch (Exception e) {
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }

    CopyResult copyResult = new CopyResult();
    copyResult.setSourceBucketName(origReq.getSourceBucketName());
    copyResult.setSourceKey(origReq.getSourceKey());
    copyResult.setDestinationBucketName(res.getBucketName());
    copyResult.setDestinationKey(res.getKey());
    copyResult.setETag(res.getETag());
    copyResult.setVersionId(res.getVersionId());

    monitor.copyComplete();

    return copyResult;
}
Project: ibm-cos-sdk-java    File: CryptoModuleDispatcher.java
@Override
public CompleteMultipartUploadResult completeMultipartUploadSecurely(
        CompleteMultipartUploadRequest req)
                throws SdkClientException, AmazonServiceException {
    return defaultCryptoMode == EncryptionOnly 
         ? eo.completeMultipartUploadSecurely(req)
         : ae.completeMultipartUploadSecurely(req)
         ;
}
Project: aliyun-oss-hadoop-fs    File: S3AFastOutputStream.java
public void complete(List<PartETag> partETags) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Completing multi-part upload for key '{}', id '{}'", key,
        uploadId);
  }
  final CompleteMultipartUploadRequest completeRequest =
      new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
  client.completeMultipartUpload(completeRequest);

}
Project: stocator    File: COSAPIClient.java
/**
 * Complete a multipart upload operation.
 * @param uploadId multipart operation Id
 * @param partETags list of partial uploads
 * @return the result
 * @throws AmazonClientException on problems
 */
CompleteMultipartUploadResult completeMultipartUpload(String uploadId,
    List<PartETag> partETags) throws AmazonClientException {
  LOG.debug("Completing multipart upload {} with {} parts",
      uploadId, partETags.size());
  return mClient.completeMultipartUpload(
      new CompleteMultipartUploadRequest(mBucket,
          key,
          uploadId,
          partETags));
}
Project: big-c    File: S3AFastOutputStream.java
public void complete(List<PartETag> partETags) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Completing multi-part upload for key '{}', id '{}'", key,
        uploadId);
  }
  final CompleteMultipartUploadRequest completeRequest =
      new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
  client.completeMultipartUpload(completeRequest);

}
Project: cloudkeeper    File: S3ConnectionImpl.java
@Override
public CompletableFuture<CompleteMultipartUploadResult> completeMultipartUpload(String bucketName, String key,
        String uploadId, List<PartETag> partETags) {
    CompleteMultipartUploadRequest request
        = new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags);
    return CompletableFuture.supplyAsync(() -> s3Client.completeMultipartUpload(request), executorService);
}
Project: hadoop-2.6.0-cdh5.4.3    File: S3AFastOutputStream.java
public void complete(List<PartETag> partETags) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Completing multi-part upload for key '{}', id '{}'", key,
        uploadId);
  }
  final CompleteMultipartUploadRequest completeRequest =
      new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
  client.completeMultipartUpload(completeRequest);

}
Project: apex-malhar    File: S3FileMerger.java
/**
 * Send the CompleteMultipartUploadRequest to S3 if all the blocks of a file are uploaded into S3.
 * @param keyName file to upload into S3
 */
private void verifyAndEmitFileMerge(String keyName)
{
  if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
    return;
  }
  S3InitiateFileUploadOperator.UploadFileMetadata uploadFileMetadata = fileMetadatas.get(keyName);
  List<PartETag> partETags = uploadParts.get(keyName);
  if (partETags == null || uploadFileMetadata == null ||
      uploadFileMetadata.getFileMetadata().getNumberOfBlocks() != partETags.size()) {
    return;
  }

  if (partETags.size() <= 1) {
    uploadedFiles.add(keyName);
    LOG.debug("Uploaded file {} successfully", keyName);
    return;
  }

  CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(bucketName,
      keyName, uploadFileMetadata.getUploadId(), partETags);
  CompleteMultipartUploadResult result = s3Client.completeMultipartUpload(compRequest);
  if (result.getETag() != null) {
    uploadedFiles.add(keyName);
    LOG.debug("Uploaded file {} successfully", keyName);
  }
}
Project: omakase    File: S3Client.java
public void completeMultipartUpload(S3Upload upload, List<S3Part> parts) {
    try {
        runtimeCredentialsProvider.setAwsCredentials(upload.getAwsCredentials());
        amazonS3.setRegion(Region.getRegion(Regions.fromName(upload.getRegion())));
        amazonS3.completeMultipartUpload(new CompleteMultipartUploadRequest(upload.getBucket(), upload.getKey(), upload.getUploadId(),
                                                                            parts.stream().map(s3Part -> new PartETag(s3Part.getNumber(), s3Part.getEtag())).collect(Collectors.toList())));
    } catch (AmazonClientException e) {
        throw new OmakaseRuntimeException(e);
    }
}
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherToolsTest.java
@Test
public void uploadFileSuccess() throws IOException {
    TestUtils.initializeTestingFolders();

    final File compressedFile = CompressionTools.compressFile(
            "ZipProject",
            PATH_TO_COMPRESS,
            CompressionType.Zip,
            null);

    PublisherTools.uploadFile(
            compressedFile,
            mockArtifact,
            CompressionType.Zip,
            null, // No custom encryption key
            mockS3Client,
            null); // Listener

    final InOrder inOrder = inOrder(mockS3Client);
    inOrder.verify(mockS3Client, times(1)).initiateMultipartUpload(initiateCaptor.capture());
    // Total size is less than 5 MB, so there should be only one uploaded part
    inOrder.verify(mockS3Client, times(1)).uploadPart(any(UploadPartRequest.class));
    inOrder.verify(mockS3Client, times(1)).completeMultipartUpload(any(CompleteMultipartUploadRequest.class));

    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Uploading artifact:", outContent.toString());
    assertContainsIgnoreCase("[AWS CodePipeline Plugin] Upload successful", outContent.toString());

    final InitiateMultipartUploadRequest request = initiateCaptor.getValue();
    final SSEAwsKeyManagementParams encryptionParams = request.getSSEAwsKeyManagementParams();
    assertNotNull(encryptionParams);
    assertNull(encryptionParams.getAwsKmsKeyId());
    assertEquals("aws:kms", encryptionParams.getEncryption());

    compressedFile.delete();
    TestUtils.cleanUpTestingFolders();
}
Project: Singularity    File: SingularityS3Uploader.java
private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata);
  if (maybeStorageClass.isPresent()) {
    initRequest.setStorageClass(maybeStorageClass.get());
  }
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

  long contentLength = file.length();
  long partSize = configuration.getUploadPartSize();

  try {
    long filePosition = 0;
    for (int i = 1; filePosition < contentLength; i++) {
      partSize = Math.min(partSize, (contentLength - filePosition));
      UploadPartRequest uploadRequest = new UploadPartRequest()
          .withBucketName(bucketName)
          .withKey(key)
          .withUploadId(initResponse.getUploadId())
          .withPartNumber(i)
          .withFileOffset(filePosition)
          .withFile(file)
          .withPartSize(partSize);
      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
      filePosition += partSize;
    }

    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, key, initResponse.getUploadId(), partETags);
    s3Client.completeMultipartUpload(completeRequest);
  } catch (Exception e) {
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId()));
    Throwables.propagate(e);
  }
}
Project: elasticsearch_my    File: DefaultS3OutputStream.java
protected void doCompleteMultipart(S3BlobStore blobStore, String bucketName, String blobName, String uploadId, List<PartETag> parts)
        throws AmazonS3Exception {
    CompleteMultipartUploadRequest request = new CompleteMultipartUploadRequest(bucketName, blobName, uploadId, parts);
    blobStore.client().completeMultipartUpload(request);
}
Project: elasticsearch_my    File: AmazonS3Wrapper.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
    return delegate.completeMultipartUpload(request);
}
Project: syndesis    File: AmazonS3ClientMock.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest completeMultipartUploadRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: connectors    File: AmazonS3ClientMock.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest completeMultipartUploadRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: ibm-cos-sdk-java    File: AmazonS3Client.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest completeMultipartUploadRequest)
        throws SdkClientException, AmazonServiceException {
    completeMultipartUploadRequest = beforeClientExecution(completeMultipartUploadRequest);
    rejectNull(completeMultipartUploadRequest,
        "The request parameter must be specified when completing a multipart upload");

    String bucketName = completeMultipartUploadRequest.getBucketName();
    String key = completeMultipartUploadRequest.getKey();
    String uploadId = completeMultipartUploadRequest.getUploadId();
    rejectNull(bucketName,
        "The bucket name parameter must be specified when completing a multipart upload");
    rejectNull(key,
        "The key parameter must be specified when completing a multipart upload");
    rejectNull(uploadId,
        "The upload ID parameter must be specified when completing a multipart upload");
    rejectNull(completeMultipartUploadRequest.getPartETags(),
        "The part ETags parameter must be specified when completing a multipart upload");

    int retries = 0;
    CompleteMultipartUploadHandler handler;
    do {
        Request<CompleteMultipartUploadRequest> request = createRequest(bucketName, key, completeMultipartUploadRequest, HttpMethodName.POST);
        request.addParameter("uploadId", uploadId);

        populateRequesterPaysHeader(request, completeMultipartUploadRequest.isRequesterPays());

        byte[] xml = RequestXmlFactory.convertToXmlByteArray(completeMultipartUploadRequest.getPartETags());
        request.addHeader("Content-Type", "application/xml");
        request.addHeader("Content-Length", String.valueOf(xml.length));

        request.setContent(new ByteArrayInputStream(xml));

        @SuppressWarnings("unchecked")
        ResponseHeaderHandlerChain<CompleteMultipartUploadHandler> responseHandler = new ResponseHeaderHandlerChain<CompleteMultipartUploadHandler>(
                // xml payload unmarshaller
                new Unmarshallers.CompleteMultipartUploadResultUnmarshaller(),
                // header handlers
                new ServerSideEncryptionHeaderHandler<CompleteMultipartUploadHandler>(),
                new ObjectExpirationHeaderHandler<CompleteMultipartUploadHandler>(),
                new S3VersionHeaderHandler<CompleteMultipartUploadHandler>(),
                new S3RequesterChargedHeaderHandler<CompleteMultipartUploadHandler>());
        handler = invoke(request, responseHandler, bucketName, key);
        if (handler.getCompleteMultipartUploadResult() != null) {
            return handler.getCompleteMultipartUploadResult();
        }
    } while (shouldRetryCompleteMultipartUpload(completeMultipartUploadRequest,
            handler.getAmazonS3Exception(), retries++));

    throw handler.getAmazonS3Exception();
}
Project: ibm-cos-sdk-java    File: S3Direct.java
public abstract CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req);
Project: ibm-cos-sdk-java    File: S3CryptoModule.java
public abstract CompleteMultipartUploadResult completeMultipartUploadSecurely(
        CompleteMultipartUploadRequest req);
Project: ibm-cos-sdk-java    File: S3DirectSpi.java
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req);
Project: ibm-cos-sdk-java    File: AmazonS3EncryptionClient.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req) {
    return crypto.completeMultipartUploadSecurely(req);
}
Project: ibm-cos-sdk-java    File: AmazonS3EncryptionClient.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(
        CompleteMultipartUploadRequest req) {
    return AmazonS3EncryptionClient.super.completeMultipartUpload(req);
}
Project: S3Decorators    File: S3Decorator.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) {
  return call(() -> getDelegate().completeMultipartUpload(request));
}
Project: nifi-minifi    File: S3OutputStream.java
public void complete() {
  log.debug("Completing multi-part upload for key '{}', id '{}'", key, uploadId);
  CompleteMultipartUploadRequest completeRequest =
      new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
  s3.completeMultipartUpload(completeRequest);
}
Project: backuprotator    File: AWSHandler.java
private CompleteMultipartUploadResult copyMultipartFile(Bucket srcBucket, Bucket targetBucket, String fileName,
        long size) {
    // Create lists to hold copy responses
    List<CopyPartResult> copyResponses =
            new ArrayList<CopyPartResult>();

    // Step 1: Initialize the multipart upload.
    InitiateMultipartUploadRequest initiateRequest = 
            new InitiateMultipartUploadRequest(targetBucket.getName(), targetBucket.getPath() + SEPARATOR + fileName);

    InitiateMultipartUploadResult initResult = 
            s3Client.initiateMultipartUpload(initiateRequest);

    // Step 2: Copy the parts.
    long partSize = 5 * (long)Math.pow(2.0, 20.0); // 5 MB
    long bytePosition = 0;
    for (int i = 1; bytePosition < size; i++)
    {
        // Step 3: Save each copy response.
        CopyPartRequest copyRequest = new CopyPartRequest()
           .withDestinationBucketName(targetBucket.getName())
           .withDestinationKey(targetBucket.getPath() + SEPARATOR + fileName)
           .withSourceBucketName(srcBucket.getName())
           .withSourceKey(srcBucket.getPath() + SEPARATOR + fileName)
           .withUploadId(initResult.getUploadId())
           .withFirstByte(bytePosition)
           .withLastByte(bytePosition + partSize -1 >= size ? size - 1 : bytePosition + partSize - 1) 
           .withPartNumber(i);

        copyResponses.add(s3Client.copyPart(copyRequest));
        bytePosition += partSize;
    }
    CompleteMultipartUploadRequest completeRequest =
            new CompleteMultipartUploadRequest(
                    targetBucket.getName(),
                    targetBucket.getPath() + SEPARATOR + fileName,
                    initResult.getUploadId(),
                    GetETags(copyResponses));
    // Step 4: Complete the copy operation.
    CompleteMultipartUploadResult completeUploadResponse =
        s3Client.completeMultipartUpload(completeRequest);
    return completeUploadResponse;
}
Project: bender    File: MultiPartUpload.java
public CompleteMultipartUploadRequest getCompleteMultipartUploadRequest() {
  return new CompleteMultipartUploadRequest(this.bucketName, this.key, this.uploadId,
      this.partETags);
}
Project: kafka-connect-storage-cloud    File: S3OutputStream.java
public void complete() {
  log.debug("Completing multi-part upload for key '{}', id '{}'", key, uploadId);
  CompleteMultipartUploadRequest completeRequest =
      new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags);
  s3.completeMultipartUpload(completeRequest);
}
Project: Camel    File: AmazonS3ClientMock.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest completeMultipartUploadRequest) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
Project: apex-malhar    File: S3OutputModuleMockTest.java
@Test
public void testS3OutputModule() throws Exception
{
  InitiateMultipartUploadResult result = new InitiateMultipartUploadResult();
  result.setUploadId(uploadId);

  PutObjectResult objResult = new PutObjectResult();
  objResult.setETag("SuccessFullyUploaded");

  UploadPartResult partResult = new UploadPartResult();
  partResult.setPartNumber(1);
  partResult.setETag("SuccessFullyPartUploaded");

  MockitoAnnotations.initMocks(this);
  when(client.initiateMultipartUpload(any(InitiateMultipartUploadRequest.class))).thenReturn(result);
  when(client.putObject(any(PutObjectRequest.class))).thenReturn(objResult);
  when(client.uploadPart(any(UploadPartRequest.class))).thenReturn(partResult);
  when(client.completeMultipartUpload(any(CompleteMultipartUploadRequest.class))).thenReturn(completeMultiPart());

  Application app = new S3OutputModuleMockTest.Application();
  Configuration conf = new Configuration();
  conf.set("dt.operator.HDFSInputModule.prop.files", inputDir);
  conf.set("dt.operator.HDFSInputModule.prop.blockSize", "10");
  conf.set("dt.operator.HDFSInputModule.prop.blocksThreshold", "1");
  conf.set("dt.attr.CHECKPOINT_WINDOW_COUNT","20");

  conf.set("dt.operator.S3OutputModule.prop.accessKey", "accessKey");
  conf.set("dt.operator.S3OutputModule.prop.secretAccessKey", "secretKey");
  conf.set("dt.operator.S3OutputModule.prop.bucketName", "bucketKey");
  conf.set("dt.operator.S3OutputModule.prop.outputDirectoryPath", outputDir);

  Path outDir = new Path("file://" + new File(outputDir).getAbsolutePath());
  final Path outputFilePath =  new Path(outDir.toString() + File.separator + FILE);
  final FileSystem fs = FileSystem.newInstance(outDir.toUri(), new Configuration());
  LocalMode lma = LocalMode.newInstance();
  lma.prepareDAG(app, conf);
  LocalMode.Controller lc = lma.getController();
  lc.setHeartbeatMonitoringEnabled(true);

  ((StramLocalCluster)lc).setExitCondition(new Callable<Boolean>()
  {
    @Override
    public Boolean call() throws Exception
    {
      return fs.exists(outputFilePath);
    }
  });
  lc.run(10000);

  Assert.assertTrue("output file exist", fs.exists(outputFilePath));
}
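The completeMultiPart() helper stubbed into the mock above is not shown on this page. A hypothetical stand-in only needs to return a result with a non-null ETag, since the S3FileMerger snippet earlier on this page treats a non-null ETag as a successful upload:

// Hypothetical stand-in for the completeMultiPart() helper referenced in the test above.
private CompleteMultipartUploadResult completeMultiPart()
{
  CompleteMultipartUploadResult result = new CompleteMultipartUploadResult();
  result.setETag("SuccessFullyCompleted"); // any non-null ETag counts as success here
  return result;
}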
Project: aws-codepipeline-plugin-for-jenkins    File: PublisherTools.java
public static void uploadFile(
        final File file,
        final Artifact artifact,
        final CompressionType compressionType,
        final EncryptionKey encryptionKey,
        final AmazonS3 amazonS3,
        final BuildListener listener) throws IOException {

    LoggingHelper.log(listener, "Uploading artifact: " + artifact + ", file: " + file);

    final String bucketName = artifact.getLocation().getS3Location().getBucketName();
    final String objectKey  = artifact.getLocation().getS3Location().getObjectKey();
    final List<PartETag> partETags = new ArrayList<>();

    final InitiateMultipartUploadRequest initiateMultipartUploadRequest = new InitiateMultipartUploadRequest(
            bucketName,
            objectKey,
            createObjectMetadata(compressionType))
        .withSSEAwsKeyManagementParams(toSSEAwsKeyManagementParams(encryptionKey));

    final InitiateMultipartUploadResult initiateMultipartUploadResult
            = amazonS3.initiateMultipartUpload(initiateMultipartUploadRequest);

    final long contentLength = file.length();
    long filePosition = 0;
    long partSize = 5 * 1024 * 1024; // Set part size to 5 MB

    for (int i = 1; filePosition < contentLength; i++) {
        partSize = Math.min(partSize, (contentLength - filePosition));

        final UploadPartRequest uploadPartRequest = new UploadPartRequest()
                .withBucketName(bucketName)
                .withKey(objectKey)
                .withUploadId(initiateMultipartUploadResult.getUploadId())
                .withPartNumber(i)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(partSize);

        partETags.add(amazonS3.uploadPart(uploadPartRequest).getPartETag());

        filePosition += partSize;
    }

    final CompleteMultipartUploadRequest completeMultipartUpload
            = new CompleteMultipartUploadRequest(
                bucketName,
                objectKey,
                initiateMultipartUploadResult.getUploadId(),
                partETags);

    amazonS3.completeMultipartUpload(completeMultipartUpload);

    LoggingHelper.log(listener, "Upload successful");
}
Project: presto    File: MockAmazonS3.java
@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request)
        throws AmazonClientException
{
    return null;
}
Project: datacollector    File: SupportBundleManager.java
/**
 * Instead of providing support bundle directly to user, upload it to StreamSets backend services.
 */
public void uploadNewBundle(List<String> generators) throws IOException {
  boolean enabled = configuration.get(Constants.UPLOAD_ENABLED, Constants.DEFAULT_UPLOAD_ENABLED);
  String accessKey = configuration.get(Constants.UPLOAD_ACCESS, Constants.DEFAULT_UPLOAD_ACCESS);
  String secretKey = configuration.get(Constants.UPLOAD_SECRET, Constants.DEFAULT_UPLOAD_SECRET);
  String bucket = configuration.get(Constants.UPLOAD_BUCKET, Constants.DEFAULT_UPLOAD_BUCKET);
  int bufferSize = configuration.get(Constants.UPLOAD_BUFFER_SIZE, Constants.DEFAULT_UPLOAD_BUFFER_SIZE);

  if(!enabled) {
    throw new IOException("Uploading support bundles was disabled by administrator.");
  }

  AWSCredentialsProvider credentialsProvider = new StaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey));
  AmazonS3Client s3Client = new AmazonS3Client(credentialsProvider, new ClientConfiguration());
  s3Client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(true));
  s3Client.setRegion(Region.getRegion(Regions.US_WEST_2));

  // Object Metadata
  ObjectMetadata metadata = new ObjectMetadata();
  for(Map.Entry<Object, Object> entry: getMetadata().entrySet()) {
    metadata.addUserMetadata((String)entry.getKey(), (String)entry.getValue());
  }

  // Generate bundle
  SupportBundle bundle = generateNewBundle(generators);

  // Uploading part by part
  LOG.info("Initiating multi-part support bundle upload");
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucket, bundle.getBundleKey());
  initRequest.setObjectMetadata(metadata);
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

  try {
    byte[] buffer = new byte[bufferSize];
    int partId = 1;
    int size = -1;
    while ((size = readFully(bundle.getInputStream(), buffer)) != -1) {
      LOG.debug("Uploading part {} of size {}", partId, size);
      UploadPartRequest uploadRequest = new UploadPartRequest()
        .withBucketName(bucket)
        .withKey(bundle.getBundleKey())
        .withUploadId(initResponse.getUploadId())
        .withPartNumber(partId++)
        .withInputStream(new ByteArrayInputStream(buffer))
        .withPartSize(size);

      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
    }

    CompleteMultipartUploadRequest compRequest = new CompleteMultipartUploadRequest(
      bucket,
      bundle.getBundleKey(),
      initResponse.getUploadId(),
      partETags
    );

    s3Client.completeMultipartUpload(compRequest);
    LOG.info("Support bundle upload finished");
  } catch (Exception e) {
    LOG.error("Support bundle upload failed", e);
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(
      bucket,
      bundle.getBundleKey(),
      initResponse.getUploadId())
    );

    throw new IOException("Can't upload support bundle", e);
  } finally {
    // Close the client
    s3Client.shutdown();
  }
}