Example source code for the Java class com.amazonaws.services.s3.model.StorageClass

Project: elasticsearch_my    File: S3BlobStore.java
public static StorageClass initStorageClass(String storageClass) {
    if (storageClass == null || storageClass.equals("")) {
        return StorageClass.Standard;
    }

    try {
        StorageClass _storageClass = StorageClass.fromValue(storageClass.toUpperCase(Locale.ENGLISH));
        if (_storageClass.equals(StorageClass.Glacier)) {
            throw new BlobStoreException("Glacier storage class is not supported");
        }

        return _storageClass;
    } catch (IllegalArgumentException illegalArgumentException) {
        throw new BlobStoreException("`" + storageClass + "` is not a valid S3 Storage Class.");
    }
}
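
For context, a minimal usage sketch (the input value below is hypothetical); the S3BlobStoreTests.java snippets further down exercise the same mapping:

// Hypothetical call site: null or "" falls back to Standard, while "glacier" is rejected.
StorageClass storageClass = S3BlobStore.initStorageClass("standard_ia");
// storageClass == StorageClass.StandardInfrequentAccess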
Project: gradle-s3-build-cache    File: AwsS3BuildCacheService.java
@Override
public void store(BuildCacheKey key, BuildCacheEntryWriter writer) {
  logger.info("Start storing cache entry '{}' in S3 bucket", key.getHashCode());
  ObjectMetadata meta = new ObjectMetadata();
  meta.setContentType(BUILD_CACHE_CONTENT_TYPE);

  try (ByteArrayOutputStream os = new ByteArrayOutputStream()) {
    writer.writeTo(os);
    meta.setContentLength(os.size());
    try (InputStream is = new ByteArrayInputStream(os.toByteArray())) {
        PutObjectRequest request = getPutObjectRequest(key, meta, is);
        if (this.reducedRedundancy) {
          request.withStorageClass(StorageClass.ReducedRedundancy);
        }
        s3.putObject(request);
    }
  } catch (IOException e) {
    throw new BuildCacheException("Error while storing cache object in S3 bucket", e);
  }
}
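
The getPutObjectRequest helper is not part of this snippet; a plausible shape, purely illustrative and assuming the service keeps the target bucket name in a bucketName field:

// Illustrative sketch only, not the actual helper from AwsS3BuildCacheService.
protected PutObjectRequest getPutObjectRequest(BuildCacheKey key, ObjectMetadata meta, InputStream is) {
  return new PutObjectRequest(bucketName, key.getHashCode(), is, meta);
}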
Project: circus-train    File: S3MapReduceCpOptionsParserTest.java
@Before
public void init() {
  copierOptions.put(CREDENTIAL_PROVIDER, URI.create("localjceks://file/foo/bar.jceks"));
  copierOptions.put(MULTIPART_UPLOAD_CHUNK_SIZE, 4096);
  copierOptions.put(S3_SERVER_SIDE_ENCRYPTION, true);
  copierOptions.put(STORAGE_CLASS, StorageClass.Glacier.toString());
  copierOptions.put(TASK_BANDWIDTH, 1024);
  copierOptions.put(NUMBER_OF_WORKERS_PER_MAP, 12);
  copierOptions.put(MULTIPART_UPLOAD_THRESHOLD, 2048L);
  copierOptions.put(MAX_MAPS, 5);
  copierOptions.put(COPY_STRATEGY, "mycopystrategy");
  copierOptions.put(LOG_PATH, new Path("hdfs:///tmp/logs"));
  copierOptions.put(REGION, Regions.EU_WEST_1.getName());
  copierOptions.put(IGNORE_FAILURES, false);
  copierOptions.put(S3_ENDPOINT_URI, "http://s3.endpoint/");
  copierOptions.put(UPLOAD_RETRY_COUNT, 5);
  copierOptions.put(UPLOAD_RETRY_DELAY_MS, 520);
  copierOptions.put(UPLOAD_BUFFER_SIZE, 1024);
  parser = new S3MapReduceCpOptionsParser(SOURCES, TARGET, DEFAULT_CREDS_PROVIDER);
}
Project: circus-train    File: S3MapReduceCpOptionsParserTest.java
private void assertDefaults(S3MapReduceCpOptions options) {
  assertThat(options.getCredentialsProvider(), is(URI.create("localjceks://file/foo/bar.jceks")));
  assertThat(options.getMultipartUploadPartSize(), is(4096L));
  assertThat(options.isS3ServerSideEncryption(), is(true));
  assertThat(options.getStorageClass(), is(StorageClass.Glacier.toString()));
  assertThat(options.getMaxBandwidth(), is(1024L));
  assertThat(options.getNumberOfUploadWorkers(), is(12));
  assertThat(options.getMultipartUploadThreshold(), is(2048L));
  assertThat(options.getMaxMaps(), is(5));
  assertThat(options.getCopyStrategy(), is("mycopystrategy"));
  assertThat(options.getLogPath(), is(new Path("hdfs:///tmp/logs")));
  assertThat(options.getRegion(), is(Regions.EU_WEST_1.getName()));
  assertThat(options.isIgnoreFailures(), is(false));
  assertThat(options.getS3EndpointUri(), is(URI.create("http://s3.endpoint/")));
  assertThat(options.getUploadRetryCount(), is(5));
  assertThat(options.getUploadRetryDelayMs(), is(520L));
  assertThat(options.getUploadBufferSize(), is(1024));
}
Project: circus-train    File: S3MapReduceCpOptionsTest.java
@Test
public void defaultValues() {
  S3MapReduceCpOptions options = new S3MapReduceCpOptions();
  assertThat(options.isHelp(), is(false));
  assertThat(options.isBlocking(), is(true));
  assertThat(options.getSources(), is(nullValue()));
  assertThat(options.getTarget(), is(nullValue()));
  assertThat(options.getCredentialsProvider(), is(nullValue()));
  assertThat(options.getMultipartUploadPartSize(), is(5L * 1024 * 1024));
  assertThat(options.isS3ServerSideEncryption(), is(false));
  assertThat(options.getStorageClass(), is(StorageClass.Standard.toString()));
  assertThat(options.getMaxBandwidth(), is(100L));
  assertThat(options.getNumberOfUploadWorkers(), is(20));
  assertThat(options.getMultipartUploadThreshold(), is(16L * 1024 * 1024));
  assertThat(options.getMaxMaps(), is(20));
  assertThat(options.getCopyStrategy(), is("uniformsize"));
  assertThat(options.getLogPath(), is(nullValue()));
  assertThat(options.getRegion(), is(nullValue()));
  assertThat(options.isIgnoreFailures(), is(false));
  assertThat(options.getS3EndpointUri(), is(nullValue()));
  assertThat(options.getUploadRetryCount(), is(3));
  assertThat(options.getUploadRetryDelayMs(), is(300L));
  assertThat(options.getUploadBufferSize(), is(0));
}
Project: herd    File: S3DaoTest.java
@Test
public void testRestoreObjects()
{
    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);

    // Validate that there is an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
Project: herd    File: S3DaoTest.java
@Test
public void testRestoreObjectsGlacierObjectAlreadyBeingRestored()
{
    // Put a 1 byte Glacier storage class file in S3 flagged as already being restored.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Initiate a restore request for the test S3 file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);

    // Validate that there is still an ongoing restore request for this object.
    ObjectMetadata objectMetadata = s3Operations.getObjectMetadata(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, null);
    assertTrue(objectMetadata.getOngoingRestore());
}
Project: herd    File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestored()
{
    // Put a 1 byte already restored Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Validate the file.
    S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
    params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
    params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
    s3Dao.validateGlacierS3FilesRestored(params);
}
Project: herd    File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreNotInitiated()
{
    // Put a 1 byte Glacier storage class file in S3 that has no restore initiated (OngoingRestore flag is null).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {null}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Project: vs.msc.ws14    File: S3DataOutputStream.java
private String initiateMultipartUpload() throws IOException {

    boolean operationSuccessful = false;
    final InitiateMultipartUploadRequest request = new InitiateMultipartUploadRequest(this.bucket, this.object);
    if (this.useRRS) {
        request.setStorageClass(StorageClass.ReducedRedundancy);
    } else {
        request.setStorageClass(StorageClass.Standard);
    }

    try {
        final InitiateMultipartUploadResult result = this.s3Client.initiateMultipartUpload(request);
        operationSuccessful = true;
        return result.getUploadId();
    } catch (AmazonServiceException e) {
        throw new IOException(StringUtils.stringifyException(e));
    } finally {
        if (!operationSuccessful) {
            abortUpload();
        }
    }
}
Project: ivy-s3-resolver    File: S3Resolver.java
public S3Resolver(
  String name,
  AWSCredentialsProvider credentialsProvider,
  boolean overwrite,
  Region region,
  Optional<CannedAccessControlList> acl,
  boolean serverSideEncryption,
  StorageClass storageClass
) {
  setName(name);
  setRepository(new S3Repository(
    credentialsProvider,
    overwrite,
    region,
    acl,
    serverSideEncryption,
    storageClass
  ));
}
Project: ivy-s3-resolver    File: S3Repository.java
public S3Repository(
  AWSCredentialsProvider provider,
  boolean overwrite,
  Region region,
  CannedAccessControlList acl,
  boolean serverSideEncryption,
  StorageClass storageClass
) {
  this(
    AmazonS3Client.builder().standard()
      .withCredentials(provider)
      .withRegion(region.toString())
      .build(),
    overwrite,
    Optional.ofNullable(acl),
    serverSideEncryption,
    storageClass
  );
}
Project: ivy-s3-resolver    File: S3Repository.java
public S3Repository(
  AWSCredentialsProvider provider,
  boolean overwrite,
  Region region,
  Optional<CannedAccessControlList> acl,
  boolean serverSideEncryption,
  StorageClass storageClass
) {
  this(
    AmazonS3Client.builder().standard()
      .withCredentials(provider)
      .withRegion(region.toString())
      .build(),
    overwrite,
    acl,
    serverSideEncryption,
    storageClass
  );
}
Project: elasticsearch_my    File: S3BlobStoreTests.java
public void testInitStorageClass() throws IOException {
    // it should default to `standard`
    assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard));
    assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard));

    // it should accept [standard, standard_ia, reduced_redundancy]
    assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.Standard));
    assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.StandardInfrequentAccess));
    assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy));
}
Project: gradle-s3-build-cache    File: AwsS3BuildCacheServiceTest.java
@Test
public void storePutsObjectAndUsesReducedRedundancyWhenConfigured() throws IOException {
  /** Setup **/
  buildCacheService = spy(new AwsS3BuildCacheService(s3, "bucketName", true));
  doReturn(putObjectRequest).when(buildCacheService).getPutObjectRequest(any(BuildCacheKey.class),
          any(ObjectMetadata.class), any(InputStream.class));

  /** Run **/
  buildCacheService.store(key, writer);

  /** Check **/
  verifyThatStoreStores();
  verify(putObjectRequest).withStorageClass(eq(StorageClass.ReducedRedundancy));
}
Project: gradle-s3-build-cache    File: AwsS3BuildCacheServiceTest.java
@Test
public void storePutsObjectAndDoesNotUseReducedRedundancyWhenConfigured() throws IOException {
  /** Setup **/
  buildCacheService = spy(new AwsS3BuildCacheService(s3, "bucketName", false));
  doReturn(putObjectRequest).when(buildCacheService).getPutObjectRequest(any(BuildCacheKey.class),
          any(ObjectMetadata.class), any(InputStream.class));

  /** Run **/
  buildCacheService.store(key, writer);

  /** Check **/
  verifyThatStoreStores();
  verify(putObjectRequest, never()).withStorageClass(eq(StorageClass.ReducedRedundancy));
}
Project: circus-train    File: S3MapReduceCpCopierTest.java
@Test
public void overwriteAllCopierOptions() throws Exception {
  when(copierOptions.get(CREDENTIAL_PROVIDER)).thenReturn("jceks://hdfs/foo/bar.jceks");
  when(copierOptions.get(MULTIPART_UPLOAD_CHUNK_SIZE)).thenReturn("1234");
  when(copierOptions.get(S3_SERVER_SIDE_ENCRYPTION)).thenReturn("true");
  when(copierOptions.get(STORAGE_CLASS)).thenReturn("reduced_redundancy");
  when(copierOptions.get(TASK_BANDWIDTH)).thenReturn("567");
  when(copierOptions.get(NUMBER_OF_WORKERS_PER_MAP)).thenReturn("89");
  when(copierOptions.get(MULTIPART_UPLOAD_THRESHOLD)).thenReturn("123456");
  when(copierOptions.get(MAX_MAPS)).thenReturn("78");
  when(copierOptions.get(COPY_STRATEGY)).thenReturn("the-strategy");
  when(copierOptions.get(LOG_PATH)).thenReturn("hdfs://path/to/logs/");
  when(copierOptions.get(REGION)).thenReturn("us-east-1");
  when(copierOptions.get(IGNORE_FAILURES)).thenReturn("true");

  S3MapReduceCpCopier copier = new S3MapReduceCpCopier(conf, sourceDataBaseLocation, Collections.<Path> emptyList(),
      replicaDataLocation, copierOptions, executor, metricRegistry);
  Metrics metrics = copier.copy();
  assertThat(metrics, not(nullValue()));

  verify(executor).exec(confCaptor.capture(), optionsCaptor.capture());

  S3MapReduceCpOptions options = optionsCaptor.getValue();
  assertThat(options.getSources(), is(Arrays.asList(sourceDataBaseLocation)));
  assertThat(options.getTarget(), is(replicaDataLocation.toUri()));
  assertThat(options.getCredentialsProvider(), is(URI.create("jceks://hdfs/foo/bar.jceks")));
  assertThat(options.getMultipartUploadPartSize(), is(1234L));
  assertThat(options.isS3ServerSideEncryption(), is(true));
  assertThat(options.getStorageClass(), is(StorageClass.ReducedRedundancy.toString()));
  assertThat(options.getMaxBandwidth(), is(567L));
  assertThat(options.getNumberOfUploadWorkers(), is(89));
  assertThat(options.getMultipartUploadThreshold(), is(123456L));
  assertThat(options.getMaxMaps(), is(78));
  assertThat(options.getCopyStrategy(), is("the-strategy"));
  assertThat(options.getLogPath(), is(new Path("hdfs://path/to/logs/")));
  assertThat(options.getRegion(), is(Regions.US_EAST_1.getName()));
  assertThat(options.isIgnoreFailures(), is(true));
}
Project: circus-train    File: S3MapReduceCpOptionsTest.java
@Test
public void builderWithStorageClass() {
  S3MapReduceCpOptions options = S3MapReduceCpOptions
      .builder(SOURCES, TARGET)
      .storageClass(StorageClass.Glacier.toString())
      .build();
  assertThat(options.isHelp(), is(false));
  assertThat(options.isBlocking(), is(true));
  assertThat(options.getSources(), is(SOURCES));
  assertThat(options.getTarget(), is(TARGET));
  assertThat(options.getCredentialsProvider(), is(ConfigurationVariable.CREDENTIAL_PROVIDER.defaultURIValue()));
  assertThat(options.getMultipartUploadPartSize(),
      is(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE.defaultLongValue()));
  assertThat(options.isS3ServerSideEncryption(),
      is(ConfigurationVariable.S3_SERVER_SIDE_ENCRYPTION.defaultBooleanValue()));
  assertThat(options.getStorageClass(), is(StorageClass.Glacier.toString()));
  assertThat(options.getMaxBandwidth(), is(ConfigurationVariable.MAX_BANDWIDTH.defaultLongValue()));
  assertThat(options.getNumberOfUploadWorkers(),
      is(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS.defaultIntValue()));
  assertThat(options.getMultipartUploadThreshold(),
      is(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD.defaultLongValue()));
  assertThat(options.getMaxMaps(), is(ConfigurationVariable.MAX_MAPS.defaultIntValue()));
  assertThat(options.getCopyStrategy(), is(ConfigurationVariable.COPY_STRATEGY.defaultValue()));
  assertThat(options.getLogPath(), is(nullValue()));
  assertThat(options.getRegion(), is(ConfigurationVariable.REGION.defaultValue()));
  assertThat(options.isIgnoreFailures(), is(ConfigurationVariable.IGNORE_FAILURES.defaultBooleanValue()));
  assertThat(options.getS3EndpointUri(), is(ConfigurationVariable.S3_ENDPOINT_URI.defaultURIValue()));
  assertThat(options.getUploadRetryCount(), is(ConfigurationVariable.UPLOAD_RETRY_COUNT.defaultIntValue()));
  assertThat(options.getUploadRetryDelayMs(), is(ConfigurationVariable.UPLOAD_RETRY_DELAY_MS.defaultLongValue()));
  assertThat(options.getUploadBufferSize(), is(ConfigurationVariable.UPLOAD_BUFFER_SIZE.defaultIntValue()));
}
Project: ibm-cos-sdk-java    File: AmazonS3Client.java
@Override
public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass)
    throws SdkClientException, AmazonServiceException {
    rejectNull(bucketName,
        "The bucketName parameter must be specified when changing an object's storage class");
    rejectNull(key,
        "The key parameter must be specified when changing an object's storage class");
    rejectNull(newStorageClass,
        "The newStorageClass parameter must be specified when changing an object's storage class");

    copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
        .withStorageClass(newStorageClass.toString()));
}
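
Usage is a single call; as the implementation shows, S3 performs the change as a copy of the object onto itself with the new storage class, so the object is rewritten. The bucket and key below are hypothetical:

// Hypothetical call: transition an existing object to STANDARD_IA in place.
s3Client.changeObjectStorageClass("my-bucket", "logs/2017/app.log", StorageClass.StandardInfrequentAccess);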
Project: backuprotator    File: AWSHandler.java
private boolean isAccessibleStorageClass(ObjectMetadata metadata) {
    boolean accessible = false;
    String storageClass = metadata.getStorageClass();
    if (storageClass == null || StorageClass.Standard.equals(StorageClass.fromValue(storageClass))) {
        accessible = true;
    }
    return accessible;
}
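
A hedged sketch of a call site, assuming an AmazonS3 client named s3 and example bucket/key names; the guard skips objects that would first need a Glacier restore:

// Illustrative guard before a GET; Glacier objects must be restored before they can be read.
ObjectMetadata metadata = s3.getObjectMetadata("backup-bucket", "daily/2017-01-01.tar");
if (isAccessibleStorageClass(metadata)) {
    S3Object object = s3.getObject("backup-bucket", "daily/2017-01-01.tar");
    // ... consume object.getObjectContent() ...
}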
Project: herd    File: ExpireRestoredBusinessObjectDataHelperServiceImpl.java
/**
 * Executes S3 specific steps required to expire business object data.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to expire business object data
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto)
{
    // Create an S3 file transfer parameters DTO to access the S3 bucket.
    // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper.getS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
    s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
    s3FileTransferRequestParamsDto.setS3KeyPrefix(StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

    // Get a list of S3 files matching the S3 key prefix. When listing S3 files, we ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

    // Validate existence and file size of the S3 files.
    storageFileHelper
        .validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(), actualS3Files, businessObjectDataRestoreDto.getStorageName(),
            businessObjectDataRestoreDto.getBusinessObjectDataKey());

    // Build a list of files to restore by selecting only objects that have the Glacier storage class.
    List<S3ObjectSummary> glacierS3Files = new ArrayList<>();
    for (S3ObjectSummary s3ObjectSummary : actualS3Files)
    {
        if (StorageClass.Glacier.toString().equals(s3ObjectSummary.getStorageClass()))
        {
            glacierS3Files.add(s3ObjectSummary);
        }
    }

    // Set a list of files to expire.
    s3FileTransferRequestParamsDto.setFiles(storageFileHelper.getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(glacierS3Files)));

    // To expire the restored S3 objects, initiate restore requests with expiration set to 1 day.
    s3Service.restoreObjects(s3FileTransferRequestParamsDto, 1);
}
Project: herd    File: BusinessObjectDataFinalizeRestoreHelperServiceImpl.java
/**
 * Executes S3 specific steps for the business object data finalize restore.
 *
 * @param businessObjectDataRestoreDto the DTO that holds various parameters needed to perform a business object data restore
 */
protected void executeS3SpecificStepsImpl(BusinessObjectDataRestoreDto businessObjectDataRestoreDto)
{
    // Create an S3 file transfer parameters DTO to access the S3 bucket.
    // Since the S3 key prefix represents a directory, we add a trailing '/' character to it.
    S3FileTransferRequestParamsDto s3FileTransferRequestParamsDto = storageHelper.getS3FileTransferRequestParamsDto();
    s3FileTransferRequestParamsDto.setS3BucketName(businessObjectDataRestoreDto.getS3BucketName());
    s3FileTransferRequestParamsDto.setS3Endpoint(businessObjectDataRestoreDto.getS3Endpoint());
    s3FileTransferRequestParamsDto.setS3KeyPrefix(StringUtils.appendIfMissing(businessObjectDataRestoreDto.getS3KeyPrefix(), "/"));

    // Get the actual S3 files by selecting all S3 keys matching the S3 key prefix from the S3 bucket.
    // When listing S3 files, we ignore 0 byte objects that represent S3 directories.
    List<S3ObjectSummary> actualS3Files = s3Service.listDirectory(s3FileTransferRequestParamsDto, true);

    // Validate existence and file size of the S3 files.
    storageFileHelper
        .validateRegisteredS3Files(businessObjectDataRestoreDto.getStorageFiles(), actualS3Files, businessObjectDataRestoreDto.getStorageName(),
            businessObjectDataRestoreDto.getBusinessObjectDataKey());

    // Build a list of files to check for restore status by selecting only objects that are currently archived in Glacier (have the Glacier storage class).
    List<S3ObjectSummary> glacierS3Files = new ArrayList<>();
    for (S3ObjectSummary s3ObjectSummary : actualS3Files)
    {
        if (StorageClass.Glacier.toString().equals(s3ObjectSummary.getStorageClass()))
        {
            glacierS3Files.add(s3ObjectSummary);
        }
    }

    // Validate that all Glacier storage class S3 files are now restored.
    s3FileTransferRequestParamsDto.setFiles(storageFileHelper.getFiles(storageFileHelper.createStorageFilesFromS3ObjectSummaries(glacierS3Files)));
    s3Service.validateGlacierS3FilesRestored(s3FileTransferRequestParamsDto);
}
Project: herd    File: S3DaoImpl.java
/**
 * Prepares the object metadata for server side encryption and reduced redundancy storage.
 *
 * @param params the parameters.
 * @param metadata the metadata to prepare.
 */
private void prepareMetadata(final S3FileTransferRequestParamsDto params, ObjectMetadata metadata)
{
    // Set the server side encryption
    if (params.getKmsKeyId() != null)
    {
        /*
         * TODO Use proper way to set KMS once AWS provides a way.
         * We are modifying the raw headers directly since TransferManager's uploadFileList operation does not provide a way to set a KMS key ID.
         * This would normally cause some issues when uploading where an MD5 checksum validation exception will be thrown, even though the object is
         * correctly uploaded.
         * To get around this, a system property defined at
         * com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY must be set.
         */
        metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
        metadata.setHeader(Headers.SERVER_SIDE_ENCRYPTION_AWS_KMS_KEYID, params.getKmsKeyId().trim());
    }
    else
    {
        metadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }

    // If specified, set the metadata to use RRS.
    if (Boolean.TRUE.equals(params.isUseRrs()))
    {
        // TODO: For uploading a single file, we can set RRS directly on the PutObjectRequest. For uploadDirectory, this
        // is the only way to do it, even though setHeader() is flagged as "For Internal Use Only".
        metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.ReducedRedundancy.toString());
    }
}
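
A hedged sketch of how the prepared metadata would then travel with an upload; the key and input stream are illustrative:

// Illustrative only: attach the prepared metadata to a put request.
ObjectMetadata metadata = new ObjectMetadata();
prepareMetadata(params, metadata);
PutObjectRequest request = new PutObjectRequest(params.getS3BucketName(), "example/key", inputStream, metadata);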
Project: herd    File: S3DaoTest.java
@Test
public void testRestoreObjectsAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request to restore an object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to initiate a restore request for a mocked S3 file that would trigger an Amazon service exception when we request to restore an object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);
        fail("Should throw an IllegalStateException when an S3 restore object operation fails.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Project: herd    File: S3DaoTest.java
@Test
public void testRestoreObjectsNonGlacierObject()
{
    // Put a 1 byte non-Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Standard);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to initiate a restore request for a non-Glacier file.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.restoreObjects(params, S3_RESTORE_OBJECT_EXPIRATION_IN_DAYS);
        fail("Should throw an IllegalStateException when file has a non-Glacier storage class.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Failed to initiate a restore request for \"%s\" key in \"%s\" bucket. " +
                "Reason: object is not in Glacier (Service: null; Status Code: 0; Error Code: null; Request ID: null)", TARGET_S3_KEY,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Project: herd    File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestoredAmazonServiceException()
{
    // Build a mock file path that triggers an Amazon service exception when we request S3 metadata for the object.
    String testKey = String.format("%s/%s", TEST_S3_KEY_PREFIX, MockS3OperationsImpl.MOCK_S3_FILE_NAME_SERVICE_EXCEPTION);

    // Put a 1 byte Glacier storage class file in S3.
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(false);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), testKey, new ByteArrayInputStream(new byte[1]), metadata), null);

    // Try to validate if the Glacier S3 file is already restored for a mocked S3 file
    // that triggers an Amazon service exception when we request S3 metadata for the object.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(testKey)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalStateException when Glacier S3 object validation fails due to an Amazon service exception.");
    }
    catch (IllegalStateException e)
    {
        assertEquals(String.format("Fail to check restore status for \"%s\" key in \"%s\" bucket. " +
                "Reason: InternalError (Service: null; Status Code: 0; Error Code: InternalError; Request ID: null)", testKey,
            storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Project: herd    File: S3DaoTest.java
@Test
public void testValidateGlacierS3FilesRestoredGlacierObjectRestoreInProgress()
{
    // Put a 1 byte Glacier storage class file in S3 that is still being restored (OngoingRestore flag is true).
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader(Headers.STORAGE_CLASS, StorageClass.Glacier);
    metadata.setOngoingRestore(true);
    s3Operations
        .putObject(new PutObjectRequest(storageDaoTestHelper.getS3ManagedBucketName(), TARGET_S3_KEY, new ByteArrayInputStream(new byte[1]), metadata),
            null);

    // Try to validate if the Glacier S3 file is already restored.
    try
    {
        S3FileTransferRequestParamsDto params = new S3FileTransferRequestParamsDto();
        params.setS3BucketName(storageDaoTestHelper.getS3ManagedBucketName());
        params.setFiles(Arrays.asList(new File(TARGET_S3_KEY)));
        s3Dao.validateGlacierS3FilesRestored(params);
        fail("Should throw an IllegalArgumentException when Glacier S3 file is not restored.");
    }
    catch (IllegalArgumentException e)
    {
        assertEquals(String
            .format("Archived Glacier S3 file \"%s\" is not restored. StorageClass {GLACIER}, OngoingRestore flag {true}, S3 bucket name {%s}",
                TARGET_S3_KEY, storageDaoTestHelper.getS3ManagedBucketName()), e.getMessage());
    }
}
Project: cmn-project    File: S3.java
public void createFolder(String bucket, String folder) {
    Asserts.isFalse(folder.startsWith("/"), "s3 key can't start with /, folder={}", folder);

    logger.info("create folder, bucket={}, folder={}", bucket, folder);
    InputStream input = new ByteArrayInputStream(new byte[0]);
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setContentLength(0);
    s3.putObject(new PutObjectRequest(bucket, folder, input, metadata).withStorageClass(StorageClass.ReducedRedundancy));
}
Project: cmn-project    File: S3.java
public void putObject(String bucket, String key, String content) {
    Asserts.isFalse(key.startsWith("/"), "s3 key can't start with /, key={}", key);

    byte[] bytes = content.getBytes(Charset.forName("UTF-8"));

    String etag = etag(bytes);
    if (etagMatches(bucket, key, etag)) return;

    logger.info("put string content, bucket={}, key={}, contentLength={}", bucket, key, bytes.length);

    ByteArrayInputStream inputStream = new ByteArrayInputStream(bytes);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(bytes.length);
    s3.putObject(new PutObjectRequest(bucket, key, inputStream, objectMetadata).withStorageClass(StorageClass.ReducedRedundancy));
}
Project: cmn-project    File: S3.java
public void putObject(String bucket, String key, File file) {
    Asserts.isFalse(key.startsWith("/"), "s3 key can't start with /, key={}", key);

    String etag = etag(file);
    if (etagMatches(bucket, key, etag)) return;

    logger.info("put object, bucket={}, key={}, file={}", bucket, key, file.getAbsoluteFile());
    s3.putObject(new PutObjectRequest(bucket, key, file).withStorageClass(StorageClass.ReducedRedundancy));
}
Project: openbd-core    File: AmazonKey.java
public StorageClass getAmazonStorageClass(String storage) {
    if (storage == null)
        return StorageClass.Standard;
    else if (storage.equalsIgnoreCase("standard"))
        return StorageClass.Standard;
    else if (storage.toLowerCase().startsWith("reduced"))
        return StorageClass.ReducedRedundancy;
    else
        return StorageClass.Standard;
}
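
The mapping is deliberately forgiving; a few example inputs (illustrative only):

getAmazonStorageClass(null);                  // StorageClass.Standard
getAmazonStorageClass("Standard");            // StorageClass.Standard
getAmazonStorageClass("REDUCED_REDUNDANCY");  // StorageClass.ReducedRedundancy ("reduced" prefix, case-insensitive)
getAmazonStorageClass("glacier");             // StorageClass.Standard (unrecognized values fall back)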
Project: para    File: AWSFileStore.java
@Override
public String store(String path, InputStream data) {
    if (StringUtils.startsWith(path, "/")) {
        path = path.substring(1);
    }
    if (StringUtils.isBlank(path) || data == null) {
        return null;
    }
    int maxFileSizeMBytes = Config.getConfigInt("para.s3.max_filesize_mb", 10);
    try {
        if (data.available() > 0 && data.available() <= (maxFileSizeMBytes * 1024 * 1024)) {
            ObjectMetadata om = new ObjectMetadata();
            om.setCacheControl("max-age=15552000, must-revalidate");    // 180 days
            if (path.endsWith(".gz")) {
                om.setContentEncoding("gzip");
                path = path.substring(0, path.length() - 3);
            }
            path = System.currentTimeMillis() + "." + path;
            PutObjectRequest por = new PutObjectRequest(bucket, path, data, om);
            por.setCannedAcl(CannedAccessControlList.PublicRead);
            por.setStorageClass(StorageClass.ReducedRedundancy);
            s3.putObject(por);
            return Utils.formatMessage(baseUrl, Config.AWS_REGION, bucket, path);
        }
    } catch (IOException e) {
        logger.error(null, e);
    } finally {
        try {
            data.close();
        } catch (IOException ex) {
            logger.error(null, ex);
        }
    }
    return null;
}
Project: Singularity    File: SingularityS3Uploader.java
private void multipartUpload(String key, File file, ObjectMetadata objectMetadata, Optional<StorageClass> maybeStorageClass) throws Exception {
  List<PartETag> partETags = new ArrayList<>();
  InitiateMultipartUploadRequest initRequest = new InitiateMultipartUploadRequest(bucketName, key, objectMetadata);
  if (maybeStorageClass.isPresent()) {
    initRequest.setStorageClass(maybeStorageClass.get());
  }
  InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);

  long contentLength = file.length();
  long partSize = configuration.getUploadPartSize();

  try {
    long filePosition = 0;
    for (int i = 1; filePosition < contentLength; i++) {
      partSize = Math.min(partSize, (contentLength - filePosition));
      UploadPartRequest uploadRequest = new UploadPartRequest()
          .withBucketName(bucketName)
          .withKey(key)
          .withUploadId(initResponse.getUploadId())
          .withPartNumber(i)
          .withFileOffset(filePosition)
          .withFile(file)
          .withPartSize(partSize);
      partETags.add(s3Client.uploadPart(uploadRequest).getPartETag());
      filePosition += partSize;
    }

    CompleteMultipartUploadRequest completeRequest = new CompleteMultipartUploadRequest(bucketName, key, initResponse.getUploadId(), partETags);
    s3Client.completeMultipartUpload(completeRequest);
  } catch (Exception e) {
    s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, initResponse.getUploadId()));
    Throwables.propagate(e);
  }
}
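
A hedged invocation sketch (key, file, and storage class are examples); one S3 constraint worth noting is that every part except the last must be at least 5 MB, so configuration.getUploadPartSize() should honor that minimum:

// Hypothetical call; in SingularityS3Uploader these values come from the uploader's configuration.
multipartUpload("logs/app.log.gz", new File("/tmp/app.log.gz"), new ObjectMetadata(),
    Optional.of(StorageClass.StandardInfrequentAccess));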
Project: ivy-s3-resolver    File: S3Resolver.java
public S3Resolver(
  String name,
  AWSCredentialsProvider credentialsProvider,
  boolean overwrite,
  Region region,
  CannedAccessControlList acl,
  boolean serverSideEncryption,
  StorageClass storageClass
) {
  this(name, credentialsProvider, overwrite, region, Optional.ofNullable(acl), serverSideEncryption, storageClass);
}
Project: ivy-s3-resolver    File: S3Repository.java
public S3Repository(
  AWSCredentialsProvider provider,
  boolean overwrite,
  Region region
) {
  this(
    provider,
    overwrite,
    region,
    Optional.ofNullable(CannedAccessControlList.PublicRead),
    false,
    StorageClass.Standard
  );
}
Project: ivy-s3-resolver    File: S3Repository.java
/**
 * Package-private constructor specifically for taking an {@link AmazonS3} instance that can be mocked under test.
 */
S3Repository(
  AmazonS3 s3Client,
  boolean overwrite,
  Optional<CannedAccessControlList> acl,
  boolean serverSideEncryption,
  StorageClass storageClass
) {
  this.s3Client = s3Client;
  this.overwrite = overwrite;
  this.acl = acl;
  this.serverSideEncryption = serverSideEncryption;
  this.storageClass = storageClass;
}
Project: elasticsearch_my    File: S3BlobStoreTests.java
public void testCaseInsensitiveStorageClass() throws IOException {
    assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.Standard));
    assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.StandardInfrequentAccess));
    assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.ReducedRedundancy));
}
Project: elasticsearch_my    File: AmazonS3Wrapper.java
@Override
public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) throws AmazonClientException, AmazonServiceException {
    delegate.changeObjectStorageClass(bucketName, key, newStorageClass);
}
Project: syndesis    File: AmazonS3ClientMock.java
@Override
public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}