Java class com.mongodb.gridfs.GridFS: example source code
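
All of the snippets below exercise the legacy synchronous driver's com.mongodb.gridfs API (GridFS, GridFSInputFile, GridFSDBFile). As a quick orientation before the project examples, here is a minimal, self-contained sketch of the store / find / remove cycle they build on. It is an illustrative sketch only: the host, database name and bucket name are assumptions, not taken from any of the projects listed.

// Minimal GridFS round trip with the legacy com.mongodb.gridfs API.
// The host, database name and bucket name are illustrative assumptions.
import com.mongodb.DB;
import com.mongodb.MongoClient;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class GridFSQuickStart {
    public static void main(String[] args) throws IOException {
        MongoClient mongoClient = new MongoClient("localhost", 27017);
        try {
            DB db = mongoClient.getDB("example-db");          // legacy DB handle used by GridFS
            GridFS gridFs = new GridFS(db, "example-bucket"); // backed by example-bucket.files / .chunks

            // Store a small payload under a filename, with one custom metadata field.
            GridFSInputFile in = gridFs.createFile("hello gridfs".getBytes(StandardCharsets.UTF_8));
            in.setFilename("hello.txt");
            in.put("source", "quickstart");
            in.save();

            // Read it back by filename, then remove it again.
            GridFSDBFile out = gridFs.findOne("hello.txt");
            if (out != null) {
                out.writeTo(System.out);
                gridFs.remove("hello.txt");
            }
        } finally {
            mongoClient.close();
        }
    }
}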

Project: KernelHive    File: DataManager.java
public int getNextId(GridFS destDatabase) {
    DBCollection countersCollection = destDatabase.getDB().getCollection("counters");

    DBObject record = countersCollection.findOne(new BasicDBObject("_id", "package"));
    if (record == null) {
        BasicDBObject dbObject = new BasicDBObject("_id", "package");
        dbObject.append("seq", 0);
        countersCollection.insert(dbObject);
        record = dbObject;
    }
    int oldID = (int) record.get("seq");
    int newID = oldID + 1;
    record.put("seq", newID);
    countersCollection.update(new BasicDBObject("_id", "package"), record);

    return newID;
}
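
The read-modify-write in getNextId above is not atomic: two concurrent uploads can read the same seq value and end up with the same id. A common alternative is an atomic counter increment via DBCollection.findAndModify with $inc and upsert. The sketch below is an illustrative variant under that assumption, reusing the same "counters" collection and "seq" field; it is not part of the KernelHive source.

// Illustrative sketch (not from KernelHive): atomic counter increment using
// DBCollection.findAndModify with $inc and upsert, against the same
// "counters" collection and "seq" field as getNextId above.
public int getNextIdAtomic(GridFS destDatabase) {
    DBCollection counters = destDatabase.getDB().getCollection("counters");
    DBObject updated = counters.findAndModify(
            new BasicDBObject("_id", "package"),                     // query
            null,                                                    // fields (all)
            null,                                                    // sort
            false,                                                   // remove
            new BasicDBObject("$inc", new BasicDBObject("seq", 1)),  // update
            true,                                                    // return the new document
            true);                                                   // upsert if missing
    return ((Number) updated.get("seq")).intValue();
}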
Project: anhalytics-core    File: MongoFileManager.java
/**
 * Inserts a publication annex document.
 */
public void insertAnnexDocument(BinaryFile bf, String dateString) throws ParseException {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.PUB_ANNEXES);
        BasicDBObject whereQuery = new BasicDBObject();
        whereQuery.put("repositoryDocId", bf.getRepositoryDocId());
        whereQuery.put("filename", bf.getFileName());
        gfs.remove(whereQuery);
        //version ?
        GridFSInputFile gfsFile = gfs.createFile(bf.getStream(), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(dateString));
        gfsFile.setFilename(bf.getFileName());
        gfsFile.put("source", bf.getSource());
        gfsFile.put("version", bf.getRepositoryDocVersion());
        gfsFile.put("repositoryDocId", bf.getRepositoryDocId());
        gfsFile.put("anhalyticsId", bf.getAnhalyticsId());
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
Project: KernelHive    File: DataManager.java
public DataAddress uploadData(String data, DataAddress dataAddress) throws UnknownHostException {
      ServerAddress server = new ServerAddress(dataAddress.hostname, dataAddress.port);
      GridFS database = connectToDatabase(server);

      logger.info("Database connected");

      GridFSInputFile file = database.createFile(data.getBytes());
      int newID = getNextId(database);
      logger.info("Got new id for uploaded file: " + newID);
      file.setFilename(String.valueOf(newID));
      file.put("_id", newID);
      file.save();

      logger.info("after save");

      return new DataAddress(dataAddress.hostname, dataAddress.port, newID);
  }
Project: whatsmars    File: GridFSClient.java
private String isExistedImage(BundleEntry entry) {
    GridFS gridFS = getInstance();
    DBObject query = new BasicDBObject();
    query.put("crc",entry.crc);
    query.put("md5_source",entry.md5);
    GridFSDBFile _current = gridFS.findOne(query);
    // query by CRC and source MD5 to check whether the image already exists
    if(_current == null) {
        return null;
    }
    String format = (String)_current.get("format");
    if(format.startsWith(".")) {
        return _current.getFilename() + format;
    }
    return _current.getFilename() + "." + format;
}
Project: Rapture    File: GridFSBlobHandler.java
@Override
public Boolean storeBlob(CallingContext context, String docPath, InputStream newContent, Boolean append) {
    GridFS gridFS = getGridFS();
    GridFSInputFile file;
    if (!append) {
        gridFS.remove(docPath);
        file = createNewFile(docPath, newContent);
    } else {
        GridFSDBFile existing = gridFS.findOne(docPath);
        if (existing != null) {
            try {
                file = updateExisting(context, docPath, newContent, gridFS, existing);
            } catch (IOException e) {
                file = null;
                log.error(String.format("Error while appending to docPath %s: %s", docPath, ExceptionToString.format(e)));
            }

        } else {
            file = createNewFile(docPath, newContent);
        }
    }
    return file != null;
}
Project: Rapture    File: GridFSBlobHandler.java
@Override
public Boolean deleteBlob(CallingContext context, String docPath) {
    GridFS gridFS = getGridFS();
    String lockKey = createLockKey(gridFS, docPath);
    LockHandle lockHandle = grabLock(context, lockKey);
    boolean retVal = false;
    try {
        if (gridFS.findOne(docPath) != null) {
            gridFS.remove(docPath);
            retVal = true;
        }
    } finally {
        releaseLock(context, lockKey, lockHandle);
    }
    return retVal;
}
Project: Rapture    File: GridFSBlobHandler.java
@Override
public InputStream getBlob(CallingContext context, String docPath) {
    GridFS gridFS = getGridFS();
    String lockKey = createLockKey(gridFS, docPath);
    LockHandle lockHandle = grabLock(context, lockKey);
    InputStream retVal = null;
    try {
        GridFSDBFile file = gridFS.findOne(docPath);
        if (file != null) {
            retVal = file.getInputStream();
        }
    } finally {
        releaseLock(context, lockKey, lockHandle);
    }
    return retVal;
}
Project: beam    File: MongoDbGridFSIO.java
@Override
public long getEstimatedSizeBytes(PipelineOptions options) throws Exception {
  Mongo mongo = spec.connectionConfiguration().setupMongo();
  try {
    GridFS gridfs = spec.connectionConfiguration().setupGridFS(mongo);
    DBCursor cursor = createCursor(gridfs);
    long size = 0;
    while (cursor.hasNext()) {
      GridFSDBFile file = (GridFSDBFile) cursor.next();
      size += file.getLength();
    }
    return size;
  } finally {
    mongo.close();
  }
}
Project: biomedicus    File: MongoDbXmiWriter.java
@Override
public void initialize(UimaContext aContext) throws ResourceInitializationException {
  super.initialize(aContext);

  String mongoServer = (String) aContext.getConfigParameterValue(PARAM_MONGO_SERVER);
  int mongoPort = (Integer) aContext.getConfigParameterValue(PARAM_MONGO_PORT);
  String mongoDbName = (String) aContext.getConfigParameterValue(PARAM_MONGO_DB_NAME);

  try {
    mongoClient = new MongoClient(mongoServer, mongoPort);
  } catch (UnknownHostException e) {
    throw new ResourceInitializationException(e);
  }

  DB db = mongoClient.getDB(mongoDbName);

  gridFS = new GridFS(db);
}
Project: Camel    File: GridFsEndpoint.java
@SuppressWarnings("deprecation")
public void initializeConnection() throws Exception {
    LOG.info("Initialize GridFS endpoint: {}", this.toString());
    if (database == null) {
        throw new IllegalStateException("Missing required endpoint configuration: database");
    }
    db = mongoConnection.getDB(database);
    if (db == null) {
        throw new IllegalStateException("Could not initialize GridFsComponent. Database " + database + " does not exist.");
    }
    gridFs = new GridFS(db, bucket == null ? GridFS.DEFAULT_BUCKET : bucket) {
        {
            filesCollection = getFilesCollection();
        }
    };
}
Project: anhalytics-core    File: MongoFileManager.java
/**
 * Inserts a GROBID TEI document using GridFS.
 */
public void insertGrobidTei(String teiString, String repositoryDocId, String anhalyticsId, String version, String source, String type, String date) {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.GROBID_TEIS);
        gfs.remove(repositoryDocId + ".tei.xml");
        GridFSInputFile gfsFile = gfs.createFile(new ByteArrayInputStream(teiString.getBytes()), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(date));
        gfsFile.setFilename(repositoryDocId + ".tei.xml");
        gfsFile.put("repositoryDocId", repositoryDocId);
        gfsFile.put("anhalyticsId", anhalyticsId);
        gfsFile.put("source", source);
        gfsFile.put("version", version);
        gfsFile.put("documentType", type);
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
Project: anhalytics-core    File: MongoFileManager.java
/**
 * Inserts a TEI metadata document into GridFS.
 */
public void insertMetadataTei(String tei, String doi, String pdfUrl, String source, String repositoryDocId, String version, String type, String date) {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.METADATAS_TEIS);
        gfs.remove(repositoryDocId + ".tei.xml");
        GridFSInputFile gfsFile = gfs.createFile(new ByteArrayInputStream(tei.getBytes()), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(date));
        gfsFile.setFilename(repositoryDocId + ".tei.xml");
        gfsFile.put("repositoryDocId", repositoryDocId);
        gfsFile.put("anhalyticsId", generateAnhalyticsId(repositoryDocId, doi, pdfUrl));
        gfsFile.put("source", source);
        gfsFile.put("version", version);
        gfsFile.put("documentType", type);
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
Project: anhalytics-core    File: MongoFileManager.java
/**
 * Inserts a PDF binary document into GridFS.
 */
public void insertBinaryDocument(BinaryFile bf, String date) {
    try {
        GridFS gfs = new GridFS(db, MongoCollectionsInterface.BINARIES);
        gfs.remove(bf.getFileName());
        GridFSInputFile gfsFile = gfs.createFile(bf.getStream(), true);
        gfsFile.put("uploadDate", Utilities.parseStringDate(date));
        gfsFile.setFilename(bf.getFileName());
        gfsFile.put("repositoryDocId", bf.getRepositoryDocId());
        gfsFile.put("anhalyticsId", bf.getAnhalyticsId());
        gfsFile.put("source", bf.getSource());
        gfsFile.put("version", bf.getRepositoryDocVersion());
        gfsFile.put("documentType", bf.getDocumentType());
        gfsFile.setContentType(bf.getFileType());
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }

}
Project: anhalytics-core    File: MongoFileManager.java
/**
 * Updates an already existing TEI with a new, more enriched one (e.g. including fulltext).
 */
public void updateTei(String newTei, String repositoryDocId, String collection) {
    try {
        GridFS gfs = new GridFS(db, collection);
        GridFSDBFile gdf = gfs.findOne(repositoryDocId + ".tei.xml");
        GridFSInputFile gfsNew = gfs.createFile(new ByteArrayInputStream(newTei.getBytes()), true);
        gfsNew.put("uploadDate", gdf.getUploadDate());
        gfsNew.setFilename(gdf.get("repositoryDocId") + ".tei.xml");
        gfsNew.put("repositoryDocId", gdf.get("repositoryDocId"));
        gfsNew.put("documentType", gdf.get("documentType"));
        gfsNew.put("anhalyticsId", gdf.get("anhalyticsId"));
        gfsNew.put("source", gdf.get("source"));

        gfsNew.save();
        gfs.remove(gdf);
    } catch (Exception e) {
        logger.error(e.getMessage(), e.getCause());
    }
}
Project: anhalytics-core    File: MongoFileManager.java
/**
 * Inserts an arXiv/Istex TEI document into GridFS.
 */
public void insertExternalTeiDocument(InputStream file, String identifier, String repository, String namespace, String dateString) {
    try {
        GridFS gfs = new GridFS(db, namespace);
        GridFSInputFile gfsFile = gfs.createFile(file, true);
        gfs.remove(identifier + ".pdf");
        gfsFile.put("uploadDate", Utilities.parseStringDate(dateString));
        gfsFile.setFilename(identifier + ".tei.xml");
        gfsFile.put("identifier", identifier);
        gfsFile.put("repository", repository);
        gfsFile.setContentType("application/tei+xml");
        gfsFile.save();
    } catch (ParseException e) {
        logger.error(e.getMessage(), e.getCause());
    }

}
Project: anhalytics-core    File: MongoFileManager.java
/**
 * Checks whether the document has already been grobidified (processed by GROBID).
 */
public boolean isGrobidified(String repositoryDocId, String version) {
    GridFS gfs = new GridFS(db, MongoCollectionsInterface.GROBID_TEIS);
    BasicDBObject whereQuery = new BasicDBObject();
    whereQuery.put("repositoryDocId", repositoryDocId);
    whereQuery.put("version", version);
    List<GridFSDBFile> fs = gfs.find(whereQuery);
    return !fs.isEmpty();
}
Project: anhalytics-core    File: ProcessTest.java
@Test public void testFetchDocumentsByDate() throws ParserConfigurationException, IOException, InterruptedException {
    System.out.println("Test harvesting..");

    HarvestProperties.setOaiUrl("http://api.archives-ouvertes.fr/oai/hal");
    OAIHarvester oaih = new OAIHarvester(mm);
    DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd");
    Calendar cal = Calendar.getInstance();
    //cal.add(Calendar.DATE, -1);
    String date = dateFormat.format(cal.getTime());
    Utilities.updateDates(date, date);

    Runnable fooRunner = new MyRunnable(oaih, date);

    Thread fooThread = new Thread(fooRunner);
    fooThread.start();

    Thread.sleep(60000);

    fooThread.interrupt();

    List<GridFSDBFile> f = (new GridFS(db, MongoManager.ADDITIONAL_TEIS)).find((DBObject)null);
    assertTrue(f.size() > 1);
}
Project: LODVader    File: SubjectsBucket.java
public TreeMap<String, SubjectsBucket> createAllBuckets(int distributionID) {

    TreeMap<String, SubjectsBucket> result = new TreeMap<String, SubjectsBucket>();

    // get collection
    GridFS gfs = new GridFS(DBSuperClass2.getDBInstance(), SUBJECTS_BUCKET_COLLECTION_NAME);

    // create query
    BasicDBObject distribution = new BasicDBObject(DISTRIBUTION_ID, distributionID);

    // make query
    List<GridFSDBFile> buckets = gfs.find(distribution);

    for (GridFSDBFile f : buckets) {
        BloomFilterI filter = BloomFilterFactory.newBloomFilter();
        try {
            filter.readFrom(new BufferedInputStream(f.getInputStream()));
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
        result.put(f.get(FIRST_RESOURCE).toString(),new SubjectsBucket(filter, f.get(FIRST_RESOURCE).toString(), f.get(LAST_RESOURCE).toString()));
    }

    return result;
}
Project: osiris    File: MapFileRepositoryCustoImplTest.java
@Test
public void getFileMapByAppId() throws Exception{

    //Fixture
    Mockito.when(mongoTemplate.getDb()).thenReturn(db);
    PowerMockito.whenNew(GridFS.class).withArguments(db,collectionNameMap).thenReturn(gridFS);      
    Mockito.when(gridFS.findOne(idApp)).thenReturn(gridFSFile);
    Mockito.when(gridFSFile.getInputStream()).thenReturn(inputStream);

    // Experimentations
    InputStream response=mapFileRepositoryCustomImpl.getMapFileByAppId(idApp);

    // Expectations
    Mockito.verify(gridFS).findOne(idApp);
    Mockito.verify(gridFSFile).getInputStream();
    Assert.assertEquals("File .map must be the same",inputStream,response);
}
Project: osiris    File: ImportFilesRepositoryCustomImplTest.java
@Test
public void saveFileMap() throws Exception{

    //Fixture
    Mockito.when(mongoTemplate.getDb()).thenReturn(db);
    PowerMockito.whenNew(GridFS.class).withArguments(db, collectionNameMap).thenReturn(gridFS);     
    Mockito.when(gridFS.findOne(idApp)).thenReturn(gridFSFile);
    Mockito.when(gridFS.createFile(file)).thenReturn(gridFSInputFile);

    // Experimentations
    importFilesRepositoryCustomImpl.saveFileMap(idApp,file);

    // Expectations
    Mockito.verify(gridFS).remove(gridFSFile);
    Mockito.verify(gridFS).createFile(file);
    Mockito.verify(gridFSInputFile).setFilename(idApp);
    Mockito.verify(gridFSInputFile).save();
}
Project: osiris    File: ImportFilesRepositoryCustomImplTest.java
@Test
public void saveFileMapWithoutRemoving() throws Exception{

    //Fixture
    Mockito.when(mongoTemplate.getDb()).thenReturn(db);
    PowerMockito.whenNew(GridFS.class).withArguments(db, collectionNameMap).thenReturn(gridFS);     
    Mockito.when(gridFS.findOne(idApp)).thenReturn(null);
    Mockito.when(gridFS.createFile(file)).thenReturn(gridFSInputFile);

    // Experimentations
    importFilesRepositoryCustomImpl.saveFileMap(idApp,file);

    // Expectations
    Mockito.verify(gridFS).createFile(file);
    Mockito.verify(gridFSInputFile).setFilename(idApp);
    Mockito.verify(gridFSInputFile).save();
}
Project: osiris    File: ImportFilesRepositoryCustomImplTest.java
@Test
public void saveFileOSM() throws Exception{

    //Fixture
    Mockito.when(mongoTemplate.getDb()).thenReturn(db);
    PowerMockito.whenNew(GridFS.class).withArguments(db, collectionNameOSM).thenReturn(gridFS);     
    Mockito.when(gridFS.findOne(idApp)).thenReturn(gridFSFile);
    Mockito.when(gridFS.createFile(file)).thenReturn(gridFSInputFile);

    // Experimentations
    importFilesRepositoryCustomImpl.saveFileOSM(idApp,file);

    // Expectations
    Mockito.verify(gridFS).remove(gridFSFile);
    Mockito.verify(gridFS).createFile(file);
    Mockito.verify(gridFSInputFile).setFilename(idApp);
    Mockito.verify(gridFSInputFile).save();
}
Project: osiris    File: ImportFilesRepositoryCustomImplTest.java
@Test
public void saveFileOSMWithoutRemoving() throws Exception{

    //Fixture
    Mockito.when(mongoTemplate.getDb()).thenReturn(db);
    PowerMockito.whenNew(GridFS.class).withArguments(db, collectionNameOSM).thenReturn(gridFS);     
    Mockito.when(gridFS.findOne(idApp)).thenReturn(null);
    Mockito.when(gridFS.createFile(file)).thenReturn(gridFSInputFile);

    // Experimentations
    importFilesRepositoryCustomImpl.saveFileOSM(idApp,file);

    // Expectations
    Mockito.verify(gridFS).createFile(file);
    Mockito.verify(gridFSInputFile).setFilename(idApp);
    Mockito.verify(gridFSInputFile).save();
}
Project: osiris    File: ImportFilesRepositoryCustomImplTest.java
@Test
public void saveFileObj() throws Exception{

    //Fixture
    Mockito.when(mongoTemplate.getDb()).thenReturn(db);
    PowerMockito.whenNew(GridFS.class).withArguments(db, collectionNameObj).thenReturn(gridFS);     
    Mockito.when(gridFS.findOne(idApp)).thenReturn(gridFSFile);
    Mockito.when(gridFS.createFile(file)).thenReturn(gridFSInputFile);

    // Experimentations
    importFilesRepositoryCustomImpl.saveFileObj(idApp,file);

    // Expectations
    Mockito.verify(gridFS).remove(gridFSFile);
    Mockito.verify(gridFS).createFile(file);
    Mockito.verify(gridFSInputFile).setFilename(idApp);
    Mockito.verify(gridFSInputFile).save();
}
Project: osiris    File: ImportFilesRepositoryCustomImplTest.java
@Test
public void saveFileObjWithoutRemoving() throws Exception{

    //Fixture
    Mockito.when(mongoTemplate.getDb()).thenReturn(db);
    PowerMockito.whenNew(GridFS.class).withArguments(db, collectionNameObj).thenReturn(gridFS);     
    Mockito.when(gridFS.findOne(idApp)).thenReturn(null);
    Mockito.when(gridFS.createFile(file)).thenReturn(gridFSInputFile);

    // Experimentations
    importFilesRepositoryCustomImpl.saveFileObj(idApp,file);

    // Expectations
    Mockito.verify(gridFS).createFile(file);
    Mockito.verify(gridFSInputFile).setFilename(idApp);
    Mockito.verify(gridFSInputFile).save();
}
Project: openbd-core    File: Find.java
public cfData execute(cfSession _session, cfArgStructData argStruct ) throws cfmRunTimeException {

    // Get the necessary Mongo references
    DB          db  = getDB(_session, argStruct);
    GridFS  gridfs  = getGridFS(_session, argStruct, db);


    // Get the file information
    String filename = getNamedStringParam(argStruct, "filename", null);
    if ( filename != null ){
        return toArray( gridfs.find(filename) );
    } else {

        cfData mTmp = getNamedParam(argStruct, "query", null);
        if ( mTmp != null )
            return toArray( gridfs.find(getDBObject(mTmp)) );
    }

    throwException(_session, "Please specify file or a query");
    return null;
}
Project: mod-image-resizer    File: GridFsFileAccess.java
@Override
public void write(String dest, ImageFile img, Handler<String> handler) {
    String [] path = parsePath(dest);
    if (path == null || path.length < 1) {
        handler.handle(null);
        return;
    }
    String id;
    if (path.length == 2 && path[1] != null && !path[1].trim().isEmpty()) {
        id = path[1];
    } else {
        id = UUID.randomUUID().toString();
    }
    GridFS fs = new GridFS(db, path[0]);
    try {
        saveFile(img, id, fs);
    } catch (DuplicateKeyException e) {
        fs.remove(new BasicDBObject("_id", id));
        saveFile(img, id, fs);
    }
    handler.handle(id);
}
Project: mod-image-resizer    File: ResizerFSTest.java
@Test
public void testResizeStoreMongo() throws Exception {
    JsonObject json = new JsonObject()
            .putString("action", "resize")
            .putString("src", SRC_IMG)
            .putString("dest", "gridfs://fs")
            .putNumber("width", 300);

    eb.send(ADDRESS, json, new Handler<Message<JsonObject>>() {
        public void handle(Message<JsonObject> reply) {
            assertEquals("ok", reply.body().getString("status"));
            String id = reply.body().getString("output");
            GridFS fs = new GridFS(db, "fs");
            GridFSDBFile f = fs.findOne((DBObject) JSON.parse("{\"_id\":\"" + id + "\"}"));
            assertEquals("image/jpeg", f.getContentType());
            testComplete();
        }
    });
}
Project: hdfs-archiver    File: HDFSArchiver.java
private boolean isDumpToLocal(GridFSFile file, GridFS fs){
    long limit = 0; 
    if(this.sizeToLocal.containsKey(fs.getDB().getName())){
        limit = this.sizeToLocal.get(fs.getDB().getName());
    }else {
        limit = this.sizeToLocal.get("default");
    }
    if(file == null && limit == 0){
        return true;
    }else if(file != null){
        log.debug("Check db limit, file size:" + file.getLength() + ", db:" + limit);
        return file.getLength() > limit;
    }else {
        return false;
    }       
}
Project: hdfs-archiver    File: HDFSArchiver.java
public List searchFile(String path, int offset, int limit){
    DBCursor cursor = null;
    GridFS fs = defaultFs;
    if(path.indexOf('$') > 0){
        String[] t = path.split("\\$", 2);
        fs = getGridFS(t[0]);
        path = t[1];
    }

    if(path != null && !"".equals(path.trim())){
        DBObject f = new BasicDBObject();
        path = path.replace("*", ".*");
        f.put("filename", java.util.regex.Pattern.compile("^" + path));
        cursor = fs.getFileList(f);
    }else {
        cursor = fs.getFileList();
    }
    List result = cursor.skip(offset).limit(limit).toArray();
    cursor.close();
    return result;
}
Project: nanopub-server    File: NanopubDb.java
private NanopubDb() {
    logger.info("Initialize new DB object");
    conf = ServerConf.get();
    ServerAddress serverAddress = new ServerAddress(conf.getMongoDbHost(), conf.getMongoDbPort());
    List<MongoCredential> credentials = new ArrayList<>();
    if (conf.getMongoDbUsername() != null) {
        credentials.add(MongoCredential.createMongoCRCredential(
                conf.getMongoDbUsername(),
                conf.getMongoDbName(),
                conf.getMongoDbPassword().toCharArray()));
    }
    mongo = new MongoClient(serverAddress, credentials);
    db = mongo.getDB(conf.getMongoDbName());
    packageGridFs = new GridFS(db, "packages_gzipped");
    init();
}
Project: jclouds-gridfs-blobstore    File: GridFSBlobStore.java
@Override
public Blob getBlob(String container, String name, GetOptions options) {
    GridFSIdentifier identifier = parseGridFSIdentifier(container);
    if (!identifier.storeExists(mongo)) {
        throw new ContainerNotFoundException(container, "could not find expected collections in database");
    }
    // TODO: support get options
    if (options != null && (
            options.getIfMatch() != null || options.getIfNoneMatch() != null ||
                    options.getIfModifiedSince() != null || options.getIfUnmodifiedSince() != null ||
                    !options.getRanges().isEmpty()
    )) {
        throw new IllegalArgumentException("Get options are not currently supported by this provider");
    }
    GridFS gridFS = identifier.connect(mongo); // TODO: cache
    GridFSDBFile dbFile = gridFS.findOne(name);
    if (dbFile == null) {
        return null;
    }
    Blob blob = dbFileToBlob.apply(dbFile);
    blob.getMetadata().setContainer(container);
    return blob;
}
Project: jclouds-gridfs-blobstore    File: GridFSBlobStore.java
@Override
public String putBlob(String container, Blob blob, PutOptions options) {
    if (options != null && !options.isMultipart()) {
        throw new IllegalArgumentException("only multipart is supported by this provider");
    }
    Payload payload = checkNotNull(blob.getPayload());
    BlobMetadata metadata = blob.getMetadata();
    ContentMetadata contentMetadata = metadata.getContentMetadata();
    GridFS gridFS = parseGridFSIdentifier(container).connect(mongo);
    GridFSInputFile inputFile = gridFS.createFile(payload.getInput(), metadata.getName(), true);
    inputFile.setContentType(contentMetadata.getContentType());
    DBObject fileMetadata = new BasicDBObject();
    fileMetadata.putAll(metadata.getUserMetadata());
    inputFile.setMetaData(fileMetadata);
    inputFile.save();
    return inputFile.getMD5();
}
Project: glados-wiki    File: FileStoreService.java
public Optional<FileEntry> save(final String name, final String mime,
                                final String creator, final boolean privateFile, final String description,
                                InputStream in) {
    GridFS gf = gridFS;
    GridFSInputFile f = gf.createFile(in);
    f.setFilename(name);
    f.setContentType(mime);
    //
    DBObject metadata = f.getMetaData();
    if (metadata == null) {
        metadata = new BasicDBObject();
        f.setMetaData(metadata);
    }
    metadata.put("creator", creator);
    metadata.put("private", privateFile);
    metadata.put("description", description);
    //
    f.save();
    //
    return this.loadFileEntry((ObjectId) f.getId());
}
Project: MongoWorkBench    File: GridFSPutFileCommand.java
@Override
public void execute() throws Exception {
    MongoClient mdb = MongoFactory.getInst().getMongo( sName );

    if ( mdb == null )
        throw new Exception("no server selected");

    if ( sDb == null )
        throw new Exception("no database selected");

    MongoFactory.getInst().setActiveDB(sDb);
    DB db   = mdb.getDB(sDb);

    GridFS  gfs = new GridFS( db, sColl.substring(0,sColl.lastIndexOf(".")) );

    GridFSInputFile gridFSInputFile = gfs.createFile(getFile);
    gridFSInputFile.setContentType( MimetypesFileTypeMap.getDefaultFileTypeMap().getContentType(getFile) );
    gridFSInputFile.save();

    setMessage( "fileLoaded=" + getFile + "; size=" + getFile.length() );
}
Project: MongoWorkBench    File: GridFSGetFileCommand.java
@Override
public void execute() throws Exception {
    MongoClient mdb = MongoFactory.getInst().getMongo( sName );

    if ( mdb == null )
        throw new Exception("no server selected");

    if ( sDb == null )
        throw new Exception("no database selected");

    MongoFactory.getInst().setActiveDB(sDb);
    DB db   = mdb.getDB(sDb);

    GridFS  gfs = new GridFS( db, sColl.substring(0,sColl.lastIndexOf(".")) );
    GridFSDBFile gridFSDBFile = gfs.find(id);
    gridFSDBFile.writeTo(saveFile);

    setMessage( "fileSaved=" + saveFile + "; size=" + saveFile.length() );
}
Project: MongoWorkBench    File: GridFSRemoveFileCommand.java
@Override
public void execute() throws Exception {
    MongoClient mdb = MongoFactory.getInst().getMongo( sName );

    if ( mdb == null )
        throw new Exception("no server selected");

    if ( sDb == null )
        throw new Exception("no database selected");

    MongoFactory.getInst().setActiveDB(sDb);
    DB db   = mdb.getDB(sDb);

    GridFS  gfs = new GridFS( db, sColl.substring(0,sColl.lastIndexOf(".")) );
    gfs.remove( new ObjectId(id) );

    setMessage("fileRemoved="+id);
}
Project: MongoWorkBench    File: GridFSCreateBucketCommand.java
@Override
public void execute() throws Exception {
    MongoClient mdb = MongoFactory.getInst().getMongo( sName );

    if ( mdb == null )
        throw new Exception("no server selected");

    if ( sDb == null )
        throw new Exception("no database selected");

    MongoFactory.getInst().setActiveDB(sDb);
    DB db   = mdb.getDB(sDb);

    GridFS  gfs = new GridFS( db, sColl );
    gfs.getBucketName();

    setMessage("bucketCreated=" + sColl);
}
Project: KernelHive    File: DataManager.java
public GridFS connectToDatabase(ServerAddress server) {
    MongoCredential credential = MongoCredential.createMongoCRCredential("hive-dataserver", "admin", "hive-dataserver".toCharArray());
    MongoClient mongoClient = new MongoClient(server, Arrays.asList(credential));
    logger.info("got client");
    DB db = mongoClient.getDB("hive-dataserver");
    logger.info("Got DB");
    return new GridFS(db);
}
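
Note that connectToDatabase (like the NanopubDb constructor further up) authenticates with MongoCredential.createMongoCRCredential. The MONGODB-CR mechanism was deprecated in MongoDB 3.0 and removed in 4.0, so against newer servers a SCRAM-SHA-1 credential is the drop-in replacement. Below is a minimal sketch of that variant, keeping the same illustrative "hive-dataserver" credentials from the snippet above; it is not part of the KernelHive source.

// Illustrative sketch: same connection as connectToDatabase above, but using
// SCRAM-SHA-1 instead of the removed MONGODB-CR mechanism. The credentials are
// the ones already shown in the snippet; adjust them for a real deployment.
public GridFS connectToDatabaseScram(ServerAddress server) {
    MongoCredential credential = MongoCredential.createScramSha1Credential(
            "hive-dataserver", "admin", "hive-dataserver".toCharArray());
    MongoClient mongoClient = new MongoClient(server, Arrays.asList(credential));
    DB db = mongoClient.getDB("hive-dataserver");
    return new GridFS(db);
}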