Java class com.datastax.driver.core.ConsistencyLevel example source code
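
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of the listed projects) of the two common ways a ConsistencyLevel is applied with the DataStax Java driver 3.x: as a cluster-wide default through QueryOptions, and as a per-statement override through Statement.setConsistencyLevel. The contact point "127.0.0.1" and the class name ConsistencyLevelSketch are placeholders for illustration only.

import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.ConsistencyLevel;
import com.datastax.driver.core.QueryOptions;
import com.datastax.driver.core.Session;
import com.datastax.driver.core.SimpleStatement;

public class ConsistencyLevelSketch {
    public static void main(String[] args) {
        // Cluster-wide default: statements without an explicit level use LOCAL_QUORUM.
        Cluster cluster = Cluster.builder()
                .addContactPoint("127.0.0.1")   // placeholder contact point
                .withQueryOptions(new QueryOptions()
                        .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM))
                .build();
        Session session = cluster.connect();

        // Per-statement override: this read only needs a single replica to respond.
        SimpleStatement statement = new SimpleStatement("SELECT release_version FROM system.local");
        statement.setConsistencyLevel(ConsistencyLevel.ONE);
        session.execute(statement);

        cluster.close();
    }
}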

Project: simulacron    File: ActivityLogIntegrationTest.java
@Test
public void testVerifyQueryLogInfo() throws Exception {
  long currentTimestamp = System.currentTimeMillis();
  String[] queries = new String[] {"select * from table1"};
  primeAndExecuteQueries(queries, queries);

  List<QueryLog> queryLogs = getAllQueryLogs(server.getLogs(server.getCluster()));
  assertThat(queryLogs.size()).isEqualTo(1);
  QueryLog log = queryLogs.get(0);
  assertThat(log.getConnection()).isNotNull();
  assertThat(log.getConsistency()).isEqualTo(adapt(ConsistencyLevel.LOCAL_ONE));
  assertThat(log.getReceivedTimestamp()).isNotZero();
  assertThat(log.getReceivedTimestamp()).isGreaterThan(currentTimestamp);
  assertThat(log.getClientTimestamp()).isEqualTo(100);
  assertThat(log.isPrimed()).isTrue();
}
Project: flink-cassandra-connector-examples    File: ClientSessionProvider.java
public static Session getClientSession(String hostAddr) {
    if(REGISTRY.containsKey(hostAddr)) {
        return REGISTRY.get(hostAddr);
    } else {
        Cluster.Builder clientClusterBuilder = new Cluster.Builder()
                .addContactPoint(hostAddr)
                .withQueryOptions(new QueryOptions()
                        .setConsistencyLevel(ConsistencyLevel.ONE)
                        .setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL))
                .withoutJMXReporting()
                .withoutMetrics()
                .withReconnectionPolicy(new ConstantReconnectionPolicy(RECONNECT_DELAY_IN_MS));
        long startTimeInMillis = System.currentTimeMillis();
        Cluster clientCluster = clientClusterBuilder.build();
        Session clientSession = clientCluster.connect();

        LOG.info("Client session established after {} ms.", System.currentTimeMillis() - startTimeInMillis);
        REGISTRY.putIfAbsent(hostAddr, clientSession);
        return clientSession;
    }
}
Project: kafka-connect-cassandra    File: CassandraSinkConnectorConfig.java
public static ConfigDef config() {
  return new ConfigDef()
      .define(CONTACT_POINTS_CONFIG, ConfigDef.Type.LIST, ImmutableList.of("localhost"), ConfigDef.Importance.MEDIUM, CONTACT_POINTS_DOC)
      .define(PORT_CONFIG, ConfigDef.Type.INT, 9042, ValidPort.of(), ConfigDef.Importance.MEDIUM, PORT_DOC)
      .define(CONSISTENCY_LEVEL_CONFIG, ConfigDef.Type.STRING, ConsistencyLevel.LOCAL_QUORUM.toString(), ValidEnum.of(ConsistencyLevel.class), ConfigDef.Importance.MEDIUM, CONSISTENCY_LEVEL_DOC)
      .define(USERNAME_CONFIG, ConfigDef.Type.STRING, "cassandra", ConfigDef.Importance.MEDIUM, USERNAME_DOC)
      .define(PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, "cassandra", ConfigDef.Importance.MEDIUM, PASSWORD_DOC)
      .define(SECURITY_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, false, ConfigDef.Importance.MEDIUM, SECURITY_ENABLE_DOC)
      .define(COMPRESSION_CONFIG, ConfigDef.Type.STRING, "NONE", ConfigDef.ValidString.in(CLIENT_COMPRESSION.keySet().stream().toArray(String[]::new)), ConfigDef.Importance.MEDIUM, COMPRESSION_DOC)
      .define(SSL_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, false, ConfigDef.Importance.MEDIUM, SSL_ENABLED_DOC)
      .define(SSL_PROVIDER_CONFIG, ConfigDef.Type.STRING, SslProvider.JDK.toString(), ValidEnum.of(SslProvider.class), ConfigDef.Importance.MEDIUM, SSL_PROVIDER_DOC)
      .define(DELETES_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, DELETES_ENABLE_DOC)
      .define(KEYSPACE_CONFIG, ConfigDef.Type.STRING, ConfigDef.Importance.HIGH, KEYSPACE_DOC)
      .define(KEYSPACE_CREATE_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.HIGH, KEYSPACE_CREATE_ENABLED_DOC)
      .define(TABLE_MANAGE_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, SCHEMA_MANAGE_CREATE_DOC)
      .define(TABLE_CREATE_COMPRESSION_ENABLED_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, TABLE_CREATE_COMPRESSION_ENABLED_DOC)
      .define(TABLE_CREATE_COMPRESSION_ALGORITHM_CONFIG, ConfigDef.Type.STRING, "NONE", ConfigDef.ValidString.in(TABLE_COMPRESSION.keySet().stream().toArray(String[]::new)), ConfigDef.Importance.MEDIUM, TABLE_CREATE_COMPRESSION_ALGORITHM_DOC)
      .define(TABLE_CREATE_CACHING_CONFIG, ConfigDef.Type.STRING, SchemaBuilder.Caching.NONE.toString(), ValidEnum.of(SchemaBuilder.Caching.class), ConfigDef.Importance.MEDIUM, TABLE_CREATE_CACHING_DOC);
}
Project: dmaap-framework    File: CassandraConfigDb.java
public CassandraConfigDb(List<String> contactPoints, int port) {

    this.contactPoints = new ArrayList<InetAddress> (contactPoints.size());

    for (String contactPoint : contactPoints) {
        try {
            this.contactPoints.add(InetAddress.getByName(contactPoint));
        } catch (UnknownHostException e) {
               throw new IllegalArgumentException(e.getMessage());
        }
    }

    this.port = port;

    cluster = (new Cluster.Builder()).withPort (this.port)
            .addContactPoints(this.contactPoints)
            .withSocketOptions(new SocketOptions().setReadTimeoutMillis(60000).setKeepAlive(true).setReuseAddress(true))
            .withLoadBalancingPolicy(new RoundRobinPolicy())
            .withReconnectionPolicy(new ConstantReconnectionPolicy(500L))
            .withQueryOptions(new QueryOptions().setConsistencyLevel(ConsistencyLevel.ONE))
            .build ();

    session = cluster.newSession();
    preparedStatements = new ConcurrentHashMap<StatementName, PreparedStatement> ();
    prepareStatementCreateLock = new Object();
}
Project: music    File: MusicDataStore.java
public void executePut(String query, String consistency){
    logger.debug("in data store handle, executing put:"+query);
    long start = System.currentTimeMillis();
    Statement statement = new SimpleStatement(query);
    if(consistency.equalsIgnoreCase("critical")){
        logger.info("Executing critical put query:"+query);
        statement.setConsistencyLevel(ConsistencyLevel.QUORUM);
    }
    else if (consistency.equalsIgnoreCase("eventual")){
        logger.info("Executing normal put query:"+query);
        statement.setConsistencyLevel(ConsistencyLevel.ONE);
    }
    session.execute(statement); 
    long end = System.currentTimeMillis();
    logger.debug("Time taken for actual put in cassandra:"+(end-start));
}
Project: AAF    File: AbsCassDAO.java
/**
 * Create a PSInfo and its Prepared Statement.
 * 
 * @param trans
 * @param theCQL
 * @param loader
 * @param consistency
 */
public PSInfo(TRANS trans, String theCQL, Loader<DATA> loader, ConsistencyLevel consistency) {
    this.loader = loader;
    this.consistency=consistency;
    psinfos.add(this);

    cql = theCQL.trim().toUpperCase();
    if(cql.startsWith("INSERT")) {
        crud = CRUD.create;
    } else if(cql.startsWith("UPDATE")) {
        crud = CRUD.update;
    } else if(cql.startsWith("DELETE")) {
        crud = CRUD.delete;
    } else {
        crud = CRUD.read;
    }

    int idx = 0, count=0;
    while((idx=cql.indexOf('?',idx))>=0) {
        ++idx;
        ++count;
    }
    size=count;
}
Project: emodb    File: CqlSubscriptionDAO.java
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.insertSubscription", absolute = true)
@Override
public void insertSubscription(String ownerId, String subscription, Condition tableFilter,
                               Duration subscriptionTtl, Duration eventTtl) {
    Map<String, Object> json = ImmutableMap.<String, Object>builder()
            .put("filter", tableFilter.toString())
            .put("expiresAt", _clock.millis() + subscriptionTtl.getMillis())
            .put("eventTtl", Ttls.toSeconds(eventTtl, 1, Integer.MAX_VALUE))
            .put("ownerId", ownerId)
            .build();

    _keyspace.getCqlSession().execute(
            insertInto(CF_NAME)
                    .value(rowkeyColumn(), ROW_KEY)
                    .value(subscriptionNameColumn(), subscription)
                    .value(subscriptionColumn(), JsonHelper.asJson(json))
                    .using(ttl(Ttls.toSeconds(subscriptionTtl, 1, Integer.MAX_VALUE)))
                    .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));
}
Project: emodb    File: CqlSubscriptionDAO.java
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getSubscription", absolute = true)
@Override
public OwnedSubscription getSubscription(String subscription) {
    ResultSet resultSet = _keyspace.getCqlSession().execute(
            select(subscriptionNameColumn(), subscriptionColumn())
                    .from(CF_NAME)
                    .where(eq(rowkeyColumn(), ROW_KEY))
                    .and(eq(subscriptionNameColumn(), subscription))
                    .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));

    Row row = resultSet.one();
    if (row == null) {
        return null;
    }
    return rowToOwnedSubscription(row);
}
Project: beam    File: CassandraServiceImpl.java
/**
 * Get a Cassandra cluster using hosts and port.
 */
private Cluster getCluster(List<String> hosts, int port, String username, String password,
                           String localDc, String consistencyLevel) {
  Cluster.Builder builder = Cluster.builder()
      .addContactPoints(hosts.toArray(new String[0]))
      .withPort(port);

  if (username != null) {
    builder.withAuthProvider(new PlainTextAuthProvider(username, password));
  }

  if (localDc != null) {
    builder.withLoadBalancingPolicy(
        new TokenAwarePolicy(new DCAwareRoundRobinPolicy.Builder().withLocalDc(localDc).build()));
  } else {
    builder.withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy()));
  }

  if (consistencyLevel != null) {
    builder.withQueryOptions(
        new QueryOptions().setConsistencyLevel(ConsistencyLevel.valueOf(consistencyLevel)));
  }

  return builder.build();
}
Project: incubator-zeppelin-druid    File: InterpreterLogicTest.java
@Test
public void should_generate_simple_statement() throws Exception {
    //Given
    String input = "SELECT * FROM users LIMIT 10;";
    CassandraQueryOptions options = new CassandraQueryOptions(Option.apply(QUORUM),
            Option.<ConsistencyLevel>empty(),
            Option.empty(),
            Option.<RetryPolicy>empty(),
            Option.empty());

    //When
    final SimpleStatement actual = helper.generateSimpleStatement(new SimpleStm(input), options, intrContext);

    //Then
    assertThat(actual).isNotNull();
    assertThat(actual.getQueryString()).isEqualTo("SELECT * FROM users LIMIT 10;");
    assertThat(actual.getConsistencyLevel()).isSameAs(QUORUM);
}
Project: incubator-zeppelin-druid    File: InterpreterLogicTest.java
@Test
public void should_generate_batch_statement() throws Exception {
    //Given
    Statement st1 = new SimpleStatement("SELECT * FROM users LIMIT 10;");
    Statement st2 = new SimpleStatement("INSERT INTO users(id) VALUES(10);");
    Statement st3 = new SimpleStatement("UPDATE users SET name = 'John DOE' WHERE id=10;");
    CassandraQueryOptions options = new CassandraQueryOptions(Option.apply(QUORUM),
            Option.<ConsistencyLevel>empty(),
            Option.empty(),
            Option.<RetryPolicy>empty(),
            Option.empty());

    //When
    BatchStatement actual = helper.generateBatchStatement(UNLOGGED, options, toScalaList(asList(st1, st2, st3)));

    //Then
    assertThat(actual).isNotNull();
    final List<Statement> statements = new ArrayList<>(actual.getStatements());
    assertThat(statements).hasSize(3);
    assertThat(statements.get(0)).isSameAs(st1);
    assertThat(statements.get(1)).isSameAs(st2);
    assertThat(statements.get(2)).isSameAs(st3);
    assertThat(actual.getConsistencyLevel()).isSameAs(QUORUM);
}
Project: cassandra-reaper    File: CassandraStorage.java
@Override
public RetryDecision onReadTimeout(
    Statement stmt,
    ConsistencyLevel cl,
    int required,
    int received,
    boolean retrieved,
    int retry) {

  if (retry > 1) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException expected) { }
  }
  return null != stmt && stmt.isIdempotent()
      ? retry < 10 ? RetryDecision.retry(cl) : RetryDecision.rethrow()
      : DefaultRetryPolicy.INSTANCE.onReadTimeout(stmt, cl, required, received, retrieved, retry);
}
Project: simulacron    File: DriverTypeAdaptersTest.java
@Test
public void testShouldAdaptConsistencyLevels() {
  assertThat(adapt(ConsistencyLevel.ANY)).isEqualTo(ANY);
  assertThat(adapt(ConsistencyLevel.ONE)).isSameAs(ONE);
  assertThat(adapt(ConsistencyLevel.TWO)).isSameAs(TWO);
  assertThat(adapt(ConsistencyLevel.THREE)).isSameAs(THREE);
  assertThat(adapt(ConsistencyLevel.QUORUM)).isSameAs(QUORUM);
  assertThat(adapt(ConsistencyLevel.ALL)).isSameAs(ALL);
  assertThat(adapt(ConsistencyLevel.LOCAL_QUORUM)).isSameAs(LOCAL_QUORUM);
  assertThat(adapt(ConsistencyLevel.EACH_QUORUM)).isSameAs(EACH_QUORUM);
  assertThat(adapt(ConsistencyLevel.SERIAL)).isSameAs(SERIAL);
  assertThat(adapt(ConsistencyLevel.LOCAL_SERIAL)).isSameAs(LOCAL_SERIAL);
  assertThat(adapt(ConsistencyLevel.LOCAL_ONE)).isSameAs(LOCAL_ONE);
}
Project: simulacron    File: DriverTypeAdaptersTest.java
@Test
public void testShouldExtractConsistencyLevels() {
  assertThat(extract(ANY)).isEqualTo(ConsistencyLevel.ANY);
  assertThat(extract(ONE)).isSameAs(ConsistencyLevel.ONE);
  assertThat(extract(TWO)).isSameAs(ConsistencyLevel.TWO);
  assertThat(extract(THREE)).isSameAs(ConsistencyLevel.THREE);
  assertThat(extract(QUORUM)).isSameAs(ConsistencyLevel.QUORUM);
  assertThat(extract(ALL)).isSameAs(ConsistencyLevel.ALL);
  assertThat(extract(LOCAL_QUORUM)).isSameAs(ConsistencyLevel.LOCAL_QUORUM);
  assertThat(extract(EACH_QUORUM)).isSameAs(ConsistencyLevel.EACH_QUORUM);
  assertThat(extract(SERIAL)).isSameAs(ConsistencyLevel.SERIAL);
  assertThat(extract(LOCAL_SERIAL)).isSameAs(ConsistencyLevel.LOCAL_SERIAL);
  assertThat(extract(LOCAL_ONE)).isSameAs(ConsistencyLevel.LOCAL_ONE);
}
Project: kafka-connect-cassandra    File: CassandraSinkConnectorConfig.java
public CassandraSinkConnectorConfig(Map<?, ?> originals) {
    super(config(), originals);
    this.port = getInt(PORT_CONFIG);
    final List<String> contactPoints = this.getList(CONTACT_POINTS_CONFIG);
    this.contactPoints = contactPoints.toArray(new String[contactPoints.size()]);
    this.consistencyLevel = ConfigUtils.getEnum(ConsistencyLevel.class, this, CONSISTENCY_LEVEL_CONFIG);
//    this.compression = ConfigUtils.getEnum(ProtocolOptions.Compression.class, this, COMPRESSION_CONFIG);
    this.username = getString(USERNAME_CONFIG);
    this.password = getPassword(PASSWORD_CONFIG).value();
    this.securityEnabled = getBoolean(SECURITY_ENABLE_CONFIG);
    this.sslEnabled = getBoolean(SSL_ENABLED_CONFIG);
    this.deletesEnabled = getBoolean(DELETES_ENABLE_CONFIG);

    final String keyspace = getString(KEYSPACE_CONFIG);

    if (Strings.isNullOrEmpty(keyspace)) {
      this.keyspace = null;
    } else {
      this.keyspace = keyspace;
    }

    final String compression = getString(COMPRESSION_CONFIG);
    this.compression = CLIENT_COMPRESSION.get(compression);
    this.sslProvider = ConfigUtils.getEnum(SslProvider.class, this, SSL_PROVIDER_CONFIG);
    this.keyspaceCreateEnabled = getBoolean(KEYSPACE_CREATE_ENABLED_CONFIG);
    this.tableManageEnabled = getBoolean(TABLE_MANAGE_ENABLED_CONFIG);
    this.tableCompressionEnabled = getBoolean(TABLE_CREATE_COMPRESSION_ENABLED_CONFIG);
    this.tableCompressionAlgorithm = ConfigUtils.getEnum(TableOptions.CompressionOptions.Algorithm.class, this, TABLE_CREATE_COMPRESSION_ALGORITHM_CONFIG);
    this.tableCaching = ConfigUtils.getEnum(SchemaBuilder.Caching.class, this, TABLE_CREATE_CACHING_CONFIG);
  }
Project: ats-framework    File: CassandraDbProvider.java
/**
 * Currently we connect just once and then reuse the connection.
 * We do not bother with closing the connection.
 *
 * It is normal to use one Session per DB. The Session is thread safe.
 */
private void connect() {

    if (cluster == null) {

        log.info("Connecting to Cassandra server on " + this.dbHost + " at port " + this.dbPort);

        // allow fetching as much data as present in the DB
        QueryOptions queryOptions = new QueryOptions();
        queryOptions.setFetchSize(Integer.MAX_VALUE);
        queryOptions.setConsistencyLevel(ConsistencyLevel.ONE);

        cluster = Cluster.builder()
                         .addContactPoint(this.dbHost)
                         .withPort(this.dbPort)
                         .withLoadBalancingPolicy(new TokenAwarePolicy(new RoundRobinPolicy()))
                         .withReconnectionPolicy(new ExponentialReconnectionPolicy(500, 30000))
                         .withQueryOptions(queryOptions)
                         .withCredentials(this.dbUser, this.dbPassword)
                         .build();

    }

    if (session == null) {

        log.info("Connecting to Cassandra DB with name " + this.dbName);
        session = cluster.connect(dbName);
    }
}
Project: eventapis    File: StoreConfiguration.java
@Bean("cassandraDataSource")
@Primary
public DataSource createDataSource() {
    DataSource dataSource = new DataSource();
    dataSource.setContactPoints("127.0.0.1");
    dataSource.setPort(9042);
    dataSource.setReadConsistency(ConsistencyLevel.ONE.name());
    dataSource.setWriteConsistency(ConsistencyLevel.ONE.name());
    dataSource.setLoadBalancingPolicy(new RoundRobinPolicy());
    return dataSource;
}
Project: Lagerta    File: DataSource.java
/**
 * Parses consistency level provided as string.
 *
 * @param level consistency level string.
 * @return consistency level.
 */
private ConsistencyLevel parseConsistencyLevel(String level) {
    if (level == null) {
        return null;
    }
    try {
        return ConsistencyLevel.valueOf(level.trim().toUpperCase());
    }
    catch (Throwable e) {
        throw new IgniteException("Incorrect consistency level '" + level + "' specified for Cassandra connection", e);
    }
}
Project: cassandra-java-driver-examples    File: CustomRetryPolicy.java
@Override
public RetryDecision onReadTimeout(Statement stmnt, ConsistencyLevel cl, int requiredResponses,
        int receivedResponses, boolean dataReceived, int rTime) {
    if (dataReceived) {
        return RetryDecision.ignore();
    } else if (rTime < readAttempts) {
        return RetryDecision.retry(cl);
    } else {
        return RetryDecision.rethrow();
    }
}
Project: cassandra-java-driver-examples    File: CustomRetryPolicy.java
@Override
public RetryDecision onWriteTimeout(Statement stmnt, ConsistencyLevel cl, WriteType wt, int requiredResponses,
        int receivedResponses, int wTime) {
    if (wTime < writeAttempts) {
        return RetryDecision.retry(cl);
    }
    return RetryDecision.rethrow();
}
Project: cassandra-java-driver-examples    File: CustomRetryPolicy.java
@Override
public RetryDecision onUnavailable(Statement stmnt, ConsistencyLevel cl, int requiredResponses,
        int receivedResponses, int uTime) {
    if (uTime < unavailableAttempts) {
        return RetryDecision.retry(ConsistencyLevel.ONE);
    }
    return RetryDecision.rethrow();
}
Project: cassandra-java-driver-examples    File: SimpleStatementExample.java
public static void main(String[] args) {
    Session session = Connection.connect();

    SimpleStatement statement1 = new SimpleStatement("insert into user (id, name, age) values (?, ?, ?)",
            UUIDs.timeBased(), "user01", 30);

    statement1.setConsistencyLevel(ConsistencyLevel.ONE);

    try {

        ResultSet rs = session.execute(statement1);
        System.out.println(rs);
    } catch (Exception ex) {
        ex.printStackTrace();
    }

    SimpleStatement statement2 = new SimpleStatement("select id, name, age from user");

    ResultSet rs2 = session.execute(statement2);

    System.out.println(rs2);

    for (Row row : rs2) {
        System.out.printf("id: %s, name: %s, age: %d\n", row.get(0, UUID.class), 
                  row.getString(1), row.getInt(2));
    }

    Connection.close();
}
Project: iotplatform    File: CassandraAbstractDao.java
private ResultSet execute(Statement statement, ConsistencyLevel level) {
    log.debug("Execute cassandra statement {}", statement);
    if (statement.getConsistencyLevel() == null) {
        statement.setConsistencyLevel(level);
    }
    return getSession().execute(statement);
}
Project: iotplatform    File: CassandraAbstractDao.java
private ResultSetFuture executeAsync(Statement statement, ConsistencyLevel level) {
    log.debug("Execute cassandra async statement {}", statement);
    if (statement.getConsistencyLevel() == null) {
        statement.setConsistencyLevel(level);
    }
    return getSession().executeAsync(statement);
}
Project: iotplatform    File: CassandraQueryOptions.java
protected ConsistencyLevel getDefaultReadConsistencyLevel() {
    if (defaultReadConsistencyLevel == null) {
        if (readConsistencyLevel != null) {
            defaultReadConsistencyLevel = ConsistencyLevel.valueOf(readConsistencyLevel.toUpperCase());
        } else {
            defaultReadConsistencyLevel = ConsistencyLevel.ONE;
        }
    }
    return defaultReadConsistencyLevel;
}
Project: iotplatform    File: CassandraQueryOptions.java
protected ConsistencyLevel getDefaultWriteConsistencyLevel() {
    if (defaultWriteConsistencyLevel == null) {
        if (writeConsistencyLevel != null) {
            defaultWriteConsistencyLevel = ConsistencyLevel.valueOf(writeConsistencyLevel.toUpperCase());
        } else {
            defaultWriteConsistencyLevel = ConsistencyLevel.ONE;
        }
    }
    return defaultWriteConsistencyLevel;
}
Project: boontadata-streams    File: CassandraConfiguration.java
protected CassandraConfiguration() {

    try {
        seed = JavaConversions.asScalaBuffer(Arrays.asList(InetAddress.getByName("localhost")));
    } catch (UnknownHostException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }

    CassandraCluster cc = new CassandraCluster(seed, 9042, null, 8000, 120000, 1000, 6000,
            new ProtocolOptions().getCompression().LZ4, ConsistencyLevel.ONE);
    session = cc.session();
}
Project: music    File: MusicDataStore.java
public ResultSet executeEventualGet(String query){
    logger.info("Executing normal get query:"+query);
    long start = System.currentTimeMillis();
    Statement statement = new SimpleStatement(query);
    statement.setConsistencyLevel(ConsistencyLevel.ONE);
    ResultSet results = session.execute(statement);
    long end = System.currentTimeMillis();
    logger.debug("Time taken for actual get in cassandra:"+(end-start));
    return results; 
}
Project: music    File: MusicDataStore.java
public ResultSet executeCriticalGet(String query){
    Statement statement = new SimpleStatement(query);
    logger.info("Executing critical get query:"+query);
    statement.setConsistencyLevel(ConsistencyLevel.QUORUM);
    ResultSet results = session.execute(statement);
    return results; 
}
Project: music    File: MusicClient.java
private void executeCreateQuery(String query, String consistency) throws Exception {
    Statement statement = new SimpleStatement(query);
    if (consistency.equalsIgnoreCase("atomic"))
        statement.setConsistencyLevel(ConsistencyLevel.ALL);
    else if (consistency.equalsIgnoreCase("eventual"))
        statement.setConsistencyLevel(ConsistencyLevel.ONE);
    else
        throw new Exception("Consistency level "+consistency+ " unknown!!");
    session.execute(statement);
}
Project: cfs    File: Options.java
public Options() {
    username = "nothing";
    password = "nothing";
    consistencyLevel = ConsistencyLevel.LOCAL_ONE;
    numberOfThreads = 16;
    dc=null;
    personalQueueSize = Integer.MAX_VALUE;
    columnNames = new ArrayList<>();
    columnNames.add("*");
    sleepMilliSeconds = 1;
}
Project: AbacusUtil    File: CassandraExecutor.java
public StatementSettings(ConsistencyLevel consistency, ConsistencyLevel serialConsistency, boolean traceQuery, RetryPolicy retryPolicy, int fetchSize) {
    this.consistency = consistency;
    this.serialConsistency = serialConsistency;
    this.traceQuery = traceQuery;
    this.retryPolicy = retryPolicy;
    this.fetchSize = fetchSize;
}
Project: Camel    File: CassandraComponentProducerTest.java
@Test
public void testRequestNotConsistent() throws Exception {

    CassandraEndpoint endpoint = getMandatoryEndpoint(NOT_CONSISTENT_URI, CassandraEndpoint.class);
    assertEquals(ConsistencyLevel.ANY, endpoint.getConsistencyLevel());

    Object response = notConsistentProducerTemplate.requestBody(Arrays.asList("j_anstey", "Jonathan", "Anstey"));
}
Project: AAF    File: CassDAOImpl.java
protected static ConsistencyLevel readConsistency(AuthzTrans trans, String table) {
    String prop = trans.getProperty(CASS_READ_CONSISTENCY+'.'+table);
    if(prop==null) {
        prop = trans.getProperty(CASS_READ_CONSISTENCY);
        if(prop==null) {
            return ConsistencyLevel.ONE; // this is Cassandra Default
        }
    }
    return ConsistencyLevel.valueOf(prop);
}
Project: AAF    File: CassDAOImpl.java
protected static ConsistencyLevel writeConsistency(AuthzTrans trans, String table) {
    String prop = trans.getProperty(CASS_WRITE_CONSISTENCY+'.'+table);
    if(prop==null) {
        prop = trans.getProperty(CASS_WRITE_CONSISTENCY);
        if(prop==null) {
            return ConsistencyLevel.ONE; // this is Cassandra Default
        }
    }
    return ConsistencyLevel.valueOf(prop);
}
Project: emodb    File: CqlSubscriptionDAO.java
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.deleteSubscription", absolute = true)
@Override
public void deleteSubscription(String subscription) {
    _keyspace.getCqlSession().execute(
            delete()
                    .from(CF_NAME)
                    .where(eq(rowkeyColumn(), ROW_KEY))
                    .and(eq(subscriptionNameColumn(), subscription))
                    .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));
}
Project: emodb    File: CqlSubscriptionDAO.java
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getAllSubscriptions", absolute = true)
@Override
public Iterable<OwnedSubscription> getAllSubscriptions() {
    return () -> {
        ResultSet resultSet = _keyspace.getCqlSession().execute(
                select(subscriptionNameColumn(), subscriptionColumn())
                        .from(CF_NAME)
                        .where(eq(rowkeyColumn(), ROW_KEY))
                        .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM)
                        .setFetchSize(200));

        return StreamSupport.stream(resultSet.spliterator(), false).map(this::rowToOwnedSubscription).iterator();
    };
}
Project: emodb    File: CqlSubscriptionDAO.java
@Timed(name = "bv.emodb.databus.CqlSubscriptionDAO.getAllSubscriptionNames", absolute = true)
@Override
public Iterable<String> getAllSubscriptionNames() {
    return () -> {
        ResultSet resultSet = _keyspace.getCqlSession().execute(
                select(subscriptionNameColumn())
                        .from(CF_NAME)
                        .where(eq(rowkeyColumn(), ROW_KEY))
                        .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM)
                        .setFetchSize(5000));

        return StreamSupport.stream(resultSet.spliterator(), false).map(row -> row.getString(0)).iterator();
    };
}
Project: emodb    File: CQLStashTableDAO.java
public void addTokenRangesForTable(String stashId, AstyanaxStorage readStorage, TableJson tableJson) {
    String placement = readStorage.getPlacementName();
    ensureStashTokenRangeTableExists();

    String tableInfo = JsonHelper.asJson(tableJson.getRawJson());
    Session session = _placementCache.get(_systemTablePlacement).getKeyspace().getCqlSession();

    // Add two records for each shard for the table: one which identifies the start token for the shard, and
    // one that identifies (exclusively) the end token for the shard.  This will allow for efficient range queries
    // later on.

    Iterator<ByteBufferRange> tableTokenRanges = readStorage.scanIterator(null);
    // To prevent sending over-large batches, split into groups of 8 ranges, which results in 16 statements per batch
    Iterators.partition(tableTokenRanges, 8).forEachRemaining(ranges -> {
        BatchStatement batchStatement = new BatchStatement();
        for (ByteBufferRange range : ranges) {
            batchStatement.add(QueryBuilder.insertInto(STASH_TOKEN_RANGE_TABLE)
                    .value(STASH_ID_COLUMN, stashId)
                    .value(DATA_CENTER_COLUMN, _dataCenters.getSelf().getName())
                    .value(PLACEMENT_COLUMN, placement)
                    .value(RANGE_TOKEN_COLUMN, range.getStart())
                    .value(IS_START_TOKEN_COLUMN, true)
                    .value(TABLE_JSON_COLUMN, tableInfo));

            batchStatement.add(QueryBuilder.insertInto(STASH_TOKEN_RANGE_TABLE)
                    .value(STASH_ID_COLUMN, stashId)
                    .value(DATA_CENTER_COLUMN, _dataCenters.getSelf().getName())
                    .value(PLACEMENT_COLUMN, placement)
                    .value(RANGE_TOKEN_COLUMN, range.getEnd())
                    .value(IS_START_TOKEN_COLUMN, false)
                    .value(TABLE_JSON_COLUMN, tableInfo));
        }

        session.execute(batchStatement.setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));
    });
}
Project: emodb    File: CQLStashTableDAO.java
private Iterator<ProtoStashTokenRange> getTokenRangesBetweenIntraShard(String stashId, String placement,
                                                                       ByteBuffer fromInclusive, ByteBuffer toExclusive) {
    // Since the range falls entirely within a single shard run a targeted query that only looks for the beginning
    // of that shard.

    ByteBuffer startToken = RowKeyUtils.getRowKeyRaw(RowKeyUtils.getShardId(fromInclusive), RowKeyUtils.getTableUuid(fromInclusive), new byte[0]);

    ResultSet resultSet = _placementCache.get(_systemTablePlacement)
            .getKeyspace()
            .getCqlSession()
            .execute(
                    QueryBuilder.select(TABLE_JSON_COLUMN)
                            .from(STASH_TOKEN_RANGE_TABLE)
                            .where(QueryBuilder.eq(STASH_ID_COLUMN, stashId))
                            .and(QueryBuilder.eq(DATA_CENTER_COLUMN, _dataCenters.getSelf().getName()))
                            .and(QueryBuilder.eq(PLACEMENT_COLUMN, placement))
                            .and(QueryBuilder.eq(RANGE_TOKEN_COLUMN, startToken))
                            .and(QueryBuilder.eq(IS_START_TOKEN_COLUMN, true))
                            .limit(1)
                            .setConsistencyLevel(ConsistencyLevel.LOCAL_QUORUM));

    Row row = resultSet.one();

    if (row == null) {
        return Iterators.emptyIterator();
    }

    TableJson tableJson = toTableJson(row.getString(0));
    return Iterators.singletonIterator(new ProtoStashTokenRange(fromInclusive, toExclusive, tableJson));
}