/**
 * Runs one random-start range scan, capped server-side at 120 rows by a
 * WhileMatchFilter-wrapped PageFilter, and records the size of every value read.
 *
 * Fix: the ResultScanner is closed with try-with-resources so it is released
 * even when next() throws; the original leaked it on the exception path.
 *
 * @param i test iteration index (framework contract; not used in the body)
 * @throws IOException if the scan fails
 */
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  scan.setCaching(opts.caching);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  // WhileMatchFilter ends the whole scan once the PageFilter stops matching.
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  try (ResultScanner s = this.table.getScanner(scan)) {
    for (Result rr; (rr = s.next()) != null;) {
      updateValueSize(rr);
    }
  }
}
private Scan getVertexIndexScanWithLimit(String label, boolean isUnique, String key, Object from, int limit, boolean reversed) { byte[] prefix = serializeForRead(label, isUnique, key, null); byte[] startRow = from != null ? serializeForRead(label, isUnique, key, from) : prefix; byte[] stopRow = HConstants.EMPTY_END_ROW; if (graph.configuration().getInstanceType() == HBaseGraphConfiguration.InstanceType.BIGTABLE) { if (reversed) { throw new UnsupportedOperationException("Reverse scans not supported by Bigtable"); } else { // PrefixFilter in Bigtable does not automatically stop // See https://github.com/GoogleCloudPlatform/cloud-bigtable-client/issues/1087 stopRow = HBaseGraphUtils.incrementBytes(prefix); } } if (reversed) startRow = HBaseGraphUtils.incrementBytes(startRow); Scan scan = new Scan(startRow, stopRow); FilterList filterList = new FilterList(); filterList.addFilter(new PrefixFilter(prefix)); filterList.addFilter(new PageFilter(limit)); scan.setFilter(filterList); scan.setReversed(reversed); return scan; }
/**
 * Runs one random-start range scan over a single column, capped server-side
 * at 120 rows, accumulating the value sizes of all returned rows.
 *
 * Fix: the ResultScanner is closed with try-with-resources so it is released
 * even when next() throws; the original leaked it on the exception path.
 *
 * @param i test iteration index (framework contract; not used in the body)
 * @throws IOException if the scan fails
 */
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  FilterList list = new FilterList();
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  // WhileMatchFilter ends the whole scan once the PageFilter stops matching.
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  try (ResultScanner s = this.table.getScanner(scan)) {
    for (Result rr; (rr = s.next()) != null;) {
      updateValueSize(rr);
    }
  }
}
/**
 * Runs one random-start range scan using the fluent Scan builder API,
 * capped server-side at 120 rows, accumulating returned value sizes.
 *
 * Fix: the ResultScanner is closed with try-with-resources so it is released
 * even when next() throws; the original leaked it on the exception path.
 *
 * @param i test iteration index (framework contract; not used in the body)
 * @throws IOException if the scan fails
 */
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows))
      .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
      .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType);
  FilterList list = new FilterList();
  if (opts.addColumns) {
    scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  } else {
    scan.addFamily(FAMILY_NAME);
  }
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  // WhileMatchFilter ends the whole scan once the PageFilter stops matching.
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  try (ResultScanner s = this.table.getScanner(scan)) {
    for (Result rr; (rr = s.next()) != null;) {
      updateValueSize(rr);
    }
  }
}
/**
 * Helper method that generates a scan object for use in a query.
 *
 * Applies the limit (as a PageFilter), stop key and start key of every
 * RowFilter found in the query; non-row filters are ignored here.
 * NOTE(review): if the query carries more than one RowFilter, each setting is
 * overwritten by later filters in iteration order — confirm callers only
 * supply one.
 *
 * @param theQuery the query to generate a scan for
 * @param theTableMap the table the query is against
 * @return the generated scan
 */
private <T> Scan generateScan( Query<T> theQuery, HBaseTableMap theTableMap ) {
	Scan scan = new Scan( );
	for( Filter filter : theQuery.getFilters( ) ) {
		if( !( filter instanceof RowFilter ) ) {
			continue;
		}
		RowFilter<?> rowFilter = ( RowFilter<?> )filter;
		HBaseKeyMap keyMap = theTableMap.getKey( );
		if( rowFilter.getLimit( ) > 0 ) {
			scan.setFilter( new PageFilter( rowFilter.getLimit( ) ) );
		}
		if( rowFilter.getEndKey( ) != null ) {
			scan.setStopRow( ( byte[] )keyMap.getKeyTranslator( ).translate( rowFilter.getEndKey( ) ) );
		}
		if( rowFilter.getStartKey( ) != null ) {
			scan.setStartRow( ( byte[] )keyMap.getKeyTranslator( ).translate( rowFilter.getStartKey( ) ) );
		}
	}
	return scan;
}
/**
 * Runs one random-start range scan over a single column, capped server-side
 * at 120 rows by a WhileMatchFilter(PageFilter).
 *
 * Fixes: the original opened the scanner and closed it immediately without
 * ever calling next(), so no rows were actually fetched (sibling testRow
 * implementations drain the scanner); it also leaked the scanner if
 * getScanner/close threw. The scanner is now drained and closed with
 * try-with-resources.
 *
 * @param i test iteration index (framework contract; not used in the body)
 * @throws IOException if the scan fails
 */
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  try (ResultScanner s = this.table.getScanner(scan)) {
    // Drain the scanner so the scan is actually executed.
    for (Result rr; (rr = s.next()) != null;) {
      // Intentionally empty: this test only measures scan throughput.
    }
  }
}
/**
 * Returns the accumulated filter set.
 *
 * If a page size has been requested, it is folded into the filters as a
 * PageFilter exactly once; pageSize is reset to 0 afterwards so repeated
 * calls do not add duplicate PageFilters.
 *
 * @return the combined filters
 */
public synchronized Filter getFilter() {
    if (pageSize > 0) {
        addFilter(new PageFilter(pageSize));
        // Consume the pending page size so it is only applied once.
        pageSize = 0L;
    }
    return filters;
}
/**
 * Returns an iterator over at most {@code limit} edges, optionally starting
 * at the (salted) row key for {@code fromId}.
 *
 * The PageFilter limits rows per region server; CloseableIteratorUtils.limit
 * enforces the exact client-side cap. The scanner is closed when the iterator
 * is exhausted.
 *
 * @param fromId edge id to start from, or null to start at the beginning
 * @param limit  maximum number of edges to return
 * @return a limited edge iterator
 */
public Iterator<Edge> edges(Object fromId, int limit) {
    final EdgeReader parser = new EdgeReader(graph);
    final Scan scan;
    if (fromId == null) {
        scan = new Scan();
    } else {
        scan = new Scan(ValueUtils.serializeWithSalt(fromId));
    }
    scan.setFilter(new PageFilter(limit));
    try {
        ResultScanner scanner = table.getScanner(scan);
        return CloseableIteratorUtils.limit(HBaseGraphUtils.mapWithCloseAtEnd(scanner, parser::parse), limit);
    } catch (IOException e) {
        throw new HBaseGraphException(e);
    }
}
/**
 * Returns an iterator over at most {@code limit} vertices, optionally
 * starting at the (salted) row key for {@code fromId}.
 *
 * The PageFilter limits rows per region server; CloseableIteratorUtils.limit
 * enforces the exact client-side cap. The scanner is closed when the iterator
 * is exhausted.
 *
 * @param fromId vertex id to start from, or null to start at the beginning
 * @param limit  maximum number of vertices to return
 * @return a limited vertex iterator
 */
public Iterator<Vertex> vertices(Object fromId, int limit) {
    final VertexReader parser = new VertexReader(graph);
    final Scan scan;
    if (fromId == null) {
        scan = new Scan();
    } else {
        scan = new Scan(ValueUtils.serializeWithSalt(fromId));
    }
    scan.setFilter(new PageFilter(limit));
    try {
        ResultScanner scanner = table.getScanner(scan);
        return CloseableIteratorUtils.limit(HBaseGraphUtils.mapWithCloseAtEnd(scanner, parser::parse), limit);
    } catch (IOException e) {
        throw new HBaseGraphException(e);
    }
}
/**
 * Runs one random-start range scan over a single column, capped server-side
 * at 120 rows, draining all results without inspecting them.
 *
 * Fix: the ResultScanner is closed with try-with-resources so it is released
 * even when next() throws; the original leaked it on the exception path.
 *
 * @param i test iteration index (framework contract; not used in the body)
 * @throws IOException if the scan fails
 */
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, this.totalRows));
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  scan.setFilter(new WhileMatchFilter(new PageFilter(120)));
  try (ResultScanner s = this.table.getScanner(scan)) {
    for (Result rr; (rr = s.next()) != null;) {
      // Intentionally empty: this test only measures scan throughput.
    }
  }
}
@Override void testRow(final int i) throws IOException { Scan scan = new Scan(getRandomRow(this.rand, this.totalRows)); scan.addColumn(FAMILY_NAME, QUALIFIER_NAME); scan.setFilter(new WhileMatchFilter(new PageFilter(120))); ResultScanner s = this.table.getScanner(scan); //int count = 0; for (Result rr = null; (rr = s.next()) != null;) { // LOG.info("" + count++ + " " + rr.toString()); } s.close(); }
/** * Specifies a range of rows to retrieve based on a starting row key * and retrieves up to limit rows. Each row is passed to the supplied * DataScanner. */ public void ScanRows(String startDate, String symbol, int limit, DataScanner scanner) throws IOException { ResultScanner results = null; try (Connection conn = ConnectionFactory.createConnection(config)){ // Get the table Table table = conn.getTable(TableName.valueOf(TABLE_NAME)); // Create the scan Scan scan = new Scan(); // start at a specific rowkey. scan.setStartRow(makeKey(startDate, symbol)); // Tell the server not to cache more than limit rows // since we won;t need them scan.setCaching(limit); // Can also set a server side filter scan.setFilter(new PageFilter(limit)); // Get the scan results results = table.getScanner(scan); // Iterate over the scan results and break at the limit int count = 0; for ( Result r : results ) { scanner.ProcessRow(r); if ( count++ >= limit ) break; } } finally { // ResultScanner must be closed. if ( results != null ) results.close(); } }
/**
 * Applies the given filter to the scan.
 *
 * Fix: corrected the grammar of the rejection message ("limit it to big").
 *
 * @param filter the filter to apply; a PageFilter must not request more rows
 *               than MAX_DATA_PER_SCAN
 * @return this manager, for chaining
 * @throws IllegalArgumentException if a PageFilter exceeds MAX_DATA_PER_SCAN
 */
public HbaseScanManager setFilter(Filter filter) {
    if (filter instanceof PageFilter && isPageLimitExceeded((PageFilter) filter)) {
        throw new IllegalArgumentException(
                "Page size limit is too big, should be smaller than: " + MAX_DATA_PER_SCAN);
    }
    scan.setFilter(filter);
    return this;
}
@Override public List<User> find(String startRow, long pageSize) { Scan scan = new Scan(); startRow = startRow == null ? "" : startRow; scan.setStartRow(Bytes.toBytes(startRow)); PageFilter filter = new PageFilter(pageSize); // TODO order and sort // scan.setStartRow(startRow).setMaxResultSize(pageSize); scan.setFilter(filter); List<User> userList = find(User.TB_NAME, scan, getRowMapper(User.CF_KEY, type)); return userList; }
/**
 * Runs one random-start range scan over a single column, capped server-side
 * at 120 rows, draining all results without inspecting them.
 *
 * Fix: the ResultScanner is closed with try-with-resources so it is released
 * even when next() throws; the original leaked it on the exception path.
 *
 * @param i test iteration index (framework contract; not used in the body)
 * @throws IOException if the scan fails
 */
@Override
void testRow(final int i) throws IOException {
  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
  FilterList list = new FilterList();
  scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
  if (opts.filterAll) {
    list.addFilter(new FilterAllFilter());
  }
  // WhileMatchFilter ends the whole scan once the PageFilter stops matching.
  list.addFilter(new WhileMatchFilter(new PageFilter(120)));
  scan.setFilter(list);
  try (ResultScanner s = this.table.getScanner(scan)) {
    for (Result rr; (rr = s.next()) != null;) {
      // Intentionally empty: this test only measures scan throughput.
    }
  }
}
private void setMaxRows(Scan scan, int maxRows) { try { // in old versions of the HBase API, the 'setMaxResultSize' method // is not available Method method = scan.getClass().getMethod("setMaxResultSize", long.class); method.invoke(scan, (long) maxRows); logger.debug("Succesfully set maxRows using Scan.setMaxResultSize({})", maxRows); } catch (Exception e) { logger.debug( "HBase API does not have Scan.setMaxResultSize(long) method, setting maxRows using PageFilter.", e); scan.setFilter(new PageFilter(maxRows)); } }
@Override public boolean doesStatisticExist(final UniqueIdCache uniqueIdCache, final StatisticConfiguration statisticConfiguration, final RollUpBitMask rollUpBitMask, final Period period) { boolean isFound = false; final String statUuid = statisticConfiguration.getUuid(); final UID statUuidUid = uniqueIdCache.getUniqueIdOrDefault(statUuid); Scan scan = buildBasicScan(rollUpBitMask, period, statisticConfiguration); // filter on rows with a key starting with the UID of our stat name final Filter prefixFilter = new PrefixFilter(statUuidUid.getUidBytes()); // filter on the first row found final Filter pageFilter = new PageFilter(1); final Filter keyOnlyFilter = new KeyOnlyFilter(); final FilterList filters = new FilterList(prefixFilter, pageFilter, keyOnlyFilter); scan.setFilter(filters); final Table tableInterface = getTable(); final ResultScanner scanner = getScanner(tableInterface, scan); try { // the page filter may return more than one row as it is run on each // region so you may get one per region. We just want the first one we find final Result result = scanner.next(); if (result != null && result.getRow() != null) { isFound = true; } } catch (final Throwable t) { closeScanner(scanner); closeTable(tableInterface); throw new HBaseException(t.getMessage(), t); } finally { closeScanner(scanner); closeTable(tableInterface); } return isFound; }
/**
 * List events for the given entity id in decreasing order of timestamp, from the given startKey. Returns n results
 * @param entityId entity id
 * @param startKey key for the first event to be returned, used for pagination
 * @param n number of events to be returned
 * @return list of events
 * @throws AtlasException
 */
public List<EntityAuditEvent> listEvents(String entityId, String startKey, short n) throws AtlasException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Listing events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, n);
    }
    Table table = null;
    ResultScanner scanner = null;
    try {
        table = connection.getTable(tableName);

        /**
         * Scan Details:
         * In hbase, the events are stored in increasing order of timestamp. So, doing reverse scan to get the latest event first
         * Page filter is set to limit the number of results returned.
         * Stop row is set to the entity id to avoid going past the current entity while scanning
         * small is set to true to optimise RPC calls as the scanner is created per request
         */
        Scan scan = new Scan().setReversed(true).setFilter(new PageFilter(n))
                              .setStopRow(Bytes.toBytes(entityId))
                              .setCaching(n)
                              .setSmall(true);
        if (StringUtils.isEmpty(startKey)) {
            //Set start row to entity id + max long value
            byte[] entityBytes = getKey(entityId, Long.MAX_VALUE);
            scan = scan.setStartRow(entityBytes);
        } else {
            scan = scan.setStartRow(Bytes.toBytes(startKey));
        }
        scanner = table.getScanner(scan);
        Result result;
        List<EntityAuditEvent> events = new ArrayList<>();

        //PageFilter doesn't ensure n results are returned. The filter is per region server.
        //So, adding extra check on n here
        while ((result = scanner.next()) != null && events.size() < n) {
            EntityAuditEvent event = fromKey(result.getRow());

            //In case the user sets random start key, guarding against random events
            if (!event.getEntityId().equals(entityId)) {
                continue;
            }
            event.setUser(getResultString(result, COLUMN_USER));
            event.setAction(EntityAuditEvent.EntityAuditAction.valueOf(getResultString(result, COLUMN_ACTION)));
            event.setDetails(getResultString(result, COLUMN_DETAIL));
            if (persistEntityDefinition) {
                String colDef = getResultString(result, COLUMN_DEFINITION);
                if (colDef != null) {
                    event.setEntityDefinition(colDef);
                }
            }
            events.add(event);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Got events for entity id {}, starting timestamp {}, #records {}", entityId, startKey, events.size());
        }
        return events;
    } catch (IOException e) {
        throw new AtlasException(e);
    } finally {
        // close() presumably tolerates nulls when getTable/getScanner failed — TODO confirm
        close(scanner);
        close(table);
    }
}
/**
 * Creates a scan manager scoped to an account/component pair, with a default
 * PageFilter capping scans at MAX_DATA_PER_SCAN rows.
 *
 * @param accountId   owning account id
 * @param componentId owning component id
 */
public HbaseScanManager(String accountId, String componentId) {
    this.accountId = accountId;
    this.componentId = componentId;
    this.defaultPageFilter = new PageFilter(MAX_DATA_PER_SCAN);
}
/**
 * Checks whether a PageFilter requests more rows than the per-scan cap.
 *
 * @param filter the page filter to inspect
 * @return true when the requested page size exceeds MAX_DATA_PER_SCAN
 */
private boolean isPageLimitExceeded(PageFilter filter) {
    return MAX_DATA_PER_SCAN < filter.getPageSize();
}
/**
 * Caps the scan at {@code n} rows by appending a PageFilter; a value of 0
 * falls back to DEFAULT_NUM_ROWS.
 *
 * @param n maximum rows to return, or 0 for the default
 */
public void setNumRows(int n) {
    long pageSize = (n == 0) ? DEFAULT_NUM_ROWS : n;
    this.filterList.addFilter(new PageFilter(pageSize));
}
@Test public void testGetScanConfig() throws Exception { final List<Criteria.Field> fr = new ArrayList<>(); fr.add(new CriteriaImpl.FieldImpl("notifierName", "test_notifier")); fr.add(new CriteriaImpl.FieldImpl("status", "NEW")); final List<byte[]> nnList = Arrays.asList("s".getBytes(CHARSET), "qs".getBytes(CHARSET), "NEW".getBytes(CHARSET)); new Expectations() { { mockNotificationCriteria.clazz(); times = 1; result = Notification.class; mockNotificationCriteria.fieldRestrictions(); times = 1; result = fr; mockIndexMapper.getIndexedFieldNames(); times = 1; result = Arrays.asList("notifierName"); mockNotificationCriteria.numRows(); times = 1; result = 5; mockIndexMapper.mapMemberValue("status", "NEW"); times = 1; result = nnList; } }; hBaseScanConfigBuilder = new HBaseScanConfigBuilder(); hBaseScanConfigBuilder.addMappers(Notification.class, Arrays.asList(mockIndexMapper)); Criteria<Notification> eventCriteria = new CriteriaImpl<>(Notification.class); HBaseScanConfig<Notification> notificationScanConfig = hBaseScanConfigBuilder.getScanConfig(mockNotificationCriteria); System.out.println(notificationScanConfig); assertEquals(mockIndexMapper, notificationScanConfig.getMapper()); assertArrayEquals("test_notifier|0".getBytes(CHARSET), notificationScanConfig.getStartRow()); assertArrayEquals(("test_notifier|"+Long.MAX_VALUE).getBytes(CHARSET), notificationScanConfig.getStopRow()); assertEquals(2, notificationScanConfig.filterList().getFilters().size()); // column filter should be first Filter firstFilter = notificationScanConfig.filterList().getFilters().get(0); assertEquals(SingleColumnValueFilter.class, firstFilter.getClass()); // page filter should be last Filter secondFilter = notificationScanConfig.filterList().getFilters().get(1); assertEquals(PageFilter.class, secondFilter.getClass()); }