Python numpy module, int32() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.int32().
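
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of the two ways numpy.int32 appears throughout them: as a scalar cast and as an array dtype.

import numpy as np

x = np.int32(3.7)                               # scalar cast: truncates toward zero -> 3
a = np.array([1.2, 2.8, -0.5], dtype=np.int32)  # array created with an explicit 32-bit dtype
b = np.arange(5).astype(np.int32)               # cast an existing array

print(x, x.dtype)        # 3 int32
print(a)                 # values truncated toward zero: 1, 2, 0
print(b.dtype.itemsize)  # 4 bytes per element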

Project: pybot    Author: spillai
def visualize(self, vis, colored=True): 

        try: 
            tids = set(self.ids)
        except: 
            return vis

        for hid, hbox in izip(self.ids, self.bboxes): 
            cv2.rectangle(vis, (hbox[0], hbox[1]), (hbox[2], hbox[3]), (0,255,0), 1)

        vis = super(BoundingBoxKLT, self).viz(vis, colored=colored)

        # for tid, pts in self.tm_.tracks.iteritems(): 
        #     if tid not in tids: continue
        #     cv2.polylines(vis, [np.vstack(pts.items).astype(np.int32)[-4:]], False, 
        #                   (0,255,0), thickness=1)
        #     tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
        #     cv2.rectangle(vis, (tl[0], tl[1]), (br[0], br[1]), (0,255,0), -1)

        # OpenCVKLT.draw_tracks(self, vis, colored=colored, max_track_length=10)
        return vis
Project: spikefuel    Author: duguyue100
def draw_poly_box(frame, pts, color=[0, 255, 0]):
    """Draw polylines bounding box.

    Parameters
    ----------
    frame : OpenCV Mat
        A given frame with an object
    pts : numpy array
        consists of bounding box information with size (n points, 2)
    color : list
        color of the bounding box, the default is green

    Returns
    -------
    new_frame : OpenCV Mat
        A frame with given bounding box.
    """
    new_frame = frame.copy()
    temp_pts = np.array(pts, np.int32)
    temp_pts = temp_pts.reshape((-1, 1, 2))
    cv2.polylines(new_frame, [temp_pts], True, color, thickness=2)

    return new_frame
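
The essential detail above is that cv2.polylines expects integer pixel coordinates shaped (-1, 1, 2). Below is a self-contained sketch of the same pattern on a blank image (it assumes only that numpy and opencv-python are installed; the coordinates are made up):

import numpy as np
import cv2

frame = np.zeros((240, 320, 3), dtype=np.uint8)                         # blank BGR image
pts = np.array([[50, 60], [200, 60], [200, 180], [50, 180]], np.int32)  # dummy box corners
pts = pts.reshape((-1, 1, 2))                                           # shape expected by polylines
cv2.polylines(frame, [pts], True, (0, 255, 0), thickness=2)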
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def sparse_tuple_from(sequences, dtype=np.int32):
    r"""Creates a sparse representention of ``sequences``.
    Args:

        * sequences: a list of lists of type dtype where each element is a sequence

    Returns a tuple with (indices, values, shape)
    """
    indices = []
    values = []

    for n, seq in enumerate(sequences):
        indices.extend(zip([n]*len(seq), range(len(seq))))
        values.extend(seq)

    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1]+1], dtype=np.int64)

    return tf.SparseTensor(indices=indices, values=values, shape=shape)
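
A TensorFlow-free sketch of the same indexing logic, showing how the int64/int32 dtypes are fixed for the three components of the sparse tuple (illustration only, with a made-up ragged input):

import numpy as np

sequences = [[7, 8, 9], [4, 5]]                 # ragged list of lists

indices, values = [], []
for n, seq in enumerate(sequences):
    indices.extend(zip([n] * len(seq), range(len(seq))))
    values.extend(seq)

indices = np.asarray(indices, dtype=np.int64)   # (row, position) pairs
values = np.asarray(values, dtype=np.int32)     # flattened sequence values
shape = np.asarray([len(sequences), indices.max(0)[1] + 1], dtype=np.int64)

print(indices.tolist())   # [[0, 0], [0, 1], [0, 2], [1, 0], [1, 1]]
print(values.tolist())    # [7, 8, 9, 4, 5]
print(shape.tolist())     # [2, 3]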
Project: pybot    Author: spillai
def __init__(self, filename, target_map, classifier='svm'): 

        self.seed_ = 0
        self.filename_ = filename
        self.target_map_ = target_map
        self.target_ids_ = (np.unique(target_map.keys())).astype(np.int32)
        self.epoch_no_ = 0
        self.st_time_ = time.time()

        # Setup classifier
        print('-------------------------------')        
        print('====> Building Classifier, setting class weights') 
        if classifier == 'svm': 
            self.clf_hyparams_ = {'C':[0.01, 0.1, 1.0, 10.0, 100.0], 'class_weight': ['balanced']}
            self.clf_base_ = LinearSVC(random_state=self.seed_)
        elif classifier == 'sgd': 
            self.clf_hyparams_ = {'alpha':[0.0001, 0.001, 0.01, 0.1, 1.0, 10.0], 'class_weight':['auto']} # 'loss':['hinge'], 
            self.clf_ = SGDClassifier(loss='log', penalty='l2', shuffle=False, random_state=self.seed_, 
                                      warm_start=True, n_jobs=-1, n_iter=1, verbose=4)
        else: 
            raise Exception('Unknown classifier type %s. Choose from [sgd, svm, gradient-boosting, extra-trees]' 
                            % classifier)
Project: pybot    Author: spillai
def draw_hulls(im, hulls): 
    assert(isinstance(hulls, list))
    cv2.polylines(im, map(lambda hull: hull.astype(np.int32), hulls), 1, (0, 255, 0) if im.ndim == 3 else 255, thickness=1)       
    return im
Project: pybot    Author: spillai
def draw_tracks(self, out, colored=False, color_type='unique', min_track_length=4, max_track_length=4):
        """
        color_type: {age, unique}
        """

        N = 20
        # inds = self.confident_tracks(min_length=min_track_length)
        # if not len(inds): 
        #     return

        # ids, pts = self.latest_ids[inds], self.latest_pts[inds]
        # lengths = self.tm_.lengths[inds]

        ids, pts, lengths = self.latest_ids, self.latest_pts, self.tm_.lengths

        if color_type == 'unique': 
            cwheel = colormap(np.linspace(0, 1, N))
            cols = np.vstack([cwheel[tid % N] for idx, tid in enumerate(ids)])
        elif color_type == 'age': 
            cols = colormap(lengths)
        else: 
            raise ValueError('Color type {:} undefined, use age or unique'.format(color_type))

        if not colored: 
            cols = np.tile([0,240,0], [len(self.tm_.tracks), 1])

        for col, pts in izip(cols.astype(np.int64), self.tm_.tracks.itervalues()): 
            cv2.polylines(out, [np.vstack(pts.items).astype(np.int32)[-max_track_length:]], False, 
                          tuple(col), thickness=1)
            tl, br = np.int32(pts.latest_item)-2, np.int32(pts.latest_item)+2
            cv2.rectangle(out, (tl[0], tl[1]), (br[0], br[1]), tuple(col), -1)
Project: convolutional-pose-machines-tensorflow    Author: timctho
def warpImage(src, theta, phi, gamma, scale, fovy):
    halfFovy = fovy * 0.5
    d = math.hypot(src.shape[1], src.shape[0])
    sideLength = scale * d / math.cos(deg2Rad(halfFovy))
    sideLength = np.int32(sideLength)

    M = warpMatrix(src.shape[1], src.shape[0], theta, phi, gamma, scale, fovy)
    dst = cv2.warpPerspective(src, M, (sideLength, sideLength))
    mid_x = mid_y = dst.shape[0] // 2
    target_x = target_y = src.shape[0] // 2
    offset = (target_x % 2)

    if len(dst.shape) == 3:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset,
              :]
    else:
        dst = dst[mid_y - target_y:mid_y + target_y + offset,
              mid_x - target_x:mid_x + target_x + offset]

    return dst
Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,),
                                           name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,),
                                          name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")]
                            + self.labels[:-1])
Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: deep-summarization    Author: harpribot
def _load_data_graph(self):
        """
        Loads the data graph consisting of the encoder and decoder input placeholders, Label (Target tip summary)
        placeholders and the weights of the hidden layer of the Seq2Seq model.

        :return: None
        """
        # input
        with tf.variable_scope("train_test", reuse=True):
            # review input - Both original and reversed
            self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                                for t in range(self.seq_length)]
            # desired output
            self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                           for t in range(self.seq_length)]
            # weight of the hidden layer
            self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                            for labels_t in self.labels]

            # Decoder input: prepend some "GO" token and drop the final
            # token of the encoder input
            self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=np.int32, name="GO")] + self.labels[:-1])
Project: AVSR-Deep-Speech    Author: pandeydivesh15
def sparse_tuple_from(sequences, dtype=np.int32):
    r"""Creates a sparse representention of ``sequences``.
    Args:

        * sequences: a list of lists of type dtype where each element is a sequence

    Returns a tuple with (indices, values, shape)
    """
    indices = []
    values = []

    for n, seq in enumerate(sequences):
        indices.extend(zip([n]*len(seq), range(len(seq))))
        values.extend(seq)

    indices = np.asarray(indices, dtype=np.int64)
    values = np.asarray(values, dtype=dtype)
    shape = np.asarray([len(sequences), indices.max(0)[1]+1], dtype=np.int64)

    return tf.SparseTensor(indices=indices, values=values, shape=shape)
Project: nidaqmx-python    Author: ni
def _write_binary_i_32(
        task_handle, write_array, num_samps_per_chan, auto_start, timeout,
        data_layout=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_written = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxWriteBinaryI32
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, c_bool32,
                    ctypes.c_double, ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int32, flags=('C', 'W')),
                    ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, auto_start, timeout,
        data_layout.value, write_array,
        ctypes.byref(samps_per_chan_written), None)
    check_for_error(error_code)

    return samps_per_chan_written.value
Project: nidaqmx-python    Author: ni
def _read_binary_i_32(
        task_handle, read_array, num_samps_per_chan, timeout,
        fill_mode=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_read = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxReadBinaryI32
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, ctypes.c_double,
                    ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.int32, flags=('C', 'W')),
                    ctypes.c_uint, ctypes.POINTER(ctypes.c_int),
                    ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, timeout, fill_mode.value,
        read_array, numpy.prod(read_array.shape),
        ctypes.byref(samps_per_chan_read), None)
    check_for_error(error_code)

    return samps_per_chan_read.value
Project: treecat    Author: posterior
def make_complete_graph(num_vertices):
    """Constructs a complete graph.

    The pairing function is: k = v1 + v2 * (v2 - 1) // 2

    Args:
        num_vertices: Number of vertices.

    Returns: A tuple with elements:
        V: Number of vertices.
        K: Number of edges.
        grid: a 3 x K grid of (edge, vertex, vertex) triples.
    """
    V = num_vertices
    K = V * (V - 1) // 2
    grid = np.zeros([3, K], np.int32)
    k = 0
    for v2 in range(V):
        for v1 in range(v2):
            grid[:, k] = [k, v1, v2]
            k += 1
    return grid
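
A quick standalone check of the pairing function from the docstring, building the same 3 x K int32 grid for V = 4 (illustration only):

import numpy as np

V = 4
K = V * (V - 1) // 2                    # number of undirected edges
grid = np.zeros([3, K], np.int32)
for v2 in range(V):
    for v1 in range(v2):
        k = v1 + v2 * (v2 - 1) // 2     # pairing function from the docstring
        grid[:, k] = [k, v1, v2]

print(K)      # 6
print(grid)
# [[0 1 2 3 4 5]
#  [0 0 1 0 1 2]
#  [1 2 2 3 3 3]]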
Project: treecat    Author: posterior
def quantize_from_probs2(probs, resolution):
    """Quantize multiple non-normalized probs to given resolution.

    Args:
        probs: An [N, M]-shaped numpy array of non-normalized probabilities.

    Returns:
        An [N, M]-shaped array of quantized probabilities such that
        np.all(result.sum(axis=1) == resolution).
    """
    assert len(probs.shape) == 2
    N, M = probs.shape
    probs = probs / probs.sum(axis=1, keepdims=True)
    result = np.zeros(probs.shape, np.int8)
    range_N = np.arange(N, dtype=np.int32)
    for _ in range(resolution):
        sample = probs.argmax(axis=1)
        result[range_N, sample] += 1
        probs[range_N, sample] -= 1.0 / resolution
    return result
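
Re-running the same steps on a toy array confirms the invariant in the docstring: every row of the int8 result sums to the requested resolution, with np.arange(N, dtype=np.int32) serving as the row index for the fancy indexing (illustration only):

import numpy as np

probs = np.array([[0.2, 0.5, 0.3],
                  [0.9, 0.05, 0.05]])
resolution = 4

N, M = probs.shape
probs = probs / probs.sum(axis=1, keepdims=True)
result = np.zeros((N, M), np.int8)
range_N = np.arange(N, dtype=np.int32)       # int32 row index for fancy indexing
for _ in range(resolution):
    sample = probs.argmax(axis=1)
    result[range_N, sample] += 1
    probs[range_N, sample] -= 1.0 / resolution

print(result.sum(axis=1))   # [4 4]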
Project: treecat    Author: posterior
def make_ragged_index(columns):
    """Make an index to hold data in a ragged array.

    Args:
        columns: A list of [N, _]-shaped numpy arrays of varying size, where
            N is the number of rows.

    Returns:
        A [len(columns)+1]-shaped array of begin,end positions of each column.
    """
    ragged_index = np.zeros([len(columns) + 1], dtype=np.int32)
    ragged_index[0] = 0
    for v, column in enumerate(columns):
        ragged_index[v + 1] = ragged_index[v] + column.shape[-1]
    ragged_index.flags.writeable = False
    return ragged_index
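
The same begin/end positions can be computed with np.cumsum; a brief sketch with made-up column widths (the original additionally marks the result read-only via flags.writeable):

import numpy as np

columns = [np.zeros((10, 3)), np.zeros((10, 1)), np.zeros((10, 4))]   # dummy ragged columns
widths = [column.shape[-1] for column in columns]
ragged_index = np.concatenate([[0], np.cumsum(widths)]).astype(np.int32)

print(ragged_index)   # [0 3 4 8]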
Project: neurobind    Author: Kyubyong
def get_batch_data():
    # Load data
    X, Y = load_data()

    # calc total batch count
    num_batch = len(X) // hp.batch_size

    # Convert to tensor
    X = tf.convert_to_tensor(X, tf.int32)
    Y = tf.convert_to_tensor(Y, tf.float32)

    # Create Queues
    input_queues = tf.train.slice_input_producer([X, Y])

    # create batch queues
    x, y = tf.train.batch(input_queues,
                          num_threads=8,
                          batch_size=hp.batch_size,
                          capacity=hp.batch_size * 64,
                          allow_smaller_final_batch=False)

    return x, y, num_batch  # (N, T), (N, T), ()
Project: pyelastix    Author: almarklein
def _get_dtype_maps():
    """ Get dictionaries to map numpy data types to ITK types and the 
    other way around.
    """

    # Define pairs
    tmp = [ (np.float32, 'MET_FLOAT'),  (np.float64, 'MET_DOUBLE'),
            (np.uint8, 'MET_UCHAR'),    (np.int8, 'MET_CHAR'),
            (np.uint16, 'MET_USHORT'),  (np.int16, 'MET_SHORT'),
            (np.uint32, 'MET_UINT'),    (np.int32, 'MET_INT'),
            (np.uint64, 'MET_ULONG'),   (np.int64, 'MET_LONG') ]

    # Create dictionaries
    map1, map2 = {}, {}
    for np_type, itk_type in tmp:
        map1[np_type.__name__] = itk_type
        map2[itk_type] = np_type.__name__

    # Done
    return map1, map2
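
The maps above are keyed on the numpy type name string; a small lookup sketch with a trimmed-down two-entry map (the MET_* strings come from the table above, everything else is illustrative):

import numpy as np

print(np.int32.__name__)          # 'int32'
print(np.dtype(np.int32).name)    # 'int32'

np_to_itk = {np.int32.__name__: 'MET_INT', np.float32.__name__: 'MET_FLOAT'}
arr = np.zeros(4, dtype=np.int32)
print(np_to_itk[arr.dtype.name])  # 'MET_INT'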
Project: cellranger    Author: 10XGenomics
def reg2bin_vector(begin, end):
    '''Vectorized tabix reg2bin -- much faster than reg2bin'''
    result = np.zeros(begin.shape)

    # Entries filled
    done = np.zeros(begin.shape, dtype=np.bool)

    for (bits, bins) in rev_bit_bins:
        begin_shift = begin >> bits
        new_done = (begin >> bits) == (end >> bits)
        mask = np.logical_and(new_done, np.logical_not(done))
        offset = ((1 << (29 - bits)) - 1) / 7
        result[mask] = offset + begin_shift[mask]

        done = new_done

    return result.astype(np.int32)
Project: cellranger    Author: 10XGenomics
def get_depth_info(read_iter, chrom, cstart, cend):

    depths = np.zeros(cend-cstart, np.int32)

    for read in read_iter:
        pos = read.pos
        rstart = max(pos, cstart)

        # Increment to the end of the window or the end of the
        # alignment, whichever comes first
        rend = min(read.aend, cend)
        depths[(rstart-cstart):(rend-cstart)] += 1

    positions = np.arange(cstart, cend, dtype=np.int32)

    depth_df = pd.DataFrame({"chrom": chrom, "pos": positions, "coverage": depths})
    return depth_df
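
A numpy-only sketch of the same depth accumulation with made-up read intervals in place of pysam reads (the pandas frame is omitted; illustration only):

import numpy as np

cstart, cend = 100, 110
reads = [(98, 104), (102, 108), (105, 120)]   # dummy (pos, aend) pairs

depths = np.zeros(cend - cstart, np.int32)
for pos, aend in reads:
    rstart = max(pos, cstart)
    rend = min(aend, cend)                    # clip to the window
    depths[(rstart - cstart):(rend - cstart)] += 1

positions = np.arange(cstart, cend, dtype=np.int32)
print(depths.tolist())   # [1, 1, 2, 2, 1, 2, 2, 2, 1, 1]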
Project: pi_gcs    Author: lbusoni
def getDataRecorderConfiguration(self):
        nRecorders= self.getNumberOfRecorderTables()
        sourceBufSize= 256
        source= ctypes.create_string_buffer('\000', sourceBufSize)
        option= CIntArray(np.zeros(nRecorders, dtype=np.int32))
        table=CIntArray(np.arange(1, nRecorders + 1))

        self._lib.PI_qDRC.argtypes= [c_int, CIntArray, c_char_p,
                                     CIntArray, c_int, c_int]

        self._convertErrorToException(
            self._lib.PI_qDRC(self._id, table, source,
                              option, sourceBufSize, nRecorders))

        sources= [x.strip() for x in source.value.split('\n')]
        cfg= DataRecorderConfiguration()
        for i in range(nRecorders):
            cfg.setTable(table.toNumpyArray()[i],
                         sources[i],
                         option.toNumpyArray()[i])
        return cfg
Project: Deep360Pilot-optical-flow    Author: yenchenlin
def read_flow(path, filename):
    flowdata = None
    with open(path + filename + '.flo') as f:
        # Valid .flo file checker
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print 'Magic number incorrect. Invalid .flo file'
        else:
            # Reshape data into 3D array (columns, rows, bands)
            w = int(np.fromfile(f, np.int32, count=1))
            h = int(np.fromfile(f, np.int32, count=1))
            #print 'Reading {}.flo with shape: ({}, {}, 2)'.format(filename, h, w)
            flowdata = np.fromfile(f, np.float32, count=2*w*h)

            # NOTE: numpy shape(h, w, ch) is opposite to image shape(w, h, ch)
            flowdata = np.reshape(flowdata, (h, w, 2))

    return flowdata
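
A self-contained sketch of the np.fromfile pattern used above, writing and then reading a couple of int32 header fields followed by a float32 payload (a toy layout, not the real .flo format):

import numpy as np
import os, tempfile

path = os.path.join(tempfile.mkdtemp(), 'toy.bin')

with open(path, 'wb') as f:
    np.array([3, 2], dtype=np.int32).tofile(f)        # width, height header
    np.arange(3 * 2, dtype=np.float32).tofile(f)      # payload

with open(path, 'rb') as f:
    w = int(np.fromfile(f, np.int32, count=1)[0])
    h = int(np.fromfile(f, np.int32, count=1)[0])
    data = np.fromfile(f, np.float32, count=w * h).reshape(h, w)

print(w, h, data.shape)   # 3 2 (2, 3)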
Project: rank-ordered-autoencoder    Author: paulbertens
def __init__(self, input_shape, output_shape):
        self.input_shape = input_shape
        self.input = np.zeros((output_shape[0], self.input_shape[0] * self.input_shape[1] *
                               self.input_shape[2]),dtype=np.float32)
        self.output = np.zeros(output_shape, dtype=np.float32)
        self.output_raw = np.zeros_like(self.output)
        self.output_error = np.zeros_like(self.output)
        self.output_average = np.zeros(self.output.shape[1], dtype=np.float32)
        self.weights = np.random.normal(0, np.sqrt(2.0 / (self.output.shape[1] + self.input.shape[1])),
                                        size=(self.input.shape[1], self.output.shape[1])).astype(np.float32)
        self.gradient = np.zeros_like(self.weights)
        self.reconstruction = np.zeros_like(self.weights)
        self.errors = np.zeros_like(self.weights)
        self.output_ranks = np.zeros(self.output.shape[1], dtype=np.int32)
        self.learning_rate = 1
        self.norm_limit = 0.1
Project: genomedisco    Author: kundajelab
def load_data(infile, chroms, resolutions):
    starts = infile['starts'][...]
    chromosomes = infile['chromosomes'][...]
    data = {}
    for res in resolutions:
        data[res] = {}
        for i, chrom in enumerate(chromosomes):
            if chrom not in chroms:
                continue
            start = (starts[i] / res) * res
            dist = infile['dist.%s.%i' % (chrom, res)][...]
            valid_rows = infile['valid.%s.%i' % (chrom, res)][...]
            corr = infile['corr.%s.%i' % (chrom, res)][...]
            valid = numpy.zeros(corr.shape, dtype=numpy.bool)
            N, M = corr.shape
            valid = numpy.zeros((N, M), dtype=numpy.int32)
            for i in range(min(N - 1, M)):
                P = N - i - 1
                valid[:P, i] = valid_rows[(i + 1):] * valid_rows[:P]
            temp = corr * dist
            valid[numpy.where(numpy.abs(temp) == numpy.inf)] = False
            data[res][chrom] = [start, temp, valid]
    return data
Project: genomedisco    Author: kundajelab
def load_data(infile, chroms, resolutions):
    starts = infile['starts'][...]
    chromosomes = infile['chromosomes'][...]
    data = {}
    for res in resolutions:
        data[res] = {}
        for i, chrom in enumerate(chromosomes):
            if chrom not in chroms:
                continue
            start = (starts[i] / res) * res
            dist = infile['dist.%s.%i' % (chrom, res)][...]
            valid_rows = infile['valid.%s.%i' % (chrom, res)][...]
            corr = infile['corr.%s.%i' % (chrom, res)][...]
            valid = numpy.zeros(corr.shape, dtype=numpy.bool)
            N, M = corr.shape
            valid = numpy.zeros((N, M), dtype=numpy.int32)
            for i in range(min(N - 1, M)):
                P = N - i - 1
                valid[:P, i] = valid_rows[(i + 1):] * valid_rows[:P]
            temp = corr * dist
            valid[numpy.where(numpy.abs(temp) == numpy.inf)] = False
            data[res][chrom] = [start, temp, valid]
    return data
Project: sampleRNN_ICLR2017    Author: soroushmehr
def __linear_quantize(data, q_levels):
    """
    floats in (0, 1) to ints in [0, q_levels-1]
    scales normalized across axis 1
    """
    # Normalization is on mini-batch not whole file
    #eps = numpy.float64(1e-5)
    #data -= data.min(axis=1)[:, None]
    #data *= ((q_levels - eps) / data.max(axis=1)[:, None])
    #data += eps/2
    #data = data.astype('int32')

    eps = numpy.float64(1e-5)
    data *= (q_levels - eps)
    data += eps/2
    data = data.astype('int32')
    return data
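
A standalone version of the active (non-commented) branch on synthetic data, keeping the same eps handling (illustration only):

import numpy as np

q_levels = 256
data = np.random.RandomState(0).rand(4, 8)        # floats in (0, 1)

eps = np.float64(1e-5)
quantized = (data * (q_levels - eps) + eps / 2).astype('int32')

print(quantized.dtype)                                        # int32
print(quantized.min() >= 0, quantized.max() <= q_levels - 1)  # True True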
Project: HandDetection    Author: YunqiuXu
def generate_anchors_pre(height, width, feat_stride, anchor_scales=(8,16,32), anchor_ratios=(0.5,1,2)):
  """ A wrapper function to generate anchors given different scales
    Also return the number of anchors in variable 'length'
  """
  anchors = generate_anchors(ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))
  A = anchors.shape[0]
  shift_x = np.arange(0, width) * feat_stride
  shift_y = np.arange(0, height) * feat_stride
  shift_x, shift_y = np.meshgrid(shift_x, shift_y)
  shifts = np.vstack((shift_x.ravel(), shift_y.ravel(), shift_x.ravel(), shift_y.ravel())).transpose()
  K = shifts.shape[0]
  # width changes faster, so here it is H, W, C
  anchors = anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2))
  anchors = anchors.reshape((K * A, 4)).astype(np.float32, copy=False)
  length = np.int32(anchors.shape[0])

  return anchors, length
Project: cxflow-tensorflow    Author: Cognexa
def test_repeat(self):
        """ Test if `repeat` works the same as np.repeat."""

        with tf.Session().as_default():
            # try different tensor types
            for npdtype, tfdtype in [(np.int32, tf.int32), (np.float32, tf.float32)]:
                for init_value in [np.array([0, 1, 2, 3], dtype=npdtype),
                                   np.array([[0, 1], [2, 3], [4, 5]], dtype=npdtype)]:
                    # and all their axes
                    for axis in range(len(init_value.shape)):
                        for repeats in [1, 2, 3, 11]:
                            tensor = tf.constant(init_value, dtype=tfdtype)

                            repeated_value = repeat(tensor, repeats=repeats, axis=axis).eval()
                            expected_value = np.repeat(init_value, repeats=repeats, axis=axis)

                            self.assertTrue(np.all(repeated_value == expected_value))
Project: pybot    Author: spillai
def __init__(self, target, instance, files): 
            self.target = target 
            self.instance = instance
            mask_files = natural_sort(filter(lambda fn: '_maskcrop.png' in fn, files))
            depth_files = natural_sort(filter(lambda  fn: '_depthcrop.png' in fn, files))
            rgb_files = natural_sort(list(set(files) - set(mask_files) - set(depth_files)))
            loc_files = natural_sort(map(lambda fn: fn.replace('_crop.png', '_loc.txt'), rgb_files))

            # Ensure all have equal number of files (Hack! doesn't ensure filename consistency)
            nfiles = np.min([len(loc_files), len(mask_files), len(depth_files), len(rgb_files)])
            mask_files, depth_files, rgb_files, loc_files = mask_files[:nfiles], depth_files[:nfiles], \
                                                            rgb_files[:nfiles], loc_files[:nfiles]

            # print target, instance, len(loc_files), len(mask_files), len(depth_files), len(rgb_files)
            assert(len(mask_files) == len(depth_files) == len(rgb_files) == len(loc_files))

            # Read images
            self.rgb = ImageDatasetReader.from_filenames(rgb_files)
            self.depth = ImageDatasetReader.from_filenames(depth_files)
            self.mask = ImageDatasetReader.from_filenames(mask_files)

            # Read top-left locations of bounding box
            self.locations = np.vstack([np.loadtxt(loc, delimiter=',', dtype=np.int32) 
                                        for loc in loc_files])
Project: pybot    Author: spillai
def frame_to_json(bboxes, targets): 
    """
    {'polygon': [{'x': [1,2,3], 'y': [2,3,4], 'object': 3}]}
    Also decorated (see decorate_frame with pretty_names, polygons, targets)
    """

    assert(len(bboxes) == len(targets))

    if len(bboxes): 
        bb = bboxes.astype(np.int32)
        return {'polygon': 
                [{'x': [int(b[0]), int(b[0]), int(b[2]), int(b[2])], 
                  'y': [int(b[1]), int(b[3]), int(b[3]), int(b[1])], 
                  'object': int(object_id)} \
                 for object_id, b in zip(targets, bb)]}
    else: 
        return {}
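
A hedged sketch of the same conversion with a single made-up box, showing the astype(np.int32) truncation before the coordinates are turned into plain Python ints for JSON:

import numpy as np

bboxes = np.array([[10.6, 20.2, 110.9, 80.1]])    # dummy (x1, y1, x2, y2) box
targets = [3]                                      # dummy object id

bb = bboxes.astype(np.int32)                       # truncate to integer pixel coordinates
frame = {'polygon': [{'x': [int(b[0]), int(b[0]), int(b[2]), int(b[2])],
                      'y': [int(b[1]), int(b[3]), int(b[3]), int(b[1])],
                      'object': int(object_id)}
                     for object_id, b in zip(targets, bb)]}

print(frame)
# {'polygon': [{'x': [10, 10, 110, 110], 'y': [20, 80, 80, 20], 'object': 3}]}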
Project: pybot    Author: spillai
def im_detect_and_describe(img, mask=None, detector='dense', descriptor='SIFT', colorspace='gray',
                           step=4, levels=7, scale=np.sqrt(2)): 
    """ 
    Describe image using dense sampling / specific detector-descriptor combination. 
    """
    detector = get_detector(detector=detector, step=step, levels=levels, scale=scale)
    extractor = cv2.DescriptorExtractor_create(descriptor)

    try:     
        kpts = detector.detect(img, mask=mask)
        kpts, desc = extractor.compute(img, kpts)

        if descriptor == 'SIFT': 
            kpts, desc = root_sift(kpts, desc)

        pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
        return pts, desc

    except Exception as e: 
        print 'im_detect_and_describe', e
        return None, None
Project: pybot    Author: spillai
def im_describe(*args, **kwargs): 
    """ 
    Describe image using dense sampling / specific detector-descriptor combination. 
    Sugar for description-only call. 
    """
    kpts, desc = im_detect_and_describe(*args, **kwargs)
    return desc

# def color_codes(img, kpts): 
#     # Extract color information (Lab)
#     pts = np.vstack([kp.pt for kp in kpts]).astype(np.int32)
#     imgc = median_blur(img, size=5) 
#     cdesc = img[pts[:,1], pts[:,0]]
#     return kpts, np.hstack([desc, cdesc])


# =====================================================================
# General-purpose object recognition interfaces, and functions
# ---------------------------------------------------------------------
Project: pybot    Author: spillai
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1)
    fx, fy = flow[y,x].T
    m = np.bitwise_and(np.isfinite(fx), np.isfinite(fy))
    lines = np.vstack([x[m], y[m], x[m]+fx[m], y[m]+fy[m]]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
Project: lung-cancer-detector    Author: YichenGong
def _pre_process_XMLs(self):
        print("Pre-processing XMLs...")

        nodule_info_list = lidc_xml_parser.load_xmls(self._xmls)
        #Create a more sensible list for iteration
        #over the dataset of nodules
        self._nodule_info = {}
        for nodule_info in nodule_info_list:
            series = nodule_info['header']['uid']
            if series not in self._nodule_info:
                self._nodule_info[series] = []

            for nodule in nodule_info['readings']:
                #We'll ignore the Non-Nodules right now
                if nodule.is_nodule():
                    for roi in nodule.get_roi():
                        z = roi.z
                        iid = roi.image_uid
                        vertices = np.array([(edge.x, edge.y) for edge in roi.get_edges()], np.int32).reshape((-1, 1, 2))
                        self._nodule_info[series].append((iid, z, vertices))

        print("XMLs pre-processing completes...")
Project: brain_segmentation    Author: Ryo-Ito
def validate(model):
    dice_coefs = []
    for image_path, label_path in zip(df_val["image"], df_val["label"]):
        image = load_nifti(image_path)
        label = load_nifti(label_path)
        centers = [[], [], []]
        for img_len, len_out, center, n_tile in zip(image.shape, args.output_shape, centers, args.n_tiles):
            assert img_len < len_out * n_tile, "{} must be smaller than {} x {}".format(img_len, len_out, n_tile)
            stride = int((img_len - len_out) / (n_tile - 1))
            center.append(len_out / 2)
            for i in range(n_tile - 2):
                center.append(center[-1] + stride)
            center.append(img_len - len_out / 2)
        output = np.zeros((dataset["n_classes"],) + image.shape[:-1])
        for x, y, z in itertools.product(*centers):
            patch = crop_patch(image, [x, y, z], args.input_shape)
            patch = np.expand_dims(patch, 0)
            patch = xp.asarray(patch)
            slices_out = [slice(center - len_out / 2, center + len_out / 2) for len_out, center in zip(args.output_shape, [x, y, z])]
            slices_in = [slice((len_in - len_out) / 2, len_in - (len_in - len_out) / 2) for len_out, len_in, in zip(args.output_shape, args.input_shape)]
            output[slice(None), slices_out[0], slices_out[1], slices_out[2]] += chainer.cuda.to_cpu(model(patch).data[0, slice(None), slices_in[0], slices_in[1], slices_in[2]])
        y = np.argmax(output, axis=0).astype(np.int32)
        dice_coefs.append(dice_coefficients(y, label, labels=range(dataset["n_classes"])))
    dice_coefs = np.array(dice_coefs)
    return np.mean(dice_coefs, axis=0)
Project: NeoAnalysis    Author: neoanalysis
def __init__(self, filename):
        """
        filename: string, path to ASCII file to read.
        """

        self.filename = filename

        # read the first line to check the data type (int or float) of the data
        f = open(self.filename)
        line = f.readline()

        additional_parameters = {}
        if '.' not in line:
            additional_parameters['dtype'] = np.int32

        self.data = np.loadtxt(self.filename, **additional_parameters)

        if len(self.data.shape) == 1:
            self.data = self.data[:, np.newaxis]
Project: NeoAnalysis    Author: neoanalysis
def __init__(self, filename):
        """
        filename: string, path to ASCII file to read.
        """

        self.filename = filename

        # read the first line to check the data type (int or float) of the data
        f = open(self.filename)
        line = f.readline()

        additional_parameters = {}
        if '.' not in line:
            additional_parameters['dtype'] = np.int32

        self.data = np.loadtxt(self.filename, **additional_parameters)

        if len(self.data.shape) == 1:
            self.data = self.data[:, np.newaxis]
Project: Projects    Author: it2school
def array2d(surface):
    """pygame.numpyarray.array2d(Surface): return array

    copy pixels into a 2d array

    Copy the pixels from a Surface into a 2D array. The bit depth of the
    surface will control the size of the integer values, and will work
    for any type of pixel format.

    This function will temporarily lock the Surface as pixels are copied
    (see the Surface.lock - lock the Surface memory for pixel access
    method).
    """
    bpp = surface.get_bytesize()
    try:
        dtype = (numpy.uint8, numpy.uint16, numpy.int32, numpy.int32)[bpp - 1]
    except IndexError:
        raise ValueError("unsupported bit depth %i for 2D array" % (bpp * 8,))
    size = surface.get_size()
    array = numpy.empty(size, dtype)
    surface_to_array(array, surface)
    return array
Project: Projects    Author: it2school
def map_array(surface, array):
    """pygame.numpyarray.map_array(Surface, array3d): return array2d

    map a 3d array into a 2d array

    Convert a 3D array into a 2D array. This will use the given Surface
    format to control the conversion.

    Note: arrays do not need to be 3D, as long as the minor axis has
    three elements giving the component colours, any array shape can be
    used (for example, a single colour can be mapped, or an array of
    colours). The array shape is limited to eleven dimensions maximum,
    including the three element minor axis.
    """
    if array.ndim == 0:
        raise ValueError("array must have at least 1 dimension")
    shape = array.shape
    if shape[-1] != 3:
        raise ValueError("array must be a 3d array of 3-value color data")
    target = numpy_empty(shape[:-1], numpy.int32)
    pix_map_array(target, array, surface)
    return target
Project: text_classification    Author: brightmart
def test():
    #below is a function test; if you use this for text classification, you need to transform the sentence to indices of the vocabulary first, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y=np.array([1,0,1,1,1,2,1,1]) #np.zeros((batch_size),dtype=np.int32) #[None, self.sequence_length]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
Project: text_classification    Author: brightmart
def test():
    #below is a function test; if you use this for text classification, you need to transform the sentence to indices of the vocabulary first, then feed the data to the graph.
    num_classes=10
    learning_rate=0.01
    batch_size=8
    decay_steps=1000
    decay_rate=0.9
    sequence_length=5
    vocab_size=10000
    embed_size=100
    is_training=True
    dropout_keep_prob=1#0.5
    textRNN=TextRCNN(num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,embed_size,is_training)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(100):
            input_x=np.zeros((batch_size,sequence_length)) #[None, self.sequence_length]
            input_y=np.array([1,0,1,1,1,2,1,1]) #np.zeros((batch_size),dtype=np.int32) #[None, self.sequence_length]
            loss,acc,predict,_=sess.run([textRNN.loss_val,textRNN.accuracy,textRNN.predictions,textRNN.train_op],
                                        feed_dict={textRNN.input_x:input_x,textRNN.input_y:input_y,textRNN.dropout_keep_prob:dropout_keep_prob})
            print("loss:",loss,"acc:",acc,"label:",input_y,"prediction:",predict)
#test()
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def parse_all(self, fp):
        vectors = []
        for line in fp.readlines():
            try:
                program = line.strip().split()
                vector = self.vectorize_program(program)[0]
                self.parse(vector)
                vectors.append(vector)
            except ValueError as e:
                print(e)
        return np.array(vectors, dtype=np.int32)
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def get_init_state(self, batch_size):
        return tf.ones((batch_size,), dtype=tf.int32) * self.start_state
Project: almond-nnparser    Author: Stanford-Mobisocial-IoT-Lab
def vectorize(sentence, words, max_length, add_eos=False):
    vector = np.zeros((max_length,), dtype=np.int32)
    assert words['<<PAD>>'] == 0
    #vector[0] = words['<<GO>>']
    if isinstance(sentence, str):
        sentence = sentence.split(' ')
    i = 0
    for i, word in enumerate(sentence):
        word = word.strip()
        if len(word) == 0:
            raise ValueError("empty token in " + str(sentence))
        if word in words:
            vector[i] = words[word]
        elif '<<UNK>>' in words:
            unknown_tokens.add(word)
            #print("sentence: ", sentence, "; word: ", word)
            vector[i] = words['<<UNK>>']
        else:
            raise ValueError('Unknown token ' + word)
        if i+1 == max_length:
            break
    length = i+1
    if add_eos:
        if length < max_length:
            vector[length] = words['<<EOS>>']
            length += 1
        else:
            print("unterminated sentence", sentence)
    else:
        if length == max_length and length < len(sentence):
            print("truncated sentence", sentence)
    return (vector, length)
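
A toy, self-contained version of the same padding/indexing pattern with a tiny hypothetical vocabulary (the <<PAD>>/<<UNK>> conventions follow the snippet above):

import numpy as np

words = {'<<PAD>>': 0, '<<UNK>>': 1, 'hello': 2, 'world': 3}   # hypothetical vocabulary
max_length = 6

sentence = 'hello strange world'.split(' ')
vector = np.zeros((max_length,), dtype=np.int32)               # positions left at 0 stay <<PAD>>
for i, word in enumerate(sentence[:max_length]):
    vector[i] = words.get(word, words['<<UNK>>'])

print(vector)          # [2 1 3 0 0 0]
print(vector.dtype)    # int32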
Project: nidaqmx-python    Author: ni
def set_analog_power_up_states_with_output_type(
            self, power_up_states):
        """
        Updates power up states for analog physical channels.

        Args:
            power_up_states (List[nidaqmx.types.AOPowerUpState]):
                Contains the physical channels and power up states to
                set. Each element of the list contains a physical channel
                and the power up state to set for that physical channel.

                - physical_channel (str): Specifies the physical channel to
                  modify.
                - power_up_state (float): Specifies the power up state
                  to set for the physical channel specified with the
                  **physical_channel** input.
                - channel_type (:class:`nidaqmx.constants.AOPowerUpOutputBehavior`):
                  Specifies the output type for the physical channel
                  specified with the **physical_channel** input.
        """
        physical_channel = flatten_channel_string(
            [p.physical_channel for p in power_up_states])
        state = numpy.float64(
            [p.power_up_state for p in power_up_states])
        channel_type = numpy.int32(
            [p.channel_type.value for p in power_up_states])

        cfunc = lib_importer.cdll.DAQmxSetAnalogPowerUpStatesWithOutputType
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        ctypes_byte_str,
                        wrapped_ndpointer(dtype=numpy.float64,
                                          flags=('C','W')),
                        wrapped_ndpointer(dtype=numpy.int32,
                                          flags=('C','W'))]

        error_code = cfunc(
            physical_channel, state, channel_type, len(power_up_states))
        check_for_error(error_code)
Project: nidaqmx-python    Author: ni
def ai_meas_types(self):
        """
        List[:class:`nidaqmx.constants.UsageTypeAI`]: Indicates the
            measurement types supported by the channel.
        """
        cfunc = lib_importer.windll.DAQmxGetPhysicalChanAISupportedMeasTypes
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        ctypes_byte_str, wrapped_ndpointer(dtype=numpy.int32,
                        flags=('C','W')), ctypes.c_uint]

        temp_size = 0
        while True:
            val = numpy.zeros(temp_size, dtype=numpy.int32)

            size_or_code = cfunc(
                self._name, val, temp_size)

            if is_array_buffer_too_small(size_or_code):
                # Buffer size must have changed between calls; check again.
                temp_size = 0
            elif size_or_code > 0 and temp_size == 0:
                # Buffer size obtained, use to retrieve data.
                temp_size = size_or_code
            else:
                break

        check_for_error(size_or_code)

        return [UsageTypeAI(e) for e in val]
Project: nidaqmx-python    Author: ni
def ao_output_types(self):
        """
        List[:class:`nidaqmx.constants.UsageTypeAO`]: Indicates the
            output types supported by the channel.
        """
        cfunc = (lib_importer.windll.
                 DAQmxGetPhysicalChanAOSupportedOutputTypes)
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        ctypes_byte_str, wrapped_ndpointer(dtype=numpy.int32,
                        flags=('C','W')), ctypes.c_uint]

        temp_size = 0
        while True:
            val = numpy.zeros(temp_size, dtype=numpy.int32)

            size_or_code = cfunc(
                self._name, val, temp_size)

            if is_array_buffer_too_small(size_or_code):
                # Buffer size must have changed between calls; check again.
                temp_size = 0
            elif size_or_code > 0 and temp_size == 0:
                # Buffer size obtained, use to retrieve data.
                temp_size = size_or_code
            else:
                break

        check_for_error(size_or_code)

        return [UsageTypeAO(e) for e in val]
Project: nidaqmx-python    Author: ni
def ao_power_up_output_types(self):
        """
        List[:class:`nidaqmx.constants.AOPowerUpOutputBehavior`]:
            Indicates the power up output types supported by the
            channel.
        """
        cfunc = (lib_importer.windll.
                 DAQmxGetPhysicalChanAOSupportedPowerUpOutputTypes)
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        ctypes_byte_str, wrapped_ndpointer(dtype=numpy.int32,
                        flags=('C','W')), ctypes.c_uint]

        temp_size = 0
        while True:
            val = numpy.zeros(temp_size, dtype=numpy.int32)

            size_or_code = cfunc(
                self._name, val, temp_size)

            if is_array_buffer_too_small(size_or_code):
                # Buffer size must have changed between calls; check again.
                temp_size = 0
            elif size_or_code > 0 and temp_size == 0:
                # Buffer size obtained, use to retrieve data.
                temp_size = size_or_code
            else:
                break

        check_for_error(size_or_code)

        return [AOPowerUpOutputBehavior(e) for e in val]
Project: nidaqmx-python    Author: ni
def co_output_types(self):
        """
        List[:class:`nidaqmx.constants.UsageTypeCO`]: Indicates the
            output types supported by the channel.
        """
        cfunc = (lib_importer.windll.
                 DAQmxGetPhysicalChanCOSupportedOutputTypes)
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        ctypes_byte_str, wrapped_ndpointer(dtype=numpy.int32,
                        flags=('C','W')), ctypes.c_uint]

        temp_size = 0
        while True:
            val = numpy.zeros(temp_size, dtype=numpy.int32)

            size_or_code = cfunc(
                self._name, val, temp_size)

            if is_array_buffer_too_small(size_or_code):
                # Buffer size must have changed between calls; check again.
                temp_size = 0
            elif size_or_code > 0 and temp_size == 0:
                # Buffer size obtained, use to retrieve data.
                temp_size = size_or_code
            else:
                break

        check_for_error(size_or_code)

        return [UsageTypeCO(e) for e in val]