Python numpy module: hsplit() example source code

We have extracted the following 34 code examples from open-source Python projects to illustrate how to use numpy.hsplit().
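
Before the project examples, a minimal sketch (with a made-up 3x4 array) of the two calling conventions numpy.hsplit() accepts: an integer number of equal sections, or a list of column indices to split at.

import numpy as np

a = np.arange(12).reshape(3, 4)

# Split into two equal halves along the columns (axis=1 for 2-D input)
left, right = np.hsplit(a, 2)
print(left.shape, right.shape)   # (3, 2) (3, 2)

# Split at explicit column indices: columns [0:1), [1:3), [3:4)
first, middle, last = np.hsplit(a, [1, 3])
print(first.shape, middle.shape, last.shape)   # (3, 1) (3, 2) (3, 1)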

Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | Project source | File source
def xyz_at_latitude(local_xyz, lat):
    """
    Rotate local XYZ coordinates into celestial XYZ coordinates. These
    coordinate systems are very similar, with X pointing towards the
    geographical east in both cases. However, before the rotation Z
    points towards the zenith, whereas afterwards it will point towards
    celestial north (parallel to the earth axis).

    :param lat: target latitude (radians or astropy quantity)
    :param local_xyz: Array of local XYZ coordinates
    :return: Celestial XYZ coordinates
    """

    x, y, z = numpy.hsplit(local_xyz, 3)

    lat2 = numpy.pi / 2 - lat
    y2 = -z * numpy.sin(lat2) + y * numpy.cos(lat2)
    z2 = z * numpy.cos(lat2) + y * numpy.sin(lat2)

    return numpy.hstack([x, y2, z2])
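
The function above relies on a common hsplit pattern: split an (N, 3) coordinate array into three (N, 1) column vectors, transform them with broadcasting, and hstack them back together. A minimal sketch with made-up coordinates:

import numpy as np

# Hypothetical antenna layout: 4 stations, one (x, y, z) row each
local_xyz = np.arange(12.0).reshape(4, 3)

x, y, z = np.hsplit(local_xyz, 3)   # three (4, 1) column vectors
print(x.shape)                      # (4, 1)

# Column-wise arithmetic keeps the (N, 1) shape, so hstack restores (N, 3)
print(np.hstack([x, y, z]).shape)   # (4, 3)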
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def paint_latents( event ):
   global r, Z, output,painted_rects,MASK,USER_MASK,RECON 

   # Get extent of latent paintbrush
   x1, y1 = ( event.x - d.get() ), ( event.y - d.get() )
   x2, y2 = ( event.x + d.get() ), ( event.y + d.get() )

   selected_widget = event.widget

   # Paint in latent space and update Z
   painted_rects.append(event.widget.create_rectangle( x1, y1, x2, y2, fill = rb(color.get()),outline = rb(color.get()) ))
   r[max((y1-bd),0):min((y2-bd),r.shape[0]),max((x1-bd),0):min((x2-bd),r.shape[1])] = color.get()/255.0;
   Z = np.asarray([np.mean(o) for v in [np.hsplit(h,Z.shape[0])\
                                                         for h in np.vsplit((r),Z.shape[1])]\
                                                         for o in v]).reshape(Z.shape[0],Z.shape[1])
   if SAMPLE_FLAG:
        update_photo(None,output)
        update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
   else:     
        DELTA = model.sample_at(np.float32([Z.flatten()]))[0]-to_tanh(np.float32(RECON))
        MASK=scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA),axis=0),np.ones((64,64))],axis=0),0.7)
        # D = dampen(to_tanh(np.float32(RECON)),MASK*DELTA+(1-MASK)*ERROR)
        D = MASK*DELTA+(1-MASK)*ERROR
        IM = np.uint8(from_tanh(to_tanh(RECON)+D))
        update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
        update_photo(IM,output)  

# Scroll to lighten or darken an image patch
Project: cupy    Author: cupy    | Project source | File source
def hsplit(ary, indices_or_sections):
    """Splits an array into multiple sub arrays horizontally.

    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and otherwise that with ``axis=1``.

    .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`

    """
    if ary.ndim == 0:
        raise ValueError('Cannot hsplit a zero-dimensional array')
    if ary.ndim == 1:
        return split(ary, indices_or_sections, 0)
    else:
        return split(ary, indices_or_sections, 1)
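
cupy.hsplit mirrors numpy.hsplit, so the dimension-dependent axis choice the docstring describes can be illustrated with NumPy:

import numpy as np

v = np.arange(6)                 # 1-D: hsplit behaves like split(..., axis=0)
print([p.tolist() for p in np.hsplit(v, 3)])   # [[0, 1], [2, 3], [4, 5]]

m = np.arange(6).reshape(2, 3)   # 2-D: hsplit splits along axis=1 (columns)
print([p.tolist() for p in np.hsplit(m, 3)])
# [[[0], [3]], [[1], [4]], [[2], [5]]]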
Project: uncover-ml    Author: GeoscienceAustralia    | Project source | File source
def apply_multiple_masked(func, data, args=(), kwargs={}):
    # Data is a sequence of arrays (i.e. X, y pairs for training)

    datastack = []
    dims = []
    flat = []
    for d in data:
        if d.ndim == 2:
            datastack.append(d)
            dims.append(d.shape[1])
            flat.append(False)
        elif d.ndim == 1:
            datastack.append(d[:, np.newaxis])
            dims.append(1)
            flat.append(True)
        else:
            raise RuntimeError("data arrays have to be 1 or 2D arrays")

    # Decorate functions to work on stacked data
    dims = np.cumsum(dims[:-1])  # don't split by last dim

    unstack = lambda catdata: [d.flatten() if f else d for d, f
                               in zip(np.hsplit(catdata, dims), flat)]

    unstackfunc = lambda catdata, *nargs, **nkwargs: \
        func(*chain(unstack(catdata), nargs), **nkwargs)

    return apply_masked(unstackfunc, np.ma.hstack(datastack), args, kwargs)


#
# Static module properties
#

# Add all models available to the learning pipeline here!
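
The stack/unstack round trip that apply_multiple_masked performs, reduced to a sketch with hypothetical X and y arrays: np.cumsum over the column widths (dropping the last) yields exactly the split points np.hsplit needs to undo the hstack.

import numpy as np

# Hypothetical training data: a 2-D feature block X and a 1-D target y
X = np.random.randn(5, 3)
y = np.random.randn(5)

stacked = np.hstack([X, y[:, np.newaxis]])   # promote y to a column and stack
dims = np.cumsum([3, 1][:-1])                # split points: [3] (don't split by last dim)

X2, y2 = np.hsplit(stacked, dims)            # undo the hstack
assert np.allclose(X2, X) and np.allclose(y2.flatten(), y)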
Project: sonnet    Author: deepmind    | Project source | File source
def testComputation(self):
    batch_size = 2
    hidden_size = 4
    inputs = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
    prev_cell = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
    prev_hidden = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
    lstm = snt.LSTM(hidden_size)
    _, next_state = lstm(inputs, (prev_hidden, prev_cell))
    next_hidden, next_cell = next_state
    lstm_variables = lstm.get_variables()
    param_map = {param.name.split("/")[-1].split(":")[0]:
                 param for param in lstm_variables}

    # With random data, check the TF calculation matches the Numpy version.
    input_data = np.random.randn(batch_size, hidden_size)
    prev_hidden_data = np.random.randn(batch_size, hidden_size)
    prev_cell_data = np.random.randn(batch_size, hidden_size)

    with self.test_session() as session:
      tf.global_variables_initializer().run()
      fetches = [(next_hidden, next_cell),
                 param_map[snt.LSTM.W_GATES],
                 param_map[snt.LSTM.B_GATES]]
      output = session.run(fetches,
                           {inputs: input_data,
                            prev_cell: prev_cell_data,
                            prev_hidden: prev_hidden_data})

    next_state_ex, gate_weights_ex, gate_biases_ex = output
    in_and_hid = np.concatenate((input_data, prev_hidden_data), axis=1)
    real_gate = np.dot(in_and_hid, gate_weights_ex) + gate_biases_ex
    # i = input_gate, j = next_input, f = forget_gate, o = output_gate
    i, j, f, o = np.hsplit(real_gate, 4)
    real_cell = (prev_cell_data / (1 + np.exp(-(f + lstm._forget_bias))) +
                 1 / (1 + np.exp(-i)) * np.tanh(j))
    real_hidden = np.tanh(real_cell) * 1 / (1 + np.exp(-o))

    self.assertAllClose(real_hidden, next_state_ex[0])
    self.assertAllClose(real_cell, next_state_ex[1])
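
The hsplit call above is the crux of the check: the LSTM packs all four gate pre-activations into one (batch, 4 * hidden) matrix, and np.hsplit(real_gate, 4) recovers the i, j, f, o blocks. A standalone sketch with random data:

import numpy as np

batch_size, hidden_size = 2, 4
# Hypothetical packed pre-activations: the four gates side by side
real_gate = np.random.randn(batch_size, 4 * hidden_size)

i, j, f, o = np.hsplit(real_gate, 4)   # one (batch, hidden) block per gate
assert i.shape == (batch_size, hidden_size)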
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | Project source | File source
def xyz_to_uvw(xyz, ha, dec):
    """
    Rotate :math:`(x,y,z)` positions in earth coordinates to
    :math:`(u,v,w)` coordinates relative to astronomical source
    position :math:`(ha, dec)`. Can be used for both antenna positions
    as well as for baselines.

    Hour angle and declination can be given as single values or arrays
    of the same length. Angles can be given as radians or astropy
    quantities with a valid conversion.

    :param xyz: :math:`(x,y,z)` co-ordinates of antennas in array
    :param ha: hour angle of phase tracking centre (:math:`ha = ra - lst`)
    :param dec: declination of phase tracking centre.
    """

    x, y, z = numpy.hsplit(xyz, 3)

    # Two rotations:
    #  1. by 'ha' along the z axis
    #  2. by '90-dec' along the u axis
    u = x * numpy.cos(ha) - y * numpy.sin(ha)
    v0 = x * numpy.sin(ha) + y * numpy.cos(ha)
    w = z * numpy.sin(dec) - v0 * numpy.cos(dec)
    v = z * numpy.cos(dec) + v0 * numpy.sin(dec)

    return numpy.hstack([u, v, w])
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | Project source | File source
def uvw_to_xyz(uvw, ha, dec):
    """
    Rotate :math:`(x,y,z)` positions relative to a sky position at
    :math:`(ha, dec)` to earth coordinates. Can be used for both
    antenna positions as well as for baselines.

    Hour angle and declination can be given as single values or arrays
    of the same length. Angles can be given as radians or astropy
    quantities with a valid conversion.

    :param uvw: :math:`(u,v,w)` co-ordinates of antennas in array
    :param ha: hour angle of phase tracking centre (:math:`ha = ra - lst`)
    :param dec: declination of phase tracking centre
    """

    u, v, w = numpy.hsplit(uvw, 3)

    # Two rotations:
    #  1. by 'dec-90' along the u axis
    #  2. by '-ha' along the z axis
    v0 = v * numpy.sin(dec) - w * numpy.cos(dec)
    z = v * numpy.cos(dec) + w * numpy.sin(dec)
    x = u * numpy.cos(ha) + v0 * numpy.sin(ha)
    y = -u * numpy.sin(ha) + v0 * numpy.cos(ha)

    return numpy.hstack([x, y, z])
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | Project source | File source
def visibility_recentre(uvw, dl, dm):
    """ Compensate for kernel re-centering - see `w_kernel_function`.

    :param uvw: Visibility coordinates
    :param dl: Horizontal shift to compensate for
    :param dm: Vertical shift to compensate for
    :returns: Visibility coordinates re-centred on the peak of their w-kernel
    """

    u, v, w = numpy.hsplit(uvw, 3)
    return numpy.hstack([u - w * dl, v - w * dm, w])
Project: l1l2py    Author: slipguru    | Project source | File source
def test_nested_model_void(self):
        from l1l2py import tools
        data, test_data = np.vsplit(self.X, 2)
        labels, test_labels = np.hsplit(self.Y, 2)

        tau_opt, lambda_opt = (50.0, 0.1)
        mu_range = np.linspace(0.1, 1.0, 10)

        assert_raises(
            ValueError, nested_models,
            data, labels, test_data, test_labels,
            mu_range, tau_opt, lambda_opt,
            error_function=tools.regression_error,
            data_normalizer=tools.standardize,
            labels_normalizer=tools.center)
Project: LSTM_PIT    Author: snsun    | Project source | File source
def convert_cmvn_to_numpy(inputs_cmvn, labels_cmvn):
    """Convert global binary ark cmvn to numpy format."""
    tf.logging.info("Convert %s and %s to numpy format" % (
        inputs_cmvn, labels_cmvn))
    inputs_filename = os.path.join(FLAGS.data_dir, inputs_cmvn + '.cmvn')
    labels_filename = os.path.join(FLAGS.data_dir, labels_cmvn + '.cmvn')

    inputs = read_binary_file(inputs_filename, 0)
    labels = read_binary_file(labels_filename, 0)

    inputs_frame = inputs[0][-1]
    labels_frame = labels[0][-1]

    assert inputs_frame == labels_frame

    cmvn_inputs = np.hsplit(inputs, [inputs.shape[1]-1])[0]
    cmvn_labels = np.hsplit(labels, [labels.shape[1]-1])[0]

    mean_inputs = cmvn_inputs[0] / inputs_frame
    stddev_inputs = np.sqrt(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)
    mean_labels = cmvn_labels[0] / labels_frame
    stddev_labels = np.sqrt(cmvn_labels[1] / labels_frame - mean_labels ** 2)

    cmvn_name = os.path.join(FLAGS.output_dir, "train_cmvn.npz")
    np.savez(cmvn_name,
             mean_inputs=mean_inputs,
             stddev_inputs=stddev_inputs,
             mean_labels=mean_labels,
             stddev_labels=stddev_labels)

    tf.logging.info("Write to %s" % cmvn_name)
Project: LSTM_PIT    Author: snsun    | Project source | File source
def convert_cmvn_to_numpy(inputs_cmvn, labels_cmvn):
    """Convert global binary ark cmvn to numpy format."""
    tf.logging.info("Convert %s and %s to numpy format" % (
        inputs_cmvn, labels_cmvn))
    inputs_filename = os.path.join(FLAGS.data_dir, inputs_cmvn + '.cmvn')
    labels_filename = os.path.join(FLAGS.data_dir, labels_cmvn + '.cmvn')

    inputs = read_binary_file(inputs_filename, 0)
    labels = read_binary_file(labels_filename, 0)

    inputs_frame = inputs[0][-1]
    labels_frame = labels[0][-1]

    assert inputs_frame == labels_frame

    cmvn_inputs = np.hsplit(inputs, [inputs.shape[1]-1])[0]
    cmvn_labels = np.hsplit(labels, [labels.shape[1]-1])[0]

    mean_inputs = cmvn_inputs[0] / inputs_frame
    stddev_inputs = np.sqrt(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)
    mean_labels = cmvn_labels[0] / labels_frame
    stddev_labels = np.sqrt(cmvn_labels[1] / labels_frame - mean_labels ** 2)

    cmvn_name = os.path.join(FLAGS.output_dir, "train_cmvn.npz")
    np.savez(cmvn_name,
             mean_inputs=mean_inputs,
             stddev_inputs=stddev_inputs,
             mean_labels=mean_labels,
             stddev_labels=stddev_labels)

    tf.logging.info("Write to %s" % cmvn_name)
Project: monasca-analytics    Author: openstack    | Project source | File source
def _generate_train_test_sets(self, samples, ratio_train):
        num_samples_train = int(len(samples) * ratio_train)

        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:num_samples_train])
        _labels = np.array(labels[:num_samples_train])
        X_train_label = _labels.ravel()
        X_test = np.array(data[num_samples_train:])
        _labels = np.array(labels[num_samples_train:])
        X_test_label = _labels.ravel()
        return X_train, X_train_label, X_test, X_test_label
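
np.hsplit accepts negative split indices, just like slicing, which is what makes the [-1] split into (features, label column) work. A minimal sketch with hypothetical samples:

import numpy as np

# Hypothetical samples: two feature columns with the label appended last
samples = np.array([[0.1, 0.2, 0.0],
                    [0.3, 0.4, 1.0],
                    [0.5, 0.6, 1.0]])

# [-1] separates the final column, giving (features, labels)
data, labels = np.hsplit(samples, [-1])
print(data.shape, labels.ravel())   # (3, 2) [0. 1. 1.]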
Project: monasca-analytics    Author: openstack    | Project source | File source
def _generate_train_test_sets(self, samples, ratio_train):
        num_samples_train = int(len(samples) * ratio_train)

        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:num_samples_train])
        _labels = np.array(labels[:num_samples_train])
        X_train_label = _labels.ravel()
        X_test = np.array(data[num_samples_train:])
        _labels = np.array(labels[num_samples_train:])
        X_test_label = _labels.ravel()
        return X_train, X_train_label, X_test, X_test_label
Project: monasca-analytics    Author: openstack    | Project source | File source
def _generate_train_test_sets(self, samples, ratio_train):
        num_samples_train = int(len(samples) * ratio_train)

        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:num_samples_train])
        _labels = np.array(labels[:num_samples_train])
        X_train_label = _labels.ravel()
        X_test = np.array(data[num_samples_train:])
        _labels = np.array(labels[num_samples_train:])
        X_test_label = _labels.ravel()
        return X_train, X_train_label, X_test, X_test_label
Project: monasca-analytics    Author: openstack    | Project source | File source
def _generate_train_test_sets(self, samples, ratio_train):
        num_samples_train = int(len(samples) * ratio_train)

        data, labels = np.hsplit(samples, [-1])
        X_train = np.array(data[:num_samples_train])
        _labels = np.array(labels[:num_samples_train])
        X_train_label = _labels.ravel()
        X_test = np.array(data[num_samples_train:])
        _labels = np.array(labels[num_samples_train:])
        X_test_label = _labels.ravel()
        return X_train, X_train_label, X_test, X_test_label
Project: chainer-deconv    Author: germanRos    | Project source | File source
def hsplit(ary, indices_or_sections):
    """Splits an array into multiple sub arrays horizontally.

    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and otherwise that with ``axis=1``.

    .. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`

    """
    if ary.ndim == 0:
        raise ValueError('Cannot hsplit a zero-dimensional array')
    if ary.ndim == 1:
        return split(ary, indices_or_sections, 0)
    else:
        return split(ary, indices_or_sections, 1)
Project: reinforcement-learning-policy-gradients    Author: DarkElement75    | Project source | File source
def run_config(self, config_i, config_n, run_count, mb_n, initial_epsilon, initial_learning_rate, epsilon_decay_rate, learning_rate_decay_rate, discount_factor):
        #Need this
        mb_n = int(mb_n)
        if mb_n == 0:
            mb_n = 1

        #These are scaled so that they still work across different epoch counts
        #Negative values let us keep using our hyperparameter class without going outside the necessary range
        epsilon_decay_rate = -epsilon_decay_rate/self.epochs
        learning_rate_decay_rate = -float(learning_rate_decay_rate)/self.epochs

        cartpole_agent = PolicyGradientLearner(self.epochs, mb_n, self.timestep_n, initial_epsilon, initial_learning_rate, discount_factor, epsilon_decay_rate, learning_rate_decay_rate)

        #For results, becomes 3d array of shape runs, epochs, values
        results = []
        for run_i in range(run_count):
            #Reset environment
            cartpole_agent.init_env('CartPole-v0')

            #Gain more results for mean
            results.append(cartpole_agent.train_env(config_i, config_n, run_i, run_count))

        #Now we have 2d array of shape epochs, values
        average_results = np.mean(results, axis=0)

        #OK, this one's a bit complicated.
        #We need a list like [1, 2, 3] if the number of values we get is 3, hence the
        #i+1 for i in range(len(average_results[0])-1)

        #Then we hsplit our matrix on the columns (since we have 0, 1, 2, ... as the column indices)
        #and thus get our independent average values for each one
        average_values = np.hsplit(average_results, np.array([i+1 for i in range(len(average_results[0])-1)]))

        #So we can transpose the column vector back to a row one for each of these
        average_values = [average_value.flatten() for average_value in average_values]

        #Yay, assign values
        average_costs, average_avg_timesteps, average_max_timesteps = average_values

        return average_avg_timesteps
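
Isolating the hsplit-and-flatten step above with a hypothetical 3x3 result matrix: splitting at every interior column boundary yields one (epochs, 1) array per tracked value, which flatten() turns back into row vectors.

import numpy as np

# Hypothetical results: 3 epochs x 3 tracked values per epoch
average_results = np.arange(9.0).reshape(3, 3)

# Split at every interior column boundary -> one (epochs, 1) array per value
boundaries = [i + 1 for i in range(average_results.shape[1] - 1)]   # [1, 2]
columns = [c.flatten() for c in np.hsplit(average_results, boundaries)]
print(columns)   # [array([0., 3., 6.]), array([1., 4., 7.]), array([2., 5., 8.])]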
Project: ustc-grade-automatic-notification    Author: zzh1996    | Project source | File source
def hack(self, img):
        test_img_array = np.asarray(bytearray(img), dtype=np.uint8)
        test_img = cv2.imdecode(test_img_array, -1)
        test_gray = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
        test_final = cv2.threshold(test_gray, 100, 255, cv2.THRESH_BINARY)[1]
        test_cells = np.array([i.reshape(-1).astype(np.float32)
                            for i in np.hsplit(test_final, 4)])
        ret, result, neighbours, dist = self.knn.find_nearest(test_cells, k=1)
        result = result.reshape(-1)
        letter = []
        for i in result:
            letter.append(chr(i))
        return ''.join(letter)
Project: ustc-grade-automatic-notification    Author: zzh1996    | Project source | File source
def cut(filename):
    img = cv2.imread(filename)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    final = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
    cells = np.hsplit(final, 4)
    for i in range(4):
        cv2.imwrite(filename.split('.')[0] + str(i) + '.jpg', cells[i])
Project: ustc-grade-automatic-notification    Author: zzh1996    | Project source | File source
def downpic(filename):
    r = requests.get('http://mis.teach.ustc.edu.cn/randomImage.do')
    img_array = np.asarray(bytearray(r.content), dtype=np.uint8)
    img = cv2.imdecode(img_array, -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    final = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
    cells = np.hsplit(final, 4)
    for i in range(4):
        cv2.imwrite(str(filename+i)+'.jpg', cells[i])
Project: AppsOfDataAnalysis    Author: nhanloukiala    | Project source | File source
def Encoding(data, general_matrix=None):
    encoder = LabelBinarizer()
    count = 0
    # encoding
    for i in range(data.shape[1]):
        if type(data[0, i]) == str:
            count += 1
            col = data[:, i]
            unique = np.unique(col if general_matrix is None else general_matrix[:, i])

            try:
                encoder.fit(unique)
            except:
                pass

            new_col = encoder.transform(col)

            # split at i and i + 1
            before, removed, after = np.hsplit(data, [i, i + 1])
            # concatenate
            data = np.concatenate((before, new_col, after), axis=1)
            before, removed, after = np.hsplit(general_matrix, [i, i + 1])
            general_matrix = np.concatenate((before, encoder.transform(general_matrix[:, i]), after), axis=1)

    print "count : %d" % count
    # return data
    return data
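
The split-and-splice pattern used above, in isolation: hsplit at [i, i + 1] carves out column i so a block of any width can replace it. A sketch with a dummy matrix:

import numpy as np

data = np.arange(20).reshape(4, 5)
i = 2   # hypothetical column to re-encode

# hsplit at [i, i + 1] carves out column i: (before, removed, after)
before, removed, after = np.hsplit(data, [i, i + 1])
print(before.shape, removed.shape, after.shape)   # (4, 2) (4, 1) (4, 2)

# A replacement block of any width can then be spliced back in
new_cols = np.zeros((4, 3))
print(np.concatenate((before, new_cols, after), axis=1).shape)   # (4, 6)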
Project: tensorflow    Author: yscbm    | Project source | File source
def extract_data(filenames):
    # verify that every input file exists
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    # containers for the accumulated labels and images
    labels = None
    images = None

    for f in filenames:
        bytestream = open(f, 'rb')
        # read the raw bytes for TRAIN_NUM records
        buf = bytestream.read(TRAIN_NUM * (IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS + LABEL_SIZE))
        # convert the buffer into a numpy array
        data = np.frombuffer(buf, dtype=np.uint8)
        # reshape to one record (label + pixels) per row
        data = data.reshape(TRAIN_NUM, LABEL_SIZE + IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS)
        # split the label bytes from the image pixels
        labels_images = np.hsplit(data, [LABEL_SIZE])

        label = labels_images[0].reshape(TRAIN_NUM)
        image = labels_images[1].reshape(TRAIN_NUM,IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)

        if labels is None:
            labels = label
            images = image
        else:
            # concatenate with the previously read labels and images
            labels = np.concatenate((labels,label))
            images = np.concatenate((images,image))

    images = (images - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH

    return labels,images
Project: uplift    Author: mustafaseisa    | Project source | File source
def __recall(self, data, gridSize = 10):

        assert self.__optimized == True
        assert type(data) == ndarray
        assert min(data.shape) > 2 

        y, X = hsplit(data, [1])
        ny = len(y)

        assert gridSize < ny
        assert unique(y).all() in [-1,0,1]
        assert X.shape[1] == self._m

        from math import ceil

        grid = linspace(0, ny - 1, gridSize, True)

        orderedLabels = y[argsort(X.dot(self.w), axis=0).flatten()[::-1]] == 1
        proportions = cumsum(orderedLabels)/sum(orderedLabels)

        recall = list(map(lambda tick: proportions[ceil(tick)], grid))
        recall.insert(0, 0.)

        grid = list((grid+1)/ny)
        grid.insert(0, 0.)

        return (grid, recall)
Project: cps2-gfx-editor    Author: goosechooser    | Project source | File source
def _split_array(image):
    """Splits an image into 16x16 sized tiles.

    Returns a list of arrays.
    """

    tiles = []
    dims = image.shape
    split_image = np.vsplit(image, int(dims[0]/16))
    for tile in split_image:
        tiles.extend(np.hsplit(tile, int(dims[1]/16)))
    return tiles

#Currently for 16x16 tiles only
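
The vsplit-then-hsplit combination is the standard way to tile a 2-D array; a sketch on a hypothetical 32x48 image:

import numpy as np

# Hypothetical 32x48 image -> six 16x16 tiles
image = np.arange(32 * 48).reshape(32, 48)

tiles = []
for strip in np.vsplit(image, image.shape[0] // 16):      # 16-row strips
    tiles.extend(np.hsplit(strip, image.shape[1] // 16))  # 16-column tiles

print(len(tiles), tiles[0].shape)   # 6 (16, 16)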
Project: gbdxtools    Author: DigitalGlobe    | Project source | File source
def __call__(self, coords):
        assert isinstance(coords, np.ndarray)
        try:
            d0, d1 = coords.shape
            assert d1 == 2
        except (ValueError, AssertionError):
            raise NotImplementedError("input coords must be [N x 2] dimension numpy array")

        xarr, yarr = np.hsplit(coords, 2)
        res = self.fwd(xarr, yarr)
        return res
Project: orange-infrared    Author: markotoplak    | Project source | File source
def determine_sweeps(self):
        """
        Determine if input interferogram is single-sweep or
        double-sweep (Forward-Backward).
        """
        # Just testing 1st row for now
        # assuming all in a group were collected the same way
        data = self.data.X[0]
        zpd = irfft.peak_search(data)
        middle = data.shape[0] // 2
        # Allow variation of +/- 0.4 % in zpd location
        var = middle // 250
        if zpd >= middle - var and zpd <= middle + var:
            # single, symmetric
            self.sweeps = 0
        else:
            try:
                data = np.hsplit(data, 2)
            except ValueError:
                # odd number of data points, probably single
                self.sweeps = 0
                return
            zpd1 = irfft.peak_search(data[0])
            zpd2 = irfft.peak_search(data[1][::-1])
            # Forward / backward zpds never perfectly match
            if zpd1 >= zpd2 - var and zpd1 <= zpd2 + var:
                # forward-backward, symmetric and asymmetric
                self.sweeps = 1
            else:
                # single, asymmetric
                self.sweeps = 0
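
The try/except above leans on documented behavior: given an integer section count, hsplit raises ValueError when the array cannot be divided evenly, which here doubles as an odd-length detector. A sketch:

import numpy as np

even = np.arange(6)
print([half.tolist() for half in np.hsplit(even, 2)])   # [[0, 1, 2], [3, 4, 5]]

odd = np.arange(7)
try:
    np.hsplit(odd, 2)   # 7 points cannot be divided into two equal halves
except ValueError as err:
    print("ValueError:", err)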
Project: PyFusionGUI    Author: SyntaxVoid    | Project source | File source
def EM_GMM_GMM_clustering(instance_array_amps, n_clusters=9, sin_cos = 0, number_of_starts = 10, show_covariances = 0, clim=None, covariance_type='diag', n_iter = 50):
    '''
    Cluster using a Gaussian for the real and imag part of the ratio of the complex value between adjacent channels
    Supposed to be for imaging diagnostics

    SRH: 18May2014
    '''
    print 'starting EM-GMM-GMM algorithm from scikit-learn, clusters=%d, retries : %d'%(n_clusters,number_of_starts)
    #tmp = np.zeros((instance_array_amps.shape[0], instance_array_amps.shape[1]-1),dtype=complex)
    #for i in range(1,instance_array_amps.shape[1]):
    #    tmp[:,i-1] = instance_array_amps[:,i]/instance_array_amps[:,i-1]
    #print 'ratio :', np.sum(np.abs(np.imag(instance_array_amps)))/np.sum(np.abs(np.real(instance_array_amps)))
    data_complex = instance_array_amps/np.sum(instance_array_amps,axis = 1)[:,np.newaxis]
    #data_complex = instance_array_amps/(instance_array_amps[:,2])[:,np.newaxis]
    #print 'hello..', instance_array_amps.shape
    input_data = np.hstack((np.real(data_complex), np.imag(data_complex)))  # real and imaginary parts side by side
    #k_means_cluster_assignments, k_means_cluster_details = k_means_clustering(input_data, n_clusters=n_clusters, sin_cos = 1, number_of_starts = 3,)
    #print k_means_cluster_assignments
    #input_data = np.hstack((np.abs(data_complex),(np.abs(data_complex))))
    n_dim = data_complex.shape[1]
    #print n_clusters
    gmm = mixture.GMM(n_components = n_clusters, covariance_type = covariance_type, n_init = number_of_starts, n_iter = n_iter,)
    gmm.fit(input_data)
    cluster_assignments = gmm.predict(input_data)
    bic_value = gmm.bic(input_data)
    LL = np.sum(gmm.score(input_data))

    #Extract the means, variances and covariances
    gmm_covars = np.array(gmm._get_covars())
    gmm_vars = np.array([np.diagonal(i) for i in gmm._get_covars()])
    gmm_vars_re, gmm_vars_im = np.hsplit(gmm_vars,2)
    gmm_covars_re = np.array([i[0:n_dim,0:n_dim] for i in gmm._get_covars()])
    gmm_covars_im = np.array([i[n_dim:,n_dim:] for i in gmm._get_covars()])
    gmm_means = gmm.means_
    gmm_means_re, gmm_means_im = np.hsplit(gmm_means, 2)
    #Bundle up the answer
    cluster_details = {'EM_GMM_means':gmm_means, 'EM_GMM_variances':gmm_vars, 'EM_GMM_covariances':gmm_covars, 'EM_GMM_means_re':gmm_means_re, 'EM_GMM_variances_re':gmm_vars_re, 'EM_GMM_covariances_re':gmm_covars_re,'EM_GMM_means_im':gmm_means_im, 'EM_GMM_variances_im':gmm_vars_im, 'EM_GMM_covariances_im':gmm_covars_im,'BIC':bic_value,'LL':LL}
    print 'EM_GMM_GMM Converged: ', gmm.converged_

    return cluster_assignments, cluster_details
Project: extract    Author: dblalock    | Project source | File source
def colsAsList(A):
    if len(A.shape) < 2:
        return A
    return np.hsplit(A, A.shape[1])
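
colsAsList uses the limiting case of hsplit: requesting as many sections as there are columns returns one single-column array per column. A sketch:

import numpy as np

A = np.arange(6).reshape(2, 3)

# As many sections as columns -> one (N, 1) array per column
cols = np.hsplit(A, A.shape[1])
print(len(cols), cols[0].ravel())   # 3 [0 3]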
Project: rswarp    Author: radiasoft    | Project source | File source
def rotateVec(vec, rotaxis, theta):
    """
    Given a 3-vector vec, rotate about rotaxis by $\theta$

    Also accepts iterable input for vec and rotaxis if the arguments are
    compatible lengths.
    """
    assert not (np.any(np.isnan(vec)) or
                np.any(np.isnan(rotaxis)) or
                np.any(np.isnan(theta))), "Inputs must not be NaN"
    if np.shape(vec) == (3, ):
        R = rotationMatrix3D(rotaxis, theta)
        norm = np.linalg.norm(vec)
        res = np.dot(R, vec)
        assert np.isclose(np.linalg.norm(res), norm), "Rotation changed vector norm"
        return np.dot(R, vec)
    else:
        assert np.shape(vec)[0] == np.shape(rotaxis)[0] == np.shape(theta)[0], "Dimension mismatch in rotateVec()"
        # Unfortunately, seems that np.dot can't be coerced into doing this operation all at once
        # Tried to build a tensor of rotation matrices and use np.einsum, but couldn't get good results.
        # If this becomes slow at any point, it's a good target for optimization.
        res = np.zeros(shape=(np.shape(vec)[0], 3))
        for i, (v, r, t) in enumerate(zip(vec, rotaxis, theta)):
            # print("In rotateVec(): r={}, t={}".format(r, t))
            norm = np.linalg.norm(v)
            R = rotationMatrix3D(r, t)
            res[i] = np.dot(R, v)
            assert np.isclose(np.linalg.norm(res[i]), norm), "Rotation changed vector norm: v={}, r={}, t={}, R={}".format(v, r, t, R)
        return np.hsplit(res, 3)
Project: rswarp    Author: radiasoft    | Project source | File source
def multiple_vector_test():
    vecs = [(1, 0, 0),
            (0, 1, 0),
            (0, 0, 1)]
    rotaxes = [(0, 0, 1),
               (0, 0, 1),
               (0, 0, 1)]
    thetas = [np.pi/4,
              np.pi/4,
              np.pi/4]
    expected = np.hsplit(np.array([(np.sqrt(2)/2., np.sqrt(2)/2., 0),
                                   (-np.sqrt(2)/2., np.sqrt(2)/2., 0),
                                   (0, 0, 1)]), 3)
    res = rotateVec(vecs, rotaxes, thetas)
    assert np.allclose(res, expected)
Project: sonnet    Author: deepmind    | Project source | File source
def testPeephole(self):
    batch_size = 5
    hidden_size = 20

    # Initialize the rnn and verify the number of parameter sets.
    inputs = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
    prev_cell = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
    prev_hidden = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
    lstm = snt.LSTM(hidden_size, use_peepholes=True)
    _, next_state = lstm(inputs, (prev_hidden, prev_cell))
    next_hidden, next_cell = next_state
    lstm_variables = lstm.get_variables()
    self.assertEqual(len(lstm_variables), 5, "LSTM should have 5 variables")

    # Unpack parameters into dict and check their sizes.
    param_map = {param.name.split("/")[-1].split(":")[0]:
                 param for param in lstm_variables}
    self.assertShapeEqual(np.ndarray(4 * hidden_size),
                          param_map[snt.LSTM.B_GATES].initial_value)
    self.assertShapeEqual(np.ndarray((2 * hidden_size, 4 * hidden_size)),
                          param_map[snt.LSTM.W_GATES].initial_value)
    self.assertShapeEqual(np.ndarray(hidden_size),
                          param_map[snt.LSTM.W_F_DIAG].initial_value)
    self.assertShapeEqual(np.ndarray(hidden_size),
                          param_map[snt.LSTM.W_I_DIAG].initial_value)
    self.assertShapeEqual(np.ndarray(hidden_size),
                          param_map[snt.LSTM.W_O_DIAG].initial_value)

    # With random data, check the TF calculation matches the Numpy version.
    input_data = np.random.randn(batch_size, hidden_size)
    prev_hidden_data = np.random.randn(batch_size, hidden_size)
    prev_cell_data = np.random.randn(batch_size, hidden_size)

    with self.test_session() as session:
      tf.global_variables_initializer().run()
      fetches = [(next_hidden, next_cell),
                 param_map[snt.LSTM.W_GATES],
                 param_map[snt.LSTM.B_GATES],
                 param_map[snt.LSTM.W_F_DIAG],
                 param_map[snt.LSTM.W_I_DIAG],
                 param_map[snt.LSTM.W_O_DIAG]]
      output = session.run(fetches,
                           {inputs: input_data,
                            prev_cell: prev_cell_data,
                            prev_hidden: prev_hidden_data})

    next_state_ex, w_ex, b_ex, wfd_ex, wid_ex, wod_ex = output
    in_and_hid = np.concatenate((input_data, prev_hidden_data), axis=1)
    real_gate = np.dot(in_and_hid, w_ex) + b_ex
    # i = input_gate, j = next_input, f = forget_gate, o = output_gate
    i, j, f, o = np.hsplit(real_gate, 4)
    real_cell = (prev_cell_data /
                 (1 + np.exp(-(f + lstm._forget_bias +
                               wfd_ex * prev_cell_data))) +
                 1 / (1 + np.exp(-(i + wid_ex * prev_cell_data))) * np.tanh(j))
    real_hidden = (np.tanh(real_cell + wod_ex * real_cell) *
                   1 / (1 + np.exp(-o)))

    self.assertAllClose(real_hidden, next_state_ex[0])
    self.assertAllClose(real_cell, next_state_ex[1])
Project: pit-speech-separation    Author: snsun    | Project source | File source
def convert_cmvn_to_numpy(inputs_cmvn, labels_cmvn):
  if FLAGS.labels_cmvn !='':
    """Convert global binary ark cmvn to numpy format."""
    tf.logging.info("Convert %s and %s to numpy format" % (
        inputs_cmvn, labels_cmvn))
    inputs_filename = FLAGS.inputs_cmvn
    labels_filename = FLAGS.labels_cmvn

    inputs = read_binary_file(inputs_filename, 0)
    labels = read_binary_file(labels_filename, 0)

    inputs_frame = inputs[0][-1]
    labels_frame = labels[0][-1]

    #assert inputs_frame == labels_frame

    cmvn_inputs = np.hsplit(inputs, [inputs.shape[1]-1])[0]
    cmvn_labels = np.hsplit(labels, [labels.shape[1]-1])[0]

    mean_inputs = cmvn_inputs[0] / inputs_frame
    stddev_inputs = np.sqrt(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)
    mean_labels = cmvn_labels[0] / labels_frame
    stddev_labels = np.sqrt(cmvn_labels[1] / labels_frame - mean_labels ** 2)

    cmvn_name = os.path.join(FLAGS.output_dir, "train_cmvn.npz")
    np.savez(cmvn_name,
             mean_inputs=mean_inputs,
             stddev_inputs=stddev_inputs,
             mean_labels=mean_labels,
             stddev_labels=stddev_labels)

    tf.logging.info("Write to %s" % cmvn_name)
  else :
    """Convert global binary ark cmvn to numpy format."""
    tf.logging.info("Convert %s to numpy format" % (
        inputs_cmvn))
    inputs_filename = FLAGS.inputs_cmvn

    inputs = read_binary_file(inputs_filename, 0)

    inputs_frame = inputs[0][-1]


    cmvn_inputs = np.hsplit(inputs, [inputs.shape[1]-1])[0]

    mean_inputs = cmvn_inputs[0] / inputs_frame
    stddev_inputs = np.sqrt(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)

    cmvn_name = os.path.join(FLAGS.output_dir, "train_cmvn.npz")
    np.savez(cmvn_name,
             mean_inputs=mean_inputs,
             stddev_inputs=stddev_inputs)

    tf.logging.info("Write to %s" % cmvn_name)
Project: uplift    Author: mustafaseisa    | Project source | File source
def __init__(self, treatment, control):

        assert [type(treatment), type(control)] == [ndarray, ndarray] # numpy arrays only

        yt, Xt = hsplit(treatment, [1])
        yc, Xc = hsplit(control, [1])

        self._nt, mt = Xt.shape
        self._nc, mc = Xc.shape
        self._n = self._nc + self._nt # n is number of datum across both groups

        assert min(mt, mc, self._nt, self._nc) >= 1 and self._n >= 3 # data shouldn't be trivial
        assert mt == mc # same number of features in treatment and control

        self._m = mt # store number of features

        assert unique(yt).all() in [-1,1] and unique(yc).all() in [-1,1] # labels are binary

        tPlusIndex = where(yt.flatten() == 1.0)[0] # index for positive in treatment
        self._ntplus = len(tPlusIndex) # number of such points (length of index)
        tMinusIndex = delete(range(self._nt), tPlusIndex) # index for negative in treatment
        self._ntminus = self._nt - self._ntplus # number of such points

        self._Dtplus = Xt[tPlusIndex] # positive treatment datum
        self._Dtminus = Xt[tMinusIndex] # negative treatment datum

        cPlusIndex = where(yc.flatten() == 1.0)[0] # index for positive in control
        self._ncplus = len(cPlusIndex) # number of such points (length of index)
        cMinusIndex = delete(range(self._nc), cPlusIndex) # index for negative in control
        self._ncminus = self._nc - self._ncplus # number of such points

        self._Dcplus = Xc[cPlusIndex] # positive treatment datum
        self._Dcminus = Xc[cMinusIndex] # negative treatment datum

        # model parameters

        self.__optimized = False # indicator for whether optimization routine was performed
        options['show_progress'] = False # suppress optimization output

        self.w = None # hyperplane slope
        self.b1 = None # treatment group intercept
        self.b2 = None # control group intercept
        self.threshold = None # thresholding predictor function

        print("Successfully initialized.")
Project: PyFusionGUI    Author: SyntaxVoid    | Project source | File source
def __init__(self, instance_array_amps, n_clusters = 9, n_iterations = 20, n_cpus=1, start='random', kappa_calc='approx', hard_assignments = 0, kappa_converged = 0.1, mu_converged = 0.01, min_iterations=10, LL_converged = 1.e-4, verbose = 0, seed=None, norm_method = 'sum'):
        print 'EM_GMM_GMM2', instance_array_amps.shape
        self.settings = {'n_clusters':n_clusters,'n_iterations':n_iterations,'n_cpus':n_cpus,'start':start,
                         'kappa_calc':kappa_calc,'hard_assignments':hard_assignments, 'method':'EM_VMM_GMM'}
        #self.instance_array = copy.deepcopy(instance_array)
        self.instance_array_amps = instance_array_amps
        self.data_complex = norm_bet_chans(instance_array_amps, method = norm_method)
        print 'hello norm method',  norm_method
        self.data_complex = instance_array_amps/np.sum(instance_array_amps,axis = 1)[:,np.newaxis]
        self.input_data = np.hstack((np.real(self.data_complex), np.imag(self.data_complex)))
        self.n_dim = self.data_complex.shape[1]
        self.n_instances, self.n_dimensions = self.input_data.shape

        self.n_clusters = n_clusters; self.max_iterations = n_iterations; self.start = start
        self.hard_assignments = hard_assignments; self.seed = seed
        if self.seed is None: self.seed = os.getpid()
        print('seed,',self.seed)
        np.random.seed(self.seed)
        self.iteration = 1

        self._initialisation()
        self.convergence_record = []; converged = 0 
        self.LL_diff = np.inf
        while converged!=1:
            start_time = time.time()
            self._EM_VMM_GMM_expectation_step()
            if self.hard_assignments:
                print 'hard assignments'
                self.cluster_assignments = np.argmax(self.zij,axis=1)
                self.zij = self.zij *0
                for i in range(self.n_clusters):
                    self.zij[self.cluster_assignments==i,i] = 1

            valid_items = self.probs>(1.e-300)
            self.LL_list.append(np.sum(self.zij[valid_items]*np.log(self.probs[valid_items])))
            self._EM_VMM_GMM_maximisation_step()
            if (self.iteration>=2): self.LL_diff = np.abs(((self.LL_list[-1] - self.LL_list[-2])/self.LL_list[-2]))
            if verbose:
                print 'Time for iteration %d :%.2f, mu_convergence:%.3e, kappa_convergence:%.3e, LL: %.8e, LL_dif : %.3e'%(self.iteration,time.time() - start_time,self.convergence_mean, self.convergence_std, self.LL_list[-1],self.LL_diff)
            self.convergence_record.append([self.iteration, self.convergence_mean, self.convergence_std])
            mean_converged = mu_converged; std_converged = kappa_converged
            if (self.iteration > min_iterations) and (self.convergence_mean<mean_converged) and (self.convergence_std<std_converged) and (self.LL_diff<LL_converged):
                converged = 1
                print 'Convergence criteria met!!'
            elif self.iteration > n_iterations:
                converged = 1
                print 'Max number of iterations'
            self.iteration+=1
        print os.getpid(), 'Time for iteration %d :%.2f, mu_convergence:%.3e, kappa_convergence:%.3e, LL: %.8e, LL_dif : %.3e'%(self.iteration,time.time() - start_time,self.convergence_mean, self.convergence_std,self.LL_list[-1],self.LL_diff)
        #print 'AIC : %.2f'%(2*(mu_list.shape[0]*mu_list.shape[1])-2.*LL_list[-1])
        self.cluster_assignments = np.argmax(self.zij,axis=1)
        self.BIC = -2*self.LL_list[-1]+self.n_clusters*3*np.log(self.n_dimensions)
        gmm_means_re, gmm_means_im = np.hsplit(self.mean_list, 2)
        gmm_vars_re, gmm_vars_im = np.hsplit(self.std_list**2, 2)

        self.cluster_details = {'EM_GMM_means':self.mean_list, 'EM_GMM_variances':self.std_list**2,'BIC':self.BIC,'LL':self.LL_list, 
                                'EM_GMM_means_re':gmm_means_re, 'EM_GMM_variances_re':gmm_vars_re,
                                'EM_GMM_means_im':gmm_means_im, 'EM_GMM_variances_im':gmm_vars_im}