Python numpy module: multiply() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.multiply().
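
As a quick refresher before the project examples: np.multiply(x1, x2) is the elementwise product ufunc behind the * operator; it broadcasts its inputs and accepts the usual ufunc keywords such as out= and casting=. A minimal, self-contained illustration (not taken from any of the projects below):

import numpy as np

a = np.array([[1, 2, 3]])
b = np.array([[4], [5]])

np.multiply(a, 3)              # scale every element        -> [[3, 6, 9]]
np.multiply(a, a)              # same result as a * a       -> [[1, 4, 9]]
np.multiply(a, b)              # broadcasting (1,3) x (2,1) -> shape (2, 3)

out = np.zeros((1, 3))
np.multiply(a, 2.0, out=out)   # write the product into an existing array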

Project: Stein-Variational-Gradient-Descent    Author: DartML    | Project source code | File source code
def svgd_kernel(self, h = -1):
        sq_dist = pdist(self.theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, use the median trick
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(self.theta.shape[0]+1))

        # compute the rbf kernel

        Kxy = np.exp( -pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, self.theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(self.theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(self.theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy)
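
The per-column loop above that adds theta[:, i] * sumkxy can be collapsed into one broadcasted np.multiply. A small self-contained check of that equivalence (illustrative only, not from the DartML source):

import numpy as np

rng = np.random.RandomState(0)
theta = rng.randn(5, 3)            # (n, d) particles
sumkxy = rng.rand(5)               # (n,) kernel row sums

looped = np.zeros_like(theta)
for i in range(theta.shape[1]):
    looped[:, i] = np.multiply(theta[:, i], sumkxy)

vectorized = np.multiply(theta, sumkxy[:, np.newaxis])   # one broadcasted call
assert np.allclose(looped, vectorized)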
Project: esys-pbi    Author: fsxfreak    | Project source code | File source code
def update(self,frame,events):
        falloff = self.falloff

        img = frame.img
        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]

        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay,cv2.DIST_L2, 5)

        # fix for opencv binding inconsistency
        if type(out)==tuple:
            out = out[0]

        overlay =  1/(out/falloff+1)

        img[:] = np.multiply(img, cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB), casting="unsafe")
Project: pycpd    Author: siavashk    | Project source code | File source code
def EStep(self):
    P = np.zeros((self.M, self.N))

    for i in range(0, self.M):
      diff     = self.X - np.tile(self.TY[i, :], (self.N, 1))
      diff    = np.multiply(diff, diff)
      P[i, :] = P[i, :] + np.sum(diff, axis=1)

    c = (2 * np.pi * self.sigma2) ** (self.D / 2)
    c = c * self.w / (1 - self.w)
    c = c * self.M / self.N

    P = np.exp(-P / (2 * self.sigma2))
    den = np.sum(P, axis=0)
    den = np.tile(den, (self.M, 1))
    den[den==0] = np.finfo(float).eps

    self.P   = np.divide(P, den)
    self.Pt1 = np.sum(self.P, axis=0)
    self.P1  = np.sum(self.P, axis=1)
    self.Np  = np.sum(self.P1)
Project: NumpyDL    Author: oujago    | Project source code | File source code
def derivative(self, input=None):
        """The derivative of sigmoid is

        .. math:: \\frac{dy}{dx} & = (1-\\varphi(x)) \\otimes \\varphi(x)  \\\\
                  & = \\frac{e^{-x}}{(1+e^{-x})^2} \\\\
                  & = \\frac{e^x}{(1+e^x)^2}

        Returns
        -------
        float32
            The derivative of sigmoid function.
        """
        last_forward = self.forward(input) if input is not None else self.last_forward
        return np.multiply(last_forward, 1 - last_forward)


# sigmoid-end
# tanh-start
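
A quick numeric check of the identity in the docstring above, i.e. that phi(x) * (1 - phi(x)) equals e^{-x} / (1 + e^{-x})^2 (standalone snippet, not part of NumpyDL):

import numpy as np

x = np.linspace(-5.0, 5.0, 11)
phi = 1.0 / (1.0 + np.exp(-x))                 # sigmoid
lhs = np.multiply(phi, 1.0 - phi)              # the form used in derivative()
rhs = np.exp(-x) / (1.0 + np.exp(-x)) ** 2     # closed form from the docstring
assert np.allclose(lhs, rhs)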
Project: Stein-Variational-Gradient-Descent    Author: DartML    | Project source code | File source code
def svgd_kernel(self, theta, h = -1):
        sq_dist = pdist(theta)
        pairwise_dists = squareform(sq_dist)**2
        if h < 0: # if h < 0, use the median trick
            h = np.median(pairwise_dists)  
            h = np.sqrt(0.5 * h / np.log(theta.shape[0]+1))

        # compute the rbf kernel
        Kxy = np.exp( -pairwise_dists / h**2 / 2)

        dxkxy = -np.matmul(Kxy, theta)
        sumkxy = np.sum(Kxy, axis=1)
        for i in range(theta.shape[1]):
            dxkxy[:, i] = dxkxy[:,i] + np.multiply(theta[:,i],sumkxy)
        dxkxy = dxkxy / (h**2)
        return (Kxy, dxkxy)
Project: coursera_ML_in_python    Author: whyjay17    | Project source code | File source code
def gradientDescent(X, y, theta, alpha, iters):
    temp = np.matrix(np.zeros(theta.shape))
    params = int(theta.ravel().shape[1]) #flattens
    cost = np.zeros(iters)

    for i in range(iters):
        err = (X * theta.T) - y

        for j in range(params):
            term = np.multiply(err, X[:,j])
            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))

        theta = temp
        cost[i] = computeCost(X, y, theta)

    return theta, cost
Project: coursera_ML_in_python    Author: whyjay17    | Project source code | File source code
def computeCost(X, y, theta):
    inner = np.power(((X * theta.T) - y), 2)
    return np.sum(inner) / (2 * len(X))

#def gradientDescent(X, y, theta, alpha, iters):
#    temp = np.matrix(np.zeros(theta.shape))
#    params = int(theta.ravel().shape[1]) #flattens
#    cost = np.zeros(iters)
#
#    for i in range(iters):
#        err = (X * theta.T) - y
#        
#        for j in range(params):
#            term = np.multiply(err, X[:,j])
#            temp[0, j] = theta[0, j] - ((alpha / len(X)) * np.sum(term))
#        
#        theta = temp
#        cost[i] = computeCost(X, y, theta)
#    
#    return theta, cost
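
To see gradientDescent and computeCost (both defined above) in action, a toy linear-regression run along these lines should work (illustrative sketch with made-up data; shapes follow the course convention of a 1x2 row-vector theta):

import numpy as np

x = np.linspace(0.0, 1.0, 50)
X = np.matrix(np.column_stack([np.ones_like(x), x]))        # bias column + feature
y = np.matrix(1.0 + 2.0 * x + 0.05 * np.random.randn(50)).T # noisy targets for y = 1 + 2x
theta = np.matrix(np.zeros((1, 2)))

theta, cost = gradientDescent(X, y, theta, alpha=0.1, iters=1000)
print(theta)        # should approach [[1.0, 2.0]]
print(cost[-1])     # final training cost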
Project: photinia    Author: XoriieInpottn    | Project source code | File source code
def _extract_images(filename):
        """???????????????????

        :param filename: ?????
        :return: 4??numpy??[index, y, x, depth]? ???np.float32
        """
        images = []
        print('Extracting {}'.format(filename))
        with gzip.GzipFile(fileobj=open(filename, 'rb')) as f:
            buf = f.read()
            index = 0
            magic, num_images, rows, cols = struct.unpack_from('>IIII', buf, index)
            if magic != 2051:
                raise ValueError('Invalid magic number {} in MNIST image file: {}'.format(magic, filename))
            index += struct.calcsize('>IIII')
            for i in range(num_images):
                img = struct.unpack_from('>784B', buf, index)
                index += struct.calcsize('>784B')
                img = np.array(img, dtype=np.float32)
                # Scale pixel values from [0, 255] to [0, 1]
                img = np.multiply(img, 1.0 / 255.0)
                img = img.reshape(rows, cols, 1)
                images.append(img)
        return np.array(images, dtype=np.float32)
Project: BlueWhale    Author: caffe2    | Project source code | File source code
def get_max_q_values(
        self,
        next_states: np.ndarray,
        possible_next_actions: Optional[np.ndarray] = None,
        use_target_network: Optional[bool] = True
    ) -> np.ndarray:
        q_values = self.get_q_values_all_actions(
            next_states, use_target_network
        )

        if possible_next_actions is not None:
            mask = np.multiply(
                np.logical_not(possible_next_actions),
                self.ACTION_NOT_POSSIBLE_VAL
            )
            q_values += mask

        return np.max(q_values, axis=1, keepdims=True)
Project: BlueWhale    Author: caffe2    | Project source code | File source code
def gen_training_data(
    num_features,
    num_training_samples,
    num_outputs,
    noise_scale=0.1,
):
    np.random.seed(0)
    random.seed(1)
    input_distribution = stats.norm()
    training_inputs = input_distribution.rvs(
        size=(num_training_samples, num_features)
    ).astype(np.float32)
    weights = np.random.normal(size=(num_outputs, num_features)
                              ).astype(np.float32).transpose()
    noise = np.multiply(
        np.random.normal(size=(num_training_samples, num_outputs)), noise_scale
    )
    training_outputs = (np.dot(training_inputs, weights) +
                        noise).astype(np.float32)

    return training_inputs, training_outputs, weights, input_distribution
Project: Flavor-Network    Author: lingcheng99    | Project source code | File source code
def make_tfidf(arr):
    '''input, numpy array with flavor counts for each recipe and compounds
    return numpy array adjusted as tfidf
    '''
    arr2 = arr.copy()
    N=arr2.shape[0]
    l2_rows = np.sqrt(np.sum(arr2**2, axis=1)).reshape(N, 1)
    l2_rows[l2_rows==0]=1
    arr2_norm = arr2/l2_rows

    arr2_freq = np.sum(arr2_norm>0, axis=0)
    arr2_idf = np.log(float(N+1) / (1.0 + arr2_freq)) + 1.0

    from sklearn.preprocessing import normalize
    tfidf = np.multiply(arr2_norm, arr2_idf)
    tfidf = normalize(tfidf, norm='l2', axis=1)
    print(tfidf.shape)
    return tfidf
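
A tiny run of make_tfidf on a hypothetical 3x3 count matrix (illustrative only) shows the expected behaviour: rows come back L2-normalized after the idf weighting:

import numpy as np

counts = np.array([[3., 0., 1.],
                   [0., 2., 2.],
                   [1., 1., 0.]])
tfidf = make_tfidf(counts)
print(tfidf.shape)                      # (3, 3)
print(np.linalg.norm(tfidf, axis=1))    # approximately [1. 1. 1.]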
Project: Flavor-Network    Author: lingcheng99    | Project source code | File source code
def make_tfidf(arr):
    '''input, numpy array with flavor counts for each recipe and compounds
    return numpy array adjusted as tfidf
    '''
    arr2 = arr.copy()
    N=arr2.shape[0]
    l2_rows = np.sqrt(np.sum(arr2**2, axis=1)).reshape(N, 1)
    l2_rows[l2_rows==0]=1
    arr2_norm = arr2/l2_rows

    arr2_freq = np.sum(arr2_norm>0, axis=0)
    arr2_idf = np.log(float(N+1) / (1.0 + arr2_freq)) + 1.0

    from sklearn.preprocessing import normalize
    tfidf = np.multiply(arr2_norm, arr2_idf)
    tfidf = normalize(tfidf, norm='l2', axis=1)
    print(tfidf.shape)
    return tfidf
Project: OCR    Author: OrangeGuo    | Project source code | File source code
def train(self, training_data_array):
        for data in training_data_array:
            # Forward propagation: compute hidden and output layer activations
            y1 = np.dot(np.mat(self.theta1), np.mat(data.y0).T)
            sum1 = y1 + np.mat(self.input_layer_bias)
            y1 = self.sigmoid(sum1)

            y2 = np.dot(np.array(self.theta2), y1)
            y2 = np.add(y2, self.hidden_layer_bias)
            y2 = self.sigmoid(y2)

            # Compute output and hidden layer errors (backpropagation)
            actual_vals = [0] * 10
            actual_vals[data.label] = 1
            output_errors = np.mat(actual_vals).T - np.mat(y2)
            hidden_errors = np.multiply(np.dot(np.mat(self.theta2).T, output_errors), self.sigmoid_prime(sum1))

            # Update weights and biases
            self.theta1 += self.LEARNING_RATE * np.dot(np.mat(hidden_errors), np.mat(data.y0))
            self.theta2 += self.LEARNING_RATE * np.dot(np.mat(output_errors), np.mat(y1).T)
            self.hidden_layer_bias += self.LEARNING_RATE * output_errors
            self.input_layer_bias += self.LEARNING_RATE * hidden_errors
Project: Parallel.GAMIT    Author: demiangomez    | Project source code | File source code
def ct2lg(dX, dY, dZ, lat, lon):

    n = dX.size
    R = np.zeros((3, 3, n))

    R[0, 0, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[0, 1, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[0, 2, :] = np.cos(np.deg2rad(lat))
    R[1, 0, :] = -np.sin(np.deg2rad(lon))
    R[1, 1, :] = np.cos(np.deg2rad(lon))
    R[1, 2, :] = np.zeros((1, n))
    R[2, 0, :] = np.multiply(np.cos(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[2, 1, :] = np.multiply(np.cos(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[2, 2, :] = np.sin(np.deg2rad(lat))

    dxdydz = np.column_stack((np.column_stack((dX, dY)), dZ))

    RR = np.reshape(R[0, :, :], (3, n))
    dx = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[1, :, :], (3, n))
    dy = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[2, :, :], (3, n))
    dz = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)

    return dx, dy, dz
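
The three multiply-and-sum reductions above apply a per-station rotation matrix to each (dX, dY, dZ) vector; the same result can be obtained with a single batched contraction. A hedged self-contained comparison (my sketch, not from Parallel.GAMIT):

import numpy as np

n = 4
R = np.random.randn(3, 3, n)          # stacked rotation matrices, as built above
dxdydz = np.random.randn(n, 3)        # one (dX, dY, dZ) row per station

# loop/multiply version, mirroring ct2lg
manual = np.vstack([np.sum(np.multiply(np.reshape(R[k, :, :], (3, n)), dxdydz.transpose()), axis=0)
                    for k in range(3)])

# single einsum: out[k, i] = sum_j R[k, j, i] * dxdydz[i, j]
batched = np.einsum('kji,ij->ki', R, dxdydz)
assert np.allclose(manual, batched)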
Project: Parallel.GAMIT    Author: demiangomez    | Project source code | File source code
def ct2lg(self, dX, dY, dZ, lat, lon):

        n = dX.size
        R = numpy.zeros((3, 3, n))

        R[0, 0, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[0, 1, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[0, 2, :] = numpy.cos(numpy.deg2rad(lat))
        R[1, 0, :] = -numpy.sin(numpy.deg2rad(lon))
        R[1, 1, :] = numpy.cos(numpy.deg2rad(lon))
        R[1, 2, :] = numpy.zeros((1, n))
        R[2, 0, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[2, 1, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[2, 2, :] = numpy.sin(numpy.deg2rad(lat))

        dxdydz = numpy.column_stack((numpy.column_stack((dX, dY)), dZ))

        RR = numpy.reshape(R[0, :, :], (3, n))
        dx = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[1, :, :], (3, n))
        dy = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[2, :, :], (3, n))
        dz = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)

        return dx, dy, dz
Project: Parallel.GAMIT    Author: demiangomez    | Project source code | File source code
def ct2lg(dX, dY, dZ, lat, lon):

    n = dX.size
    R = np.zeros((3, 3, n))

    R[0, 0, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[0, 1, :] = -np.multiply(np.sin(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[0, 2, :] = np.cos(np.deg2rad(lat))
    R[1, 0, :] = -np.sin(np.deg2rad(lon))
    R[1, 1, :] = np.cos(np.deg2rad(lon))
    R[1, 2, :] = np.zeros((1, n))
    R[2, 0, :] = np.multiply(np.cos(np.deg2rad(lat)), np.cos(np.deg2rad(lon)))
    R[2, 1, :] = np.multiply(np.cos(np.deg2rad(lat)), np.sin(np.deg2rad(lon)))
    R[2, 2, :] = np.sin(np.deg2rad(lat))

    dxdydz = np.column_stack((np.column_stack((dX, dY)), dZ))

    RR = np.reshape(R[0, :, :], (3, n))
    dx = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[1, :, :], (3, n))
    dy = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)
    RR = np.reshape(R[2, :, :], (3, n))
    dz = np.sum(np.multiply(RR, dxdydz.transpose()), axis=0)

    return dx, dy, dz
Project: Parallel.GAMIT    Author: demiangomez    | Project source code | File source code
def ct2lg(self, dX, dY, dZ, lat, lon):

        n = dX.size
        R = numpy.zeros((3, 3, n))

        R[0, 0, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[0, 1, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[0, 2, :] = numpy.cos(numpy.deg2rad(lat))
        R[1, 0, :] = -numpy.sin(numpy.deg2rad(lon))
        R[1, 1, :] = numpy.cos(numpy.deg2rad(lon))
        R[1, 2, :] = numpy.zeros((1, n))
        R[2, 0, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[2, 1, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[2, 2, :] = numpy.sin(numpy.deg2rad(lat))

        dxdydz = numpy.column_stack((numpy.column_stack((dX, dY)), dZ))

        RR = numpy.reshape(R[0, :, :], (3, n))
        dx = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[1, :, :], (3, n))
        dy = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[2, :, :], (3, n))
        dz = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)

        return dx, dy, dz
Project: Parallel.GAMIT    Author: demiangomez    | Project source code | File source code
def ct2lg(self, dX, dY, dZ, lat, lon):

        n = dX.size
        R = numpy.zeros((3, 3, n))

        R[0, 0, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[0, 1, :] = -numpy.multiply(numpy.sin(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[0, 2, :] = numpy.cos(numpy.deg2rad(lat))
        R[1, 0, :] = -numpy.sin(numpy.deg2rad(lon))
        R[1, 1, :] = numpy.cos(numpy.deg2rad(lon))
        R[1, 2, :] = numpy.zeros((1, n))
        R[2, 0, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.cos(numpy.deg2rad(lon)))
        R[2, 1, :] = numpy.multiply(numpy.cos(numpy.deg2rad(lat)), numpy.sin(numpy.deg2rad(lon)))
        R[2, 2, :] = numpy.sin(numpy.deg2rad(lat))

        dxdydz = numpy.column_stack((numpy.column_stack((dX, dY)), dZ))

        RR = numpy.reshape(R[0, :, :], (3, n))
        dx = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[1, :, :], (3, n))
        dy = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)
        RR = numpy.reshape(R[2, :, :], (3, n))
        dz = numpy.sum(numpy.multiply(RR, dxdydz.transpose()), axis=0)

        return dx, dy, dz
Project: tensorflow-basic    Author: weaponsjtu    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
Project: tf_base    Author: ozansener    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
Project: mss_pytorch    Author: Js-Mim    | Project source code | File source code
def phaseSensitive(self):
        """
            Computation of Phase Sensitive Mask. As appears in :
            H Erdogan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux,
            "Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,"
            in ICASSP 2015, Brisbane, April, 2015.

        Args:
            mTarget:   (2D ndarray) Magnitude Spectrogram of the target component
            pTarget:   (2D ndarray) Phase Spectrogram of the output component
            mY:        (2D ndarray) Magnitude Spectrogram of the residual component
            pY:        (2D ndarray) Phase Spectrogram of the residual component
        Returns:
            mask:      (2D ndarray) Array that contains time frequency gain values

        """
        print('Phase Sensitive Masking.')
        # Compute Phase Difference
        Theta = (self._pTarget - self._pY)
        self._mask = 2./ (1. + np.exp(-np.multiply(np.divide(self._sTarget, self._eps + self._nResidual), np.cos(Theta)))) - 1.
Project: PyFaceRecognizer    Author: Hironsan    | Project source code | File source code
def __init__(self,
                 images,
                 labels,
                 dtype=dtypes.float32,
                 reshape=True):

        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' %dtype)

        self._num_examples = images.shape[0]

        if dtype == dtypes.float32:
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(np.float32)
            images = np.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Project: radar    Author: amoose136    | Project source code | File source code
def test_wrap_with_iterable(self):
        # test fix for bug #1026:

        class with_wrap(np.ndarray):
            __array_priority__ = 10

            def __new__(cls):
                return np.asarray(1).view(cls).copy()

            def __array_wrap__(self, arr, context):
                return arr.view(type(self))

        a = with_wrap()
        x = ncu.multiply(a, (1, 2, 3))
        self.assertTrue(isinstance(x, with_wrap))
        assert_array_equal(x, np.array((1, 2, 3)))
Project: radar    Author: amoose136    | Project source code | File source code
def test_out_override(self):
        # 2016-01-29: NUMPY_UFUNC_DISABLED
        return

        # regression test for github bug 4753
        class OutClass(np.ndarray):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                if 'out' in kw:
                    tmp_kw = kw.copy()
                    tmp_kw.pop('out')
                    func = getattr(ufunc, method)
                    kw['out'][...] = func(*inputs, **tmp_kw)

        A = np.array([0]).view(OutClass)
        B = np.array([5])
        C = np.array([6])
        np.multiply(C, B, A)
        assert_equal(A[0], 30)
        assert_(isinstance(A, OutClass))
        A[0] = 0
        np.multiply(C, B, out=A)
        assert_equal(A[0], 30)
        assert_(isinstance(A, OutClass))
Project: radar    Author: amoose136    | Project source code | File source code
def test_NotImplemented_not_returned(self):
        # See gh-5964 and gh-2091. Some of these functions are not operator
        # related and were fixed for other reasons in the past.
        binary_funcs = [
            np.power, np.add, np.subtract, np.multiply, np.divide,
            np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
            np.bitwise_xor, np.left_shift, np.right_shift, np.fmax,
            np.fmin, np.fmod, np.hypot, np.logaddexp, np.logaddexp2,
            np.logical_and, np.logical_or, np.logical_xor, np.maximum,
            np.minimum, np.mod
            ]

        # These functions still return NotImplemented. Will be fixed in
        # future.
        # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

        a = np.array('1')
        b = 1
        for f in binary_funcs:
            assert_raises(TypeError, f, a, b)
Project: DeepOSM    Author: trailbehind    | Project source code | File source code
def predictions_for_tiles(test_images, model):
    """Batch predictions on the test image set, to avoid a memory spike."""
    npy_test_images = numpy.array([img_loc_tuple[0] for img_loc_tuple in test_images])
    test_images = npy_test_images.astype(numpy.float32)
    test_images = numpy.multiply(test_images, 1.0 / 255.0)

    all_predictions = []
    for x in range(0, len(test_images) - 100, 100):
        for p in model.predict(test_images[x:x + 100]):
            all_predictions.append(p)

    for p in model.predict(test_images[len(all_predictions):]):
        all_predictions.append(p)
    assert len(all_predictions) == len(test_images)

    return all_predictions
Project: Generative-ConvACs    Author: HUJI-Deep    | Project source code | File source code
def knn_masked_data(trX,trY,missing_data_dir, input_shape, k):

    raw_im_data = np.loadtxt(join(script_dir,missing_data_dir,'index.txt'),delimiter=' ',dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir,missing_data_dir,'index_mask.txt'),delimiter=' ',dtype=str)
    # Using 'brute' method since we only want to do one query per classifier
    # so this will be quicker as it avoids overhead of creating a search tree
    knn_m = KNeighborsClassifier(algorithm='brute',n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0],int(np.max(trY)+1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '), progressbar.Bar()], maxval=total_images, term_width=50).start()
    for i in range(total_images):
        mask_im=load_image(join(script_dir,missing_data_dir,raw_mask_data[i][0]), input_shape,1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps) # since mask is 1 at missing locations
        v_im=load_image(join(script_dir,missing_data_dir,raw_im_data[i][0]), input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask,(trX.shape[0],1))
        # Corrupt whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)        
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i,:] = knn_m.predict_proba(v_im.reshape(1,-1))
        pbar.update(i)
    pbar.finish()
    return prob_Y_hat
Project: speech_feature_extractor    Author: ZhihaoDU    | Project source code | File source code
def cochleagram_extractor(xx, sr, win_len, shift_len, channel_number, win_type):
    fcoefs, f = make_erb_filters(sr, channel_number, 50)
    fcoefs = np.flipud(fcoefs)
    xf = erb_frilter_bank(xx, fcoefs)

    if win_type == 'hanning':
        window = np.hanning(channel_number)
    elif win_type == 'hamming':
        window = np.hamming(channel_number)
    elif win_type == 'triangle':
        window = (1 - (np.abs(channel_number - 1 - 2 * np.arange(1, channel_number + 1, 1)) / (channel_number + 1)))
    else:
        window = np.ones(channel_number)
    window = window.reshape((channel_number, 1))

    xe = np.power(xf, 2.0)
    frames = 1 + ((np.size(xe, 1)-win_len) // shift_len)
    cochleagram = np.zeros((channel_number, frames))
    for i in range(frames):
        one_frame = np.multiply(xe[:, i*shift_len:i*shift_len+win_len], np.repeat(window, win_len, 1))
        cochleagram[:, i] = np.sqrt(np.mean(one_frame, 1))

    cochleagram = np.where(cochleagram == 0.0, np.finfo(float).eps, cochleagram)
    return cochleagram
Project: speech_feature_extractor    Author: ZhihaoDU    | Project source code | File source code
def log_power_spectrum_extractor(x, win_len, shift_len, win_type, is_log=False):
    samples = x.shape[0]
    frames = (samples - win_len) // shift_len
    stft = np.zeros((win_len, frames), dtype=np.complex64)
    spect = np.zeros((win_len // 2 + 1, frames), dtype=np.float64)

    if win_type == 'hanning':
        window = np.hanning(win_len)
    elif win_type == 'hamming':
        window = np.hamming(win_len)
    elif win_type == 'rectangle':
        window = np.ones(win_len)

    for i in range(frames):
        one_frame = x[i*shift_len: i*shift_len+win_len]
        windowed_frame = np.multiply(one_frame, window)
        stft[:, i] = np.fft.fft(windowed_frame, win_len)
        if is_log:
            spect[:, i] = np.log(np.power(np.abs(stft[0: win_len//2+1, i]), 2.))
        else:
            spect[:, i] = np.power(np.abs(stft[0: win_len//2+1, i]), 2.)

    return spect
Project: speech_feature_extractor    Author: ZhihaoDU    | Project source code | File source code
def stft_extractor(x, win_len, shift_len, win_type):
    samples = x.shape[0]
    frames = (samples - win_len) // shift_len
    stft = np.zeros((win_len, frames), dtype=np.complex64)
    spect = np.zeros((win_len // 2 + 1, frames), dtype=np.complex64)

    if win_type == 'hanning':
        window = np.hanning(win_len)
    elif win_type == 'hamming':
        window = np.hamming(win_len)
    elif win_type == 'rectangle':
        window = np.ones(win_len)

    for i in range(frames):
        one_frame = x[i*shift_len: i*shift_len+win_len]
        windowed_frame = np.multiply(one_frame, window)
        stft[:, i] = np.fft.fft(windowed_frame, win_len)
        spect[:, i] = stft[: win_len//2+1, i]

    return spect
Project: speech_feature_extractor    Author: ZhihaoDU    | Project source code | File source code
def spectrum_extractor(x, win_len, shift_len, win_type, is_log):
    samples = x.shape[0]
    frames = (samples - win_len) // shift_len
    stft = np.zeros((win_len, frames), dtype=np.complex64)
    spectrum = np.zeros((win_len // 2 + 1, frames), dtype=np.float64)

    if win_type == 'hanning':
        window = np.hanning(win_len)
    elif win_type == 'hamming':
        window = np.hamming(win_len)
    elif win_type == 'triangle':
        window = (1 - (np.abs(win_len - 1 - 2 * np.arange(1, win_len + 1, 1)) / (win_len + 1)))
    else:
        window = np.ones(win_len)
    for i in range(frames):
        one_frame = x[i*shift_len: i*shift_len+win_len]
        windowed_frame = np.multiply(one_frame, window)
        stft[:, i] = np.fft.fft(windowed_frame, win_len)
        if is_log:
            spectrum[:, i] = np.log(np.abs(stft[0: win_len//2+1, i]))
        else:
            spectrum[:, i] = np.abs(stft[0: win_len // 2 + 1, i])

    return spectrum
Project: DVH-Analytics    Author: cutright    | Project source code | File source code
def get_volume_of_dose(self, dose, **kwargs):
        volumes = np.zeros(self.count)
        for x in range(0, self.count):

            dvh = np.zeros(len(self.dvh))
            for y in range(0, len(self.dvh)):
                dvh[y] = self.dvh[y][x]
            if 'input' in kwargs and kwargs['input'] == 'relative':
                if isinstance(self.rx_dose[x], basestring):
                    volumes[x] = 0
                else:
                    volumes[x] = volume_of_dose(dvh, dose * self.rx_dose[x])
            else:
                volumes[x] = volume_of_dose(dvh, dose)

        if 'output' in kwargs and kwargs['output'] == 'absolute':
            volumes = np.multiply(volumes, self.volume[0:self.count])
        else:
            volumes = np.multiply(volumes, 100.)

        return volumes.tolist()
Project: aes_wimp    Author: Js-Mim    | Project source code | File source code
def phaseSensitive(self):
        """
            Computation of Phase Sensitive Mask. As appears in :
            H Erdogan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux,
            "Phase-sensitive and recognition-boosted speech separation using deep recurrent neural networks,"
            in ICASSP 2015, Brisbane, April, 2015.

        Args:
            mTarget:   (2D ndarray) Magnitude Spectrogram of the target component
            pTarget:   (2D ndarray) Phase Spectrogram of the output component
            mY:        (2D ndarray) Magnitude Spectrogram of the output component
            pY:        (2D ndarray) Phase Spectrogram of the output component
        Returns:
            mask:      (2D ndarray) Array that contains time frequency gain values

        """
        print('Phase Sensitive Masking.')
        # Compute Phase Difference
        Theta = (self._pTarget - self._pY)
        self._mask = 2./ (1. + np.exp(-np.multiply(np.divide(self._sTarget, self._eps + self._nResidual), np.cos(Theta)))) - 1.
Project: house-of-enlightenment    Author: house-of-enlightenment    | Project source code | File source code
def next_frame(self, pixels, t, collaboration_state, osc_data):

        # render every 2 frames so the ripples are slower
        self.frameCount += 1
        if (self.frameCount % 2 == 0):
            pixels[:, :] = self.get_pixels()
            return

        # only generate a ripple every couple frames
        if (random.random() < 0.12):
            self.start_ripple()

        # calculate each pixel's value based on its neighbors
        self.ripple_state[1:-1, 1:-1] = (
            self.previous_ripple_state[:-2, 1:-1] + self.previous_ripple_state[2:, 1:-1] +
            self.previous_ripple_state[1:-1, :-2] + self.previous_ripple_state[1:-1, 2:]
        ) * 0.5 - self.ripple_state[1:-1, 1:-1]

        # damping
        # numpy doesn't like multiplying ints and floats so tell it to be unsafe
        np.multiply(self.ripple_state, self.damping, out=self.ripple_state, casting='unsafe')

        pixels[:, :] = self.get_pixels()
        self.swap_buffers()
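
The casting='unsafe' argument above is what allows an integer state buffer to be damped in place by a float factor; a standalone illustration of that ufunc behaviour (not project code):

import numpy as np

state = np.arange(6, dtype=np.int32)
# np.multiply(state, 0.9, out=state)  # would fail: float64 result cannot be cast to int32 under the default 'same_kind' rule
np.multiply(state, 0.9, out=state, casting='unsafe')   # truncates the float product back into the int32 buffer
print(state)   # [0 0 1 2 3 4]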
Project: tensorflow-udacity-deep-learning    Author: hpssjellis    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
Project: tensorflow-udacity-deep-learning    Author: hpssjellis    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
Project: tensorflow-udacity-deep-learning    Author: hpssjellis    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
Project: matlab_imresize    Author: fatheral    | Project source code | File source code
def imresizemex(inimg, weights, indices, dim):
    in_shape = inimg.shape
    w_shape = weights.shape
    out_shape = list(in_shape)
    out_shape[dim] = w_shape[0]
    outimg = np.zeros(out_shape)
    if dim == 0:
        for i_img in range(in_shape[1]):
            for i_w in range(w_shape[0]):
                w = weights[i_w, :]
                ind = indices[i_w, :]
                im_slice = inimg[ind, i_img].astype(np.float64)
                outimg[i_w, i_img] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
    elif dim == 1:
        for i_img in range(in_shape[0]):
            for i_w in range(w_shape[0]):
                w = weights[i_w, :]
                ind = indices[i_w, :]
                im_slice = inimg[i_img, ind].astype(np.float64)
                outimg[i_img, i_w] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)        
    if inimg.dtype == np.uint8:
        outimg = np.clip(outimg, 0, 255)
        return np.around(outimg).astype(np.uint8)
    else:
        return outimg
Project: ngraph    Author: NervanaSystems    | Project source code | File source code
def test_cputensor_multiply_constant():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_c = np.multiply(np_a, 2)

    a = ng.constant(np_a, [M, N])
    b = ng.constant(2)
    c = ng.multiply(a, b)

    with executor(c) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_c)
Project: ngraph    Author: NervanaSystems    | Project source code | File source code
def test_cputensor_fusion():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[3, 2, 1]], dtype=np.float32)
    np_d = np.multiply(np_b, np.add(np_a, 2))

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = ng.constant(2)
    d = ng.multiply(b, ng.add(a, c))

    with executor(d) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_d)
Project: ngraph    Author: NervanaSystems    | Project source code | File source code
def discrete_uniform(self, low, high, quantum, axes, dtype=None):
        """
        Returns a tensor initialized with a discrete uniform distribution.

        Arguments:
            low: The lower limit of the values.
            high: The upper limit of the values.
            quantum: Distance between values.
            axes: The axes of the tensor.

        Returns:
            The tensor.

        """
        if dtype is None:
            dtype = self.dtype

        n = math.floor((high - low) / quantum)
        result = np.array(self.rng.random_integers(
            0, n, ng.make_axes(axes).lengths), dtype=dtype)
        np.multiply(result, quantum, result)
        np.add(result, low, result)
        return result
Project: tflearn    Author: tflearn    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
        if fake_data:
            self._num_examples = 10000
        else:
            assert images.shape[0] == labels.shape[0], (
                "images.shape: %s labels.shape: %s" % (images.shape,
                                                       labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Project: AND4NMF    Author: PrincetonML    | Project source code | File source code
def train(self):
        eps = 1e-10
        for i in range(self.epo):
            if i % 1 == 0:
                self.show_error()

            A = np.asarray(self.A.copy())
            Z = np.asarray(self.Z.copy())
            start = time.time()
            Z1 = np.multiply(Z, np.asarray(self.A.transpose() * self.Y))
            Z = np.divide(Z1, eps + np.asarray(self.A.transpose() * self.A * self.Z)) # + eps to avoid divided by 0
            self.Z = np.asmatrix(Z)
            A1 = np.multiply(A, np.asarray( self.Y * self.Z.transpose()))
            A = np.divide(A1, eps + np.asarray( self.A * self.Z * self.Z.transpose()))
            end = time.time()
            self.A = np.asmatrix(A)
            self.time = self.time + end - start
Project: hipsternet    Author: wiseodd    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
        if fake_data:
            self._num_examples = 10000
        else:
            assert images.shape[0] == labels.shape[0], (
                "images.shape: %s labels.shape: %s" % (images.shape,
                                                       labels.shape))
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0],
                                    images.shape[1] * images.shape[2])
            # Convert from [0, 255] -> [0.0, 1.0].
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Project: lexsub    Author: orenmel    | Project source code | File source code
def mult(self, target, deps, geo_mean_flag, tfo):

        #SUPPORT NONE TARGET

        target_vec = self.word_vecs.represent(target)
        scores = self.word_vecs.pos_scores(target_vec)
        for dep in deps:
            if dep in self.context_vecs:
                dep_vec = self.context_vecs.represent(dep)
                mult_scores = self.word_vecs.pos_scores(dep_vec)
                if geo_mean_flag:
                    mult_scores = mult_scores**(1.0/len(deps))        
                scores = np.multiply(scores, mult_scores)
            else:
                tfo.write("NOTICE: %s not in context embeddings. Ignoring.\n" % dep)   

        result_vec = self.word_vecs.top_scores(scores, -1)                
        return result_vec
Project: SecuML    Author: ANSSI-FR    | Project source code | File source code
def getTopWeightedFeatures(experiment_id, inst_exp_id, instance_id, size):
    instance_id = int(instance_id)
    exp = ExperimentFactory.getFactory().fromJson(experiment_id, session)
    validation_experiment = ExperimentFactory.getFactory().fromJson(inst_exp_id, session)
    #get the features
    features_names, features_values = validation_experiment.getFeatures(instance_id)
    features_values = [float(value) for value in features_values]
    #get the pipeline with scaler and logistic model
    pipeline = exp.getModelPipeline()
    #scale the features
    scaled_values = pipeline.named_steps['scaler'].transform(np.reshape(features_values,(1, -1)))
    weighted_values = np.multiply(scaled_values, pipeline.named_steps['model'].coef_)
    features = map(lambda name, value, w_value: (name, value, w_value),
                          features_names, features_values, weighted_values[0])
    features.sort(key = lambda tup: abs(tup[2]))
    features = features[:-int(size)-1:-1]
    tooltips = [x[1] for x in features]
    barplot = BarPlot([x[0] for x in features])
    dataset = PlotDataset([x[2] for x in features], None)
    dataset.setColor(colors_tools.red)
    barplot.addDataset(dataset)
    return jsonify(barplot.toJson(tooltip_data = tooltips))
Project: antgo    Author: jianzfb    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
    """Construct a DataSet. """
    assert images.shape[0] == labels.shape[0], (
        'images.shape: %s labels.shape: %s' % (images.shape,
                                               labels.shape))
    self._num_examples = images.shape[0]

    # Convert shape from [num examples, rows, columns, depth]
    # to [num examples, rows*columns] (assuming depth == 1)
    assert images.shape[3] == 1
    images = images.reshape(images.shape[0],
                            images.shape[1] * images.shape[2])
    # Convert from [0, 255] -> [0.0, 1.0].
    images = images.astype(np.float32)
    images = np.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
Project: dexml    Author: DexGroves    | Project source code | File source code
def prop_backward(self, X, y):
        layers_rev = list(reversed(self.layers))
        Zs_rev = list(reversed(self.Zs))
        As_rev = list(reversed(self.As))
        As_rev.append(X)

        delta0 = np.multiply(-(y - As_rev[0]), self.sigma_prime(Zs_rev[0]))
        djdw0 = np.dot(As_rev[1].T, delta0)

        self.deltas = [delta0]
        self.djdws = [djdw0]

        for i in xrange(0, len(layers_rev) - 1):
            delta_n = np.dot(self.deltas[i], layers_rev[i].W.T) * \
                self.sigma_prime(Zs_rev[i + 1])
            djdw_n = np.dot(As_rev[i + 2].T, delta_n)

            self.deltas.append(delta_n)
            self.djdws.append(djdw_n)

        self.deltas = list(reversed(self.deltas))
        self.djdws = list(reversed(self.djdws))
Project: dexml    Author: DexGroves    | Project source code | File source code
def prop_backward(self, X, y):
        layers_rev = list(reversed(self.layers))
        Zs_rev = list(reversed(self.Zs))
        As_rev = list(reversed(self.As))
        As_rev.append(X)

        delta0 = np.multiply(-(y - As_rev[0]), self.sigma_prime(Zs_rev[0]))
        djdw0 = np.dot(As_rev[1].T, delta0)

        self.deltas = [delta0]
        self.djdws = [djdw0]

        for i in xrange(0, len(layers_rev) - 1):
            delta_n = np.dot(self.deltas[i], layers_rev[i].W.T) * \
                self.sigma_prime(Zs_rev[i + 1])
            djdw_n = np.dot(As_rev[i + 2].T, delta_n)

            self.deltas.append(delta_n)
            self.djdws.append(djdw_n)

        self.deltas = list(reversed(self.deltas))
        self.djdws = list(reversed(self.djdws))
Project: decaptcha    Author: ksopyla    | Project source code | File source code
def __init__(self, images, labels, fake_data=False):
    if fake_data:
      self._num_examples = 10000
    else:
      assert images.shape[0] == labels.shape[0], (
          "images.shape: %s labels.shape: %s" % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      # Convert from [0, 255] -> [0.0, 1.0].
      images = images.astype(numpy.float32)
      images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0