Python numpy module: product() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.product().
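
Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) showing the two typical uses of numpy.product(): computing the number of elements implied by a shape tuple, and reducing an array along an axis. Note that np.product is simply an alias of np.prod; recent NumPy releases deprecate the alias (it was removed in NumPy 2.0), so np.prod is the forward-compatible spelling.

import numpy as np

# Total number of elements implied by a shape tuple -- the most common
# pattern in the snippets below (flattening inputs, sizing buffers, etc.).
shape = (3, 4, 5)
n_elements = np.product(shape)           # 60, same as np.prod(shape)

# Reducing along an axis, analogous to np.sum but multiplicative.
x = np.arange(1, 7).reshape(2, 3)        # [[1, 2, 3], [4, 5, 6]]
col_products = np.product(x, axis=0)     # array([ 4, 10, 18])
total_product = np.product(x)            # 720, product over all elements

print(n_elements, col_products, total_product)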

Project: deep_architect    Author: negrinho    | Project source | File source
def compile(self, in_x, train_feed, eval_feed):
        n = np.product(self.in_d)
        m, param_init_fn = [dom[i] for (dom, i) in zip(self.domains, self.chosen)]

        #sc = np.sqrt(6.0) / np.sqrt(m + n)
        #W = tf.Variable(tf.random_uniform([n, m], -sc, sc))
        W = tf.Variable( param_init_fn( [n, m] ) )
        b = tf.Variable(tf.zeros([m]))

        # if the number of input dimensions is larger than one, flatten the 
        # input and apply the affine transformation. 
        if len(self.in_d) > 1:
            in_x_flat = tf.reshape(in_x, shape=[-1, n])
            out_y = tf.add(tf.matmul(in_x_flat, W), b)
        else:
            out_y = tf.add(tf.matmul(in_x, W), b)
        return out_y

# computes the output dimension based on the padding scheme used.
# this comes from the tensorflow documentation
Project: deep_architect    Author: negrinho    | Project source | File source
def get_outdim(self):
        #assert in_x == self.b.get_outdim()
        # relaxing the requirement that the input dimension equal the output
        # dimension, taking into account the padding scheme considered.
        out_d_b = self.b.get_outdim()
        in_d = self.in_d

        if len(out_d_b) == len(in_d):
            out_d = tuple(
                [max(od_i, id_i) for (od_i, id_i) in zip(out_d_b, in_d)])

        else:
            # flattens both input and output. 
            out_d_b_flat = np.product(out_d_b)
            in_d_flat = np.product(in_d)
            out_d = (max(out_d_b_flat, in_d_flat) ,)

        return out_d
Project: Projects    Author: it2school    | Project source | File source
def get_surface(self, dest_surf = None):
        camera = self.camera

        im = highgui.cvQueryFrame(camera)
        #convert Ipl image to PIL image
        #print type(im)
        if im:
            xx = opencv.adaptors.Ipl2NumPy(im)
            #print type(xx)
            #print xx.iscontiguous()
            #print dir(xx)
            #print xx.shape
            xxx = numpy.reshape(xx, (numpy.product(xx.shape),))

            if xx.shape[2] != 3:
                raise ValueError("not sure what to do about this size")

            pg_img = pygame.image.frombuffer(xxx, (xx.shape[1],xx.shape[0]), "RGB")

            # if there is a destination surface given, we blit onto that.
            if dest_surf:
                dest_surf.blit(pg_img, (0,0))
            return dest_surf
            #return pg_img
Project: radar    Author: amoose136    | Project source | File source
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))
Project: radar    Author: amoose136    | Project source | File source
def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
        self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
        self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
        self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
        self.assertTrue(eq(np.product(x, 0), product(x, 0)))
        self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
                           product(xm, axis=0)))
        if len(s) > 1:
            self.assertTrue(eq(np.concatenate((x, y), 1),
                               concatenate((xm, ym), 1)))
            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
Project: HTM_experiments    Author: ctrl-z-9000-times    | Project source | File source
def __init__(self, input_shape, output_shape, output_sparsity=.05):
        """
        """
        self.learning_rate   = 1/100
        self.input_shape     = tuple(input_shape)
        self.output_shape    = tuple(output_shape)
        self.input_size      = np.product(self.input_shape)
        self.output_size     = np.product(self.output_shape)
        self.on_bits         = max(1, int(round(output_sparsity * self.output_size)))
        self.xp_q            = NStepQueue(3, .90, self.learn)
        self.expected_values = np.random.random((self.input_size, self.output_size)) * self.learning_rate
        self.expected_values = np.array(self.expected_values, dtype=np.float32)
        print("Supervised Controller")
        print("\tExpected Values shape:", self.expected_values.shape)
        print("\tFuture discount:", self.xp_q.discount)
        print("\tLearning Rate:", self.learning_rate)
Project: HTM_experiments    Author: ctrl-z-9000-times    | Project source | File source
def predict(self, input_sdr=None):
        """
        Argument input_sdr is an ndarray of indexes into the input space.
        Returns the probability of each category in the output space.
        """
        self.input_sdr.assign(input_sdr)
        pdf = self.stats[self.input_sdr.flat_index]
        if True:
            # Combine multiple probabilities into a single pdf. Product, not
            # summation, to combine probabilities of independent events. The
            # problem with this is that if a few unexpected bits turn on it
            # multiplies the result by zero, and the test dataset is going to
            # have unexpected things in it.
            return np.product(pdf, axis=0, keepdims=False)
        else:
            # Use summation B/C it works well.
            return np.sum(pdf, axis=0, keepdims=False)
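
The comment in the predict() method above points at a real pitfall of multiplying per-feature probabilities with np.product: a single zero wipes out the whole column. Below is a small, hypothetical illustration (not part of the HTM_experiments project) of the usual mitigation, clipping probabilities to a small epsilon before taking the product:

import numpy as np

pdf = np.array([[0.9, 0.1],
                [0.0, 1.0],   # one unexpected bit: a hard zero
                [0.8, 0.2]])

raw = np.product(pdf, axis=0)                          # array([0.  , 0.02]) -- first column collapses
eps = 1e-6
clipped = np.product(np.clip(pdf, eps, 1.0), axis=0)   # array([7.2e-07, 2.0e-02]) -- ranking survives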
Project: dist_hyperas    Author: osh    | Project source | File source
def add_task(self, dataset_filename, model_filename):
        dataset_src = open(dataset_filename,'r').read()
        model_src = open(model_filename,"r").read()
        src,info = hopt.extract_hopts(model_src)
        # list() is needed so np.product sees a sequence rather than a lazy map object
        ss_size = int(np.product(list(map(lambda x: len(x["options"]), info.values()))))
        print("Search space size: ", ss_size)
        w = []

        for i in range(0,ss_size):
            info_i,src_i = hopt.produce_variant(src,copy.deepcopy(info),i)
            info_i["subtask"] = i
            w.append( (info_i,dataset_src,src_i) )

        print "submitting task..."
        rv = self.socket.send_json(("submit_task", w))
        print rv
Project: vampyre    Author: GAMPTeam    | Project source | File source
def repeat_sum(u,shape,rep_axes):
    """
    Computes sum of a repeated matrix

    In effect, this routine computes 
    :code:`np.sum(repeat(u,shape,rep_axes))`.  However, it performs
    this without having to perform the full repetition.

    """
    # Must convert to np.array to perform slicing
    shape_vec = np.array(shape,dtype=int)
    rep_vec = np.array(rep_axes,dtype=int)

    # repeat and sum
    urep = repeat_axes(u,shape,rep_axes,rep=False)
    usum = np.sum(urep)*np.product(shape_vec[rep_vec])
    return usum
Project: sympl    Author: mcgibbon    | Project source | File source
def get_final_shape(data_array, out_dims, direction_to_names):
    """
    Determine the final shape that data_array must be reshaped to in order to
    have one axis for each of the out_dims (for instance, combining all
    axes collected by the '*' direction).
    """
    final_shape = []
    for direction in out_dims:
        if len(direction_to_names[direction]) == 0:
            final_shape.append(1)
        else:
            # determine shape once dimensions for direction (usually '*') are combined
            final_shape.append(
                np.product([len(data_array.coords[name])
                            for name in direction_to_names[direction]]))
    return final_shape
Project: bolero    Author: rock-learning    | Project source | File source
def _init_space(self, space):
        if not isinstance(space, gym.Space):
            raise ValueError("Unknown space, type '%s'" % type(space))
        elif isinstance(space, gym.spaces.Box):
            n_dims = np.product(space.shape)
            handler = BoxClipHandler(space.low, space.high)
        elif isinstance(space, gym.spaces.Discrete):
            n_dims = 1
            handler = IntHandler(space.n)
        elif isinstance(space, gym.spaces.HighLow):
            n_dims = space.num_rows
            handler = HighLowHandler(space.matrix)
        elif isinstance(space, gym.spaces.Tuple):
            raise NotImplementedError("Space of type '%s' is not supported"
                                      % type(space))
        return n_dims, handler
Project: dl4nlp    Author: yohokuno    | Project source | File source
def train(self, sentences, iterations=1000):
        # Preprocess sentences to create indices of context and next words
        self.dictionary = build_dictionary(sentences, self.vocabulary_size)
        indices = to_indices(sentences, self.dictionary)
        self.reverse_dictionary = {index: word for word, index in self.dictionary.items()}
        inputs, outputs = self.create_context(indices)

        # Create cost and gradient function for gradient descent
        shapes = [self.W_shape, self.U_shape, self.H_shape, self.C_shape]
        flatten_nplm_cost_gradient = flatten_cost_gradient(nplm_cost_gradient, shapes)
        cost_gradient = bind_cost_gradient(flatten_nplm_cost_gradient, inputs, outputs,
                                           sampler=get_stochastic_sampler(10))

        # Train neural network
        parameters_size = np.sum(np.product(shape) for shape in shapes)
        initial_parameters = np.random.normal(size=parameters_size)
        self.parameters, cost_history = gradient_descent(cost_gradient, initial_parameters, iterations)
        return cost_history
Project: arlpy    Author: org-arl    | Project source | File source
def ser(x, y):
    """Measure symbol error rate between symbols in x and y.

    :param x: symbol array #1
    :param y: symbol array #2
    :returns: symbol error rate

    >>> import arlpy
    >>> arlpy.comms.ser([0,1,2,3], [0,1,2,2])
    0.25
    """
    x = _np.asarray(x, dtype=_np.int)
    y = _np.asarray(y, dtype=_np.int)
    n = _np.product(_np.shape(x))
    e = _np.count_nonzero(x^y)
    return float(e)/n
Project: arlpy    Author: org-arl    | Project source | File source
def ber(x, y, m=2):
    """Measure bit error rate between symbols in x and y.

    :param x: symbol array #1
    :param y: symbol array #2
    :param m: symbol alphabet size (maximum 64)
    :returns: bit error rate

    >>> import arlpy
    >>> arlpy.comms.ber([0,1,2,3], [0,1,2,2], m=4)
    0.125
    """
    x = _np.asarray(x, dtype=_np.int)
    y = _np.asarray(y, dtype=_np.int)
    if _np.any(x >= m) or _np.any(y >= m) or _np.any(x < 0) or _np.any(y < 0):
        raise ValueError('Invalid data for specified m')
    if m == 2:
        return ser(x, y)
    if m > _MAX_M:
        raise ValueError('m > %d not supported' % (_MAX_M))
    n = _np.product(_np.shape(x))*_np.log2(m)
    e = x^y
    e = e[_np.nonzero(e)]
    e = _np.sum(_popcount[e])
    return float(e)/n
Project: sporco    Author: bwohlberg    | Project source | File source
def __init__(self, xshape, dtype, opt=None):
        """
        Initialise an FISTADFT object with problem size and options.

        Parameters
        ----------
        xshape : tuple of ints
          Shape of working variable X (the primary variable)
        dtype : data-type
          Data type for working variables
        opt : :class:`FISTADFT.Options` object
          Algorithm options
        """

        if opt is None:
            opt = FISTADFT.Options()
        Nx = np.product(xshape)
        super(FISTADFT, self).__init__(Nx, xshape, dtype, opt)

        self.Xf = None
        self.Yf = None
Project: sporco    Author: bwohlberg    | Project source | File source
def __init__(self, xshape, dtype, opt=None):
        """
        Initialise an ADMMEqual object with problem size and options.

        Parameters
        ----------
        xshape : tuple of ints
          Shape of working variable X (the primary variable)
        dtype : data-type
          Data type for working variables
        opt : :class:`ADMMEqual.Options` object
          Algorithm options
        """

        if opt is None:
            opt = ADMMEqual.Options()
        Nx = np.product(xshape)
        super(ADMMEqual, self).__init__(Nx, xshape, xshape, dtype, opt)
Project: sporco    Author: bwohlberg    | Project source | File source
def mpraw_as_np(shape, dtype):
    """Construct a numpy array of the specified shape and dtype for which the
    underlying storage is a multiprocessing RawArray in shared memory.

    Parameters
    ----------
    shape : tuple
      Shape of numpy array
    dtype : data-type
      Data type of array

    Returns
    -------
    arr : ndarray
      Numpy array
    """

    sz = int(np.product(shape))
    csz = sz * np.dtype(dtype).itemsize
    raw = mp.RawArray('c', csz)
    return np.frombuffer(raw, dtype=dtype, count=sz).reshape(shape)
Project: tools    Author: kastnerkyle    | Project source | File source
def slinterp(X, factor, copy=True):
    """
    Slow-ish linear interpolation of a 1D numpy array. There must be some
    better function to do this in numpy.

    Parameters
    ----------
    X : ndarray
        1D input array to interpolate

    factor : int
        Integer factor to interpolate by

    Return
    ------
    X_r : ndarray
    """
    sz = np.product(X.shape)
    X = np.array(X, copy=copy)
    X_s = np.hstack((X[1:], [0]))
    X_r = np.zeros((factor, sz))
    for i in range(factor):
        X_r[i, :] = (factor - i) / float(factor) * X + (i / float(factor)) * X_s
    return X_r.T.ravel()[:(sz - 1) * factor + 1]
Project: tools    Author: kastnerkyle    | Project source | File source
def slinterp(X, factor, copy=True):
    """
    Slow-ish linear interpolation of a 1D numpy array. There must be some
    better function to do this in numpy.

    Parameters
    ----------
    X : ndarray
        1D input array to interpolate

    factor : int
        Integer factor to interpolate by

    Return
    ------
    X_r : ndarray
    """
    sz = np.product(X.shape)
    X = np.array(X, copy=copy)
    X_s = np.hstack((X[1:], [0]))
    X_r = np.zeros((factor, sz))
    for i in range(factor):
        X_r[i, :] = (factor - i) / float(factor) * X + (i / float(factor)) * X_s
    return X_r.T.ravel()[:(sz - 1) * factor + 1]
Project: instacart-basket-prediction    Author: colinmorris    | Project source | File source
def exact_expected_fscore_naive(probs, thresh):
  """NB: This algorithm is exponential in the size of probs!
  Based on initial measurements, less than 15 items is
  sub-second. 16 = 2s, 17=4s, 18=8s, and, well, you know
  the rest...
  possible relaxation to allow larger number of products:
  force items with sufficiently low probs (e.g. < 1%) off
  in groundtruths.
  """
  probs = np.asarray(probs)
  n = len(probs)
  expected = 0
  p_none = np.product(1-probs)
  predict_none = p_none > thresh
  predictions = (probs >= thresh).astype(np.int8)
  for gt in itertools.product([0,1], repeat=n):
    gt = np.array(gt)
    fs = fscore(predictions, gt, predict_none)
    p = gt_prob(gt, probs)
    expected += fs * p
  return expected
Project: vec4ir    Author: lgalke    | Project source | File source
def eqe1(E, query, vocabulary, priors):
    """
    Arguments:
        E - word embedding
        Q - list of query terms
        vocabulary -- list of relevant words
        priors - precomputed priors with same indices as vocabulary
    >>> E = dict()
    >>> E['a'] = np.asarray([0.5,0.5])
    >>> E['b'] = np.asarray([0.2,0.8])
    >>> E['c'] = np.asarray([0.9,0.1])
    >>> E['d'] = np.asarray([0.8,0.2])
    >>> q = "a b".split()
    >>> vocabulary = "a b c".split()
    >>> priors = np.asarray([0.25,0.5,0.25])
    >>> posterior = eqe1(E, q, vocabulary, priors)
    >>> vocabulary[np.argmax(posterior)]
    'c'
    """
    posterior = [priors[i] *
                 np.product([delta(E[qi], E[w]) / priors[i] for qi in query])
                 for i, w in enumerate(vocabulary)]

    return np.asarray(posterior)
Project: luna16    Author: gzuidhof    | Project source | File source
def weight_by_class_balance(truth, classes=None):
    """
    Determines a loss weight map given the truth by balancing the classes from the classes argument.
    The classes argument can be used to only include certain classes (you may for instance want to exclude the background).
    """

    if classes is None:
        # Include all classes
        classes = np.unique(truth)

    weight_map = np.zeros_like(truth, dtype=np.float32)
    total_amount = np.product(truth.shape)

    for c in classes:
        class_mask = np.where(truth==c,1,0)
        class_weight = 1/((np.sum(class_mask)+1e-8)/total_amount)

        weight_map += (class_mask*class_weight)#/total_amount

    return weight_map
Project: pyprocessmacro    Author: QuentinAndre    | Project source | File source
def eval_expression(expr, values=None):
    """
    Evaluates a symbolic expression and returns a numerical array.
    :param expr: A symbolic expression to evaluate, in the form of a N_terms * N_Vars matrix
    :param values: None, or a dictionary of variable:value pairs, to substitute in the symbolic expression.
    :return: An evaled expression, in the form of an N_terms array.
    """
    n_coeffs = expr.shape[0]
    evaled_expr = np.zeros(n_coeffs)
    for (i, term) in enumerate(expr):
        if values:
            evaled_term = np.array([values.get(elem, 0) if isinstance(elem, str) else elem for elem in term])
        else:
            evaled_term = np.array(
                [0 if isinstance(elem, str) else elem for elem in term])  # All variables at 0
        evaled_expr[i] = np.product(evaled_term.astype(float))  # Gradient is the product of values
    return evaled_expr
Project: krpcScripts    Author: jwvanderbeck    | Project source | File source
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))
Project: krpcScripts    Author: jwvanderbeck    | Project source | File source
def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
        self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
        self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
        self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
        self.assertTrue(eq(np.product(x, 0), product(x, 0)))
        self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
                           product(xm, axis=0)))
        if len(s) > 1:
            self.assertTrue(eq(np.concatenate((x, y), 1),
                               concatenate((xm, ym), 1)))
            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
Project: yt    Author: yt-project    | Project source | File source
def _read_raw_field(self, grid, field):
        field_name = field[1]
        base_dir = self.ds.index.raw_file

        box_list = self.ds.index.raw_field_map[field_name][0]
        fn_list = self.ds.index.raw_field_map[field_name][1]
        offset_list = self.ds.index.raw_field_map[field_name][2]

        lev = grid.Level        
        filename = base_dir + "Level_%d/" % lev + fn_list[grid.id]
        offset = offset_list[grid.id]
        box = box_list[grid.id]

        lo = box[0]
        hi = box[1]
        shape = hi - lo + 1
        with open(filename, "rb") as f:
            f.seek(offset)
            f.readline()  # always skip the first line
            arr = np.fromfile(f, 'float64', np.product(shape))
            arr = arr.reshape(shape, order='F')
        return arr
Project: yt    Author: yt-project    | Project source | File source
def __init__(self, ds, max_level=2):
        self.max_level = max_level
        self.cell_count = 0
        self.layers = []
        self.domain_dimensions = ds.domain_dimensions
        self.domain_left_edge = ds.domain_left_edge
        self.domain_right_edge = ds.domain_right_edge
        self.grid_filename = "amr_grid.inp"
        self.ds = ds

        base_layer = RadMC3DLayer(0, None, 0,
                                  self.domain_left_edge,
                                  self.domain_right_edge,
                                  self.domain_dimensions)

        self.layers.append(base_layer)
        self.cell_count += np.product(ds.domain_dimensions)

        sorted_grids = sorted(ds.index.grids, key=lambda x: x.Level)
        for grid in sorted_grids:
            if grid.Level <= self.max_level:
                self._add_grid_to_layers(grid)
Project: data_tools    Author: veugene    | Project source | File source
def __init__(self, patchsize, source, binary_mask=None,
                 random_order=False, mirrored=True, max_num=None):
        self.patchsize = patchsize
        self.source = source.astype(np.float32)
        self.mask = binary_mask
        self.random_order = random_order
        self.mirrored = mirrored
        self.max_num = max_num

        if len(self.source.shape)==2:
            self.source = self.source[:,:,np.newaxis]
        if self.mask is not None and len(self.mask.shape)==2:
            self.mask = self.mask[:,:,np.newaxis]

        if self.mask is not None:
            self.num_patches = (self.mask>0).sum()
        else:
            self.num_patches = np.product(self.source.shape)
Project: drmad    Author: bigaidream-projects    | Project source | File source
def apply(self, data, copy=False):
        if copy:
            data = np.copy(data)
        data_shape = data.shape
        if len(data.shape) > 2:
            data = data.reshape(data.shape[0], np.product(data.shape[1:]))
        assert len(data.shape) == 2, 'Contrast norm on flattened data'
#        assert np.min(data) >= 0.
#        assert np.max(data) <= 1.
        data -= data.mean(axis=1)[:, np.newaxis]
        norms = np.sqrt(np.sum(data ** 2, axis=1)) / self.scale
        norms[norms < self.epsilon] = self.epsilon
        data /= norms[:, np.newaxis]
        if data_shape != data.shape:
            data = data.reshape(data_shape)
        return data
Project: data-science-bowl-2017    Author: tondonia    | Project source | File source
def weight_by_class_balance(truth, classes=None):
    """
    Determines a loss weight map given the truth by balancing the classes from the classes argument.
    The classes argument can be used to only include certain classes (you may for instance want to exclude the background).
    """

    if classes is None:
        # Include all classes
        classes = np.unique(truth)

    weight_map = np.zeros_like(truth, dtype=np.float32)
    total_amount = np.product(truth.shape)

    min_weight = sys.maxsize
    for c in classes:
        class_mask = np.where(truth==c,1,0)
        class_weight = 1/((np.sum(class_mask)+1e-8)/total_amount)
        if class_weight < min_weight:
            min_weight = class_weight
        weight_map += (class_mask*class_weight)#/total_amount
    weight_map /= min_weight
    return weight_map
Project: mriqc    Author: poldracklab    | Project source | File source
def __iter__(self):
        """Iterate over the points in the grid.
        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = list(p.items())
            if not items:
                yield {}
            else:
                for estimator, grid_list in items:
                    for grid in grid_list:
                        grid_points = sorted(list(grid.items()))
                        keys, values = zip(*grid_points)
                        for v in product(*values):
                            params = dict(zip(keys, v))
                            yield (estimator, params)
Project: coremltools    Author: apple    | Project source | File source
def test_activation_layer_params(self):
        options = dict(
            activation = ['tanh', 'relu', 'sigmoid', 'softmax', 'softplus', 'softsign', 'hard_sigmoid', 'elu']
        )

        # Define a function that tests a model
        num_channels = 10
        input_dim = 10
        def build_model(x):
            model = Sequential()
            model.add(Dense(num_channels, input_dim = input_dim))
            model.add(Activation(**dict(zip(options.keys(), x))))
            return x, model

        # Iterate through all combinations
        product = itertools.product(*options.values())
        args = [build_model(p) for p in product]

        # Test the cases
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
            self._run_test(model, param)
Project: coremltools    Author: apple    | Project source | File source
def test_dense_layer_params(self):
        options = dict(
            activation = ['relu', 'softmax', 'tanh', 'sigmoid', 'softplus', 'softsign', 'elu','hard_sigmoid'],
            use_bias = [True, False],
        )
        # Define a function that tests a model
        input_shape = (10,)
        num_channels = 10
        def build_model(x):
            kwargs = dict(zip(options.keys(), x))
            model = Sequential()
            model.add(Dense(num_channels, input_shape = input_shape, **kwargs))
            return x, model

        # Iterate through all combinations
        product = itertools.product(*options.values())
        args = [build_model(p) for p in product]

        # Test the cases
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            self._run_test(model, param)
Project: coremltools    Author: apple    | Project source | File source
def test_conv_layer_params(self, model_precision=_MLMODEL_FULL_PRECISION):
        options = dict(
            activation = ['relu', 'tanh', 'sigmoid'], # keras does not support softmax on 4-D
            use_bias = [True, False],
            padding = ['same', 'valid'],
            filters = [1, 3, 5],
            kernel_size = [[5,5]], # fails when sizes are different
        )

        # Define a function that tests a model
        input_shape = (10, 10, 1)
        def build_model(x):
            kwargs = dict(zip(options.keys(), x))
            model = Sequential()
            model.add(Conv2D(input_shape = input_shape, **kwargs))
            return x, model

        # Iterate through all combinations
        product = itertools.product(*options.values())
        args = [build_model(p) for p in product]

        # Test the cases
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            self._run_test(model, param, model_precision=model_precision)
Project: coremltools    Author: apple    | Project source | File source
def test_activation_layer_params(self):
        options = dict(
            activation = ['tanh', 'relu', 'sigmoid', 'softmax', 'softplus', 'softsign']
        )

        # Define a function that tests a model
        num_channels = 10
        input_dim = 10
        def build_model(x):
            model = Sequential()
            model.add(Dense(num_channels, input_dim = input_dim))
            model.add(Activation(**dict(zip(options.keys(), x))))
            return x, model

        # Iterate through all combinations
        product = itertools.product(*options.values())
        args = [build_model(p) for p in product]

        # Test the cases
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
            self._run_test(model, param)
Project: coremltools    Author: apple    | Project source | File source
def test_dense_layer_params(self):
        options = dict(
            activation = ['relu', 'softmax', 'tanh', 'sigmoid'],
            bias = [True, False],
        )

        # Define a function that tests a model
        input_dim = 10
        num_channels = 10
        def build_model(x):
            kwargs = dict(zip(options.keys(), x))
            model = Sequential()
            model.add(Dense(num_channels, input_dim = input_dim, **kwargs))
            return x, model

        # Iterate through all combinations
        product = itertools.product(*options.values())
        args = [build_model(p) for p in product]

        # Test the cases
        print("Testing a total of %s cases. This could take a while" % len(args))
        for param, model in args:
            self._run_test(model, param)
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def cartesian_product(X):
    '''
    Numpy version of itertools.product or pandas.compat.product.
    Sometimes faster (for large inputs)...

    Examples
    --------
    >>> cartesian_product([list('ABC'), [1, 2]])
    [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'),
    array([1, 2, 1, 2, 1, 2])]

    '''

    lenX = np.fromiter((len(x) for x in X), dtype=int)
    cumprodX = np.cumproduct(lenX)

    a = np.roll(cumprodX, 1)
    a[0] = 1

    b = cumprodX[-1] / cumprodX

    return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]),
                    np.product(a[i]))
            for i, x in enumerate(X)]
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | Project source | File source
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))
Project: AIFun    Author: Plottel    | Project source | File source
def get_surface(self, dest_surf = None):
        camera = self.camera

        im = highgui.cvQueryFrame(camera)
        #convert Ipl image to PIL image
        #print type(im)
        if im:
            xx = opencv.adaptors.Ipl2NumPy(im)
            #print type(xx)
            #print xx.iscontiguous()
            #print dir(xx)
            #print xx.shape
            xxx = numpy.reshape(xx, (numpy.product(xx.shape),))

            if xx.shape[2] != 3:
                raise ValueError("not sure what to do about this size")

            pg_img = pygame.image.frombuffer(xxx, (xx.shape[1],xx.shape[0]), "RGB")

            # if there is a destination surface given, we blit onto that.
            if dest_surf:
                dest_surf.blit(pg_img, (0,0))
            return dest_surf
            #return pg_img
Project: aws-lambda-numpy    Author: vitolimandibhrata    | Project source | File source
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))
Project: aws-lambda-numpy    Author: vitolimandibhrata    | Project source | File source
def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
        self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
        self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
        self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
        self.assertTrue(eq(np.product(x, 0), product(x, 0)))
        self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
                           product(xm, axis=0)))
        if len(s) > 1:
            self.assertTrue(eq(np.concatenate((x, y), 1),
                               concatenate((xm, ym), 1)))
            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
Project: pyDataView    Author: edwardsmith999    | Project source | File source
def __init__(self, fdir, fname, nperbin):

        if (fdir[-1] != '/'): fdir += '/'
        self.fdir = fdir
        self.procxyz = self.get_proc_topology()
        self.procs = int(np.product(self.procxyz))
        print("OpenFOAM_RawData Warning - disable parallel check, assuming always parallel")
        self.parallel_run = True
        #if self.procs != 1:
        #    self.parallel_run = True
        #else:
        #    self.parallel_run = False
        self.grid = self.get_grid()
        self.reclist = self.get_reclist()
        self.maxrec = len(self.reclist) - 1 # count from 0
        self.fname = fname
        self.npercell = nperbin #self.get_npercell()
        self.nu = self.get_nu()
        self.header = None
Project: keras-steering-angle-visualizations    Author: jacobgil    | Project source | File source
def visualize_hypercolumns(model, original_img):

    img = np.float32(cv2.resize(original_img, (200, 66))) / 255.0

    layers_extract = [9]

    hc = extract_hypercolumns(model, layers_extract, img)
    avg = np.product(hc, axis=0)
    avg = np.abs(avg)
    avg = avg / np.max(np.max(avg))

    heatmap = cv2.applyColorMap(np.uint8(255 * avg), cv2.COLORMAP_JET)
    heatmap = np.float32(heatmap) / np.max(np.max(heatmap))
    heatmap = cv2.resize(heatmap, original_img.shape[0:2][::-1])

    both = 255 * heatmap * 0.7 + original_img
    both = both / np.max(both)
    return both
Project: kaggle_dsb    Author: syagev    | Project source | File source
def weight_by_class_balance(truth, classes=None):
    """
    Determines a loss weight map given the truth by balancing the classes from the classes argument.
    The classes argument can be used to only include certain classes (you may for instance want to exclude the background).
    """

    if classes is None:
        # Include all classes
        classes = np.unique(truth)

    weight_map = np.zeros_like(truth, dtype=np.float32)
    total_amount = np.product(truth.shape)

    for c in classes:
        class_mask = np.where(truth==c,1,0)
        class_weight = 1/((np.sum(class_mask)+1e-8)/total_amount)

        weight_map += (class_mask*class_weight)#/total_amount

    return weight_map
Project: yt_astro_analysis    Author: yt-project    | Project source | File source
def __init__(self, ds, max_level=2):
        self.max_level = max_level
        self.cell_count = 0
        self.layers = []
        self.domain_dimensions = ds.domain_dimensions
        self.domain_left_edge = ds.domain_left_edge
        self.domain_right_edge = ds.domain_right_edge
        self.grid_filename = "amr_grid.inp"
        self.ds = ds

        base_layer = RadMC3DLayer(0, None, 0,
                                  self.domain_left_edge,
                                  self.domain_right_edge,
                                  self.domain_dimensions)

        self.layers.append(base_layer)
        self.cell_count += np.product(ds.domain_dimensions)

        sorted_grids = sorted(ds.index.grids, key=lambda x: x.Level)
        for grid in sorted_grids:
            if grid.Level <= self.max_level:
                self._add_grid_to_layers(grid)
Project: lambda-numba    Author: rlhotovy    | Project source | File source
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))
Project: lambda-numba    Author: rlhotovy    | Project source | File source
def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
        self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
        self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
        self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
        self.assertTrue(eq(np.product(x, 0), product(x, 0)))
        self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
                           product(xm, axis=0)))
        if len(s) > 1:
            self.assertTrue(eq(np.concatenate((x, y), 1),
                               concatenate((xm, ym), 1)))
            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
Project: opminreplicability    Author: epochx    | Project source | File source
def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params
Project: dynesty    Author: joshspeagle    | Project source | File source
def make_eigvals_positive(am, targetprod):
    """For the symmetric square matrix `am`, increase any zero eigenvalues
    such that the total product of eigenvalues is greater than or equal to
    `targetprod`. Returns a (possibly) new, non-singular matrix."""

    w, v = linalg.eigh(am)  # use eigh since a is symmetric
    mask = w < 1.e-10
    if np.any(mask):
        nzprod = np.product(w[~mask])  # product of nonzero eigenvalues
        nzeros = mask.sum()  # number of zero eigenvalues
        new_val = max(1.e-10, (targetprod / nzprod) ** (1. / nzeros))
        w[mask] = new_val  # adjust zero eigvals
        am_new = np.dot(np.dot(v, np.diag(w)), linalg.inv(v))  # re-form cov
    else:
        am_new = am

    return am_new
Project: deliver    Author: orchestor    | Project source | File source
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))