Python numpy module: isscalar() example source code

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use numpy.isscalar().
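Before diving into the examples, a quick sketch of what numpy.isscalar() does and does not treat as a scalar. Note in particular that a 0-d NumPy array is not a scalar, which is a common pitfall:

import numpy as np

assert np.isscalar(3.14)                 # Python scalars count
assert np.isscalar(np.float64(3.14))     # so do NumPy scalar values
assert np.isscalar('s')                  # and strings
assert not np.isscalar(np.array(3.14))   # but a 0-d array does NOT
assert not np.isscalar([3.14])           # nor does a list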

Project: pycma    Author: CMA-ES
def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
        """``prctile(data, 50)`` returns the median, but p_vals can
        also be a sequence.

        For small samples or extreme percentiles, arguably provides better
        values than matplotlib.mlab.prctile or np.percentile, but is also
        slower.

        """
        ps = [p_vals] if np.isscalar(p_vals) else p_vals

        if not sorted_:
            data = sorted(data)
        n = len(data)
        d = []
        for p in ps:
            fi = p * n / 100 - 0.5
            if fi <= 0:  # maybe extrapolate?
                d.append(data[0])
            elif fi >= n - 1:
                d.append(data[-1])
            else:
                i = int(fi)
                d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
        return d[0] if np.isscalar(p_vals) else d
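A usage sketch of prctile as defined above, showing the scalar-in/scalar-out dispatch:

data = [1, 2, 3, 4, 5]
print(prctile(data, 50))            # scalar p_vals -> scalar result: 3
print(prctile(data, [25, 50, 75]))  # sequence -> [1.75, 3, 4.25]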
Project: tsbitmaps    Author: binhmop
def discretize(self, ts, bins=None, global_min=None, global_max=None):
        if bins is None:
            bins = self._bins

        if np.isscalar(bins):
            num_bins = bins

            min_value = ts.min()
            max_value = ts.max()
            if min_value == max_value:
                min_value = global_min
                max_value = global_max
            step = (max_value - min_value) / num_bins
            ts_bins = np.arange(min_value, max_value, step)
        else:
            ts_bins = bins

        inds = np.digitize(ts, ts_bins)
        binned_ts = tuple(str(i - 1) for i in inds)
        return binned_ts
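For reference, the scalar-bins branch above can be exercised in isolation; the names below are illustrative, not part of the original class:

import numpy as np

ts = np.array([0.0, 0.3, 0.6, 0.9])
step = (ts.max() - ts.min()) / 4        # four equal-width bins
ts_bins = np.arange(ts.min(), ts.max(), step)
inds = np.digitize(ts, ts_bins)
print(tuple(str(i - 1) for i in inds))  # ('0', '1', '2', '3')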
Project: radar    Author: amoose136
def test_basic(self):
        dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
        dt_complex = np.typecodes['Complex']

        # test real
        a = np.eye(3)
        for dt in dt_numeric + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test complex
        a = np.eye(3) * 1j
        for dt in dt_complex + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test boolean
        b = np.eye(3, dtype=bool)  # np.bool is removed in modern NumPy
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), True)
Project: third_person_im    Author: bstadie
def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
            """``prctile(data, 50)`` returns the median, but p_vals can
            also be a sequence.

            For small samples, provides better values than
            matplotlib.mlab.prctile, but is also slower.

            """
            ps = [p_vals] if isscalar(p_vals) else p_vals

            if not sorted_:
                data = sorted(data)
            n = len(data)
            d = []
            for p in ps:
                fi = p * n / 100 - 0.5
                if fi <= 0:  # maybe extrapolate?
                    d.append(data[0])
                elif fi >= n - 1:
                    d.append(data[-1])
                else:
                    i = int(fi)
                    d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
            return d[0] if isscalar(p_vals) else d
Project: third_person_im    Author: bstadie
def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
        """Ellipsoid test objective function"""
        if not isscalar(x[0]):  # parallel evaluation
            return [self.elli(xi, rot) for xi in x]  # could save 20% overall
        if rot:
            x = rotate(x)
        N = len(x)
        if actuator_noise:
            x = x + actuator_noise * np.random.randn(N)

        ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)

        alpha = 0.49 + 1. / N
        beta = 1
        felli = np.random.rand(1)[0]**beta * ftrue * \
                max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
        # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
        #                                           np.abs(np.random.randn(1)[0]))**0
        if both:
            return (felli, ftrue)
        else:
            # return felli  # possibly noisy value
            return ftrue  # + np.random.randn()
Project: chainerrl    Author: chainer
def compute_policy_gradient_full_correction(
        action_distrib, action_distrib_mu, action_value, v,
        truncation_threshold):
    """Compute off-policy bias correction term wrt all actions."""
    assert truncation_threshold is not None
    assert np.isscalar(v)
    with chainer.no_backprop_mode():
        rho_all_inv = compute_full_importance(action_distrib_mu,
                                              action_distrib)
        correction_weight = (
            np.maximum(1 - truncation_threshold * rho_all_inv,
                       np.zeros_like(rho_all_inv)) *
            action_distrib.all_prob.data[0])
        correction_advantage = action_value.q_values.data[0] - v
    return -F.sum(correction_weight *
                  action_distrib.all_log_prob *
                  correction_advantage, axis=1)
Project: chainerrl    Author: chainer
def compute_policy_gradient_sample_correction(
        action_distrib, action_distrib_mu, action_value, v,
        truncation_threshold):
    """Compute off-policy bias correction term wrt a sampled action."""
    assert np.isscalar(v)
    assert truncation_threshold is not None
    with chainer.no_backprop_mode():
        sample_action = action_distrib.sample().data
        rho_dash_inv = compute_importance(
            action_distrib_mu, action_distrib, sample_action)
        if (truncation_threshold > 0 and
                rho_dash_inv >= 1 / truncation_threshold):
            return chainer.Variable(np.asarray([0], dtype=np.float32))
        correction_weight = max(0, 1 - truncation_threshold * rho_dash_inv)
        assert correction_weight <= 1
        q = float(action_value.evaluate_actions(sample_action).data[0])
        correction_advantage = q - v
    return -(correction_weight *
             action_distrib.log_prob(sample_action) *
             correction_advantage)
Project: brainiak    Author: brainiak
def __call__(self, x):
        """Return the GMM likelihood for given point(s).

        See :eq:`gmm-likelihood`.

        Arguments
        ---------
        x : scalar (or) 1D array of reals
            Point(s) at which likelihood needs to be computed

        Returns
        -------
        scalar (or) 1D array
            Likelihood values at the given point(s)
        """

        if np.isscalar(x):
            return self.get_gmm_pdf(x)
        else:
            return np.array([self.get_gmm_pdf(t) for t in x])
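The scalar-in/scalar-out, array-in/array-out dispatch above is a recurring idiom. A distilled sketch, with a stand-in for get_gmm_pdf:

import numpy as np

def eval_pointwise(x, f=lambda t: np.exp(-t ** 2)):  # f: stand-in pdf
    if np.isscalar(x):
        return f(x)
    return np.array([f(t) for t in x])

print(eval_pointwise(0.0))           # scalar in, scalar out
print(eval_pointwise([0.0, 1.0]))    # sequence in, 1D array out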
Project: dtnn    Author: atomistic-machine-learning
def convert_atoms(self, row):
        numbers = row.get('numbers')
        positions = row.get('positions').astype(self.floatX)
        pbc = row.get('pbc')
        cell = row.get('cell').astype(self.floatX)
        features = [numbers, positions, cell, pbc]

        for k in list(self.kvp.keys()):
            f = row[k]
            if np.isscalar(f):
                f = np.array([f])
            if f.dtype in [np.float16, np.float32, np.float64]:
                f = f.astype(self.floatX)
            features.append(f)
        for k in list(self.data.keys()):
            f = np.array(row.data[k])
            if np.isscalar(f):
                f = np.array([f])
            if f.dtype in [np.float16, np.float32, np.float64]:
                f = f.astype(self.floatX)
            features.append(f)
        return features
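The scalar-promotion idiom used twice above, in isolation:

import numpy as np

f = 3.5
if np.isscalar(f):
    f = np.array([f])   # promote so .dtype and .astype() are available
assert f.shape == (1,) and f.dtype == np.float64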
Project: vampyre    Author: GAMPTeam
def __init__(self, A, shape0):
        LinTrans.__init__(self)
        self.A = A
        if np.isscalar(shape0):
            shape0 = (shape0,)
        self.shape0 = shape0

        # Compute the output shape
        # Note that A.dot(x) operates on the second to last axis of x
        Ashape = A.shape
        shape1 = np.array(shape0)
        if len(shape0) == 1:
            self.aaxis = 0
        else:
            self.aaxis = len(shape0)-2
        shape1[self.aaxis] = Ashape[0]
        self.shape1 = tuple(shape1)

        # Set SVD terms to not computed
        self.svd_computed = False
        self.svd_avail = True
Project: vampyre    Author: GAMPTeam
def __init__(self, zval, pz, shape, var_axes=(0,),\
                 is_complex=False):
        Estim.__init__(self)

        # Convert scalars to arrays
        if np.isscalar(zval):
            zval = np.array([zval])
        if np.isscalar(pz):
            pz = np.array([pz])

        # Set parameters
        self.zval = zval
        self.pz = pz
        self.shape = shape
        self.is_complex = is_complex
        self.fz = -np.log(pz)

        # Set the variance axes
        if var_axes == 'all':
            ndim = len(shape)
            var_axes = tuple(range(ndim))
        self.var_axes = var_axes        
        self.cost_avail = True
Project: aboleth    Author: data61
def test_dense_embeddings(make_categories, reps, layer):
    """Test the embedding layer."""
    x, K = make_categories
    x = np.repeat(x, reps, axis=-1)
    N = len(x)
    S = 3
    x_, X_ = _make_placeholders(x, S, tf.int32)
    output, reg = layer(output_dim=D, n_categories=K)(X_)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()
        r = reg.eval()

        assert np.isscalar(r)
        assert r >= 0

        Phi = output.eval(feed_dict={x_: x})

        assert Phi.shape == (S, N, D * reps)
Project: aboleth    Author: data61
def test_dense_outputs(dense, make_data):
    """Make sure the dense layers output expected dimensions."""
    x, _, _ = make_data
    S = 3

    x_, X_ = _make_placeholders(x, S)
    N = x.shape[0]

    Phi, KL = dense(output_dim=D)(X_)

    tc = tf.test.TestCase()
    with tc.test_session():
        tf.global_variables_initializer().run()
        P = Phi.eval(feed_dict={x_: x})
        assert P.shape == (S, N, D)
        assert P.dtype == np.float32
        assert np.isscalar(KL.eval(feed_dict={x_: x}))
Project: aboleth    Author: data61
def test_kl_gaussian_normal(random):
    """Test Gaussian/Normal KL."""
    dim = (5, 10)
    Dim = (5, 10, 10)

    mu0 = random.randn(*dim).astype(np.float32)
    L0 = random_chol(Dim)
    q = MultivariateNormalTriL(mu0, L0)

    mu1 = random.randn(*dim).astype(np.float32)
    std1 = 1.0
    L1 = [(std1 * np.eye(dim[1])).astype(np.float32) for _ in range(dim[0])]
    p = tf.distributions.Normal(mu1, std1)

    KL = kl_sum(q, p)
    KLr = KLdiv(mu0, L0, mu1, L1)

    tc = tf.test.TestCase()
    with tc.test_session():
        kl = KL.eval()
        assert np.isscalar(kl)
        assert np.allclose(kl, KLr)
Project: pytoshop    Author: mdboom
def _determine_channels_and_depth(layers, depth, color_mode):
    # type: (List[Layer], Optional[int], int) -> Tuple[int, int]

    num_channels = 0
    for image in _iterate_all_images(layers):
        if (image.color_mode is not None and
                image.color_mode != color_mode):
            raise ValueError("Mismatched color mode")
        for index, channel in image.channels.items():
            if np.isscalar(channel):
                continue
            num_channels = max(num_channels, index + 1)
            channel_depth = channel.dtype.itemsize * 8
            if depth is None:
                depth = channel_depth
            elif depth != channel_depth:
                raise ValueError("Different image depths in input")

    if num_channels == 0 or depth is None:
        raise ValueError("Can't determine num channels or depth")

    return num_channels, depth
Project: rllabplusplus    Author: shaneshixiang
def prctile(data, p_vals=[0, 25, 50, 75, 100], sorted_=False):
            """``prctile(data, 50)`` returns the median, but p_vals can
            also be a sequence.

            For small samples, provides better values than
            matplotlib.mlab.prctile, but is also slower.

            """
            ps = [p_vals] if isscalar(p_vals) else p_vals

            if not sorted_:
                data = sorted(data)
            n = len(data)
            d = []
            for p in ps:
                fi = p * n / 100 - 0.5
                if fi <= 0:  # maybe extrapolate?
                    d.append(data[0])
                elif fi >= n - 1:
                    d.append(data[-1])
                else:
                    i = int(fi)
                    d.append((i + 1 - fi) * data[i] + (fi - i) * data[i + 1])
            return d[0] if isscalar(p_vals) else d
Project: rllabplusplus    Author: shaneshixiang
def elli(self, x, rot=0, xoffset=0, cond=1e6, actuator_noise=0.0, both=False):
        """Ellipsoid test objective function"""
        if not isscalar(x[0]):  # parallel evaluation
            return [self.elli(xi, rot) for xi in x]  # could save 20% overall
        if rot:
            x = rotate(x)
        N = len(x)
        if actuator_noise:
            x = x + actuator_noise * np.random.randn(N)

        ftrue = sum(cond**(np.arange(N) / (N - 1.)) * (x + xoffset)**2)

        alpha = 0.49 + 1. / N
        beta = 1
        felli = np.random.rand(1)[0]**beta * ftrue * \
                max(1, (10.**9 / (ftrue + 1e-99))**(alpha * np.random.rand(1)[0]))
        # felli = ftrue + 1*np.random.randn(1)[0] / (1e-30 +
        #                                           np.abs(np.random.randn(1)[0]))**0
        if both:
            return (felli, ftrue)
        else:
            # return felli  # possibly noisy value
            return ftrue  # + np.random.randn()
Project: pytrip    Author: pytrip
def _f(let):
    """
    f function from the Dasu paper; takes a LET cube as its parameter.
    Equation (7) in https://doi.org/10.1093/jrr/rru020

    input parameters may be either numpy.array or scalars
    TODO: handle real cubes.

    :params let: LET in [keV/um]

    :returns: result of the f function
    """

    ld = 86.0
    result = (1 - np.exp(-let / ld) * (1 + let / ld)) * ld / let

    # map any zero LET areas to 0.0
    if np.isscalar(result):  # scalar
        if result == np.inf:
            result = 0.0
    else:
        result[result == np.inf] = 0.0  # numpy arrays

    return result
Project: 3D-R2N2    Author: chrischoy
def sparse_to_dense(voxel_data, dims, dtype=bool):
    if voxel_data.ndim != 2 or voxel_data.shape[0] != 3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims] * 3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(int)
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:, valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out

# def get_linear_index(x, y, z, dims):
# """ Assuming xzy order. (y increasing fastest.
# TODO ensure this is right when dims are not all same
# """
# return x*(dims[1]*dims[2]) + z*dims[1] + y
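A small usage sketch of sparse_to_dense as defined above:

import numpy as np

voxels = np.array([[0, 1, 3],
                   [0, 2, 3],
                   [0, 3, 3]])   # shape (3, N): rows are x, y, z
dense = sparse_to_dense(voxels, 4)   # scalar dims -> 4x4x4 grid
assert dense.shape == (4, 4, 4) and dense.sum() == 3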
Project: Safe-RL-Benchmark    Author: befelix
def __init__(self, lower, upper, shape=None):
        """Initialize BoundedSpace.

        Parameters
        ----------
        lower : array-like
            Lower bound of the space. Either an array or an integer.
            Must agree with the input of the upper bound.
        upper : array-like
            Upper bound of the space. Either an array or an integer. Must
            agree with the input of the lower bound.
        shape : integer
            Shape of the bounds. Ignored if the bounds are non-scalar;
            if the bounds are scalar, it must be set.
        """
        if (np.isscalar(lower) and np.isscalar(upper)):
            assert shape is not None, "Shape must be set, if bounds are scalar"
            self.lower = np.zeros(shape) + lower
            self.upper = np.zeros(shape) + upper
        else:
            self.lower = np.array(lower)
            self.upper = np.array(upper)
            assert self.lower.shape == self.upper.shape, "Shapes do not agree."

        self._dim = None
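A usage sketch, assuming the class around this __init__ is otherwise complete; shape is mandatory only for scalar bounds:

space = BoundedSpace(-1.0, 1.0, shape=(3,))   # scalar bounds broadcast
assert space.lower.shape == (3,) and space.upper.shape == (3,)

space2 = BoundedSpace([0, 0], [1, 2])         # array bounds: shape inferred
assert (space2.upper == np.array([1, 2])).all()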
Project: gym    Author: openai
def test_env(spec):
    env = spec.make()
    ob_space = env.observation_space
    act_space = env.action_space
    ob = env.reset()
    assert ob_space.contains(ob), 'Reset observation: {!r} not in space'.format(ob)
    a = act_space.sample()
    observation, reward, done, _info = env.step(a)
    assert ob_space.contains(observation), 'Step observation: {!r} not in space'.format(observation)
    assert np.isscalar(reward), "{} is not a scalar for {}".format(reward, env)
    assert isinstance(done, bool), "Expected {} to be a boolean".format(done)

    for mode in env.metadata.get('render.modes', []):
        env.render(mode=mode)
    env.render(close=True)

    # Make sure we can render the environment after close.
    for mode in env.metadata.get('render.modes', []):
        env.render(mode=mode)
    env.render(close=True)

    env.close()

# Run a longer rollout on some environments
Project: krpcScripts    Author: jwvanderbeck
def test_basic(self):
        dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
        dt_complex = np.typecodes['Complex']

        # test real
        a = np.eye(3)
        for dt in dt_numeric + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test complex
        a = np.eye(3) * 1j
        for dt in dt_complex + 'O':
            b = a.astype(dt)
            res = np.vdot(b, b)
            assert_(np.isscalar(res))
            assert_equal(np.vdot(b, b), 3)

        # test boolean
        b = np.eye(3, dtype=bool)  # np.bool is removed in modern NumPy
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), True)
Project: ottertune    Author: cmu-db
def __init__(self, length_scale=1.0, magnitude=1.0, check_numerics=True,
                 debug=False):
        assert np.isscalar(length_scale)
        assert np.isscalar(magnitude)
        assert length_scale > 0 and magnitude > 0
        self.length_scale = length_scale
        self.magnitude = magnitude
        self.check_numerics = check_numerics
        self.debug = debug
        self.X_train = None
        self.y_train = None
        self.xy_ = None
        self.K = None
        self.graph = None
        self.vars = None
        self.ops = None
Project: 3D-IWGAN    Author: EdwardSmith1884
def sparse_to_dense(voxel_data, dims, dtype=bool):
    if voxel_data.ndim!=2 or voxel_data.shape[0]!=3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims]*3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(int)
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:,valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out

#def get_linear_index(x, y, z, dims):
    #""" Assuming xzy order. (y increasing fastest.
    #TODO ensure this is right when dims are not all same
    #"""
    #return x*(dims[1]*dims[2]) + z*dims[1] + y
Project: 3D-IWGAN    Author: EdwardSmith1884
def sparse_to_dense(voxel_data, dims, dtype=bool):
    if voxel_data.ndim!=2 or voxel_data.shape[0]!=3:
        raise ValueError('voxel_data is wrong shape; should be 3xN array.')
    if np.isscalar(dims):
        dims = [dims]*3
    dims = np.atleast_2d(dims).T
    # truncate to integers
    xyz = voxel_data.astype(int)
    # discard voxels that fall outside dims
    valid_ix = ~np.any((xyz < 0) | (xyz >= dims), 0)
    xyz = xyz[:,valid_ix]
    out = np.zeros(dims.flatten(), dtype=dtype)
    out[tuple(xyz)] = True
    return out

#def get_linear_index(x, y, z, dims):
    #""" Assuming xzy order. (y increasing fastest.
    #TODO ensure this is right when dims are not all same
    #"""
    #return x*(dims[1]*dims[2]) + z*dims[1] + y
Project: cellranger    Author: 10XGenomics
def store_reference_metadata(self, reference_path, ref_type, metric_prefix):
        """ ref_type - string e.g., 'Transcriptome'
            metric_prefix - string e.g., 'vdj' """

        if self.metadata is None:
            self.metadata = {}

        ref_metadata = cr_utils._load_reference_metadata_file(reference_path)

        for key in cr_constants.REFERENCE_METADATA_KEYS:
            value = ref_metadata.get(key, '')
            if value is None:
                value = ''

            # Backward compatibility with old reference metadata jsons that don't contain the type field
            if key == cr_constants.REFERENCE_TYPE_KEY and value == '':
                self.metadata['%s%s' % (metric_prefix, cr_constants.REFERENCE_TYPE_KEY)] = ref_type
                continue

            if np.isscalar(value):
                self.metadata['%s%s' % (metric_prefix, key)] = value
            elif key == cr_constants.REFERENCE_GENOMES_KEY:
                # Special case for genome key
                self.metadata['%s%s' % (metric_prefix, key)] = cr_reference.get_ref_name_from_genomes(value)
            else:
                self.metadata['%s%s' % (metric_prefix, key)] = ', '.join(str(x) for x in value)
Project: scikit-dataaccess    Author: MITHaystack
def getROIstations(geo_point,radiusParam,data,header):
    '''
    This function returns the 4ID station codes for the stations in a region

    The region of interest is defined by the geographic coordinate and a window size

    @param geo_point: The geographic (lat,lon) coordinate of interest
    @param radiusParam: An overloaded radius of interest [km] or latitude and longitude window [deg] around the geo_point
    @param data: Stabilized (or unstabilized) data generated from the data fetcher or out of stab_sys
    @param header: Header dictionary with stations metadata keyed by their 4ID code. This is output with the data.

    @return station_list, list of site 4ID codes in the specified geographic region
     '''
    ccPos = (geo_point[0]*np.pi/180, geo_point[1]*np.pi/180)
    if np.isscalar(radiusParam):
        station_list = []
        for ii in header.keys():
            coord = (header[ii]['refNEU'][0]*np.pi/180,(header[ii]['refNEU'][1]-360)*np.pi/180)
            dist = 6371*2*np.arcsin(np.sqrt(np.sin((ccPos[0]-coord[0])/2)**2+np.cos(ccPos[0])*np.cos(coord[0])*np.sin((ccPos[1]-coord[1])/2)**2))
            if np.abs(dist) < radiusParam:
                station_list.append(header[ii]['4ID'])
    else:
        # overloaded radiusParam term to be radius or lat/lon window size
        latWin = radiusParam[0]/2
        lonWin = radiusParam[1]/2
        station_list = []

        try:
            for ii in header.keys():
                coord = (header[ii]['refNEU'][0],(header[ii]['refNEU'][1]-360))
                if (geo_point[0]-latWin)<=coord[0]<=(geo_point[0]+latWin) and (geo_point[1]-lonWin)<=coord[1]<=(geo_point[1]+lonWin):
                    station_list.append(header[ii]['4ID'])
        except:
            station_list = None

    return station_list
Project: scikit-dataaccess    Author: MITHaystack
def __call__(self, y, x):
        ''' 
        Convert pixel coordinates to lat/lon

        @param y: y coordinate
        @param x: x coordinate

        @return (lat, lon)
        '''


        # # If interpolation of geodata is necessary
        # if self.lat_data is None:

        ret_lat = self.alat(y+self.y_offset,x+self.x_offset, grid=False)
        ret_lon = self.alon(y+self.y_offset,x+self.x_offset, grid=False)

        if np.isscalar(y) and np.isscalar(x):
            ret_lat = ret_lat.item()
            ret_lon = ret_lon.item()

        return ret_lat, ret_lon

        # # If geodata is the same resolution as science data
        # else:
        #     return self.lat_data[y,x], self.lon_data[y,x]


# Utility function to retrieve the value of a bit in a bit flag
Project: galario    Author: mtazzari
def assert_allclose(x, y, rtol=1e-10, atol=1e-8):
    """Drop in replacement for `numpy.testing.assert_allclose` that shows the nonmatching elements"""
    if np.isscalar(x) and np.isscalar(y):
        return np.testing.assert_allclose(x, y, rtol=rtol, atol=atol)

    if x.shape != y.shape:
        raise AssertionError("Shape mismatch: %s vs %s" % (str(x.shape), str(y.shape)))

    d = ~np.isclose(x, y, rtol, atol)
    if np.any(d):
        miss = np.where(d)[0]
        raise AssertionError("""Mismatch of %d elements (%g %%) at the level of rtol=%g, atol=%g
    %s
    %s
    %s""" % (len(miss), len(miss)/x.size, rtol, atol, repr(miss), str(x[d]), str(y[d])))
Project: pycma    Author: CMA-ES
def __call__(self, solutions, *args, **kwargs):
        """return penalty or list of penalties, by default zero(s).

        This interface seems too specifically tailored to the derived
        BoundPenalty class; it should perhaps change.

        """
        if np.isscalar(solutions[0]):
            return 0.0
        else:
            return len(solutions) * [0.0]
Project: pycma    Author: CMA-ES
def repair(self, x, copy_if_changed=True):
        """sets out-of-bounds components of ``x`` on the bounds.

        """
        # TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
        # remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
        copy = copy_if_changed
        bounds = self.bounds
        if bounds not in (None, [None, None], (None, None)):  # solely for efficiency
            if copy:
                x = np.array(x, copy=True)
            if bounds[0] is not None:
                if np.isscalar(bounds[0]):
                    for i in rglen(x):
                        x[i] = max((bounds[0], x[i]))
                else:
                    for i in rglen(x):
                        j = min([i, len(bounds[0]) - 1])
                        if bounds[0][j] is not None:
                            x[i] = max((bounds[0][j], x[i]))
            if bounds[1] is not None:
                if np.isscalar(bounds[1]):
                    for i in rglen(x):
                        x[i] = min((bounds[1], x[i]))
                else:
                    for i in rglen(x):
                        j = min((i, len(bounds[1]) - 1))
                        if bounds[1][j] is not None:
                            x[i] = min((bounds[1][j], x[i]))
        return x

    # ____________________________________________________________
    #
Project: pycma    Author: CMA-ES
def __call__(self, x, archive, gp):
        """returns the boundary violation penalty for `x`,
        where `x` is a single solution or a list or np.array of solutions.

        """
        if x in (None, (), []):
            return x
        if self.bounds in (None, [None, None], (None, None)):
            return 0.0 if np.isscalar(x[0]) else [0.0] * len(x)  # no penalty

        x_is_single_vector = np.isscalar(x[0])
        if x_is_single_vector:
            x = [x]

        # add fixed variables to self.gamma
        try:
            gamma = list(self.gamma)  # fails if self.gamma is a scalar
            for i in sorted(gp.fixed_values):  # fails if fixed_values is None
                gamma.insert(i, 0.0)
            gamma = np.array(gamma, copy=False)
        except TypeError:
            gamma = self.gamma
        pen = []
        for xi in x:
            # CAVE: this does not work with already repaired values!!
            # CPU(N,lam,iter=20,200,100)?: 3s of 10s, np.array(xi): 1s
            # remark: one deep copy can be prevented by xold = xi first
            xpheno = gp.pheno(archive[xi]['geno'])
            # necessary, because xi was repaired to be in bounds
            xinbounds = self.repair(xpheno)
            # could be omitted (with unpredictable effect in case of external repair)
            fac = 1  # exp(0.1 * (log(self.scal) - np.mean(self.scal)))
            pen.append(sum(gamma * ((xinbounds - xpheno) / fac)**2) / len(xi))
        return pen[0] if x_is_single_vector else pen

    # ____________________________________________________________
    #
Project: pycma    Author: CMA-ES
def multiply_C(self, factor):
        """multiply ``self.C`` with ``factor`` updating internal states.

        ``factor`` can be a scalar, a vector or a matrix. A vector is
        applied as an outer product and multiplied element-wise, i.e.,
        ``multiply_C(diag(C)**-0.5)`` generates a correlation matrix.

        """
        self._updateC()
        if np.isscalar(factor):
            self.C *= factor
            self.D *= factor**0.5
            try:
                self.inverse_root_C /= factor**0.5
            except AttributeError:
                pass
        elif len(np.asarray(factor).shape) == 1:
            self.C *= np.outer(factor, factor)
            self._decompose_C()
        elif len(factor.shape) == 2:
            self.C *= factor
            self._decompose_C()
        else:
            raise ValueError(str(factor))
        # raise NotImplementedError('never tested')
Project: pycma    Author: CMA-ES
def __call__(self, x, *args):
        f = Function.__call__(self, x, *args)
        if self.rel_noise:
            f += f * self.rel_noise(len(x))
            assert np.isscalar(f)
        if self.abs_noise:
            f += self.abs_noise(len(x))
        return f
Project: pycma    Author: CMA-ES
def max(vec, vec_or_scalar):
        b = vec_or_scalar
        if np.isscalar(b):
            m = [max(x, b) for x in vec]
        else:
            m = [max(vec[i], b[i]) for i in rglen((vec))]
        return m
Project: pycma    Author: CMA-ES
def min(a, b):
        iss = np.isscalar
        if iss(a) and iss(b):
            return min(a, b)
        if iss(a):
            a, b = b, a
        # now only b can be still a scalar
        if iss(b):
            return [min(x, b) for x in a]
        else:  # two non-scalars must have the same length
            return [min(a[i], b[i]) for i in rglen((a))]
Project: pycma    Author: CMA-ES
def is_vector_list(x):
    """make an educated guess whether ``x`` is a list of vectors.

    >>> from cma.utilities.utils import is_vector_list as ivl
    >>> assert ivl([[0], [0]]) and not ivl([1,2,3])

    """
    try:
        return np.isscalar(x[0][0])
    except:
        return False
Project: tensorboard    Author: dmlc
def makenp(x, modality=None):
    # if already numpy, return
    if isinstance(x, np.ndarray):
        if modality == 'IMG' and x.dtype == np.uint8:
            return x.astype(np.float32) / 255.0
        return x
    if np.isscalar(x):
        return np.array([x])
    if 'torch' in str(type(x)):
        return pytorch_np(x, modality)
    if 'chainer' in str(type(x)):
        return chainer_np(x, modality)
    if 'mxnet' in str(type(x)):
        return mxnet_np(x, modality)
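Illustrating the two NumPy-side branches of makenp (the torch/chainer/mxnet branches require those frameworks to be installed):

assert makenp(3.0) == np.array([3.0])   # scalars become 1-element arrays
img = np.zeros((4, 4), dtype=np.uint8)
assert makenp(img, modality='IMG').dtype == np.float32   # uint8 images rescaled to [0, 1]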
Project: NeoAnalysis    Author: neoanalysis
def drawSymbol(painter, symbol, size, pen, brush):
    if symbol is None:
        return
    painter.scale(size, size)
    painter.setPen(pen)
    painter.setBrush(brush)
    if isinstance(symbol, basestring):
        symbol = Symbols[symbol]
    if np.isscalar(symbol):
        symbol = list(Symbols.values())[symbol % len(Symbols)]
    painter.drawPath(symbol)
Project: NeoAnalysis    Author: neoanalysis
def gaussianFilter(data, sigma):
    """
    Drop-in replacement for scipy.ndimage.gaussian_filter.

    (note: results are only approximately equal to the output of
     gaussian_filter)
    """
    if np.isscalar(sigma):
        sigma = (sigma,) * data.ndim

    baseline = data.mean()
    filtered = data - baseline
    for ax in range(data.ndim):
        s = sigma[ax]
        if s == 0:
            continue

        # generate 1D gaussian kernel
        ksize = int(s * 6)
        x = np.arange(-ksize, ksize)
        kernel = np.exp(-x**2 / (2*s**2))
        kshape = [1,] * data.ndim
        kshape[ax] = len(kernel)
        kernel = kernel.reshape(kshape)

        # convolve as product of FFTs
        shape = data.shape[ax] + ksize
        scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
        filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) * 
                                        np.fft.rfft(kernel, shape, axis=ax), 
                                        axis=ax)

        # clip off extra data
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
        filtered = filtered[sl]
    return filtered + baseline
Project: NeoAnalysis    Author: neoanalysis
def drawSymbol(painter, symbol, size, pen, brush):
    if symbol is None:
        return
    painter.scale(size, size)
    painter.setPen(pen)
    painter.setBrush(brush)
    if isinstance(symbol, basestring):
        symbol = Symbols[symbol]
    if np.isscalar(symbol):
        symbol = list(Symbols.values())[symbol % len(Symbols)]
    painter.drawPath(symbol)
Project: NeoAnalysis    Author: neoanalysis
def gaussianFilter(data, sigma):
    """
    Drop-in replacement for scipy.ndimage.gaussian_filter.

    (note: results are only approximately equal to the output of
     gaussian_filter)
    """
    if np.isscalar(sigma):
        sigma = (sigma,) * data.ndim

    baseline = data.mean()
    filtered = data - baseline
    for ax in range(data.ndim):
        s = sigma[ax]
        if s == 0:
            continue

        # generate 1D gaussian kernel
        ksize = int(s * 6)
        x = np.arange(-ksize, ksize)
        kernel = np.exp(-x**2 / (2*s**2))
        kshape = [1,] * data.ndim
        kshape[ax] = len(kernel)
        kernel = kernel.reshape(kshape)

        # convolve as product of FFTs
        shape = data.shape[ax] + ksize
        scale = 1.0 / (abs(s) * (2*np.pi)**0.5)
        filtered = scale * np.fft.irfft(np.fft.rfft(filtered, shape, axis=ax) * 
                                        np.fft.rfft(kernel, shape, axis=ax), 
                                        axis=ax)

        # clip off extra data
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(filtered.shape[ax]-data.shape[ax],None,None)
        filtered = filtered[sl]
    return filtered + baseline
Project: comprehend    Author: Fenugreek
def stimuli(self, layer=-1, location=[.5], corrsort=True, activation=1.0,
                static_hidden=True, overlay=None):

        if np.isscalar(location): location = [location]
        coders = self.coders
        if layer < 0: layer += len(coders)
        out_shape = coders[layer].output_shape(reduced=False)
        n_hidden = out_shape[-1]

        values = np.zeros([n_hidden] + list(out_shape[1:]),
                          dtype=self.dtype.as_numpy_dtype)

        mid_indices = [0 for j in range(len(out_shape) - 2)]
        for i in range(n_hidden):
            for loc in location:
                if len(mid_indices):
                    mid_indices[0] = int(out_shape[1] * loc)
                indices = [i] + mid_indices + [i]
                values[tuple(indices)] = activation                   

        self.set_batch_size(n_hidden)
        values = coders[layer].get_reconstructed_input(values, reduced=False, overlay=overlay,
                                                       static_hidden=static_hidden)
        for i in range(layer - 1, -1, -1):
            if coders[i].output_shape() != coders[i+1].input_shape():
                values = tf.reshape(values, coders[i].output_shape())
            values = coders[i].get_reconstructed_input(values, reduced=True, overlay=overlay,
                                                       static_hidden=static_hidden)

        values = values.eval().squeeze()
        if corrsort: return values[features.corrsort(values, use_tsp=True)]
        else: return values
Project: comprehend    Author: Fenugreek
def _make_overlay(self, location):
        if np.isscalar(location): location = [location]
        overlay = np.zeros(self.shapes[2], np.bool)
        for loc in location:
            overlay[:, :, loc, ...] = True
        return overlay
Project: comprehend    Author: Fenugreek
def get_reconstructed_input(self, hidden, reduced=False, overlay=None,
                                static_hidden=False, scale=True, **kwargs):
        """
        overlay mask holds positions of max indices (when max pooling was done).
        If None, use previous state where possible.
        If None, and no previous state, assign random positions.
        If scalar, set max indices to this.
        If list, put in multiple positions (optionally divide by pool_width if <scale>).

        The same random position is assigned to every hidden unit.
        """        

        if not reduced:
            return Conv.get_reconstructed_input(self, hidden)

        hidden = tf.tile(tf.expand_dims(hidden, 3),
                         [1, 1, self.pool_width, 1, 1])

        if overlay is None:
            overlay = self.state.get('overlay')
            if overlay is None:
                overlay = self._random_overlay(static_hidden=static_hidden)
        elif np.isscalar(overlay) or type(overlay) == list:
            if scale and type(overlay) == list and len(overlay) > 1:
                scale = 1. / len(overlay)
            else: scale = None
            overlay = self._make_overlay(overlay)

        return Conv.get_reconstructed_input(self,
                                  self._pool_overlay(hidden, overlay), scale=scale)
Project: tsbitmaps    Author: binhmop
def _get_num_bins(self, bins):
        if np.isscalar(bins):
            num_bins = bins
        else:
            num_bins = len(bins)  # bins is an array of bins
        return num_bins
Project: dc_stat_think    Author: justinbois
def ecdf_formal(x, data):
    """
    Compute the values of the formal ECDF generated from `data` at x.
    I.e., if F is the ECDF, return F(x).

    Parameters
    ----------
    x : int, float, or array_like
        Positions at which the formal ECDF is to be evaluated.
    data : array_like
        One-dimensional array of data to use to generate the ECDF.

    Returns
    -------
    output : float or ndarray
        Value of the ECDF at `x`.
    """
    # Remember if the input was scalar
    if np.isscalar(x):
        return_scalar = True
    else:
        return_scalar = False

    # If x has any nans, raise a RuntimeError
    if np.isnan(x).any():
        raise RuntimeError('Input cannot have NaNs.')

    # Convert x to array
    x = _convert_data(x, inf_ok=True)

    # Convert data to sorted NumPy array with no nan's
    data = _convert_data(data, inf_ok=True)

    # Compute formal ECDF value
    out = _ecdf_formal(x, np.sort(data))

    if return_scalar:
        return out[0]
    return out
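Assuming dc_stat_think's private helpers (_convert_data, _ecdf_formal) are available alongside, typical behavior looks like:

data = [1.0, 2.0, 3.0, 4.0]
print(ecdf_formal(2.5, data))              # scalar in -> 0.5
print(ecdf_formal([0.5, 2.5, 5.0], data))  # array in -> [0.  0.5 1. ]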
Project: mpnum    Author: dseuss
def __mul__(self, fact):
        """Multiply ``MPArray`` by a scalar.
          .. todo::  These could be made more stable by rescaling all
            non-normalized tens
        """
        if np.isscalar(fact):
            lcanon, rcanon = self.canonical_form
            ltens = self._lt
            ltens_new = it.chain(ltens[:lcanon], [fact * ltens[lcanon]],
                                 ltens[lcanon + 1:])
            return type(self)(LocalTensors(ltens_new, cform=(lcanon, rcanon)))

        raise NotImplementedError("Multiplication by non-scalar not supported")
Project: mpnum    Author: dseuss
def __imul__(self, fact):
        if np.isscalar(fact):
            lcanon, _ = self.canonical_form
            # FIXME TEMPORARY FIX
            #  self._lt[lcanon] *= fact
            self._lt.update(lcanon, self._lt[lcanon] * fact)
            return self

        raise NotImplementedError("Multiplication by non-scalar not supported")
Project: mpnum    Author: dseuss
def __truediv__(self, divisor):
        if np.isscalar(divisor):
            return self.__mul__(1 / divisor)
        raise NotImplementedError("Division by non-scalar not supported")