Python numpy module: numpy.infty usage examples

The following code examples, extracted from open-source Python projects, illustrate how numpy.infty is used. Note that numpy.infty is a constant (an alias for numpy.inf, IEEE 754 positive infinity), not a callable.
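As a quick orientation before the project snippets (a minimal sketch, not taken from any of the projects below):

import numpy as np

assert np.infty == np.inf         # infty is simply an alias for inf
print(np.infty > 1e308)           # True: greater than any finite float
print(np.isfinite(np.infty))      # False
print(np.infty - np.infty)        # nan: inf - inf is undefined

It is most often used as a sentinel initial value in minimum/maximum searches, as many of the examples below show.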

Project: mpnum    Author: dseuss    | project source | file source
def assert_mpa_identical(mpa1, mpa2, decimal=np.infty):
    """Verify that two MPAs are complety identical
    """
    assert len(mpa1) == len(mpa2)
    assert mpa1.canonical_form == mpa2.canonical_form
    assert mpa1.dtype == mpa2.dtype

    for i, lten1, lten2 in zip(it.count(), mpa1.lt, mpa2.lt):
        if decimal is np.infty:
            assert_array_equal(lten1, lten2,
                               err_msg='mismatch in lten {}'.format(i))
        else:
            assert_array_almost_equal(lten1, lten2, decimal=decimal,
                                      err_msg='mismatch in lten {}'.format(i))
    # TODO: We should make a comprehensive comparison between `mpa1`
    # and `mpa2`.  Are we missing other things?
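A hypothetical call showing how the np.infty default acts as an "exact comparison" sentinel (mpa_a and mpa_b are assumed mpnum arrays, not names from the project):

assert_mpa_identical(mpa_a, mpa_b)              # decimal=np.infty: exact equality
assert_mpa_identical(mpa_a, mpa_b, decimal=10)  # approximate, to 10 decimal places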
Project: POT    Author: rflamary    | project source | file source
def __init__(self, reg_e=1., max_iter=1000,
                 tol=10e-9, verbose=False, log=False,
                 metric="sqeuclidean", norm=None,
                 distribution_estimation=distribution_estimation_uniform,
                 out_of_sample_map='ferradans', limit_max=np.infty):

        self.reg_e = reg_e
        self.max_iter = max_iter
        self.tol = tol
        self.verbose = verbose
        self.log = log
        self.metric = metric
        self.norm = norm
        self.limit_max = limit_max
        self.distribution_estimation = distribution_estimation
        self.out_of_sample_map = out_of_sample_map
Project: POT    Author: rflamary    | project source | file source
def __init__(self, reg_e=1., reg_cl=0.1,
                 max_iter=10, max_inner_iter=200,
                 tol=10e-9, verbose=False,
                 metric="sqeuclidean", norm=None,
                 distribution_estimation=distribution_estimation_uniform,
                 out_of_sample_map='ferradans', limit_max=np.infty):

        self.reg_e = reg_e
        self.reg_cl = reg_cl
        self.max_iter = max_iter
        self.max_inner_iter = max_inner_iter
        self.tol = tol
        self.verbose = verbose
        self.metric = metric
        self.norm = norm
        self.distribution_estimation = distribution_estimation
        self.out_of_sample_map = out_of_sample_map
        self.limit_max = limit_max
Project: cupy    Author: cupy    | project source | file source
def train_gmm(X, max_iter, tol, means, covariances):
    xp = cupy.get_array_module(X)
    lower_bound = -np.infty
    converged = False
    weights = xp.array([0.5, 0.5], dtype=np.float32)
    inv_cov = 1 / xp.sqrt(covariances)

    for n_iter in six.moves.range(max_iter):
        prev_lower_bound = lower_bound
        log_prob_norm, log_resp = e_step(X, inv_cov, means, weights)
        weights, means, covariances = m_step(X, xp.exp(log_resp))
        inv_cov = 1 / xp.sqrt(covariances)
        lower_bound = log_prob_norm
        change = lower_bound - prev_lower_bound
        if abs(change) < tol:
            converged = True
            break

    if not converged:
        print('Failed to converge. Increase max-iter or tol.')

    return inv_cov, means, weights, covariances
Project: finite_volume_seismic_model    Author: cjvogl    | project source | file source
def containing_rect(self):
        r"""Find containing rectangle of fault in x-y plane.

        Returns tuple of x-limits and y-limits.

        """

        extent = [numpy.infty, -numpy.infty, numpy.infty, -numpy.infty]
        for subfault in self.subfaults:
            for corner in subfault.corners:
                extent[0] = min(corner[0], extent[0])
                extent[1] = max(corner[0], extent[1])
                extent[2] = min(corner[1], extent[2])
                extent[3] = max(corner[1], extent[3])

        return extent
Project: jingjuSingingPhraseMatching    Author: ronggong    | project source | file source
def bicGMMModelSelection(X):
    '''
    BIC-based selection of the number of GMM components
    :param X: features, shape (n_observations, n_dimensions)
    :return: the best number of components and the corresponding model
    '''
    lowest_bic = np.infty
    bic = []
    n_components_range  = [10,15,20,25,30,35,40,45,50,55,60,65,70]
    best_n_components   = n_components_range[0]
    for n_components in n_components_range:
        # Fit a Gaussian mixture with EM
        print('Fitting GMM with n_components =', n_components)
        gmm = mixture.GaussianMixture(n_components=n_components,
                                      covariance_type='diag')
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_n_components = n_components
            best_gmm          = gmm

    return best_n_components, best_gmm
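A hedged usage sketch with synthetic data (assuming numpy and sklearn.mixture are imported as in the snippet):

X = np.random.randn(500, 13)             # 500 observations, 13 features
n_best, gmm_best = bicGMMModelSelection(X)
print('selected n_components:', n_best)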
Project: bates_galaxies_lab    Author: aleksds    | project source | file source
def chivecfn(theta):
    """A version of lnprobfn that returns the simple uncertainty 
    normalized residual instead of the log-posterior, for use with 
    least-squares optimization methods like Levenburg-Marquardt.
    """
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except(ValueError):
        return -np.infty
    d1 = time.time() - t1

    chispec = chi_spec(spec, obs)
    chiphot = chi_phot(phot, obs)
    return np.concatenate([chispec, chiphot])
Project: ADEM    Author: mike-n-7    | project source | file source
def sanity_check(test_emb, train_emb, num_test):
    '''
    Sanity check on the cosine similarity calculations
    Finds the closest vector in the space by brute force
    '''
    correct_list = []
    for i in range(num_test):
        smallest_norm = np.infty
        index = 0
        for j in range(len(train_emb)):
            # Euclidean distance to each training vector
            norm = np.linalg.norm(train_emb[j] - test_emb[i])
            if norm < smallest_norm:
                smallest_norm = norm
                index = j
        correct_list.append(index)
    # Pad the list to make it the same length as test_emb
    for i in range(len(test_emb) - num_test):
        correct_list.append(-1)
    return correct_list
Project: sparsereg    Author: Ohjeah    | project source | file source
def crowding_distance(models, *attrs):
    """
    Assumes models in lexicographical sorted.
    """

    get_fit = _get_fit(models, attrs)

    f = np.array(sorted([get_fit(m) for m in models]))

    scale = np.max(f, axis=0) - np.min(f, axis=0)

    with np.errstate(invalid="ignore"):
        dist = np.sum(abs(np.roll(f, 1, axis=0) - np.roll(f, -1, axis=0) ) / scale, axis=1)
    dist[0] = np.infty
    dist[-1] = np.infty
    return dist
Project: highdimensional-decision-boundary-plot    Author: tmadl    | project source | file source
def _find_decision_boundary_on_hypersphere(self, centroid, R, penalize_known=False):
        def objective(phi, grad=0):
            # search on hypersphere surface in polar coordinates - map back to cartesian
            cx = centroid + polar_to_cartesian(phi, R)
            try:
                cx2d = self.dimensionality_reduction.transform([cx])[0]
                error = self.decision_boundary_distance(cx)
                if penalize_known:
                    # slight penalty for being too close to already known decision boundary
                    # keypoints
                    db_distances = [euclidean(cx2d, self.decision_boundary_points_2d[k])
                                    for k in range(len(self.decision_boundary_points_2d))]
                    error += 1e-8 * ((self.mean_2d_dist - np.min(db_distances)) /
                                     self.mean_2d_dist)**2
                return error
            except Exception as ex:
                print("Error in objective function:", ex)
                return np.infty

        optimizer = self._get_optimizer(
            D=self.X.shape[1] - 1, upper_bound=2 * np.pi, iteration_budget=self.hypersphere_iteration_budget)
        optimizer.set_min_objective(objective)
        db_phi = optimizer.optimize([rnd.random() * 2 * np.pi for k in range(self.X.shape[1] - 1)])
        db_point = centroid + polar_to_cartesian(db_phi, R)
        return db_point
Project: CV-lecture-quizzes-python    Author: pdvelez    | project source | file source
def find_best_match(patch, strip):
    # TODO: Find patch in strip and return column index (x value) of topleft corner

    # We will use SSD to find out the best match

    best_id = 0
    min_diff = np.infty

    for i in range(int(strip.shape[1] - patch.shape[1])):
        temp = strip[:, i: i + patch.shape[1]]
        ssd = np.sum((temp - patch) ** 2)
        if ssd < min_diff:
            min_diff = ssd
            best_id = i

    return best_id
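A quick self-check sketch: cut a patch out of a synthetic strip and recover its column offset (the test data here is hypothetical, not from the quiz):

import numpy as np

strip = np.random.rand(20, 100)
patch = strip[:, 37:47]
print(find_best_match(patch, strip))  # 37: SSD is zero at the true offset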

Project: scikit-cmeans    Author: bm424    | project source | file source
def converge(self, x):
        """Finds cluster centers through an alternating optimization routine.

        Terminates when either the number of cycles reaches `max_iter` or the
        objective function changes by less than `tol`.

        Parameters
        ----------
        x : :obj:`np.ndarray`
            (n_samples, n_features)
            The original data.

        """
        centers = []
        j_new = np.infty
        for i in range(self.max_iter):
            j_old = j_new
            self.update(x)
            centers.append(self.centers)
            j_new = self.objective(x)
            if np.abs(j_old - j_new) < self.tol:
                break
        return np.array(centers)
Project: cleverhans    Author: tensorflow    | project source | file source
def test_generate_np_gives_adversarial_example_linfinity(self):
        self.help_generate_np_gives_adversarial_example(np.infty)
Project: electrostatics    Author: tomduck    | project source | file source
def E(self, x):  # pylint: disable=invalid-name
        """Electric field vector.
        Ref: http://www.phys.uri.edu/gerhard/PHY204/tsl31.pdf
        """
        x = array(x)
        x1, x2, lam = self.x1, self.x2, self.lam

        # Get lengths and angles for the different triangles
        theta1, theta2 = angle(x, x1, x2), pi - angle(x, x2, x1)
        a = point_line_distance(x, x1, x2)
        r1, r2 = norm(x - x1), norm(x - x2)

        # Calculate the parallel and perpendicular components
        sign = where(is_left(x, x1, x2), 1, -1)

        # pylint: disable=invalid-name
        Epara = lam*(1/r2-1/r1)
        Eperp = -sign*lam*(cos(theta2)-cos(theta1))/where(a == 0, infty, a)

        # Transform into the coordinate space and return
        dx = x2 - x1

        if len(x.shape) == 2:
            Epara = Epara[::, newaxis]
            Eperp = Eperp[::, newaxis]

        return Eperp * (array([-dx[1], dx[0]])/norm(dx)) + Epara * (dx/norm(dx))
Project: nanopores    Author: mitschabaude    | project source | file source
def interpolation(X, Y, extend_to_infty=True):
    if extend_to_infty:
        X = [-np.infty] + X + [np.infty]
        Y = [Y[0]] + Y + [Y[-1]]
    X = np.array(X)
    Y = np.array(Y)
    return lambda x: evaluate_interpolation(x, X, Y)
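evaluate_interpolation is defined elsewhere in the project; the effect of padding X with +/- np.infty is constant extrapolation beyond the data range, which np.interp happens to do by default (a stand-in sketch, not the project's code):

import numpy as np

X, Y = [0.0, 1.0, 2.0], [10.0, 20.0, 30.0]
f = lambda x: np.interp(x, X, Y)
print(f(-5.0), f(0.5), f(99.0))  # 10.0 15.0 30.0 -- clamped at the end values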
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | project source | file source
def test_predict_facets(self):
        self.actualSetUp()
        self.params['facets'] = 2
        self._predict_base(predict_facets, fluxthreshold=numpy.infty)
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | project source | file source
def test_predict_timeslice(self):
        # This works poorly because of the poor interpolation accuracy for point sources.
        # The corresponding invert works well, particularly if the beam sampling is high.
        self.actualSetUp()
        self._predict_base(predict_timeslice, fluxthreshold=numpy.infty)
Project: algorithm-reference-library    Author: SKA-ScienceDataProcessor    | project source | file source
def test_predict_timeslice_wprojection(self):
        self.actualSetUp()
        self.params['kernel'] = 'wprojection'
        self.params['wstep'] = 2.0
        self._predict_base(predict_timeslice, fluxthreshold=numpy.infty)
Project: pl-cnn    Author: oval-group    | project source | file source
def initialize_run(nnet):

    if nnet.data.dataset_name == 'imagenet':
        nnet.max_passes = 1
        nnet.max_inner_iterations = 5
        nnet.max_outer_iterations = 1
        nnet.epoch = epoch_imagenet

    elif Cfg.store_on_gpu:
        nnet.max_passes = 50
        nnet.max_inner_iterations = 100
        nnet.max_outer_iterations = 1
        nnet.epoch = epoch_full_gpu

        nnet.old_objective = np.infty
        nnet.old_validation_acc = 0.

        performance(nnet, which_set='train', print_=True)

    else:
        nnet.max_passes = 50
        nnet.max_inner_iterations = 100
        nnet.max_outer_iterations = 1
        nnet.epoch = epoch_part_gpu

        nnet.old_objective = np.infty
        nnet.old_validation_acc = 0.

        performance(nnet, which_set='train', print_=True)

    return
Project: chxanalys    Author: yugangzhang    | project source | file source
def get_series_g2_taus( fra_max_list, acq_time=1, max_fra_num=None, log_taus = True, 
                        num_bufs = 8):
    '''
    Get taus for dose-dependent analysis
    Parameters:
        fra_max_list: a list of the largest available frame numbers
        acq_time: acquisition time for each frame
        max_fra_num: if not None, cap each frame number at this value
        log_taus: if True, use the multi-tau-defined taus based on num_bufs (default=8);
               otherwise, use delta tau = 1
    Return:
        tausd, a dict, with the fra_max_list items as keys
    e.g.,
    get_series_g2_taus( fra_max_list=[20,30,40], acq_time=1, max_fra_num=None, log_taus = True,  num_bufs = 8)
    -->
    {20: array([ 0,  1,  2,  3,  4,  5,  6,  7,  8, 10, 12, 14, 16]),
     30: array([ 0,  1,  2,  3,  4,  5,  6,  7,  8, 10, 12, 14, 16, 20, 24, 28]),
     40: array([ 0,  1,  2,  3,  4,  5,  6,  7,  8, 10, 12, 14, 16, 20, 24, 28, 32])
    }

    '''
    tausd = {}
    for n in fra_max_list:
        if max_fra_num is not None:
            L = max_fra_num
        else:
            L = np.infty            
        if n>L:
            warnings.warn("Warning: the dose value is too large, and please" 
                          "check the maxium dose in this data set and give a smaller dose value."
                          "We will use the maxium dose of the data.") 
            n = L 
        if log_taus:
            lag_steps = get_multi_tau_lag_steps(n,  num_bufs)
        else:
            lag_steps = np.arange( n )
        tausd[n] = lag_steps * acq_time
    return tausd
Project: mirapie    Author: Chutlhu    | project source | file source
def wavread(fileName, lmax=np.infty, offset=0, len_in_smpl=False):
    """reads the wave file and returns an ndarray and the sampling frequency"""

    def isValid(fileName):
        if not fileName:
            return False
        try:
            fileHandle = wave.open(fileName, 'r')
            fileHandle.close()
            return True
        except Exception:
            return False
    if not isValid(fileName):
        print("invalid WAV file. Aborting")
        return None

    # metadata properties of a file
    length, nChans, fs, sampleWidth = wavinfo(fileName)
    if not len_in_smpl:
        lmax = lmax * fs
    length = min(length - offset, lmax)
    waveform = np.zeros((length, nChans))

    # reading data
    fileHandle = wave.open(fileName, 'r')
    fileHandle.setpos(offset)
    batchSizeT = 3000000
    pos = 0
    while pos < length:
        batchSize = min(batchSizeT, length - pos)
        str_bytestream = fileHandle.readframes(int(batchSize*2/sampleWidth))
        tempData = np.frombuffer(str_bytestream, dtype='h')
        tempData = tempData.astype(float)
        tempData = tempData.reshape(batchSize, nChans)
        waveform[pos:pos+batchSize, :] = tempData / float(2**(8*sampleWidth - 1))
        pos += batchSize
    fileHandle.close()
    return (waveform, fs)
Project: NLP.py    Author: PythonOptimizers    | project source | file source
def norm_infty(x):
    """Compute infinity norm of `x`."""
    if len(x) > 0:
        return norm(x, ord=infty)
    return 0.0
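For example (a sketch; norm and infty come from the project's imports, presumably numpy.linalg.norm and numpy.infty):

print(norm_infty(np.array([1.0, -5.0, 3.0])))  # 5.0: the largest absolute entry
print(norm_infty(np.array([])))                # 0.0: the guarded empty case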
Project: bates_galaxies_lab    Author: aleksds    | project source | file source
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except(ValueError):
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec

Project: bates_galaxies_lab    Author: aleksds    | project source | file source
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        print('oh shit prior')
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except(ValueError):
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec

Project: bates_galaxies_lab    Author: aleksds    | project source | file source
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    #print('lnprobfn loves pizza')
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        #print('oh shit prior')
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except(ValueError):
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec

Project: bates_galaxies_lab    Author: aleksds    | project source | file source
def lnprobfn(theta):
    """Given a parameter vector, a dictionary of observational data 
    and a model object, return the ln of the posterior. 
    This requires that an sps object (and if using spectra 
    and gaussian processes, a GP object) be instantiated.
    """

    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty

    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except(ValueError):
        return -np.infty
    d1 = time.time() - t1
    vectors = {}  # This would be used for noise model weight functions

    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    return lnp_prior + lnp_phot + lnp_spec
Project: fang    Author: rgrosse    | project source | file source
def check_partition_function():
    with misc.gnumpy_conversion_check('allow'):
        rbm = random_rbm()
        total = -np.infty

        for vis_ in itertools.product(*[[0, 1]] * NVIS):
            vis = gnp.garray(vis_)
            for hid_ in itertools.product(*[[0, 1]] * NHID):
                hid = gnp.garray(hid_)
                total = np.logaddexp(total, rbm.energy(vis[nax, :], hid[nax, :])[0])

        assert np.allclose(tractable.exact_partition_function(rbm, batch_units=BATCH_UNITS), total)
Project: empymod    Author: empymod    | project source | file source
def test_get_layer_nr():
    bip = np.array([0, 0, 300])
    lbip, zbip = utils.get_layer_nr(bip, np.array([-np.infty, 500]))
    assert lbip == 0
    assert zbip == 300
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 300, 500]))
    assert lbip == 1
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 200]))
    assert lbip == 2
    bip = np.array([np.zeros(4), np.zeros(4), np.arange(4)*100])
    lbip, _ = utils.get_layer_nr(bip, np.array([-np.infty, 0, 200]))
    assert_allclose(lbip, [0, 1, 1, 2])
Project: chapter_codes    Author: advancedpythonprogramming    | project source | file source
def maximum(values):
    temp_max = -np.infty
    for v in values:
        if v > temp_max:
            temp_max = v
        yield temp_max
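Starting the accumulator at -np.infty means the first element always wins; for instance (a quick sketch):

print(list(maximum([3, 1, 4, 1, 5])))  # [3, 3, 4, 4, 5]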
Project: Robo-Plot    Author: JackBuck    | project source | file source
def __init__(self, y_axis: Axis, x_axis: Axis):
        self.x_axis = x_axis
        self.y_axis = y_axis

        self.x_soft_lower_limit = -np.infty
        self.x_soft_upper_limit = np.infty
        self.y_soft_lower_limit = -np.infty
        self.y_soft_upper_limit = np.infty
Project: CV-lecture-quizzes-python    Author: pdvelez    | project source | file source
def find_best_match(patch, strip):

    # We will use SSD to find out the best match
    best_id = 0
    min_diff = np.infty

    for i in range(int(strip.shape[1] - patch.shape[1])):
        temp = strip[:, i: i + patch.shape[1]]
        ssd = np.sum((temp - patch) ** 2)
        if ssd < min_diff:
            min_diff = ssd
            best_id = i

    return best_id
Project: Parallel-SGD    Author: angadgill    | project source | file source
def test_labels_assignment_and_inertia():
    # pure numpy implementation as easily auditable reference gold
    # implementation
    rng = np.random.RandomState(42)
    noisy_centers = centers + rng.normal(size=centers.shape)
    labels_gold = - np.ones(n_samples, dtype=np.int)
    mindist = np.empty(n_samples)
    mindist.fill(np.infty)
    for center_id in range(n_clusters):
        dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
        labels_gold[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia_gold = mindist.sum()
    assert_true((mindist >= 0.0).all())
    assert_true((labels_gold != -1).all())

    # perform label assignment using the dense array input
    x_squared_norms = (X ** 2).sum(axis=1)
    labels_array, inertia_array = _labels_inertia(
        X, x_squared_norms, noisy_centers)
    assert_array_almost_equal(inertia_array, inertia_gold)
    assert_array_equal(labels_array, labels_gold)

    # perform label assignment using the sparse CSR input
    x_squared_norms_from_csr = row_norms(X_csr, squared=True)
    labels_csr, inertia_csr = _labels_inertia(
        X_csr, x_squared_norms_from_csr, noisy_centers)
    assert_array_almost_equal(inertia_csr, inertia_gold)
    assert_array_equal(labels_csr, labels_gold)
Project: scikit-cmeans    Author: bm424    | project source | file source
def fit(self, x):
        """Optimizes cluster centers by restarting convergence several times.

        Parameters
        ----------
        x : :obj:`np.ndarray`
            (n_samples, n_features)
            The original data.

        """
        objective_best = np.infty
        memberships_best = None
        centers_best = None
        j_list = []
        for i in range(self.n_init):
            self.centers = None
            self.memberships = None
            self.converge(x)
            objective = self.objective(x)
            j_list.append(objective)
            if objective < objective_best:
                memberships_best = self.memberships.copy()
                centers_best = self.centers.copy()
                objective_best = objective
        self.memberships = memberships_best
        self.centers = centers_best
        return j_list
Project: scikit-cmeans    Author: bm424    | project source | file source
def objective(self, x):
        if self.memberships is None or self.centers is None:
            return np.infty
        distances = self.distances(x)
        return np.sum(self.memberships * distances)
Project: scikit-cmeans    Author: bm424    | project source | file source
def objective(self, x):
        if self.memberships is None or self.centers is None:
            return np.infty
        distances = self.distances(x)
        return np.sum(self.fuzzifier(self.memberships) * distances)
Project: POT    Author: rflamary    | project source | file source
def fit(self, Xs=None, ys=None, Xt=None, yt=None):
        """Build a coupling matrix from source and target sets of samples
        (Xs, ys) and (Xt, yt)

        Parameters
        ----------
        Xs : array-like, shape (n_source_samples, n_features)
            The training input samples.
        ys : array-like, shape (n_source_samples,)
            The class labels
        Xt : array-like, shape (n_target_samples, n_features)
            The training input samples.
        yt : array-like, shape (n_target_samples,)
            The class labels. If some target samples are unlabeled, fill the
            yt's elements with -1.

            Warning: note that, due to this convention, -1 cannot be used as a
            class label

        Returns
        -------
        self : object
            Returns self.
        """

        # check the necessary inputs parameters are here
        if check_params(Xs=Xs, Xt=Xt):

            # pairwise distance
            self.cost_ = dist(Xs, Xt, metric=self.metric)
            self.cost_ = cost_normalization(self.cost_, self.norm)

            if (ys is not None) and (yt is not None):

                if self.limit_max != np.infty:
                    self.limit_max = self.limit_max * np.max(self.cost_)

                # assumes labeled source samples occupy the first rows
                # and labeled target samples occupy the first columns
                classes = [c for c in np.unique(ys) if c != -1]
                for c in classes:
                    idx_s = np.where((ys != c) & (ys != -1))
                    idx_t = np.where(yt == c)

                    # all the coefficients pairing a source sample and a
                    # target sample with different labels get an infinite
                    # (limit_max) cost
                    for j in idx_t[0]:
                        self.cost_[idx_s[0], j] = self.limit_max

            # distribution estimation
            self.mu_s = self.distribution_estimation(Xs)
            self.mu_t = self.distribution_estimation(Xt)

            # store arrays of samples
            self.xs_ = Xs
            self.xt_ = Xt

        return self
Project: microbiome-summer-school-2017    Author: aldro61    | project source | file source
def cross_validation_spectrum_kernel(seq, is_train, folds):
    # Cross-validation
    # Repeat for each combination of candidate hyperparameter values
    substring_length = 3
    C_values = [0.01, 0.1, 1.0, 10., 100.]

    best_result = {"score": -np.infty}

    seq_vec = sequences_to_vec(seq, substring_length)
    K = np.dot(seq_vec, seq_vec.T)
    K_train = K[is_train][:, is_train]
    K_test = K[~is_train][:, is_train]
    y_train = labels[is_train]
    y_test = labels[~is_train]

    for C in C_values:

        print "Parameters: C: {0:.4f}".format(C)

        # Cross-validation
        fold_scores = []
        for fold in np.unique(folds):
            print "...Fold {0:d}".format(fold + 1)
            fold_K_train = K_train[folds != fold][:, folds != fold]
            fold_K_test = K_train[folds == fold][:, folds != fold]
            fold_y_train = y_train[folds != fold]
            fold_y_test = y_train[folds == fold]
            fold_estimator = SupportVectorRegression(kernel="precomputed", C=C)
            fold_estimator.fit(fold_K_train.copy(), fold_y_train)
            fold_scores.append(fold_estimator.score(fold_K_test, fold_y_test))
        cv_score = np.mean(fold_scores)
        print "...... cv score:", cv_score

        if cv_score > best_result["score"]:
            best_result["score"] = cv_score
            best_result["K"] = dict(train=K_train, test=K_test, full=K)
            best_result["estimator"] = SupportVectorRegression(kernel="precomputed", C=C).fit(K_train, y_train)
            best_result["hps"] = dict(C=C)

        print()
        print()
    return best_result
Project: GPflow    Author: GPflow    | project source | file source
def __init__(self,
               loss,
               var_list=None,
               equalities=None,
               inequalities=None,
               var_to_bounds=None,
               **optimizer_kwargs):
    """Initialize a new interface instance.

    Args:
      loss: A scalar `Tensor` to be minimized.
      var_list: Optional `list` of `Variable` objects to update to minimize
        `loss`.  Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      equalities: Optional `list` of equality constraint scalar `Tensor`s to be
        held equal to zero.
      inequalities: Optional `list` of inequality constraint scalar `Tensor`s
        to be held nonnegative.
      var_to_bounds: Optional `dict` where each key is an optimization
        `Variable` and each corresponding value is a length-2 tuple of
        `(low, high)` bounds. Although enforcing this kind of simple constraint
        could be accomplished with the `inequalities` arg, not all optimization
        algorithms support general inequality constraints, e.g. L-BFGS-B. Both
        `low` and `high` can either be numbers or anything convertible to a
        NumPy array that can be broadcast to the shape of `var` (using
        `np.broadcast_to`). To indicate that there is no bound, use `None` (or
        `+/- np.infty`). For example, if `var` is a 2x3 matrix, then any of
        the following corresponding `bounds` could be supplied:
        * `(0, np.infty)`: Each element of `var` held positive.
        * `(-np.infty, [1, 2])`: First column less than 1, second column less
          than 2.
        * `(-np.infty, [[1], [2], [3]])`: First row less than 1, second row less
          than 2, etc.
        * `(-np.infty, [[1, 2, 3], [4, 5, 6]])`: Entry `var[0, 0]` less than 1,
          `var[0, 1]` less than 2, etc.
      **optimizer_kwargs: Other subclass-specific keyword arguments.
    """
    self.optimizer_kwargs = optimizer_kwargs

    self._loss = loss
    self._equalities = equalities or []
    self._inequalities = inequalities or []
    self._var_to_bounds = var_to_bounds

    if var_list is None:
      self._vars = variables.trainable_variables()
    elif var_list == []:
      raise ValueError("No variables to optimize.")
    else:
      self._vars = list(var_list)

    self._packed_bounds = []
    self._update_placeholders = []
    self._var_updates = []
    self._packed_var = None
    self._packed_loss_grad = None
    self._packed_equality_grads = []
    self._packed_inequality_grads = []
    self._var_shapes = None
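A sketch of the var_to_bounds convention described in the docstring above (assumes TensorFlow 1.x and a concrete subclass such as ScipyOptimizerInterface; the variables are illustrative):

import numpy as np
import tensorflow as tf

a = tf.Variable(1.0)
b = tf.Variable(tf.zeros([2, 3]))
loss = tf.reduce_sum(tf.square(a - 2.0)) + tf.reduce_sum(tf.square(b + 1.0))

optimizer = ScipyOptimizerInterface(
    loss,
    var_to_bounds={a: (0, np.infty),           # a held nonnegative
                   b: (-np.infty, [1, 2, 3])}) # per-column upper bounds on b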
Project: GPflow    Author: GPflow    | project source | file source
def _initialize_updated_shapes(self, session):
    shapes = array_ops.shape_n(self._vars)
    var_shapes = list(map(tuple, session.run(shapes)))

    if self._var_shapes is not None:
      new_old_shapes = zip(self._var_shapes, var_shapes)
      if all([old == new for old, new in new_old_shapes]):
        return

    self._var_shapes = var_shapes
    vars_and_shapes = zip(self._vars, self._var_shapes)
    vars_and_shapes_dict = dict(vars_and_shapes)

    packed_bounds = None
    if self._var_to_bounds is not None:
      left_packed_bounds = []
      right_packed_bounds = []
      for var, var_shape in vars_and_shapes:
        shape = list(var_shape)
        bounds = (-np.infty, np.infty)
        if var in self._var_to_bounds:
          bounds = self._var_to_bounds[var]
        left_packed_bounds.extend(list(np.broadcast_to(bounds[0], shape).flat))
        right_packed_bounds.extend(list(np.broadcast_to(bounds[1], shape).flat))
      packed_bounds = list(zip(left_packed_bounds, right_packed_bounds))
    self._packed_bounds = packed_bounds

    self._update_placeholders = [
        array_ops.placeholder(var.dtype) for var in self._vars
    ]
    self._var_updates = [
        var.assign(array_ops.reshape(placeholder, vars_and_shapes_dict[var]))
        for var, placeholder in zip(self._vars, self._update_placeholders)
    ]

    loss_grads = _compute_gradients(self._loss, self._vars)
    equalities_grads = [
        _compute_gradients(equality, self._vars)
        for equality in self._equalities
    ]
    inequalities_grads = [
        _compute_gradients(inequality, self._vars)
        for inequality in self._inequalities
    ]

    self._packed_var = self._pack(self._vars)
    self._packed_loss_grad = self._pack(loss_grads)
    self._packed_equality_grads = [
        self._pack(equality_grads) for equality_grads in equalities_grads
    ]
    self._packed_inequality_grads = [
        self._pack(inequality_grads) for inequality_grads in inequalities_grads
    ]

    dims = [_prod(vars_and_shapes_dict[var]) for var in self._vars]
    accumulated_dims = list(_accumulate(dims))
    self._packing_slices = [
        slice(start, end)
        for start, end in zip(accumulated_dims[:-1], accumulated_dims[1:])
    ]
Project: NLP.py    Author: PythonOptimizers    | project source | file source
def __init__(self, model, **kwargs):
        """Initialize a slack form of an :class:`NLPModel`.

        :parameters:
            :model:  Original model to be transformed into a slack form.
        """
        self.model = model

        # Save number of variables and constraints prior to transformation
        self.original_n = model.n
        self.original_m = model.m

        # Number of slacks for the constraints
        n_slacks = model.nlowerC + model.nupperC + model.nrangeC
        self.n_slacks = n_slacks

        # Update effective number of variables and constraints
        n = self.original_n + n_slacks
        m = self.original_m

        Lvar = -np.infty * np.ones(n)
        Uvar = +np.infty * np.ones(n)

        # Copy original bounds
        Lvar[:self.original_n] = model.Lvar
        Uvar[:self.original_n] = model.Uvar

        # Add bounds corresponding to lower constraints
        bot = self.original_n
        self.sL = range(bot, bot + model.nlowerC)
        Lvar[bot:bot + model.nlowerC] = model.Lcon[model.lowerC]

        # Add bounds corresponding to upper constraints
        bot += model.nlowerC
        self.sU = range(bot, bot + model.nupperC)
        Uvar[bot:bot + model.nupperC] = model.Ucon[model.upperC]

        # Add bounds corresponding to range constraints
        bot += model.nupperC
        self.sR = range(bot, bot + model.nrangeC)
        Lvar[bot:bot + model.nrangeC] = model.Lcon[model.rangeC]
        Uvar[bot:bot + model.nrangeC] = model.Ucon[model.rangeC]

        # No more inequalities. All constraints are now equal to 0
        Lcon = Ucon = np.zeros(m)

        super(SlackModel, self).__init__(n=n, m=m, name='Slack-' + model.name,
                                         Lvar=Lvar, Uvar=Uvar,
                                         Lcon=Lcon, Ucon=Ucon)

        # Redefine primal and dual initial guesses
        self.original_x0 = model.x0[:]
        self.x0 = np.zeros(self.n)
        self.x0[:self.original_n] = self.original_x0[:]

        self.original_pi0 = model.pi0[:]
        self.pi0 = np.zeros(self.m)
        self.pi0[:self.original_m] = self.original_pi0[:]
        return
Project: bates_galaxies_lab    Author: aleksds    | project source | file source
def lnprobfn(theta, model, obs, sps, verbose=False, spec_noise=None,
             phot_noise=None, residuals=False):
    """Define the likelihood function.
    Given a parameter vector and a dictionary of observational data and a model
    object, return the ln of the posterior. This requires that an sps object
    (and if using spectra and gaussian processes, a GP object) be instantiated.
    """
    from prospect.likelihood import lnlike_spec, lnlike_phot, chi_spec, chi_phot

    # Calculate the prior probability and exit if not within the prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty

    # Generate the mean model--
    t1 = time()
    try:
        model_spec, model_phot, model_extras = model.mean_model(theta, obs, sps=sps)
    except(ValueError):
        return -np.infty
    d1 = time() - t1

    # Return chi vectors for least-squares optimization--
    if residuals:
        chispec = chi_spec(model_spec, obs)
        chiphot = chi_phot(model_phot, obs)        
        return np.concatenate([chispec, chiphot])

    # Noise modeling--
    if spec_noise:
        spec_noise.update(**model.params)
    if phot_noise:
        phot_noise.update(**model.params)

    vectors = {
        'spec': model_spec,    # model spectrum
        'phot': model_phot,    # model photometry
        'sed': model._spec,    # object spectrum
        'cal': model._speccal, # object calibration spectrum
        }

    # Calculate likelihoods--
    t2 = time()
    lnp_spec = lnlike_spec(model_spec, obs=obs, spec_noise=spec_noise, **vectors)
    lnp_phot = lnlike_phot(model_phot, obs=obs, phot_noise=phot_noise, **vectors)
    d2 = time() - t2
    if False:
        from prospect.likelihood import write_log
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)

    #if (lnp_prior + lnp_phot + lnp_spec) > 0:
    #    print('Total probability is positive!!', lnp_prior, lnp_phot)
    #    pdb.set_trace()

    return lnp_prior + lnp_phot + lnp_spec