Python numpy module: square() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use numpy.square().
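
As a quick orientation before the project snippets, here is a minimal sketch of what numpy.square does: it squares its input elementwise and is equivalent to x ** 2.

import numpy as np

x = np.array([-2.0, 0.5, 3.0])
print(np.square(x))                        # [4.    0.25  9.  ]
print(np.allclose(np.square(x), x ** 2))   # True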

Project: baselines    Author: openai    | project source | file source
def mpi_moments(x, axis=0):
    x = np.asarray(x, dtype='float64')
    newshape = list(x.shape)
    newshape.pop(axis)
    n = np.prod(newshape,dtype=int)
    totalvec = np.zeros(n*2+1, 'float64')
    addvec = np.concatenate([x.sum(axis=axis).ravel(), 
        np.square(x).sum(axis=axis).ravel(), 
        np.array([x.shape[axis]],dtype='float64')])
    MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
    sum = totalvec[:n]
    sumsq = totalvec[n:2*n]
    count = totalvec[2*n]
    if count == 0:
        mean = np.empty(newshape); mean[:] = np.nan
        std = np.empty(newshape); std[:] = np.nan
    else:
        mean = sum/count
        std = np.sqrt(np.maximum(sumsq/count - np.square(mean),0))
    return mean, std, count
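
For context, mpi_moments reduces per-process sums so that mean = Σx/n and std = sqrt(Σx²/n − mean²). A single-process sanity check of that identity (no MPI, assumed toy data):

import numpy as np

x = np.random.randn(100, 4)
n = x.shape[0]
s, sq = x.sum(axis=0), np.square(x).sum(axis=0)
mean = s / n
std = np.sqrt(np.maximum(sq / n - np.square(mean), 0))
assert np.allclose(mean, x.mean(axis=0))
assert np.allclose(std, x.std(axis=0))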
Project: distributional_perspective_on_RL    Author: Kiwoo    | project source | file source
def batchnorm(x, name, phase, updates, gamma=0.96):
    k = x.get_shape()[1]
    runningmean = tf.get_variable(name+"/mean", shape=[1, k], initializer=tf.constant_initializer(0.0), trainable=False)
    runningvar = tf.get_variable(name+"/var", shape=[1, k], initializer=tf.constant_initializer(1e-4), trainable=False)
    testy = (x - runningmean) / tf.sqrt(runningvar)

    mean_ = mean(x, axis=0, keepdims=True)
    var_ = mean(tf.square(x), axis=0, keepdims=True)
    std = tf.sqrt(var_)
    trainy = (x - mean_) / std

    updates.extend([
        tf.assign(runningmean, runningmean * gamma + mean_ * (1 - gamma)),
        tf.assign(runningvar, runningvar * gamma + var_ * (1 - gamma))
    ])

    y = switch(phase, trainy, testy)

    out = y * tf.get_variable(name+"/scaling", shape=[1, k], initializer=tf.constant_initializer(1.0), trainable=True)\
            + tf.get_variable(name+"/translation", shape=[1,k], initializer=tf.constant_initializer(0.0), trainable=True)
    return out
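
The running statistics above are exponential moving averages, running ← γ·running + (1 − γ)·batch. A minimal NumPy sketch of that update rule, with hypothetical batch data:

import numpy as np

gamma = 0.96
running_mean = np.zeros(4)
for _ in range(200):
    batch = np.random.randn(32, 4) + 1.5      # hypothetical batches with mean ~1.5
    running_mean = gamma * running_mean + (1 - gamma) * batch.mean(axis=0)
print(running_mean)                           # converges towards ~1.5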

# ================================================================
# Mathematical utils
# ================================================================
Project: distributional_perspective_on_RL    Author: Kiwoo    | project source | file source
def huber_loss(x, delta=1.0):
    """Reference: https://en.wikipedia.org/wiki/Huber_loss"""
    return tf.where(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta)
    )
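
Equivalently, in plain NumPy, the Huber loss is quadratic inside |x| < delta and linear outside; a small sketch of the same piecewise formula:

import numpy as np

def huber_loss_np(x, delta=1.0):
    # 0.5 * x^2 where |x| < delta, delta * (|x| - 0.5 * delta) elsewhere
    return np.where(np.abs(x) < delta,
                    0.5 * np.square(x),
                    delta * (np.abs(x) - 0.5 * delta))

print(huber_loss_np(np.array([-3.0, 0.5, 2.0])))   # [2.5   0.125 1.5  ]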

# ================================================================
# Basic Stuff
# ================================================================

# ================================================================
# Theano-like Function
# ================================================================

# ================================================================
# Optimizer utils
# ================================================================
Project: cellranger    Author: 10XGenomics    | project source | file source
def get_normalized_dispersion(mat_mean, mat_var, nbins=20):
    mat_disp = (mat_var - mat_mean) / np.square(mat_mean)

    quantiles = np.percentile(mat_mean, np.arange(0, 100, 100 / nbins))
    quantiles = np.append(quantiles, mat_mean.max())

    # merge bins with no difference in value
    quantiles = np.unique(quantiles)

    if len(quantiles) <= 1:
        # pathological case: the means are all identical. just return raw dispersion.
        return mat_disp

    # calc median dispersion per bin
    (disp_meds, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, mat_disp, statistic='median', bins=quantiles)

    # calc median absolute deviation of dispersion per bin
    disp_meds_arr = disp_meds[disp_bins-1] # 0th bin is empty since our quantiles start from 0
    disp_abs_dev = abs(mat_disp - disp_meds_arr)
    (disp_mads, _, disp_bins) = scipy.stats.binned_statistic(mat_mean, disp_abs_dev, statistic='median', bins=quantiles)

    # calculate normalized dispersion
    disp_mads_arr = disp_mads[disp_bins-1]
    disp_norm = (mat_disp - disp_meds_arr) / disp_mads_arr
    return disp_norm
Project: pdnn    Author: petered    | project source | file source
def temporalize(x, smoothing_steps, distance='L1'):
    """
    :param x: An (n_samples, n_dims) dataset
    :return: A (n_samples, ) array of indexes that can be used to shuffle the input for temporal smoothness.
    """
    x_flat = x.reshape(x.shape[0], -1)
    index_buffer = np.arange(1, smoothing_steps+1)
    next_sample_buffer = x_flat[1:smoothing_steps+1].copy()
    # Technically, we could do this without a next_sample_buffer (and only an index_buffer), but it would require
    # repeatedly accessing a bunch of really scattered memory, so we do it this way.
    shuffling_indices = np.zeros(len(x), dtype=int)
    rectifier = np.abs if distance=='L1' else np.square if distance=='L2' else bad_value(distance)
    p=ProgressIndicator(len(x), name = 'Temporalize')
    current_index = 0
    for i in xrange(len(x)):
        shuffling_indices[i] = current_index
        closest = np.argmin(rectifier(x_flat[current_index]-next_sample_buffer).sum(axis=1))
        current_index = index_buffer[closest]
        weve_aint_done_yet = i+smoothing_steps+1 < len(x)
        next_index = i+smoothing_steps+1
        next_sample_buffer[closest] = x_flat[next_index] if weve_aint_done_yet else float('inf')
        index_buffer[closest] = next_index if weve_aint_done_yet else -1
        p()
    return shuffling_indices
Project: vad    Author: bond005    | project source | file source
def calculate_features_for_VAD(sound_frames, frequencies_axis, spectrogram):
    features = numpy.empty((spectrogram.shape[0], 3))
    # smoothed_spectrogram, smoothed_frequencies_axis = smooth_spectrogram(spectrogram, frequencies_axis, 24)
    for time_ind in range(spectrogram.shape[0]):
        mean_spectrum = spectrogram[time_ind].mean()
        if mean_spectrum > 0.0:
            sfm = -10.0 * math.log10(stats.gmean(spectrogram[time_ind]) / mean_spectrum)
        else:
            sfm = 0.0
        # max_freq = smoothed_frequencies_axis[smoothed_spectrogram[time_ind].argmax()]
        max_freq = frequencies_axis[spectrogram[time_ind].argmax()]
        features[time_ind][0] = numpy.square(sound_frames[time_ind]).mean()
        features[time_ind][1] = sfm
        features[time_ind][2] = max_freq
    """medfilt_order = 3
    for feature_ind in range(features.shape[0]):
        features[feature_ind] = signal.medfilt(features[feature_ind], medfilt_order)"""
    return features
Project: Brainforge    Author: sakram07    | project source | file source
def score(self,X_test,y_test):
        """
        returns the score on the test set

        <PARAMETERS>
        X : input features of testing set (numpy array, list)
        y : values to map to of testing set (numpy array, list)

        <return type>
        returns score (float)

        """
        y_test = np.reshape(y_test,(y_test.shape[0],1))
        if self.normalize_ == True:
            X_test = self.normalize(X_test)
        X_test = self.add_bias(X_test)
        self.predict(X_test)
        u = np.square(y_test - self.predictions).sum()
        v = np.square(y_test - y_test.mean()).sum()
        # avoid assigning to self.score, which would shadow this method
        return 1 - u/v
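
The quantity 1 - u/v above is the coefficient of determination R², with u the residual sum of squares and v the total sum of squares. A standalone check with hypothetical values:

import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
u = np.square(y_true - y_pred).sum()
v = np.square(y_true - y_true.mean()).sum()
print(1 - u / v)                              # ~0.9486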
Project: DistanceGAN    Author: sagiebenaim    | project source | file source
def get_std(self, num_items, vars, expectation):

        num_pairs = 0
        std_sum = 0.0

        # If using self-distance, compute the std between the top and bottom halves
        if self.use_self_distance:
            for i in xrange(num_items):
                var_half_1, var_half_2 = torch.chunk(vars[i], 2, dim=2)
                std_sum += np.square(self.as_np(self.distance(var_half_1, var_half_2)) - expectation)
            return np.sqrt(std_sum / num_items)

        # Otherwise compute std for all pairs of images
        for i in xrange(num_items - 1):
            for j in xrange(i + 1, num_items):
                num_pairs += 1
                std_sum += np.square(self.as_np(self.distance(vars[i], vars[j])) - expectation)

        return np.sqrt(std_sum / num_pairs)
Project: audio_scripts    Author: audiofilter    | project source | file source
def check_onset(audio):
        # Determine if there is an 'early' onset
        b_lpf, a_lpf = signal.butter(1, 200/44100.0, 'low')
        sim = numpy.square(audio)
        audio_lpf   = signal.filtfilt(b_lpf,a_lpf,sim)
        thres = 0.015

        invert = 0
        found = 0
        for i in xrange(len(audio)):
                if ((audio_lpf[i] > thres) and not found):
                        if (i < 4000):
                                print "Detected onset for ",fil," at time ",i/44.1," msecs "
                                found = 1
                                invert = 1
        return invert
Project: untwist    Author: IoSR-Surrey    | project source | file source
def process(self, wave):
        wave.check_mono()
        if wave.sample_rate != self.sr:
            raise Exception("Wrong sample rate")                              
        n = int(np.ceil(2 * wave.num_frames / float(self.w_len)))
        m = (n + 1) * self.w_len / 2 
        swindow = self.make_signal_window(n)
        win_ratios = [self.window / swindow[t * self.w_len / 2 : 
            t * self.w_len / 2 + self.w_len] 
            for t in range(n)]
        wave = wave.zero_pad(0, int(m - wave.num_frames))
        wave = audio.Wave(signal.hilbert(wave), wave.sample_rate)        
        result = np.zeros((self.n_bins, n))

        for b in range(self.n_bins): 
            w = self.widths[b]
            wc = 1 / np.square(w + 1)
            filter = self.filters[b]
            band = fftfilt(filter, wave.zero_pad(0, int(2 * w))[:,0])
            band = band[int(w) : int(w + m), np.newaxis]    
            for t in range(n):
                frame = band[t * self.w_len / 2:
                             t * self.w_len / 2 + self.w_len,:] * win_ratios[t]
                result[b, t] =  wc * np.real(np.conj(np.dot(frame.conj().T, frame)))
        return audio.Spectrogram(result, self.sr, self.w_len, self.w_len / 2)
Project: gym-extensions    Author: Breakend    | project source | file source
def _step(self, a):
        pos_before = mass_center(self.model)
        self.do_simulation(a, self.frame_skip)
        pos_after = mass_center(self.model)

        pos_after_standup =  self.model.data.qpos[2][0]

        down = bool(( self.model.data.qpos[2] < 1.0) or ( self.model.data.qpos[2] > 2.0))

        alive_bonus = 5.0 if not down else 1.0

        data = self.model.data

        uph_cost = (pos_after_standup - 0) / self.model.opt.timestep
        lin_vel_cost = 0.25 * (pos_after - pos_before) / self.model.opt.timestep

        quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
        quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
        quad_impact_cost = min(quad_impact_cost, 10)

        reward = lin_vel_cost + uph_cost - quad_ctrl_cost - quad_impact_cost + alive_bonus
        qpos = self.model.data.qpos

        done = bool(False)
        return self._get_obs(), reward, done, dict(reward_linup=uph_cost, reward_quadctrl=-quad_ctrl_cost, reward_impact=-quad_impact_cost, reward_alive=alive_bonus)
Project: iutils    Author: inconvergent    | project source | file source
def sandstroke_non_linear(self,xys,grains=10,left=True):

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    dx = xys[:,2] - xys[:,0]
    dy = xys[:,3] - xys[:,1]

    aa = arctan2(dy,dx)
    directions = column_stack([cos(aa),sin(aa)])

    dd = sqrt(square(dx)+square(dy))

    for i,d in enumerate(dd):
      rnd = sqrt(random((grains,1)))
      if left:
        rnd = 1.0-rnd

      for x,y in xys[i,:2] + directions[i,:]*rnd*d:
        rectangle(x,y,pix,pix)
        fill()
Project: iutils    Author: inconvergent    | project source | file source
def sandstroke(self,xys,grains=10):

    pix = self.pix
    rectangle = self.ctx.rectangle
    fill = self.ctx.fill

    dx = xys[:,2] - xys[:,0]
    dy = xys[:,3] - xys[:,1]

    aa = arctan2(dy,dx)
    directions = column_stack([cos(aa),sin(aa)])

    dd = sqrt(square(dx)+square(dy))

    for i,d in enumerate(dd):
      for x,y in xys[i,:2] + directions[i,:]*random((grains,1))*d:
        rectangle(x,y,pix,pix)
        fill()
Project: geco_data    Author: stefco    | project source | file source
def __init__(self, channel, timeseries=None, stats=None, n=None):
        """If timeseries is None, then we are manually initializing with custom
        stat values. Otherwise, calculate stat values from the timeseries.
        Input is assumed to be a GWpy timeseries."""
        self.channel = channel
        if timeseries is None:
            self.n = n
            self.stats = stats
        else:
            self.n = 1
            # wrap the end of the second onto the start of that same second.
            # obviously this is only okay with quasi periodic signals!
            zero_crossing = np.concatenate((timeseries.value[DUOTONE_START:],
                                            timeseries.value[:DUOTONE_END]))
            self.stats = {
                "sum": zero_crossing,
                "sum_sq": np.square(zero_crossing),
                "mean": zero_crossing,
                "sigma": zero_crossing * 0., # i.e. zeros
                "max": zero_crossing,
                "min": zero_crossing
            }
Project: snn4hrl    Author: florensacc    | project source | file source
def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        forward_reward = np.linalg.norm(self.get_body_comvel("torso"))  # swimmer has no problem of jumping reward
        reward = forward_reward - ctrl_cost
        done = False
        if self.sparse_rew:
            if abs(self.get_body_com("torso")[0]) > 100.0:
                reward = 1.0
                done = True
            else:
                reward = 0.
        com = np.concatenate([self.get_body_com("torso").flat]).reshape(-1)
        ori = self.get_ori()
        return Step(next_obs, reward, done, com=com, ori=ori)
Project: snn4hrl    Author: florensacc    | project source | file source
def step(self, action):
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        forward_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        notdone = np.isfinite(state).all() \
            and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)
Project: psp    Author: cmap    | project source | file source
def distance_function(values, medians):
    """This function calculates the distance metric.
    N.B. Only uses the non-NaN values.

    dist = sum( (s - m)^2 )

    s is the vector of sample values
    m is the vector of probe medians

    Args:
        values (numpy array of floats)
        medians (numpy array of floats)
    Returns:
        dist (float)
    """
    non_nan_idx = ~np.isnan(values)
    assert np.sum(non_nan_idx) != 0, "All values in this sample are NaN!"

    non_nan_values = values[non_nan_idx]
    non_nan_medians = medians[non_nan_idx]
    dist = sum(np.square(non_nan_values - non_nan_medians))
    return dist

# tested #
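
A small usage sketch of distance_function with a NaN entry, using hypothetical values; the NaN position is simply skipped:

import numpy as np

vals = np.array([1.0, np.nan, 3.0])
meds = np.array([0.5, 2.0, 2.0])
print(distance_function(vals, meds))          # (1-0.5)^2 + (3-2)^2 = 1.25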
Project: third_person_im    Author: bstadie    | project source | file source
def step(self, action):
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        forward_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        notdone = np.isfinite(state).all() \
            and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)
Project: third_person_im    Author: bstadie    | project source | file source
def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()

        alive_bonus = self.alive_bonus
        data = self.model.data

        comvel = self.get_body_comvel("torso")

        lin_vel_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = .5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        impact_cost = .5 * self.impact_cost_coeff * np.sum(
            np.square(np.clip(data.cfrc_ext, -1, 1)))
        vel_deviation_cost = 0.5 * self.vel_deviation_cost_coeff * np.sum(
            np.square(comvel[1:]))
        reward = lin_vel_reward + alive_bonus - ctrl_cost - \
            impact_cost - vel_deviation_cost
        done = data.qpos[2] < 0.8 or data.qpos[2] > 2.0

        return Step(next_obs, reward, done)
Project: third_person_im    Author: bstadie    | project source | file source
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        old_means = old_dist_info_vars["mean"]
        old_log_stds = old_dist_info_vars["log_std"]
        new_means = new_dist_info_vars["mean"]
        new_log_stds = new_dist_info_vars["log_std"]
        """
        Compute the KL divergence of two multivariate Gaussian distributions with
        diagonal covariance matrices
        """
        old_std = TT.exp(old_log_stds)
        new_std = TT.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = TT.square(old_means - new_means) + \
                    TT.square(old_std) - TT.square(new_std)
        denominator = 2 * TT.square(new_std) + 1e-8
        return TT.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)
Project: third_person_im    Author: bstadie    | project source | file source
def kl(self, old_dist_info, new_dist_info):
        old_means = old_dist_info["mean"]
        old_log_stds = old_dist_info["log_std"]
        new_means = new_dist_info["mean"]
        new_log_stds = new_dist_info["log_std"]
        """
        Compute the KL divergence of two multivariate Gaussian distributions with
        diagonal covariance matrices
        """
        old_std = np.exp(old_log_stds)
        new_std = np.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = np.square(old_means - new_means) + \
                    np.square(old_std) - np.square(new_std)
        denominator = 2 * np.square(new_std) + 1e-8
        return np.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)
        # more lossy version
        # return TT.sum(
        #     numerator / denominator + TT.log(new_std) - TT.log(old_std ), axis=-1)
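
A quick numeric sanity check of the diagonal-Gaussian KL formula above, with hypothetical parameters; identical distributions give zero, and the result is always non-negative.

import numpy as np

mu_old, mu_new = np.zeros(3), np.array([0.5, 0.0, -0.5])
log_std_old, log_std_new = np.zeros(3), np.zeros(3)
std_old, std_new = np.exp(log_std_old), np.exp(log_std_new)
num = np.square(mu_old - mu_new) + np.square(std_old) - np.square(std_new)
den = 2 * np.square(std_new) + 1e-8
kl = np.sum(num / den + log_std_new - log_std_old)
print(kl)                                     # 0.25 here; 0.0 when the parameters match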
Project: third_person_im    Author: bstadie    | project source | file source
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        old_means = old_dist_info_vars["mean"]
        old_log_stds = old_dist_info_vars["log_std"]
        new_means = new_dist_info_vars["mean"]
        new_log_stds = new_dist_info_vars["log_std"]
        """
        Compute the KL divergence of two multivariate Gaussian distributions with
        diagonal covariance matrices
        """
        old_std = tf.exp(old_log_stds)
        new_std = tf.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = tf.square(old_means - new_means) + \
                    tf.square(old_std) - tf.square(new_std)
        denominator = 2 * tf.square(new_std) + 1e-8
        return tf.reduce_sum(
            numerator / denominator + new_log_stds - old_log_stds, reduction_indices=-1)
Project: chemblnet    Author: jaak-s    | project source | file source
def test_pmean(self):
        X = np.random.randn(10, 2, 3)
        pm = PosteriorMean()
        pm.addSample(X[0,:], average = False)
        pm.addSample(X[1,:], average = False)
        pm.addSample(X[2,:], average = True)
        pm.addSample(X[3,:], average = True)
        self.assertTrue(np.allclose(pm.sample_avg, X[2:4, :].mean(0)))
        self.assertTrue(pm.n == 2)

        for i in range(4, X.shape[0]):
            pm.addSample(X[i,:], average = True)
        self.assertTrue(np.allclose(pm.sample_avg, X[2:, :].mean(0)))
        self.assertTrue(pm.n == 8)

        Xvar      = pm.getVar()
        Xsub      = X[2:, :]
        Xvar_true = np.square((Xsub - Xsub.mean(0))).sum(0) / (Xsub.shape[0] - 1)
        self.assertTrue(np.allclose(Xvar, Xvar_true))
Project: baselines    Author: openai    | project source | file source
def update(self, x):
        batch_mean = np.mean(x, axis=0)
        batch_var = np.var(x, axis=0)
        batch_count = x.shape[0]

        delta = batch_mean - self.mean
        tot_count = self.count + batch_count

        new_mean = self.mean + delta * batch_count / tot_count        
        m_a = self.var * (self.count)
        m_b = batch_var * (batch_count)
        M2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
        new_var = M2 / (self.count + batch_count)

        new_count = batch_count + self.count

        self.mean = new_mean
        self.var = new_var
        self.count = new_count
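
This update is the standard parallel (Chan et al.) mean/variance merge. A sketch verifying it against np.var on the concatenated batches, with assumed random data:

import numpy as np

a, b = np.random.randn(50, 3), np.random.randn(70, 3) + 2.0
mean_a, var_a, n_a = a.mean(0), a.var(0), len(a)
mean_b, var_b, n_b = b.mean(0), b.var(0), len(b)
delta = mean_b - mean_a
m2 = var_a * n_a + var_b * n_b + np.square(delta) * n_a * n_b / (n_a + n_b)
assert np.allclose(m2 / (n_a + n_b), np.concatenate([a, b]).var(0))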
Project: baselines    Author: openai    | project source | file source
def __init__(self, epsilon=1e-2, shape=()):

        self._sum = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(0.0),
            name="runningsum", trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(epsilon),
            name="runningsumsq", trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64,
            shape=(),
            initializer=tf.constant_initializer(epsilon),
            name="count", trainable=False)
        self.shape = shape

        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt( tf.maximum( tf.to_float(self._sumsq / self._count) - tf.square(self.mean) , 1e-2 ))

        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)])
Project: rl_algorithms    Author: DanielTakeshi    | project source | file source
def __init__(self, session, ob_dim=None, n_epochs=10, stepsize=1e-3):
        """ 
        They provide us with an ob_dim in the code so I assume we can use it;
        makes it easy to define the layers anyway. This gets constructed upon
        initialization so future calls to self.fit should remember this. I
        actually use the pre-processed version, though.
        """
        self.n_epochs    = n_epochs
        self.lrate       = stepsize
        self.sy_ytarg    = tf.placeholder(shape=[None], name="nnvf_y", dtype=tf.float32)
        self.sy_ob_no    = tf.placeholder(shape=[None, ob_dim+1], name="nnvf_ob", dtype=tf.float32)
        self.sy_h1       = utils.lrelu(utils.dense(self.sy_ob_no, 32, "nnvf_h1", weight_init=utils.normc_initializer(1.0)), leak=0.0)
        self.sy_h2       = utils.lrelu(utils.dense(self.sy_h1, 32, "nnvf_h2", weight_init=utils.normc_initializer(1.0)), leak=0.0)
        self.sy_final_n  = utils.dense(self.sy_h2, 1, "nnvf_final", weight_init=utils.normc_initializer(1.0))
        self.sy_ypred    = tf.reshape(self.sy_final_n, [-1])
        self.sy_l2_error = tf.reduce_mean(tf.square(self.sy_ypred - self.sy_ytarg))
        self.fit_op      = tf.train.AdamOptimizer(stepsize).minimize(self.sy_l2_error)
        self.sess = session
Project: prince    Author: MaxHalford    | project source | file source
def row_cosine_similarities(self):
        """The squared row cosine similarities.

        The row cosine similarities are obtained by calculating the cosine of the angle shaped by
        the row principal coordinates and the row principal components. This is calculated by
        squaring each row projection coordinate and dividing each squared coordinate by the sum of
        the squared coordinates, which results in a ratio comprised between 0 and 1 representing the
        squared cosine.

        Returns:
            pandas.DataFrame: A dataframe of shape (`n`, `k`) containing the squared row cosine
            similarities.
        """
        squared_coordinates = np.square(self.row_principal_coordinates)
        total_squares = squared_coordinates.sum(axis='columns')
        return squared_coordinates.div(total_squares, axis='rows')
Project: prince    Author: MaxHalford    | project source | file source
def column_cosine_similarities(self):
        """The squared column cosine similarities.

        The column cosine similarities are obtained by calculating the cosine of the angle shaped by
        the column principal coordinates and the column principal components. This is calculated by
        squaring each column projection coordinate and dividing each squared coordinate by the sum
        of the squared coordinates, which results in a ratio comprised between 0 and 1 representing
        the squared cosine.

        Returns:
            pandas.DataFrame: A dataframe of shape (`p`, `k`) containing the squared column
            cosine similarities.
        """
        squared_column_pc = np.square(self.column_principal_coordinates)
        total_squares = squared_column_pc.sum(axis='rows')
        return squared_column_pc.div(total_squares, axis='columns')
Project: EZClimate    Author: Litterman    | project source | file source
def _tipping_point_update(self, tmp, consump, peak_temp_interval=30.0):
        """Determine whether a tipping point has occurred, if so reduce consumption for 
        all periods after this date.
        """
        draws = tmp.shape[0]
        disaster = self._disaster_simulation()
        disaster_cons = self._disaster_cons_simulation()
        period_lengths = self.tree.decision_times[1:] - self.tree.decision_times[:-1]

        tmp_scale = np.maximum(self.peak_temp, tmp)
        ave_prob_of_survival = 1.0 - np.square(tmp / tmp_scale) 
        prob_of_survival = ave_prob_of_survival**(period_lengths / peak_temp_interval)
        # this part may be done better, this takes a long time to loop over
        res = prob_of_survival < disaster
        rows, cols = np.nonzero(res)
        row, count = np.unique(rows, return_counts=True)
        first_occurance = zip(row, cols[np.insert(count.cumsum()[:-1],0,0)])
        for pos in first_occurance:
            consump[pos[0], pos[1]:] *= np.exp(-disaster_cons[pos[0]])
        return consump
Project: microbiome-summer-school-2017    Author: aldro61    | project source | file source
def compute_P(max_string_length, sigma_position):
    """
    P is a matrix that contains all possible position
    uncertainty values. This function pre-computes all
    possible values, since they are independent of
    the amino acid sequence.
    """

    P = np.zeros((max_string_length, max_string_length))

    for i in xrange(max_string_length):
        for j in xrange(max_string_length):
            P[i,j] = i-j

    P = np.square(P)
    P /= -2.0 * (sigma_position ** 2.0)
    P = np.exp(P)

    return P
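
The loop above builds P[i, j] = exp(-(i - j)^2 / (2 * sigma_position^2)). A broadcast-based equivalent sketch, assuming the same arguments:

import numpy as np

def compute_P_vectorized(max_string_length, sigma_position):
    idx = np.arange(max_string_length, dtype=float)
    # (i - j)^2 via broadcasting, then the Gaussian position-uncertainty weight
    D2 = np.square(idx[:, None] - idx[None, :])
    return np.exp(-D2 / (2.0 * sigma_position ** 2.0))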
Project: microbiome-summer-school-2017    Author: aldro61    | project source | file source
def compute_psi_dict(amino_acids, aa_descriptors):
    """
    This function pre-computes the squared Euclidean distance
    between all amino acid descriptors and stores the distances
    in a hash table for easy and fast access during the
    GS kernel computation.

    amino_acids -- List of all amino acids in aa_descriptors

    aa_descriptors -- The i-th row of this matrix contain the
        descriptors of the i-th amino acid of amino_acids list.
    """

    # For every amino acid pair (a_1, a_2), psiDict is a hash table
    # that contains the squared Euclidean distance between the descriptors
    # of a_1 and a_2
    psiDict = {}

    # Fill the hash table psiDict
    for i in xrange(len(amino_acids)):
        for j in xrange(len(amino_acids)):
            c = aa_descriptors[i] - aa_descriptors[j]
            psiDict[amino_acids[i], amino_acids[j]] = np.dot(c,c)

    return psiDict
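
Note that np.dot(c, c) above is just the squared Euclidean distance between the two descriptor rows, i.e. np.square(c).sum():

import numpy as np

c = np.array([1.0, -2.0, 0.5])
assert np.isclose(np.dot(c, c), np.square(c).sum())   # squared Euclidean norm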
Project: ngraph    Author: NervanaSystems    | project source | file source
def __call__(self, input_data, weights):
        '''
        input_data in this case is a numpy array with batch_size on axis 1
        and weights is a matrix with 1 column
        '''
        if self.state is None:
            self.state = np.ones_like(weights)

        if self.velocity is None:
            self.velocity = np.zeros_like(weights)

        gradient = - input_data.mean(axis=1)

        self.state[:] = self.decay_rate * self.state + \
            (1.0 - self.decay_rate) * np.square(gradient)

        self.velocity = self.velocity * self.momentum + \
            self.learning_rate * gradient / np.sqrt(self.state + self.epsilon) + \
            self.learning_rate * self.wdecay * weights
        weights[:] = weights - self.velocity

        return weights
Project: ngraph    Author: NervanaSystems    | project source | file source
def __call__(self, input_data, weights):
        '''
        input_data in this case is a numpy array with batch_size on axis 1
        and weights is a matrix with 1 column
        '''
        if self.state is None:
            self.state = np.zeros_like(weights)

        gradient = - input_data.mean(axis=1)

        self.state[:] = self.state + np.square(gradient)

        weights[:] = weights \
            - gradient * self.learning_rate / (np.sqrt(self.state + self.epsilon))

        return weights
Project: tensorpac    Author: EtienneCmb    | project source | file source
def n_even_fcn(f, o, w, l):
    """Even case."""
    # Variables :
    k = np.array(range(0, int(l) + 1, 1)) + 0.5
    b = np.zeros(k.shape)

    # # Run Loop :
    for s in range(0, len(f), 2):
        m = (o[s + 1] - o[s]) / (f[s + 1] - f[s])
        b1 = o[s] - m * f[s]
        b = b + (m / (4 * np.pi * np.pi) * (np.cos(2 * np.pi * k * f[
            s + 1]) - np.cos(2 * np.pi * k * f[s])) / (
            k * k)) * abs(np.square(w[round((s + 1) / 2)]))
        b = b + (f[s + 1] * (m * f[s + 1] + b1) * np.sinc(2 * k * f[
            s + 1]) - f[s] * (m * f[s] + b1) * np.sinc(2 * k * f[s])) * abs(
            np.square(w[round((s + 1) / 2)]))

    a = (np.square(w[0])) * 4 * b
    h = 0.5 * np.concatenate((np.flipud(a), a))

    return h
Project: brainpipe    Author: EtienneCmb    | project source | file source
def NevenFcn(F, M, W, L):  # N is even
    # Variables :
    k = np.array(range(0, int(L) + 1, 1)) + 0.5
    b = np.zeros(k.shape)

    # # Run Loop :
    for s in range(0, len(F), 2):
        m = (M[s + 1] - M[s]) / (F[s + 1] - F[s])
        b1 = M[s] - m * F[s]
        b = b + (m / (4 * np.pi * np.pi) * (np.cos(2 * np.pi * k * F[
            s + 1]) - np.cos(2 * np.pi * k * F[s])) / (
            k * k)) * abs(np.square(W[round((s + 1) / 2)]))
        b = b + (F[s + 1] * (m * F[s + 1] + b1) * np.sinc(2 * k * F[
          s + 1]) - F[s] * (m * F[s] + b1) * np.sinc(2 * k * F[s])) * abs(
            np.square(W[round((s + 1) / 2)]))

    a = (np.square(W[0])) * 4 * b
    h = 0.5 * np.concatenate((np.flipud(a), a))

    return h


####################################################################
# - Filt the signal :
####################################################################
Project: brainpipe    Author: EtienneCmb    | project source | file source
def morlet(x, Fs, f, wavelet_width=7):
    dt = 1/Fs
    sf = f/wavelet_width
    st = 1/(2*np.pi*sf)
    N, nepoch = x.shape

    t = np.arange(-3.5*st, 3.5*st, dt)

    A = 1/(st*np.sqrt(np.pi))**(1/2)
    m = A*np.exp(-np.square(t)/(2*st**2))*np.exp(1j*2*np.pi*f*t)

    xMorlet = np.zeros((N, nepoch))
    for k in range(0, nepoch):
        y = 2*np.abs(np.convolve(x[:, k], m))/Fs
        xMorlet[:, k] = y[int(np.ceil(len(m)/2))-1:int(len(y)-np.floor(
            len(m)/2))]

    return xMorlet
Project: Python-Machine-Learning-Cookbook    Author: PacktPublishing    | project source | file source
def euclidean_score(dataset, user1, user2):
    if user1 not in dataset:
        raise TypeError('User ' + user1 + ' not present in the dataset')

    if user2 not in dataset:
        raise TypeError('User ' + user2 + ' not present in the dataset')

    # Movies rated by both user1 and user2
    rated_by_both = {} 

    for item in dataset[user1]:
        if item in dataset[user2]:
            rated_by_both[item] = 1

    # If there are no common movies, the score is 0 
    if len(rated_by_both) == 0:
        return 0

    squared_differences = [] 

    for item in dataset[user1]:
        if item in dataset[user2]:
            squared_differences.append(np.square(dataset[user1][item] - dataset[user2][item]))

    return 1 / (1 + np.sqrt(np.sum(squared_differences)))
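
A small usage sketch with a hypothetical ratings dictionary; one shared movie differs by 0.5, so the score is 1 / (1 + 0.5):

ratings = {
    'Alice': {'Movie A': 2.5, 'Movie B': 3.5},
    'Bob':   {'Movie A': 3.0, 'Movie B': 3.5},
}
print(euclidean_score(ratings, 'Alice', 'Bob'))   # 1 / 1.5 = 0.667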
Project: multiagent-particle-envs    Author: openai    | project source | file source
def agent_reward(self, agent, world):
        # Agents are negatively rewarded if caught by adversaries
        rew = 0
        shape = False
        adversaries = self.adversaries(world)
        if shape:  # reward can optionally be shaped (increased reward for increased distance from adversary)
            for adv in adversaries:
                rew += 0.1 * np.sqrt(np.sum(np.square(agent.state.p_pos - adv.state.p_pos)))
        if agent.collide:
            for a in adversaries:
                if self.is_collision(a, agent):
                    rew -= 10

        # agents are penalized for exiting the screen, so that they can be caught by the adversaries
        def bound(x):
            if x < 0.9:
                return 0
            if x < 1.0:
                return (x - 0.9) * 10
            return min(np.exp(2 * x - 2), 10)
        for p in range(world.dim_p):
            x = abs(agent.state.p_pos[p])
            rew -= bound(x)

        return rew
Project: multiagent-particle-envs    Author: openai    | project source | file source
def benchmark_data(self, agent, world):
        rew = 0
        collisions = 0
        occupied_landmarks = 0
        min_dists = 0
        for l in world.landmarks:
            dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
            min_dists += min(dists)
            rew -= min(dists)
            if min(dists) < 0.1:
                occupied_landmarks += 1
        if agent.collide:
            for a in world.agents:
                if self.is_collision(a, agent):
                    rew -= 1
                    collisions += 1
        return (rew, collisions, min_dists, occupied_landmarks)
Project: multiagent-particle-envs    Author: openai    | project source | file source
def get_collision_force(self, entity_a, entity_b):
        if (not entity_a.collide) or (not entity_b.collide):
            return [None, None] # not a collider
        if (entity_a is entity_b):
            return [None, None] # don't collide against itself
        # compute actual distance between entities
        delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
        dist = np.sqrt(np.sum(np.square(delta_pos)))
        # minimum allowable distance
        dist_min = entity_a.size + entity_b.size
        # softmax penetration
        k = self.contact_margin
        penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
        force = self.contact_force * delta_pos / dist * penetration
        force_a = +force if entity_a.movable else None
        force_b = -force if entity_b.movable else None
        return [force_a, force_b]
Project: pyflux    Author: RJT1990    | project source | file source
def squared_loss(data, predictions):
        """ Calculates squared loss

        Parameters
        ----------
        data : np.ndarray
            Univariate data

        predictions : np.ndarray
            Univariate predictions

        Returns
        ----------
        - np.ndarray of the squared loss
        """
        return np.square(data-predictions)
Project: rllabplusplus    Author: shaneshixiang    | project source | file source
def step(self, action):
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        forward_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = 0.5 * 1e-3 * np.sum(
            np.square(np.clip(self.model.data.cfrc_ext, -1, 1)))
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        notdone = np.isfinite(state).all() \
            and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)
Project: rllabplusplus    Author: shaneshixiang    | project source | file source
def step(self, action):
        self.forward_dynamics(action)
        next_obs = self.get_current_obs()

        alive_bonus = self.alive_bonus
        data = self.model.data

        comvel = self.get_body_comvel("torso")

        lin_vel_reward = comvel[0]
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = .5 * self.ctrl_cost_coeff * np.sum(
            np.square(action / scaling))
        impact_cost = .5 * self.impact_cost_coeff * np.sum(
            np.square(np.clip(data.cfrc_ext, -1, 1)))
        vel_deviation_cost = 0.5 * self.vel_deviation_cost_coeff * np.sum(
            np.square(comvel[1:]))
        reward = lin_vel_reward + alive_bonus - ctrl_cost - \
            impact_cost - vel_deviation_cost
        done = data.qpos[2] < 0.8 or data.qpos[2] > 2.0

        return Step(next_obs, reward, done)
Project: rllabplusplus    Author: shaneshixiang    | project source | file source
def kl_sym(self, old_dist_info_vars, new_dist_info_vars):
        old_means = old_dist_info_vars["mean"]
        old_log_stds = old_dist_info_vars["log_std"]
        new_means = new_dist_info_vars["mean"]
        new_log_stds = new_dist_info_vars["log_std"]
        """
        Compute the KL divergence of two multivariate Gaussian distributions with
        diagonal covariance matrices
        """
        old_std = TT.exp(old_log_stds)
        new_std = TT.exp(new_log_stds)
        # means: (N*A)
        # std: (N*A)
        # formula:
        # { (\mu_1 - \mu_2)^2 + \sigma_1^2 - \sigma_2^2 } / (2\sigma_2^2) +
        # ln(\sigma_2/\sigma_1)
        numerator = TT.square(old_means - new_means) + \
                    TT.square(old_std) - TT.square(new_std)
        denominator = 2 * TT.square(new_std) + 1e-8
        return TT.sum(
            numerator / denominator + new_log_stds - old_log_stds, axis=-1)