Python tensorflow module: select() example source code

The following code examples, extracted from open-source Python projects, illustrate how to use tensorflow.select().
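Before the examples, here is a minimal sketch of what tf.select does, written against the TensorFlow 0.x API these projects target (in TensorFlow 1.0+ the op was renamed, and tf.where(condition, x, y) is the drop-in replacement):

import tensorflow as tf

x = tf.constant([1.0, -2.0, 3.0, -4.0])
# Elementwise selection: take from the second argument where the condition
# is True and from the third where it is False (a hand-rolled ReLU).
y = tf.select(tf.greater(x, 0.0), x, tf.zeros_like(x))

with tf.Session() as sess:
    print(sess.run(y))  # [ 1.  0.  3.  0.]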

Project: onto-lstm    Author: pdasigi    | Project source | File source
def switch(condition, then_tensor, else_tensor):
    """
    Keras' implementation of switch for tensorflow uses tf.switch, which accepts only scalar conditions.
    It should use tf.select instead.
    """
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        condition_shape = condition.get_shape()
        input_shape = then_tensor.get_shape()
        if condition_shape[-1] != input_shape[-1] and condition_shape[-1] == 1:
            # This means the last dim is an embedding dim. Keras does not mask this dimension. But tf wants
            # the condition and the then and else tensors to be the same shape.
            condition = K.dot(tf.cast(condition, tf.float32), tf.ones((1, input_shape[-1])))
        return tf.select(tf.cast(condition, dtype=tf.bool), then_tensor, else_tensor)
    else:
        import theano.tensor as T
        return T.switch(condition, then_tensor, else_tensor)
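A hypothetical call for reference (tensor names and shapes are illustrative, not from the project): zeroing out masked timesteps with the switch() defined above.

# mask: (batch, time, 1); embeddings: (batch, time, dim). switch() dots the
# mask across the embedding dim before handing it to tf.select.
masked = switch(mask, embeddings, K.zeros_like(embeddings))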
Project: weightnorm    Author: openai    | Project source | File source
def sample_from_discretized_mix_logistic(l,nr_mix):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix*3])
    # sample mixture indicator from softmax
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
    sel = tf.reshape(sel, xs[:-1] + [1,nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:,:,:,:,:nr_mix]*sel,4)
    log_scales = tf.maximum(tf.reduce_sum(l[:,:,:,:,nr_mix:2*nr_mix]*sel,4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(l[:,:,:,:,2*nr_mix:3*nr_mix])*sel,4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales)*(tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:,:,:,0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(x[:,:,:,1] + coeffs[:,:,:,0]*x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(x[:,:,:,2] + coeffs[:,:,:,1]*x0 + coeffs[:,:,:,2]*x1, -1.), 1.)
    return tf.concat(3,[tf.reshape(x0,xs[:-1]+[1]), tf.reshape(x1,xs[:-1]+[1]), tf.reshape(x2,xs[:-1]+[1])])
Project: lang2program    Author: kelvinguu    | Project source | File source
def broadcast(tensor, target_tensor):
    """Broadcast a tensor to match the shape of a target tensor.

    Args:
        tensor (Tensor): tensor to be tiled
        target_tensor (Tensor): tensor whose shape is to be matched
    """
    rank = lambda t: t.get_shape().ndims
    assert rank(tensor) == rank(target_tensor)  # TODO: assert that tensors have no overlapping non-unity dimensions

    orig_shape = tf.shape(tensor)
    target_shape = tf.shape(target_tensor)

    # if dim == 1, set it to target_dim
    # else, set it to 1
    tiling_factor = tf.select(tf.equal(orig_shape, 1), target_shape, tf.ones([rank(tensor)], dtype=tf.int32))
    broadcasted = tf.tile(tensor, tiling_factor)

    # Add static shape information
    broadcasted.set_shape(target_tensor.get_shape())

    return broadcasted
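An illustrative call (placeholder names are hypothetical): tiling per-example weights of shape (batch, 1) out to match a (batch, 10) score tensor.

weights = tf.placeholder(tf.float32, [None, 1])
scores = tf.placeholder(tf.float32, [None, 10])
tiled_weights = broadcast(weights, scores)  # static shape (None, 10)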
Project: lang2program    Author: kelvinguu    | Project source | File source
def change_pad_value(values, mask, pad_val):
    """Given a set of values and a pad mask, change the value of all pad entries.

    Args:
        values (Tensor): of shape [batch_size, seq_length, :, ..., :].
        mask (Tensor): binary float tensor of shape [batch_size, seq_length]
        pad_val (float): value to set all pad entries to

    Returns:
        Tensor: a new Tensor of same shape as values
    """
    # broadcast the mask to match shape of values
    mask = expand_dims_for_broadcast(mask, values)  # (batch_size, seq_length, 1, ..., 1)
    mask = broadcast(mask, values)
    mask = tf.cast(mask, tf.bool)  # cast to bool

    # broadcast val
    broadcast_val = pad_val * tf.ones(tf.shape(values))

    new_values = tf.select(mask, values, broadcast_val)
    return new_values
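A typical use (hypothetical names; expand_dims_for_broadcast is a project helper not shown here): pushing pad positions to a large negative value so they never win a subsequent max.

# logits, mask: both (batch, seq_length); mask is 1 for real tokens, 0 for pads
safe_logits = change_pad_value(logits, mask, pad_val=-1e9)
best_per_example = tf.reduce_max(safe_logits, 1)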
Project: image_captioning    Author: AgrawalAmey    | Project source | File source
def backward(self):

        dx_flat = self.probs

        coords = tf.transpose(tf.pack([tf.range(self.N * self.T), self.y_flat]))
        binary_mask = tf.sparse_to_dense(coords, dx_flat.get_shape(), 1)
        # convert 1/0 to True/False
        binary_mask = tf.cast(binary_mask, tf.bool)
        decremented = dx_flat - 1
        # make new x out of old values or decremented values, depending on mask
        dx_flat = tf.select(binary_mask, decremented, dx_flat)
        dx_flat /= self.N
        dx_flat *= self.mask_flat[:, None]

        dx = tf.reshape(dx_flat, [self.N, self.T, self.V])

        return dx
Project: DDPG    Author: MOCR    | Project source | File source
def __init__(self, action_bounds):
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.sess = tf.Session()       

            self.action_size = len(action_bounds[0])

            self.action_input = tf.placeholder(tf.float32, [None, self.action_size])
            self.pmax = tf.constant(action_bounds[0], dtype = tf.float32)
            self.pmin = tf.constant(action_bounds[1], dtype = tf.float32)
            self.prange = tf.constant([x - y for x, y in zip(action_bounds[0],action_bounds[1])], dtype = tf.float32)
            self.pdiff_max = tf.div(-self.action_input+self.pmax, self.prange)
            self.pdiff_min = tf.div(self.action_input - self.pmin, self.prange)
            self.zeros_act_grad_filter = tf.zeros([self.action_size])
            self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])
            self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter), tf.mul(self.act_grad, self.pdiff_max), tf.mul(self.act_grad, self.pdiff_min))
Project: sparsecnn    Author: fkiaee    | Project source | File source
def block_shrinkage_conv(V,mu,rho):
    coef = 0.5
    V_shape = tf.shape(V); one_val = tf.constant(1.0) 
    b = tf.div(mu,rho)
    V_shape1 = tf.concat(0,[tf.mul(tf.slice(V_shape,[2],[1]),tf.slice(V_shape,[3],[1])),tf.mul(tf.slice(V_shape,[0],[1]),tf.slice(V_shape,[1],[1]))])
    V = tf.reshape(tf.transpose(V,perm=[2,3,0,1]),V_shape1)
    norm_V = frobenius_norm_block(V,1)  
    norm_V_per_dimension = tf.div(norm_V,tf.cast(tf.slice(V_shape1,[1],[1]),'float'))
    zero_part = tf.zeros(V_shape1)
    zero_ind = tf.greater_equal(b,norm_V_per_dimension)
    num_zero = tf.reduce_sum(tf.cast(zero_ind,'float'))
#    f4 = lambda: tf.greater_equal(tf.truediv(tf.add(tf.reduce_min(fro),tf.reduce_mean(fro)),2.0),fro)
    f4 = lambda: tf.greater_equal(tf.reduce_mean(norm_V),norm_V)
    f5 = lambda: zero_ind
    zero_ind = tf.cond(tf.greater(num_zero,tf.mul(coef,tf.cast(V_shape1[0],'float'))),f4,f5)
    G = tf.select(zero_ind,zero_part,tf.mul(tf.sub(one_val,tf.div(b,tf.reshape(norm_V,[-1,1]))),V)) 
    G_shape = tf.concat(0,[tf.slice(V_shape,[2],[1]),tf.slice(V_shape,[3],[1]),tf.slice(V_shape,[0],[1]),tf.slice(V_shape,[1],[1])])
    G = tf.transpose(tf.reshape(G,G_shape),perm=[2,3,0,1])
    return G,zero_ind
Project: sparsecnn    Author: fkiaee    | Project source | File source
def block_truncate_conv(V,mu,rho):
    coef = 0.5
    V_shape = tf.shape(V) 
    b = tf.sqrt(tf.div(tf.mul(2.,mu),rho)) #threshold 
    # Reshape the 4D tensor of weights to a 2D matrix with rows containing the conv filters in vectorized form.
    V_shape1 = tf.concat(0,[tf.mul(tf.slice(V_shape,[2],[1]),tf.slice(V_shape,[3],[1])),tf.mul(tf.slice(V_shape,[0],[1]),tf.slice(V_shape,[1],[1]))])
    V = tf.reshape(tf.transpose(V,perm=[2,3,0,1]),V_shape1)
    norm_V = frobenius_norm_block(V,1)  
    norm_V_per_dimension = tf.div(norm_V,tf.cast(tf.slice(V_shape1,[1],[1]),'float'))
    # Implementation of Eq. 10 in the paper, using an if-condition inside the TensorFlow graph with tf.cond
    zero_part = tf.zeros(V_shape1)
    zero_ind = tf.greater_equal(b,norm_V_per_dimension)
    num_zero = tf.reduce_sum(tf.cast(zero_ind,'float'))
    # You can pass parameters to the functions in tf.cond() using lambda
    f4 = lambda: tf.greater_equal(tf.reduce_mean(norm_V),norm_V)
    f5 = lambda: zero_ind
    zero_ind = tf.cond(tf.greater(num_zero,tf.mul(coef,tf.cast(V_shape1[0],'float'))),f4,f5)
    G = tf.select(zero_ind,zero_part,V) 
    G_shape = tf.concat(0,[tf.slice(V_shape,[2],[1]),tf.slice(V_shape,[3],[1]),tf.slice(V_shape,[0],[1]),tf.slice(V_shape,[1],[1])])
    G = tf.transpose(tf.reshape(G,G_shape),perm=[2,3,0,1])
    return G,zero_ind
Project: tfkaldi    Author: vrenkens    | Project source | File source
def _apply_func(self, activations, is_training, reuse):
        '''
        apply own functionality
        Args:
            activations: the inputs to the wrapped activation function
            is_training: whether or not the network is in training mode
            reuse: whether or not the variables in the network should be reused
        Returns:
            the output of the activation function
        '''

        with tf.variable_scope('l2_norm', reuse=reuse):
            #compute the mean squared value
            sig = tf.reduce_mean(tf.square(activations), 1, keep_dims=True)

            #divide the input by the mean squared value
            normalized = activations/sig

            #if the mean squared value is larger than one, select the normalized
            #value; otherwise select the unnormalized one
            return tf.select(tf.greater(tf.reshape(sig, [-1]), 1),
                             normalized, activations)
Project: tensorflow-litterbox    Author: rwightman    | Project source | File source
def _compute_huber(predictions, labels, delta=1.0):
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = tf.to_float(predictions)
    labels = tf.to_float(labels)
    delta = tf.to_float(delta)

    diff = predictions - labels
    diff_abs = tf.abs(diff)
    delta_fact = 0.5 * tf.square(delta)
    condition = tf.less(diff_abs, delta)
    left_opt = 0.5 * tf.square(diff)
    right_opt = delta * diff_abs - delta_fact
    losses_val = tf.select(condition, left_opt, right_opt)
    return losses_val
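A quick numeric check of the two branches (inputs chosen for illustration): with delta = 1.0, a residual of 0.5 falls in the quadratic branch and a residual of 3.0 in the linear branch.

preds = tf.constant([0.5, 3.0])
labels = tf.constant([0.0, 0.0])
losses = _compute_huber(preds, labels, delta=1.0)
# losses evaluates to [0.125, 2.5]: 0.5 * 0.5**2 versus 1.0 * 3.0 - 0.5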


# Returns non-reduced tensor of unweighted losses with batch dimension matching inputs
Project: VGG    Author: jackfan00    | Project source | File source
def yoloconfidloss(y_true, y_pred, t):
    real_y_true = tf.select(t, y_true, K.zeros_like(y_true))
    pobj = K.sigmoid(y_pred)
    lo = K.square(real_y_true-pobj)
    value_if_true = lamda_confid_obj*(lo)
    value_if_false = lamda_confid_noobj*(lo)
    loss1 = tf.select(t, value_if_true, value_if_false)

    loss = K.mean(loss1) 
    #
    noobj = tf.select(t, K.zeros_like(y_pred), pobj)
    noobjcount = tf.select(t, K.zeros_like(y_pred), K.ones_like(y_pred))
    ave_anyobj = K.sum(noobj) / K.sum(noobjcount)
    #ave_anyobj = K.mean(pobj)
    obj = tf.select(t, pobj, K.zeros_like(y_pred))
    objcount = tf.select(t, K.ones_like(y_pred), K.zeros_like(y_pred))
    #ave_obj = K.mean( K.sum(obj, axis=1) / (K.sum(objcount, axis=1)+0.000001) ) # prevent div 0
    ave_obj =  K.sum(obj) / (K.sum(objcount)+0.000001)  # prevent div 0
    return loss, ave_anyobj, ave_obj

# shape is (gridcells*2,)
Project: VGG    Author: jackfan00    | Project source | File source
def yoloclassloss(y_true, y_pred, t):
    lo = K.square(y_true-y_pred)
    value_if_true = lamda_class*(lo)
    value_if_false = K.zeros_like(y_true)
    loss1 = tf.select(t, value_if_true, value_if_false)
    # only extract predicted class value at obj location
    cat = K.sum(tf.select(t, y_pred, K.zeros_like(y_pred)), axis=1)
    # check valid class value
    objsum = K.sum(y_true, axis=1)
    # if objsum > 0.5, it contains at least one valid obj (may be 1, 2, ... objs)
    isobj = K.greater(objsum, 0.5)
    # only extract class value at obj location
    valid_cat = tf.select(isobj, cat, K.zeros_like(cat))
    # prevent div 0
    ave_cat = tf.select(K.greater(K.sum(objsum),0.5), K.sum(valid_cat) / K.sum(objsum) , -1)
    return K.mean(loss1), ave_cat
Project: dem    Author: hengyuan-hu    | Project source | File source
def sample(self):
        """Define the computation graph for one hmc sampling."""
        accept_rate, new_pos = hmc_sample(
            self.pos, self.stepsize, self.num_steps, self.potential_fn
        )
        new_avg_accept_rate = tf.add(
            self.avg_accept_slowness * self.avg_accept_rate,
            (1.0 - self.avg_accept_slowness) * accept_rate
        )
        new_stepsize = tf.select(new_avg_accept_rate > self.target_accept_rate,
                                 self.stepsize * self.stepsize_inc,
                                 self.stepsize * self.stepsize_dec)
        new_stepsize = tf.clip_by_value(
            new_stepsize, self.stepsize_min, self.stepsize_max
        )
        updates = [self.pos.assign(new_pos),
                   self.stepsize.assign(new_stepsize),
                   self.avg_accept_rate.assign(new_avg_accept_rate)]
        return new_pos, updates


# test =================
Project: ternarynet    Author: czhu95    | Project source | File source
def p_ternarize(x, p):

    x = tf.tanh(x)
    shape = x.get_shape()

    thre = tf.get_variable('T', trainable=False, collections=[tf.GraphKeys.VARIABLES, 'thresholds'],
            initializer=0.05)
    flat_x = tf.reshape(x, [-1])
    k = int(flat_x.get_shape().dims[0].value * (1 - p))
    topK, _ = tf.nn.top_k(tf.abs(flat_x), k)
    update_thre = thre.assign(topK[-1])
    tf.add_to_collection('update_thre_op', update_thre)

    mask = tf.zeros(shape)
    mask = tf.select((x > thre) | (x < -thre), tf.ones(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask)

    tf.histogram_summary(w.name, w)
    return w
Project: ternarynet    Author: czhu95    | Project source | File source
def tw_ternarize(x, thre):

    shape = x.get_shape()

    thre_x = tf.stop_gradient(tf.reduce_max(tf.abs(x)) * thre)

    w_p = tf.get_variable('Wp', collections=[tf.GraphKeys.VARIABLES, 'positives'], initializer=1.0)
    w_n = tf.get_variable('Wn', collections=[tf.GraphKeys.VARIABLES, 'negatives'], initializer=1.0)

    tf.scalar_summary(w_p.name, w_p)
    tf.scalar_summary(w_n.name, w_n)

    mask = tf.ones(shape)
    mask_p = tf.select(x > thre_x, tf.ones(shape) * w_p, mask)
    mask_np = tf.select(x < -thre_x, tf.ones(shape) * w_n, mask_p)
    mask_z = tf.select((x < thre_x) & (x > - thre_x), tf.zeros(shape), mask)

    with G.gradient_override_map({"Sign": "Identity", "Mul": "Add"}):
        w =  tf.sign(x) * tf.stop_gradient(mask_z)

    w = w * mask_np

    tf.histogram_summary(w.name, w)
    return w
Project: neural-abstract-anaphora    Author: amarasovic    | Project source | File source
def tf_nan_to_zeros_float32(tensor):
    """
    Mask NaN values with zeros
    :param tensor that might have Nan values
    :return: tensor with replaced Nan values with zeros
    """
    return tf.select(tf.is_nan(tensor), tf.zeros(tf.shape(tensor), dtype=tf.float32), tensor)
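An illustrative use (names are hypothetical): 0/0 in an elementwise ratio produces NaN, which this helper turns into a harmless zero before reducing.

ratio = numerator / denominator  # NaN wherever 0/0 occurs
total = tf.reduce_sum(tf_nan_to_zeros_float32(ratio))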
Project: deep-q-learning    Author: alvinwan    | Project source | File source
def huber_loss(x, delta=1.0):
    # https://en.wikipedia.org/wiki/Huber_loss
    return tf.select(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta)
    )
Project: ddpg-aigym    Author: stevenpjg    | Project source | File source
def __init__(self, action_bounds):

        self.sess = tf.InteractiveSession()       

        self.action_size = len(action_bounds[0])

        self.action_input = tf.placeholder(tf.float32, [None, self.action_size])
        self.pmax = tf.constant(action_bounds[0], dtype = tf.float32)
        self.pmin = tf.constant(action_bounds[1], dtype = tf.float32)
        self.prange = tf.constant([x - y for x, y in zip(action_bounds[0],action_bounds[1])], dtype = tf.float32)
        self.pdiff_max = tf.div(-self.action_input+self.pmax, self.prange)
        self.pdiff_min = tf.div(self.action_input - self.pmin, self.prange)
        self.zeros_act_grad_filter = tf.zeros([self.action_size])
        self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])
        self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter), tf.mul(self.act_grad, self.pdiff_max), tf.mul(self.act_grad, self.pdiff_min))
Project: Face-Pose-Net    Author: fengju514    | Project source | File source
def _relu(self, x, leakiness=0.0):
    """Relu, with optional leaky support."""
    return tf.select(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
Project: RFR-solution    Author: baoblackcoal    | Project source | File source
def clipped_error(x):
  # Huber loss
  try:
    return tf.select(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
  except:
    return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
  # return 0.5 * tf.square(x)
Project: mnist_LeNet    Author: LuxxxLucy    | Project source | File source
def sample_from_discretized_mix_logistic(l, nr_mix):
    ls = int_shape(l)
    xs = ls[:-1] + [3]
    # unpack parameters
    logit_probs = l[:, :, :, :nr_mix]
    l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
    # sample mixture indicator from softmax
    sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
        logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
    sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
    # select logistic parameters
    means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
    log_scales = tf.maximum(tf.reduce_sum(
        l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
    coeffs = tf.reduce_sum(tf.nn.tanh(
        l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
    # sample from logistic & clip to interval
    # we don't actually round to the nearest 8bit value when sampling
    u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
    x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
    x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
    x1 = tf.minimum(tf.maximum(
        x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
    x2 = tf.minimum(tf.maximum(
        x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
    return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
Project: keras    Author: GeekLiB    | Project source | File source
def elu(x, alpha=1.):
    '''Exponential linear unit.

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar
    '''
    res = tf.nn.elu(x)
    if alpha == 1:
        return res
    else:
        return tf.select(x > 0, res, alpha * res)
Project: keras    Author: GeekLiB    | Project source | File source
def random_binomial(shape, p=0.0, dtype=_FLOATX, seed=None):
    if seed is None:
        seed = np.random.randint(10e6)
    return tf.select(tf.random_uniform(shape, dtype=dtype, seed=seed) <= p,
                     tf.ones(shape, dtype=dtype),
                     tf.zeros(shape, dtype=dtype))

# CTC
# tensorflow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wrap up the beam search code that is
# in tensorflow's CTC implementation
Project: rl_algorithms    Author: DanielTakeshi    | Project source | File source
def huber_loss(x, delta=1.0):
    # https://en.wikipedia.org/wiki/Huber_loss
    return tf.select(
        tf.abs(x) < delta,
        tf.square(x) * 0.5,
        delta * (tf.abs(x) - 0.5 * delta)
    )
Project: keraflow    Author: ipod825    | Project source | File source
def random_binomial(self, shape, p=0.0, dtype=_FLOATX):
        return tf.select(tf.random_uniform(shape, dtype=dtype) <= p, tf.ones(shape), tf.zeros(shape))

    # NUMPY API
Project: TF-phrasecut-public    Author: chenxi116    | Project source | File source
def logistic_loss_cond(scores, labels):
    # Classification loss as the average of weighed per-score loss
    cond = tf.select(tf.equal(labels, tf.zeros(tf.shape(labels))), 
        tf.zeros(tf.shape(labels)),
        tf.nn.sigmoid_cross_entropy_with_logits(logits = scores, labels = labels)
        )
    cls_loss = tf.reduce_mean(tf.reduce_sum(cond, [1, 2, 3]))

    return cls_loss
Project: tensorflow-CWS-LSTM    Author: elvinpoon    | Project source | File source
def certainty(self):
        certainty = self.seg_prediction * tf.log(self.seg_prediction)
        certainty = -tf.reduce_sum(certainty,reduction_indices=2)
        s1 = tf.ones(tf.shape(certainty))
        csum = tf.cumsum(s1,axis=1)
        mask = tf.less_equal(csum,tf.cast(tf.tile(tf.expand_dims(self._length,1),[1,tf.shape(certainty)[1]]),tf.float32))
        mask = tf.select(mask, tf.ones(tf.shape(certainty)),
                  tf.zeros(tf.shape(certainty)))
        certainty *= mask
        certainty = tf.reduce_sum(certainty, reduction_indices=1)
        return certainty
Project: vae_renyi_divergence    Author: YingzhenLi    | Project source | File source
def encode(self, input, sampling):
        output = input
        for layer in self.D_layers:
            output = layer.encode(output)
        # now compute mu and sigma
        Mu = self.Mu_layer.encode(output)
        if sampling:
            shape = Mu.get_shape()
            eps = tf.random_uniform(shape)
            output = tf.select(eps - Mu <= 0, tf.ones(shape), tf.zeros(shape))
        else:
            output = Mu            
        return output, Mu
Project: vae_renyi_divergence    Author: YingzhenLi    | Project source | File source
def encode(self, input, sampling):
        output = input
        for layer in self.D_layers:
            output = layer.encode(output)
        # now compute mu
        Mu = self.Mu_layer.encode(output)
        if sampling:
            shape = Mu.get_shape()
            eps = tf.random_uniform(shape)
            output = tf.select(eps - Mu <= 0, tf.ones(shape), tf.zeros(shape))
        else:
            output = Mu
        output = output * 2.0 - 1.0           
        return output, Mu
Project: slither.ml    Author: MadcowD    | Project source | File source
def clipped_error(x):
  # Huber loss
  try:
    return tf.select(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
  except:
    return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
Project: neural-chat    Author: henriblancke    | Project source | File source
def beam_setup(self, time):

        emit_output = None
        next_cell_state = self.initial_state
        next_input = self.initial_input

        # Set up the beam search tracking state
        cand_symbols = tf.fill([self.batch_size_times_beam_size, 0], tf.constant(self.stop_token, dtype=tf.int32))
        cand_logprobs = tf.ones((self.batch_size_times_beam_size,), dtype=tf.float32) * -float('inf')

        first_in_beam_mask = tf.equal(tf.range(self.batch_size_times_beam_size) % self.beam_size, 0)

        beam_symbols = tf.fill([self.batch_size_times_beam_size, 0], tf.constant(self.stop_token, dtype=tf.int32))
        beam_logprobs = tf.select(
            first_in_beam_mask,
            tf.fill([self.batch_size_times_beam_size], 0.0),
            tf.fill([self.batch_size_times_beam_size], self.INVALID_SCORE)
        )

        # Set up correct dimensions for maintaining loop invariants.
        # Note that the last dimension (initialized to zero) is not a loop invariant,
        # so we need to clear it.
        # TODO: can shape inference be improved so that setting _shape is not necessary?
        cand_symbols._shape = tf.TensorShape((self.inferred_batch_size_times_beam_size, None))
        cand_logprobs._shape = tf.TensorShape((self.inferred_batch_size_times_beam_size,))
        beam_symbols._shape = tf.TensorShape((self.inferred_batch_size_times_beam_size, None))
        beam_logprobs._shape = tf.TensorShape((self.inferred_batch_size_times_beam_size,))

        next_loop_state = (
            cand_symbols,
            cand_logprobs,
            beam_symbols,
            beam_logprobs,
        )

        emit_output = tf.zeros(self.cell.output_size)
        elements_finished = tf.zeros([self.batch_size], dtype=tf.bool)

        return elements_finished, next_input, next_cell_state, emit_output, next_loop_state
Project: tensorflow-mnist-tutorial    Author: jaskru    | Project source | File source
def tf_format_mnist_images(X, Y, Y_, n=100, lines=10):
    correct_prediction = tf.equal(tf.argmax(Y,1), tf.argmax(Y_,1))
    correctly_recognised_indices = tf.squeeze(tf.where(correct_prediction), [1])  # indices of correctly recognised images
    incorrectly_recognised_indices = tf.squeeze(tf.where(tf.logical_not(correct_prediction)), [1]) # indices of incorrectly recognised images
    everything_incorrect_first = tf.concat(0, [incorrectly_recognised_indices, correctly_recognised_indices]) # images reordered with indices of unrecognised images first
    everything_incorrect_first = tf.slice(everything_incorrect_first, [0], [n]) # compute first 100 only - no space to display more anyway
    # compute n=100 digits to display only
    Xs = tf.gather(X, everything_incorrect_first)
    Ys = tf.gather(Y, everything_incorrect_first)
    Ys_ = tf.gather(Y_, everything_incorrect_first)
    correct_prediction_s = tf.gather(correct_prediction, everything_incorrect_first)

    digits_left = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_left())
    correct_tags = tf.gather(digits_left, tf.argmax(Ys_, 1)) # correct digits to be printed on the images
    digits_right = tf.image.grayscale_to_rgb(tensorflowvisu_digits.digits_right())
    computed_tags = tf.gather(digits_right, tf.argmax(Ys, 1)) # computed digits to be printed on the images
    #superimposed_digits = correct_tags+computed_tags
    superimposed_digits = tf.select(correct_prediction_s, tf.zeros_like(correct_tags),correct_tags+computed_tags) # only print the correct and computed digits on unrecognised images
    correct_bkg   = tf.reshape(tf.tile([1.3,1.3,1.3], [28*28]), [1, 28,28,3]) # white background
    incorrect_bkg = tf.reshape(tf.tile([1.3,1.0,1.0], [28*28]), [1, 28,28,3]) # red background
    recognised_bkg = tf.gather(tf.concat(0, [incorrect_bkg, correct_bkg]), tf.cast(correct_prediction_s, tf.int32)) # pick either the red or the white background depending on recognised status

    I = tf.image.grayscale_to_rgb(Xs)
    I = ((1-(I+superimposed_digits))*recognised_bkg)/1.3 # stencil extra data on top of images and reorder them unrecognised first
    I = tf.image.convert_image_dtype(I, tf.uint8, saturate=True)
    Islices = [] # 100 images => 10x10 image block
    for imslice in range(lines):
        Islices.append(tf.concat(1, tf.unpack(tf.slice(I, [imslice*n//lines,0,0,0], [n//lines,28,28,3]))))
    I = tf.concat(0, Islices)
    return I

# n = HISTOGRAM_BUCKETS (global)
# Buckets the data into n buckets so that there are an equal number of data points in
# each bucket. Returns n+1 bucket boundaries. Spreads the remainder data.size % n more
# or less evenly among the central buckets.
# data: 1-D ndarray containing float data, MUST BE SORTED in ascending order
#    n: integer, the number of desired output buckets
# return value: ndarray, 1-D vector of size n+1 containing the bucket boundaries
#               the first value is the min of the data, the last value is the max
Project: lsdc    Author: febert    | Project source | File source
def _get_sharding_func(size, num_shards):
    """Create sharding function for scatter update."""
    def func(ids):
      if num_shards == 1:
        return None, ids
      else:
        ids_per_shard = size // num_shards
        extras = size % num_shards
        assignments = tf.maximum(ids // (ids_per_shard + 1),
                                 (ids - extras) // ids_per_shard)
        new_ids = tf.select(assignments < extras,
                            ids % (ids_per_shard + 1),
                            (ids - extras) % ids_per_shard)
        return assignments, new_ids
    return func
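A worked example of the sharding arithmetic (sizes are hypothetical): with size = 10 and num_shards = 3, ids_per_shard = 3 and extras = 1, so shard 0 holds ids 0-3 and shards 1 and 2 hold three ids each. For ids = 4: assignments = max(4 // 4, (4 - 1) // 3) = 1, and since 1 < extras is false, new_ids = (4 - 1) % 3 = 0, i.e. id 4 becomes local id 0 on shard 1.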
Project: social-scene-understanding    Author: cvlab-epfl    | Project source | File source
def l1_robust_loss(predictions, targets, name=None):
  with tf.name_scope(name, 'HuberLoss', [predictions, targets]):
    delta = predictions - targets
    return tf.select(tf.abs(delta) < 1,
                     0.5 * tf.square(delta),
                     tf.abs(delta) - 0.5)
Project: keras-rl    Author: matthiasplappert    | Project source | File source
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity since TensorFlow has problems
        # if we compare `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        if hasattr(tf, 'select'):
            return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
        else:
            return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
    elif K.backend() == 'theano':
        from theano import tensor as T
        return T.switch(condition, squared_loss, linear_loss)
    else:
        raise RuntimeError('Unknown backend "{}".'.format(K.backend()))
Project: keras-customized    Author: ambrite    | Project source | File source
def elu(x, alpha=1.):
    '''Exponential linear unit.

    # Arguments
        x: Tensor to compute the activation function for.
        alpha: scalar
    '''
    res = tf.nn.elu(x)
    if alpha == 1:
        return res
    else:
        return tf.select(x > 0, res, alpha * res)
Project: keras-customized    Author: ambrite    | Project source | File source
def random_binomial(shape, p=0.0, dtype=None, seed=None):
    if dtype is None:
        dtype = floatx()
    if seed is None:
        seed = np.random.randint(10e6)
    return tf.select(tf.random_uniform(shape, dtype=dtype, seed=seed) <= p,
                     tf.ones(shape, dtype=dtype),
                     tf.zeros(shape, dtype=dtype))


# CTC
# tensorflow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wrap up the beam search code that is
# in tensorflow's CTC implementation
Project: Sing_Par    Author: wanghm92    | Project source | File source
def pseudo_predict(self, predictions, targets):
    """"""

    random_flip = tf.random_uniform(tf.shape(predictions))
    return tf.select(tf.greater(random_flip, self.global_sigmoid), predictions, targets)

  #=============================================================
Project: lang2program    Author: kelvinguu    | Project source | File source
def reduce_mean(seq_batch, allow_empty=False):
    """Compute the mean of each sequence in a SequenceBatch.

    Args:
        seq_batch (SequenceBatch): a SequenceBatch with the following attributes:
            values (Tensor): a Tensor of shape (batch_size, seq_length, :, ..., :)
            mask (Tensor): if the mask values are arbitrary floats (rather than binary), the mean will be
            a weighted average.
        allow_empty (bool): allow computing the average of an empty sequence. In this case, we assume 0/0 == 0, rather
            than NaN. Default is False, causing an error to be thrown.

    Returns:
        Tensor: of shape (batch_size, :, ..., :)
    """
    values, mask = seq_batch.values, seq_batch.mask
    # compute weights for the average
    sums = tf.reduce_sum(mask, 1, keep_dims=True)  # (batch_size, 1)

    if allow_empty:
        asserts = []  # no assertion
        sums = tf.select(tf.equal(sums, 0), tf.ones(tf.shape(sums)), sums)  # replace 0's with 1's
    else:
        asserts = [tf.assert_positive(sums)]  # throw error if 0's exist

    with tf.control_dependencies(asserts):
        weights = mask / sums  # (batch_size, seq_length)
    return weighted_sum(seq_batch, weights)
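An illustrative call (shapes are hypothetical; SequenceBatch is the project's wrapper holding values and mask):

# values: (batch_size, seq_length, dim); mask: (batch_size, seq_length) 0/1 floats
seq = SequenceBatch(values=values, mask=mask)
mean_embedding = reduce_mean(seq, allow_empty=True)  # (batch_size, dim)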
Project: cifar10-tensorflow    Author: namakemono    | Project source | File source
def _train(self, avg_loss):
        lr = tf.select(tf.less(self._global_step, 32000), 0.1, tf.select(tf.less(self._global_step, 48000), 0.01, 0.001))
        return tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(avg_loss, global_step=self._global_step)
Project: tensorprob    Author: tensorprob    | Project source | File source
def set_logp_to_neg_inf(X, logp, bounds):
    """Set `logp` to negative infinity when `X` is outside the allowed bounds.

    # Arguments
        X: tensorflow.Tensor
            The variable to apply the bounds to
        logp: tensorflow.Tensor
            The log probability corresponding to `X`
        bounds: list of `Region` objects
            The regions corresponding to the allowed regions of `X`

    # Returns
        logp: tensorflow.Tensor
            The newly bounded log probability
    """
    conditions = []
    for l, u in bounds:
        lower_is_neg_inf = not isinstance(l, tf.Tensor) and np.isneginf(l)
        upper_is_pos_inf = not isinstance(u, tf.Tensor) and np.isposinf(u)

        if not lower_is_neg_inf and upper_is_pos_inf:
            conditions.append(tf.greater(X, l))
        elif lower_is_neg_inf and not upper_is_pos_inf:
            conditions.append(tf.less(X, u))
        elif not (lower_is_neg_inf or upper_is_pos_inf):
            conditions.append(tf.logical_and(tf.greater(X, l), tf.less(X, u)))

    if len(conditions) > 0:
        is_inside_bounds = conditions[0]
        for condition in conditions[1:]:
            is_inside_bounds = tf.logical_or(is_inside_bounds, condition)

        logp = tf.select(
            is_inside_bounds,
            logp,
            tf.fill(tf.shape(X), config.dtype(-np.inf))
        )

    return logp
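A usage sketch (hypothetical; each bounds entry is treated as a (lower, upper) pair, matching the unpacking above): restricting a standard-normal log density to the positive half-line.

X = tf.placeholder(config.dtype, name='X')
logp = -0.5 * tf.square(X)
logp = set_logp_to_neg_inf(X, logp, [(0.0, np.inf)])  # -inf wherever X <= 0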
Project: TextGAN    Author: ankitkv    | Project source | File source
def _create_state(self, batch_size, dtype, cell_state=None):
        cand_symbols = tf.fill([batch_size, self.max_len], tf.constant(self.stop_token,
                                                                       dtype=tf.int32))
        cand_logprobs = tf.ones((batch_size,), dtype=tf.float32) * -float('inf')

        if cell_state is None:
            cell_state = self.cell.zero_state(batch_size*self.beam_size, dtype=dtype)
        else:
            cell_state = BeamDecoder._tile_along_beam(self.beam_size, cell_state)

        full_size = batch_size * self.beam_size
        first_in_beam_mask = tf.equal(tf.range(full_size) % self.beam_size, 0)

        beam_symbols = tf.fill([full_size, self.max_len], tf.constant(self.stop_token,
                                                                      dtype=tf.int32))
        beam_logprobs = tf.select(
            first_in_beam_mask,
            tf.fill([full_size], 0.0),
            tf.fill([full_size], -1e18),  # top_k does not play well with -inf
                                          # TODO: dtype-dependent value here
        )
        return (
            cand_symbols,
            cand_logprobs,
            beam_symbols,
            beam_logprobs,
            cell_state,
        )
Project: GSN    Author: peteykun    | Project source | File source
def binomial_draw(shape=[1], p=0.5, dtype='float32'):
  return tf.select(tf.less(tf.random_uniform(shape=shape, minval=0, maxval=1, dtype='float32'), tf.fill(shape, p)), tf.ones(shape, dtype=dtype), tf.zeros(shape, dtype=dtype))
Project: GSN    Author: peteykun    | Project source | File source
def salt_and_pepper(X, rate=0.3):
  a = binomial_draw(shape=tf.shape(X), p=1-rate, dtype='float32')
  b = binomial_draw(shape=tf.shape(X), p=0.5, dtype='float32')
  z = tf.zeros(tf.shape(X), dtype='float32')
  c = tf.select(tf.equal(a, z), b, z)
  return tf.add(tf.mul(X, a), c)

# Xavier Initializers
Project: GSN    Author: peteykun    | Project source | File source
def binomial_draw_vec(p_vec, dtype='float32'):
  shape = tf.shape(p_vec)
  return tf.select(tf.less(tf.random_uniform(shape=shape, minval=0, maxval=1, dtype='float32'), p_vec), tf.ones(shape, dtype=dtype), tf.zeros(shape, dtype=dtype))