Python theano.tensor module: inc_subtensor() example source code

We extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.inc_subtensor().
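A minimal, self-contained sketch of the basic call pattern, before the project examples (variable names are illustrative): inc_subtensor() returns a new symbolic tensor in which a slice has been incremented, while set_subtensor() overwrites the slice; neither modifies its input in place.

import numpy as np
import theano
import theano.tensor as T

# Illustrative sketch, not taken from any of the projects below.
x = T.vector('x')
y = T.inc_subtensor(x[:2], 10.0)   # add 10 to the first two elements
z = T.set_subtensor(y[-1:], 0.0)   # overwrite the last element with 0

f = theano.function([x], z)
print(f(np.arange(4, dtype=theano.config.floatX)))  # [10. 11.  2.  0.]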

Project: keras    Author: GeekLiB
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
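
One subtlety in the block above: in _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1]), the right-hand slice reads the values from before the increment, so each entry gains only its single left neighbor rather than a running sum. A standalone check of that semantics (assuming stock Theano behavior):

import numpy as np
import theano
import theano.tensor as T

# Standalone illustrative check, not part of the project code above.
v = T.vector('v')
pairwise = T.inc_subtensor(v[1:], v[:-1])  # v[i] + v[i-1], not a cumsum
f = theano.function([v], pairwise)
print(f(np.array([1., 2., 3., 4.], dtype=theano.config.floatX)))
# [1. 3. 5. 7.]  (a sequential in-place += would give [1. 3. 6. 10.])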
Project: aspect_adversarial    Author: yuanzh
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d
Project: icml17_knn    Author: taolei87
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
    has_momentum = momentum.get_value() > 0.0
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            if has_momentum:
                acc_slices = get_similar_subtensor(acc, indexes, p)
                new_acc = acc_slices*momentum + g
                updates[acc] = T.set_subtensor(acc_slices, new_acc)
            else:
                new_acc = g
            updates[origin] = T.inc_subtensor(p, - lr * new_acc)
        else:
            if has_momentum:
                new_acc = acc*momentum + g
                updates[acc] = new_acc
            else:
                new_acc = g
            updates[p] = p - lr * new_acc
Project: icml17_knn    Author: taolei87
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            #acc_slices = acc[indexes]
            acc_slices = get_similar_subtensor(acc, indexes, p)
            new_acc = acc_slices + g**2
            updates[acc] = T.set_subtensor(acc_slices, new_acc)
            updates[origin] = T.inc_subtensor(p, \
                    - lr * (g / T.sqrt(new_acc + eps)))
        else:
            new_acc = acc + g**2
            updates[acc] = new_acc
            updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
            #updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
            # which one to use?
Project: icml17_knn    Author: taolei87
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d
Project: KGP-ASR    Author: KGPML
def _labeling_batch_to_class_batch(y, y_labeling, num_classes,
                                   y_hat_mask=None):
    # FIXME: y_hat_mask is currently not used
    batch_size = y.shape[1]
    N = y_labeling.shape[0]
    n_labels = y.shape[0]
    # sum over all repeated labels
    # from (T, B, L) to (T, C, B)
    out = T.zeros((num_classes, batch_size, N))
    y_labeling = y_labeling.dimshuffle((2, 1, 0))  # L, B, T
    y_ = y

    def scan_step(index, prev_res, y_labeling, y_):
        res_t = T.inc_subtensor(prev_res[y_[index, T.arange(batch_size)],
                                T.arange(batch_size)],
                                y_labeling[index, T.arange(batch_size)])
        return res_t

    result, updates = theano.scan(scan_step,
                                  sequences=[T.arange(n_labels)],
                                  non_sequences=[y_labeling, y_],
                                  outputs_info=[out])
    # result will be (C, B, T) so we make it (T, B, C)
    return result[-1].dimshuffle(2, 1, 0)
Project: deep-learning-keras-projects    Author: jasmeetsb
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: neural-semantic-role-labeler    Author: hiroki13
def sgd(cost, params, emb=None, sub_emb=None, lr=0.1):
    updates = OrderedDict()

    """update sub-tensor of embeddings"""
    if emb:
        g = T.grad(cost, sub_emb)
        updates[emb] = T.inc_subtensor(sub_emb, -lr * g)

    """update parameters"""
    grads0 = T.grad(cost, params[0])
    for p, g in zip(params[0], grads0):
        updates[p] = p - lr * g

    """update parameters"""
    grads1 = T.grad(cost, params[1])
    for p, g in zip(params[1], grads1):
        updates[p] = p - lr * g

    return updates
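
The update updates[emb] = T.inc_subtensor(sub_emb, -lr * g) above writes gradients back only into the embedding rows that were actually looked up. A self-contained sketch of the same idea (all names are illustrative, not from the project):

import numpy as np
import theano
import theano.tensor as T

# Illustrative sparse-update sketch; sizes and names are made up.
emb = theano.shared(np.random.randn(5, 3).astype(theano.config.floatX), 'emb')
idxs = T.ivector('idxs')
sub_emb = emb[idxs]                      # only the rows used in this batch
cost = T.sum(sub_emb ** 2)
g = T.grad(cost, sub_emb)                # gradient w.r.t. the slice alone
train = theano.function([idxs], cost,
                        updates=[(emb, T.inc_subtensor(sub_emb, -0.1 * g))])
train(np.array([0, 2], dtype='int32'))   # touches rows 0 and 2 only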
Project: EUNN-theano    Author: iguanaus
def times_reflection(input, n_hidden, reflection):
    input_re = input[:, :n_hidden]
    input_im = input[:, n_hidden:]
    reflect_re = reflection[:n_hidden]
    reflect_im = reflection[n_hidden:]

    vstarv = (reflection**2).sum()

    input_re_reflect_re = T.dot(input_re, reflect_re)
    input_re_reflect_im = T.dot(input_re, reflect_im)
    input_im_reflect_re = T.dot(input_im, reflect_re)
    input_im_reflect_im = T.dot(input_im, reflect_im)

    a = T.outer(input_re_reflect_re - input_im_reflect_im, reflect_re)
    b = T.outer(input_re_reflect_im + input_im_reflect_re, reflect_im)
    c = T.outer(input_re_reflect_re - input_im_reflect_im, reflect_im)
    d = T.outer(input_re_reflect_im + input_im_reflect_re, reflect_re)

    output = input
    output = T.inc_subtensor(output[:, :n_hidden], - 2. / vstarv * (a + b))
    output = T.inc_subtensor(output[:, n_hidden:], - 2. / vstarv * (d - c))

    return output
Project: EUNN-theano    Author: iguanaus
def times_reflection(input, n_hidden, reflection):
    input_re = input[:, :n_hidden]
    input_im = input[:, n_hidden:]

    reflect_re = reflection[:n_hidden]
    reflect_im = reflection[n_hidden:]

    vstarv = (reflection**2).sum()

    input_re_reflect_re = K.dot(input_re, reflect_re)
    input_re_reflect_im = K.dot(input_re, reflect_im)
    input_im_reflect_re = K.dot(input_im, reflect_re)
    input_im_reflect_im = K.dot(input_im, reflect_im)

    a = Kouter(input_re_reflect_re - input_im_reflect_im, reflect_re)
    b = Kouter(input_re_reflect_im + input_im_reflect_re, reflect_im)
    c = Kouter(input_re_reflect_re - input_im_reflect_im, reflect_im)
    d = Kouter(input_re_reflect_im + input_im_reflect_re, reflect_re)

    output = input
    output = T.inc_subtensor(output[:, :n_hidden], - 2. / vstarv * (a + b))
    output = T.inc_subtensor(output[:, n_hidden:], - 2. / vstarv * (d - c))

    return output
Project: Generative-models    Author: aalitaiga
def apply(self, input_v, input_h):
        # Vertical stack
        v_nxn_out = self.vertical_conv_nxn.apply(input_v)
        # Different croppings are used depending on the row we wish to condition on
        v_nxn_out_to_h = v_nxn_out[:,:,:-(self.filter_size//2)-2,:]
        v_nxn_out_to_v = v_nxn_out[:,:,1:-(self.filter_size//2)-1,:]
        v_1x1_out = self.vertical_conv_1x1.apply(v_nxn_out_to_h)
        output_v = T.tanh(v_nxn_out_to_v[:,:self.num_filters,:,:]) * \
            T.nnet.sigmoid(v_nxn_out_to_v[:,self.num_filters:,:,:])

        # Horizontal stack
        h_1xn_out = self.horizontal_conv_1xn.apply(input_h)
        h_1xn_out = h_1xn_out[:,:,:,:-(self.filter_size//2)]
        h_sum = h_1xn_out + v_1x1_out
        h_activation = T.tanh(h_sum[:,:self.num_filters,:,:]) * \
            T.nnet.sigmoid(h_sum[:,self.num_filters:,:,:])
        h_1x1_out = self.horizontal_conv_1x1.apply(h_activation)
        if self.res:
            # input_h_padded = T.zeros(input_h.shape, dtype=theano.config.floatX)
            # input_h_padded = T.inc_subtensor(input_h_padded[:,:,3:,3:], input_h[:,:,:-3,:-3])
            # input_h = input_h_padded
            output_h = h_1x1_out #+ input_h
        else:
            output_h = h_1x1_out #h_activation
        return output_v, output_h
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp
def create_sgd_updates(updates, params, gparams, gsums, lr, momentum):
    has_momentum = momentum.get_value() > 0.0
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            if has_momentum:
                acc_slices = get_similar_subtensor(acc, indexes, p)
                new_acc = acc_slices*momentum + g
                updates[acc] = T.set_subtensor(acc_slices, new_acc)
            else:
                new_acc = g
            updates[origin] = T.inc_subtensor(p, - lr * new_acc)
        else:
            if has_momentum:
                new_acc = acc*momentum + g
                updates[acc] = new_acc
            else:
                new_acc = g
            updates[p] = p - lr * new_acc
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            #acc_slices = acc[indexes]
            acc_slices = get_similar_subtensor(acc, indexes, p)
            new_acc = acc_slices + g**2
            updates[acc] = T.set_subtensor(acc_slices, new_acc)
            updates[origin] = T.inc_subtensor(p, \
                    - lr * (g / T.sqrt(new_acc + eps)))
        else:
            new_acc = acc + g**2
            updates[acc] = new_acc
            updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
            #updates[p] = p - lr * (g / (T.sqrt(new_acc) + eps))
            # which one to use?
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp
def create_adadelta_updates(updates, params, gparams, gsums, xsums,\
                                lr, eps, rho):
    for p, g, gacc, xacc in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            gacc_slices = gacc[indexes]
            xacc_slices = xacc[indexes]
            new_gacc = rho * gacc_slices + (1.0-rho) * g**2
            d = -T.sqrt((xacc_slices + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc_slices + (1.0-rho) * d**2
            updates[gacc] = T.set_subtensor(gacc_slices, new_gacc)
            updates[xacc] = T.set_subtensor(xacc_slices, new_xacc)
            updates[origin] = T.inc_subtensor(p, d)
        else:
            new_gacc = rho * gacc + (1.0-rho) * g**2
            d = -T.sqrt((xacc + eps)/(new_gacc + eps)) * g
            new_xacc = rho * xacc + (1.0-rho) * d**2
            updates[gacc] = new_gacc
            updates[xacc] = new_xacc
            updates[p] = p + d
Project: keras-customized    Author: ambrite
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: DeepMonster    Author: olimastro
def apply(self, x):
        # first insert zeros between pixels
        # assumes the last 3 are c01
        shape = tuple(x.shape[i] for i in range(x.ndim-2))
        out = T.zeros(shape + (x.shape[-2] * 2, x.shape[-1] * 2), dtype=x.dtype)
        out = T.inc_subtensor(out[...,::2,::2], x)

        # blur with a Gaussian kernel convolution
        if x.ndim == 5 :
            out = out.reshape((x.shape[0]*x.shape[1],x.shape[2],out.shape[-2],out.shape[-1]))

        # this is necessary to avoid cross-channel shenanigans
        preconv = out.reshape((out.shape[0]*out.shape[1],1,out.shape[2],out.shape[3]))
        conved = T.nnet.conv2d(preconv, self.kernel, subsample=(1,1), border_mode='half')
        out = conved.reshape(out.shape)

        if x.ndim == 5 :
            out = out.reshape((x.shape[0],x.shape[1],x.shape[2],out.shape[-2],out.shape[-1]))
        return out


# not sharp enough
Project: keras    Author: NVIDIA
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: PixelVAE    Author: igul222
def Encoder(inputs):

    output = inputs

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.2', input_dim=DIM_1,      output_dim=DIM_2, filter_size=3, inputs=output, stride=2))

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.3', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.4', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, inputs=output, stride=2))

    # Pad from 7x7 to 8x8
    padded = T.zeros((output.shape[0], output.shape[1], 8, 8), dtype='float32')
    output = T.inc_subtensor(padded[:,:,:7,:7], output)

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.6', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, inputs=output, stride=2))

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.7', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.8', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, inputs=output))

    output = output.reshape((output.shape[0], -1))
    output = lib.ops.linear.Linear('Enc.Out', input_dim=4*4*DIM_4, output_dim=2*LATENT_DIM, inputs=output)
    return output[:, ::2], output[:, 1::2]
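
The 7x7-to-8x8 padding above is a common inc_subtensor idiom: scatter a tensor into a larger block of zeros. A stripped-down shape check, independent of the lib.ops helpers (illustrative):

import numpy as np
import theano
import theano.tensor as T

# Illustrative padding check; shapes chosen to mirror the encoder above.
x = T.tensor4('x')
padded = T.zeros((x.shape[0], x.shape[1], 8, 8), dtype=theano.config.floatX)
y = T.inc_subtensor(padded[:, :, :7, :7], x)

f = theano.function([x], y)
print(f(np.ones((2, 3, 7, 7), dtype=theano.config.floatX)).shape)  # (2, 3, 8, 8)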
Project: PixelVAE    Author: igul222
def Encoder(inputs):

    output = inputs

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.1', input_dim=N_CHANNELS, output_dim=DIM_1, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.2', input_dim=DIM_1,      output_dim=DIM_2, filter_size=3, inputs=output, stride=2))

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.3', input_dim=DIM_2, output_dim=DIM_2, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.4', input_dim=DIM_2, output_dim=DIM_3, filter_size=3, inputs=output, stride=2))

    # Pad from 7x7 to 8x8
    padded = T.zeros((output.shape[0], output.shape[1], 8, 8), dtype='float32')
    output = T.inc_subtensor(padded[:,:,:7,:7], output)

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.5', input_dim=DIM_3, output_dim=DIM_3, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.6', input_dim=DIM_3, output_dim=DIM_4, filter_size=3, inputs=output, stride=2))

    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.7', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, inputs=output))
    output = T.nnet.relu(lib.ops.conv2d.Conv2D('Enc.8', input_dim=DIM_4, output_dim=DIM_4, filter_size=3, inputs=output))

    output = output.reshape((output.shape[0], -1))
    output = lib.ops.linear.Linear('Enc.Out', input_dim=4*4*DIM_4, output_dim=2*LATENT_DIM, inputs=output)
    return output[:, ::2], output[:, 1::2]
Project: keras_superpixel_pooling    Author: parag2489
def call(self, inputs):
        # x = feature map
        # y = superpixel map, index from [0, n-1]
        x = inputs[0]  # batch_size x k x m x n
        y = inputs[1]  # batch_size x m x n
        ht = self.input_shapes[0][0]
        wd = self.input_shapes[0][1]
        z = K.zeros(shape=(self.batch_size, self.n_superpixels, self.n_features), dtype=float)
        # count = K.zeros(shape=(self.batch_size, self.n_superpixels, self.n_features), dtype=int)
        # for i in range(self.batch_size * ht * wd):  # these are the number of rows in positions
        z = T.inc_subtensor(z[self.superpixel_positions[:, 0], self.superpixel_positions[:, 1], :], x[self.positions[:, 0], :, self.positions[:, 1], self.positions[:, 2]])
        z /= self.superpixel_hist
        return z

    # def get_config(self):
        # config = {'n_superpixels': self.n_superpixels, 'n_features': self.n_features}
        # base_config = super(SuperpixelPooling, self).get_config()
        # return dict(list(base_config.items()) + list(config.items()))


# Aliases
Project: keras_superpixel_pooling    Author: parag2489
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: Theano-Deep-learning    Author: GeekLiB
def test_wrong_broadcast(self):
        a = tt.col()
        increment = tt.vector()

        # These symbolic graphs are legitimate as long as `increment` has
        # exactly one element, so a size mismatch should fail at runtime,
        # not at compile time.
        rng = numpy.random.RandomState(utt.fetch_seed())

        def rng_randX(*shape):
            return rng.rand(*shape).astype(theano.config.floatX)

        for op in (tt.set_subtensor, tt.inc_subtensor):
            for base in (a[:], a[0]):
                out = op(base, increment)
                f = theano.function([a, increment], out)
                # This one should work
                f(rng_randX(3, 1), rng_randX(1))
                # These ones should not
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(2))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(3))
                self.assertRaises(ValueError,
                                  f, rng_randX(3, 1), rng_randX(0))
Project: Theano-Deep-learning    Author: GeekLiB
def test_incsubtensor_mixed():

    # This catches a bug that occurred when incrementing
    # a float32 tensor by a float64 tensor.
    # The result is defined to be float32, so it is OK
    # to downcast the float64 increment in order to
    # transfer it to the GPU.
    # The bug was that the optimization called GpuFromHost
    # without casting first, causing the optimization to
    # fail.
    X = tensor.fmatrix()
    Y = tensor.dmatrix()
    Z = tensor.inc_subtensor(X[0:1, 0:1], Y)
    f = theano.function([X, Y], Z, mode=mode_with_gpu)
    packed, = f.maker.fgraph.inputs[1].clients
    client, idx = packed
    print(client)
    assert isinstance(client.op, tensor.Elemwise)
    assert isinstance(client.op.scalar_op, theano.scalar.Cast)
    packed, = client.outputs[0].clients
    client, idx = packed
    assert isinstance(client.op, cuda.GpuFromHost)
Project: Theano-Deep-learning    Author: GeekLiB
def test_inc_subtensor():
    cuda.shared_constructor
    # shared = tensor.shared
    x, y = T.fmatrices('x', 'y')
    xval = numpy.asarray(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype='float32')
    yval = numpy.asarray(
        [[10, 10, 10], [10, 10, 10], [10, 10, 10]], dtype='float32')
    expr = T.inc_subtensor(x[:, 1:3], y[:, 1:3])

    f = theano.function([x, y], expr, mode=mode_with_gpu)

    assert sum([isinstance(node.op, cuda.GpuIncSubtensor) and
                node.op.set_instead_of_inc is False
                for node in f.maker.fgraph.toposort()]) == 1
    utt.assert_allclose(f(xval, yval), [[1., 12., 13.],
                                        [4., 15., 16.], [7., 18., 19.]])
Project: ntm-one-shot    Author: tristandeleu
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], \
        nb_classes=5, nb_samples_per_class=10, batch_size=1):
    accuracy_0 = theano.shared(np.zeros((batch_size, nb_samples_per_class), \
        dtype=theano.config.floatX))
    indices_0 = theano.shared(np.zeros((batch_size, nb_classes), \
        dtype=np.int32))
    batch_range = T.arange(batch_size)
    def step_(p, t, acc, idx):
        acc = T.inc_subtensor(acc[batch_range, idx[batch_range, t]], T.eq(p, t))
        idx = T.inc_subtensor(idx[batch_range, t], 1)
        return (acc, idx)
    (raw_accuracy, _), _ = theano.foldl(step_, sequences=[predictions.dimshuffle(1, 0), \
        targets.dimshuffle(1, 0)], outputs_info=[accuracy_0, indices_0])
    accuracy = T.mean(raw_accuracy / nb_classes, axis=0)

    return accuracy
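
accuracy_instance above relies on advanced (integer-array) indexing inside inc_subtensor: unlike numpy's a[idx] += v, repeated indices accumulate, which is what makes it usable as a scatter-add. A small check of that behavior (assuming stock Theano semantics):

import numpy as np
import theano
import theano.tensor as T

# Standalone illustrative check of duplicate-index accumulation.
x = T.vector('x')
idx = np.array([0, 0, 2], dtype='int64')
vals = np.array([1., 1., 5.], dtype=theano.config.floatX)
f = theano.function([x], T.inc_subtensor(x[idx], vals))
print(f(np.zeros(3, dtype=theano.config.floatX)))  # [2. 0. 5.]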
Project: urnn    Author: stwisdom
def times_reflection(input, n_hidden, reflection):
    input_re = input[:, :n_hidden]
    input_im = input[:, n_hidden:]
    reflect_re = reflection[:n_hidden]
    reflect_im = reflection[n_hidden:]

    vstarv = (reflection**2).sum()

    input_re_reflect_re = T.dot(input_re, reflect_re)
    input_re_reflect_im = T.dot(input_re, reflect_im)
    input_im_reflect_re = T.dot(input_im, reflect_re)
    input_im_reflect_im = T.dot(input_im, reflect_im)

    a = T.outer(input_re_reflect_re - input_im_reflect_im, reflect_re)
    b = T.outer(input_re_reflect_im + input_im_reflect_re, reflect_im)
    c = T.outer(input_re_reflect_re - input_im_reflect_im, reflect_im)
    d = T.outer(input_re_reflect_im + input_im_reflect_re, reflect_re)

    output = input
    output = T.inc_subtensor(output[:, :n_hidden], - 2. / vstarv * (a + b))
    output = T.inc_subtensor(output[:, n_hidden:], - 2. / vstarv * (d - c))

    return output
Project: urnn    Author: stwisdom
def times_reflection(input, n_hidden, reflection):
    input_re = input[:, :n_hidden]
    input_im = input[:, n_hidden:]

    reflect_re = reflection[:n_hidden]
    reflect_im = reflection[n_hidden:]

    vstarv = (reflection**2).sum()

    input_re_reflect_re = K.dot(input_re, reflect_re)
    input_re_reflect_im = K.dot(input_re, reflect_im)
    input_im_reflect_re = K.dot(input_im, reflect_re)
    input_im_reflect_im = K.dot(input_im, reflect_im)

    a = Kouter(input_re_reflect_re - input_im_reflect_im, reflect_re)
    b = Kouter(input_re_reflect_im + input_im_reflect_re, reflect_im)
    c = Kouter(input_re_reflect_re - input_im_reflect_im, reflect_im)
    d = Kouter(input_re_reflect_im + input_im_reflect_re, reflect_re)

    output = input
    output = T.inc_subtensor(output[:, :n_hidden], - 2. / vstarv * (a + b))
    output = T.inc_subtensor(output[:, n_hidden:], - 2. / vstarv * (d - c))

    return output
Project: Fully-Connected-DenseNets-Semantic-Segmentation    Author: titu1994
def depth_to_space_th(input, scale, data_format=None):
    ''' Uses phase shift algorithm to convert channels/depth for spatial resolution '''
    import theano.tensor as T
    if data_format is None:
        data_format = K.image_dim_ordering()
    data_format = data_format.lower()
    input = K._preprocess_conv2d_input(input, data_format)

    b, k, row, col = input.shape
    output_shape = (b, k // (scale ** 2), row * scale, col * scale)

    out = T.zeros(output_shape)
    r = scale

    for y, x in itertools.product(range(scale), repeat=2):
        out = T.inc_subtensor(out[:, :, y::r, x::r], input[:, r * y + x:: r * r, :, :])

    out = K._postprocess_conv2d_output(out, input, None, None, None, data_format)
    return out
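
The loop above performs the subpixel/phase-shift rearrangement purely with inc_subtensor scatters. Stripped of the Keras pre/post-processing, the core looks like this (illustrative; assumes a channels-first layout whose channel count is divisible by scale**2):

import itertools
import numpy as np
import theano
import theano.tensor as T

# Illustrative core of the phase-shift upscale, without the Keras wrappers.
inp = T.tensor4('inp')  # (batch, channels, rows, cols)
r = 2                   # upscale factor
out = T.zeros((inp.shape[0], inp.shape[1] // (r * r),
               inp.shape[2] * r, inp.shape[3] * r))
for y, x in itertools.product(range(r), repeat=2):
    out = T.inc_subtensor(out[:, :, y::r, x::r], inp[:, r * y + x::r * r, :, :])

f = theano.function([inp], out)
print(f(np.ones((1, 4, 3, 3), dtype=theano.config.floatX)).shape)  # (1, 1, 6, 6)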
Project: Neural-Photo-Editor    Author: ajbrock
def _get_p(self, start_choice):
        start_choice_idx = (start_choice-1).astype('int32')
        p_vals = T.concatenate([T.zeros((start_choice_idx,)), (self._l * T.arange(start_choice, self._input_size, dtype=theano.config.floatX))])
        p_vals = T.inc_subtensor(p_vals[start_choice_idx], 1.)  # Stupid hack because the multinomial does not contain a safety for numerical imprecision.
        return p_vals
Project: aspect_adversarial    Author: yuanzh
def create_adagrad_updates(updates, params, gparams, gsums, lr, eps):
    for p, g, acc in zip(params, gparams, gsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            #acc_slices = acc[indexes]
            acc_slices = get_similar_subtensor(acc, indexes, p)
            new_acc = acc_slices + g**2
            updates[acc] = T.set_subtensor(acc_slices, new_acc)
            updates[origin] = T.inc_subtensor(p, \
                    - lr * (g / T.sqrt(new_acc + eps)))
        else:
            new_acc = acc + g**2
            updates[acc] = new_acc
            updates[p] = p - lr * (g / T.sqrt(new_acc + eps))
Project: aspect_adversarial    Author: yuanzh
def create_adam_updates(updates, params, gparams, gsums, xsums, \
                            lr, eps, beta1, beta2):
    i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
    i_t = i + 1.0
    omb1_t = 1.0 - beta1**i_t
    omb2_t = 1.0 - beta2**i_t
    lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
    for p, g, m, v in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            m_sub = m[indexes]
            v_sub = v[indexes]
            m_t = beta1*m_sub + (1.0-beta1)*g
            v_t = beta2*v_sub + (1.0-beta2)*T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = T.set_subtensor(m_sub, m_t)
            updates[v] = T.set_subtensor(v_sub, v_t)
            updates[origin] = T.inc_subtensor(p, -lr_t*g_t)
        else:
            m_t = beta1*m + (1.0-beta1)*g
            v_t = beta2*v + (1.0-beta2)*T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = m_t
            updates[v] = v_t
            updates[p] = p - lr_t*g_t
    updates[i] = i_t
Project: icml17_knn    Author: taolei87
def create_adam_updates(updates, params, gparams, gsums, xsums, \
                            lr, eps, beta1, beta2):
    i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
    i_t = i + 1.0
    omb1_t = 1.0 - beta1**i_t
    omb2_t = 1.0 - beta2**i_t
    lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
    for p, g, m, v in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            m_sub = m[indexes]
            v_sub = v[indexes]
            m_t = beta1*m_sub + (1.0-beta1)*g
            v_t = beta2*v_sub + (1.0-beta2)*T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = T.set_subtensor(m_sub, m_t)
            updates[v] = T.set_subtensor(v_sub, v_t)
            updates[origin] = T.inc_subtensor(p, -lr_t*g_t)
        else:
            m_t = beta1*m + (1.0-beta1)*g
            v_t = beta2*v + (1.0-beta2)*T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = m_t
            updates[v] = v_t
            updates[p] = p - lr_t*g_t
    updates[i] = i_t
Project: keras-tf-Super-Resolution    Author: olgaliak
def depth_to_scale_th(input, scale, channels):
    ''' Uses phase shift algorithm [1] to convert channels/depth for spatial resolution '''
    import theano.tensor as T

    b, k, row, col = input.shape
    output_shape = (b, channels, row * scale, col * scale)

    out = T.zeros(output_shape)
    r = scale

    for y, x in itertools.product(range(scale), repeat=2):
        out = T.inc_subtensor(out[:, :, y::r, x::r], input[:, r * y + x :: r * r, :, :])

    return out
Project: supic    Author: Hirico
def get_output_for(self, input, deterministic=False, **kwargs):
        out, r = T.zeros(self.get_output_shape_for(input.shape)), self.upscale
        for y, x in itertools.product(range(r), repeat=2):
            out=T.inc_subtensor(out[:,:,y::r,x::r], input[:,r*y+x::r*r,:,:])
        return out
Project: neural-semantic-role-labeler    Author: hiroki13
def ada_grad(cost, params, emb=None, sub_emb=None, w=None, lr=0.1, eps=1.):
    updates = OrderedDict()

    """update sub-tensor of embeddings"""
    if emb:
        p = emb
        g = T.grad(cost, sub_emb)
        r = build_shared_zeros(p.get_value(True).shape)
        r_sub = r[w]
        r_sub_t = r_sub + T.sqr(g)
        r_t = T.set_subtensor(r_sub, r_sub_t)
        p_t = T.inc_subtensor(sub_emb, - (lr / (T.sqrt(r_sub_t) + eps)) * g)
        updates[r] = r_t
        updates[p] = p_t

    """update parameters"""
    grads0 = T.grad(cost, params[0])
    for p, g in zip(params[0], grads0):
        r = build_shared_zeros(p.get_value(True).shape)
        r_t = r + T.sqr(g)
        p_t = p - (lr / (T.sqrt(r_t) + eps)) * g
        updates[r] = r_t
        updates[p] = p_t

    """update parameters"""
    grads1 = T.grad(cost, params[1])
    for p, g in zip(params[1], grads1):
        r = build_shared_zeros(p.get_value(True).shape)
        r_t = r + T.sqr(g)
        p_t = p - (lr / (T.sqrt(r_t) + eps)) * g
        updates[r] = r_t
        updates[p] = p_t
    return updates
Project: neural-semantic-role-labeler    Author: hiroki13
def ada_delta(cost, params, emb, x, w, b=0.999, eps=1e-8):
    updates = OrderedDict()
    grads = T.grad(cost, params)

    """update sub-tensor of embeddings"""
    p = emb
    g = T.grad(cost, x)

    r = build_shared_zeros(p.get_value(True).shape)
    v = build_shared_zeros(p.get_value(True).shape)
    s = build_shared_zeros(p.get_value(True).shape)
    r_sub = r[w]
    v_sub = v[w]
    s_sub = s[w]

    r_sub_t = b * r_sub + (1 - b) * T.sqr(g)
    v_sub_t = (T.sqrt(s_sub) + eps) / (T.sqrt(r_sub) + eps) * g
    s_sub_t = b * s_sub + (1 - b) * T.sqr(v_sub_t)
    updates[r] = T.set_subtensor(r_sub, r_sub_t)
    updates[v] = T.set_subtensor(v_sub, v_sub_t)
    updates[s] = T.set_subtensor(s_sub, s_sub_t)
    updates[p] = T.inc_subtensor(x, -v_sub_t)

    """update parameters"""
    for p, g in zip(params, grads):
        r = build_shared_zeros(p.get_value(True).shape)
        v = build_shared_zeros(p.get_value(True).shape)
        s = build_shared_zeros(p.get_value(True).shape)
        r_t = b * r + (1 - b) * T.sqr(g)
        v_t = (T.sqrt(s) + eps) / (T.sqrt(r) + eps) * g
        s_t = b * s + (1 - b) * T.sqr(v_t)
        p_t = p - v_t
        updates[r] = r_t
        updates[v] = v_t
        updates[s] = s_t
        updates[p] = p_t
    return updates
Project: EUNN-theano    Author: iguanaus
def times_reflection_sub(input, n_hidden, n_sub, reflection):

    #print "n_hidden=%d, n_sub=%d" % (n_hidden,n_sub)    
    input_re = input[:, :n_hidden]
    input_im = input[:, n_hidden:]
    n_start=n_hidden-n_sub
    #print "n_start=%d" % n_start
    reflect_re = reflection[n_start:n_hidden]
    reflect_im = reflection[(n_hidden+n_start):]

    vstarv = (reflect_re**2).sum() + (reflect_im**2).sum()

    input_re_reflect_re = T.dot(input_re[:,n_start:], reflect_re)
    input_re_reflect_im = T.dot(input_re[:,n_start:], reflect_im)
    input_im_reflect_re = T.dot(input_im[:,n_start:], reflect_re)
    input_im_reflect_im = T.dot(input_im[:,n_start:], reflect_im)

    a = T.outer(input_re_reflect_re - input_im_reflect_im, reflect_re)
    b = T.outer(input_re_reflect_im + input_im_reflect_re, reflect_im)
    c = T.outer(input_re_reflect_re - input_im_reflect_im, reflect_im)
    d = T.outer(input_re_reflect_im + input_im_reflect_re, reflect_re)

    output = input
    output = T.inc_subtensor(output[:, n_start:n_hidden], - 2. / vstarv * (a + b))
    output = T.inc_subtensor(output[:, (n_hidden+n_start):], - 2. / vstarv * (d - c))

    return output
Project: DeepRes    Author: Aneeshers
def get_output_for(self, input, deterministic=False, **kwargs):
        out, r = T.zeros(self.get_output_shape_for(input.shape)), self.upscale
        for y, x in itertools.product(range(r), repeat=2):
            out=T.inc_subtensor(out[:,:,y::r,x::r], input[:,r*y+x::r*r,:,:])
        return out
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp
def create_adam_updates(updates, params, gparams, gsums, xsums, \
                            lr, eps, beta1, beta2):
    i = theano.shared(np.float64(0.0).astype(theano.config.floatX))
    i_t = i + 1.0
    omb1_t = 1.0 - beta1**i_t
    omb2_t = 1.0 - beta2**i_t
    lr_t = lr * (T.sqrt(omb2_t) / omb1_t)
    for p, g, m, v in zip(params, gparams, gsums, xsums):
        if is_subtensor_op(p):
            origin, indexes = get_subtensor_op_inputs(p)
            m_sub = m[indexes]
            v_sub = v[indexes]
            m_t = beta1*m_sub + (1.0-beta1)*g
            v_t = beta2*v_sub + (1.0-beta2)*T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = T.set_subtensor(m_sub, m_t)
            updates[v] = T.set_subtensor(v_sub, v_t)
            updates[origin] = T.inc_subtensor(p, -lr_t*g_t)
        else:
            m_t = beta1*m + (1.0-beta1)*g
            v_t = beta2*v + (1.0-beta2)*T.sqr(g)
            g_t = m_t / (T.sqrt(v_t) + eps)
            updates[m] = m_t
            updates[v] = v_t
            updates[p] = p - lr_t*g_t
    updates[i] = i_t
Project: deeplift    Author: kundajelab
def set_subtensor(subtensor, amnt):
    return T.inc_subtensor(subtensor, amnt, set_instead_of_inc=True)
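
As the wrapper above suggests, T.set_subtensor behaves like inc_subtensor with set_instead_of_inc=True, so the two spellings are interchangeable (a quick illustrative check):

import numpy as np
import theano
import theano.tensor as T

# Both graphs compute the same result on any input.
x = T.vector('x')
a = T.set_subtensor(x[:2], 7.0)
b = T.inc_subtensor(x[:2], 7.0, set_instead_of_inc=True)
f = theano.function([x], [a, b])
print(f(np.zeros(3, dtype=theano.config.floatX)))  # two identical arrays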
Project: fg-gating    Author: kimiyoung
def get_output_for(self, input, **kwargs):
        def norm_fn(f, mask, label, previous, W_sim):
            # f: batch * class, mask: batch, label: batch, previous: batch * class, W_sim: class * class
            # previous: batch * class

            next = previous.dimshuffle(0, 1, 'x') + f.dimshuffle(0, 'x', 1) + W_sim.dimshuffle('x', 0, 1) # batch * class * class
            next = theano_logsumexp(next, axis = 1) # batch * class
            mask = mask.dimshuffle(0, 'x')
            next = previous * (1.0 - mask) + next * mask
            return next

        f = input # batch * time * class
        if self.end_points:
            for i in range(self.num_classes):
                f = T.inc_subtensor(f[:, 0, i], self.W_end_points[0, i])
                f = T.inc_subtensor(f[:, -1, i], self.W_end_points[1, i])

        initial = f[:, 0, :]
        outputs, _ = theano.scan(fn = norm_fn, \
         sequences = [f.dimshuffle(1, 0, 2)[1: ], self.mask_input.dimshuffle(1, 0)[1: ], self.label_input.dimshuffle(1, 0)[1:]], \
         outputs_info = initial, non_sequences = [self.W_sim], strict = True)
        norm = T.sum(theano_logsumexp(outputs[-1], axis = 1))

        f_pot = (f.reshape((-1, f.shape[-1]))[T.arange(f.shape[0] * f.shape[1]), self.label_input.flatten()] * self.mask_input.flatten()).sum()

        labels = self.label_input # batch * time
        shift_labels = T.roll(labels, -1, axis = 1)
        mask = self.mask_input # batch * time
        shift_mask = T.roll(mask, -1, axis = 1)

        g_pot = (self.W_sim[labels.flatten(), shift_labels.flatten()] * mask.flatten() * shift_mask.flatten()).sum()

        return - (f_pot + g_pot - norm) / f.shape[0] if self.normalize else - (f_pot + g_pot - norm)
Project: fg-gating    Author: kimiyoung
def get_output_for(self, input, **kwargs):
        def max_fn(f, mask, prev_score, prev_back, W_sim):
            next_score = prev_score.dimshuffle(0, 1, 'x') + f.dimshuffle(0, 'x', 1) + W_sim.dimshuffle('x', 0, 1)
            next_back = T.argmax(next_score, axis = 1)
            next_score = T.max(next_score, axis = 1)
            mask = mask.dimshuffle(0, 'x')
            next_score = next_score * mask + prev_score * (1.0 - mask)
            next_back = next_back * mask + prev_back * (1.0 - mask)
            next_back = T.cast(next_back, 'int32')
            return [next_score, next_back]

        def produce_fn(back, mask, prev_py):
            # back: batch * class, prev_py: batch, mask: batch
            next_py = back[T.arange(prev_py.shape[0]), prev_py]
            next_py = mask * next_py + (1.0 - mask) * prev_py
            next_py = T.cast(next_py, 'int32')
            return next_py

        f = input

        if self.end_points:
            for i in range(self.num_classes):
                f = T.inc_subtensor(f[:, 0, i], self.W_end_points[0, i])
                f = T.inc_subtensor(f[:, -1, i], self.W_end_points[1, i])

        init_score, init_back = f[:, 0, :], T.zeros_like(f[:, 0, :], dtype = 'int32')
        ([scores, backs], _) = theano.scan(fn = max_fn, \
            sequences = [f.dimshuffle(1, 0, 2)[1: ], self.mask_input.dimshuffle(1, 0)[1: ]], \
            outputs_info = [init_score, init_back], non_sequences = [self.W_sim], strict = True)

        init_py = T.argmax(scores[-1], axis = 1)
        init_py = T.cast(init_py, 'int32')
        # init_py: batch, backs: time * batch * class
        pys, _ = theano.scan(fn = produce_fn, \
            sequences = [backs, self.mask_input.dimshuffle(1, 0)[1:]], outputs_info = [init_py], go_backwards = True)
        # pys: (rev_time - 1) * batch
        pys = pys.dimshuffle(1, 0)[:, :: -1]
        # pys : batch * (time - 1)
        return T.concatenate([pys, init_py.dimshuffle(0, 'x')], axis = 1)
Project: neural-enhance    Author: alexjc
def get_output_for(self, input, deterministic=False, **kwargs):
        out, r = T.zeros(self.get_output_shape_for(input.shape)), self.upscale
        for y, x in itertools.product(range(r), repeat=2):
            out=T.inc_subtensor(out[:,:,y::r,x::r], input[:,r*y+x::r*r,:,:])
        return out
Project: Super-Resolution-using-Generative-Adversarial-Networks    Author: titu1994
def depth_to_scale_th(input, scale, channels):
    ''' Uses phase shift algorithm [1] to convert channels/depth for spatial resolution '''
    import theano.tensor as T

    b, k, row, col = input.shape
    output_shape = (b, channels, row * scale, col * scale)

    out = T.zeros(output_shape)
    r = scale

    for y, x in itertools.product(range(scale), repeat=2):
        out = T.inc_subtensor(out[:, :, y::r, x::r], input[:, r * y + x :: r * r, :, :])

    return out
Project: lstmprovisor-python    Author: Impro-Visor
def get_strengths_and_vects(self, input_activations):
        if self._pre_std is not None:
            input_activations = T.inc_subtensor(input_activations[:,:,self._pre_mask], self._srng.normal(input_activations.shape, self._pre_std))
        strengths, vects = self._inner_manager.get_strengths_and_vects(input_activations)
        if self._post_std is not None:
            vects = vects + self._srng.normal(vects.shape, self._post_std)
        return strengths, vects
Project: lstmprovisor-python    Author: Impro-Visor
def process(self, input_activations, extra_info=False):
        if self._pre_std is not None:
            input_activations = T.inc_subtensor(input_activations[:,:,self._pre_mask], self._srng.normal(input_activations.shape, self._pre_std))
        stuff = self._inner_manager.process(input_activations, extra_info)
        vects = stuff[2]
        if self._post_std is not None:
            vects = vects + self._srng.normal(vects.shape, self._post_std)
        return stuff[:2] + (vects,) + stuff[3:]
Project: Theano-Deep-learning    Author: GeekLiB
def test_incsub_f16():
    shp = (3, 3)
    shared = gpuarray_shared_constructor
    xval = numpy.arange(numpy.prod(shp), dtype='float16').reshape(shp) + 1
    yval = numpy.empty((2,) + shp[1:], dtype='float16')
    yval[:] = 2
    x = shared(xval, name='x')
    y = tensor.tensor(dtype='float16',
                      broadcastable=(False,) * len(shp),
                      name='y')
    expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[[0, 2]] += yval
    assert numpy.allclose(rval, rep)

    expr = tensor.inc_subtensor(x[1:], y)
    f = theano.function([y], expr, mode=mode_with_gpu)
    assert sum([isinstance(node.op, GpuIncSubtensor)
                for node in f.maker.fgraph.toposort()]) == 1
    rval = f(yval)
    rep = xval.copy()
    rep[1:] += yval
    assert numpy.allclose(rval, rep)
Project: Theano-Deep-learning    Author: GeekLiB
def test_incsubtensor1(self):
        tv = numpy.asarray(self.rng.uniform(size=(3,)),
                           theano.config.floatX)
        t = theano.shared(tv)
        out = tensor.inc_subtensor(self.x[:3], t)
        self.check_rop_lop(out, self.in_shape)
Project: Theano-Deep-learning    Author: GeekLiB
def test_incsubtensor2(self):
        tv = numpy.asarray(self.rng.uniform(size=(10,)),
                           theano.config.floatX)
        t = theano.shared(tv)
        out = tensor.inc_subtensor(t[:4], self.x[:4])
        self.check_rop_lop(out, (10,))
Project: Theano-Deep-learning    Author: GeekLiB
def test_incsubtensor_allocs0(self):
        x = tensor.matrix()
        y = tensor.matrix()
        y0 = tensor.zeros_like(y)
        z = tensor.inc_subtensor(x[:4], y0)
        f = theano.function([x, y], z, mode=self.mode)
        assert numpy.all([not isinstance(n.op, tensor.IncSubtensor) for n in
                          f.maker.fgraph.toposort()])