Python theano.tensor module: zeros() code examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use theano.tensor.zeros().
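
Before the project examples, here is a minimal, self-contained sketch of the T.zeros() + T.set_subtensor() pattern that most of the snippets below build on (variable names are illustrative, not taken from any of the projects):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
# T.zeros() only builds a symbolic graph node; nothing is allocated yet.
out = T.zeros((x.shape[0], x.shape[1] * 2))
# set_subtensor returns a new variable with x written into the left half.
out = T.set_subtensor(out[:, :x.shape[1]], x)

f = theano.function([x], out)
print(f(np.ones((2, 3), dtype=theano.config.floatX)))  # left half ones, right half zeros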

Project: iterative_inference_segm    Author: adri-romsor    | Project source | File source
def __getitem__(self, name):
        return self.layer_dict[name]


#def _pad_to_fit(x, target_shape):
    #"""
    #Spatially pad a tensor's feature maps with zeros as evenly as possible
    #(center it) to fit the target shape.

    #Expected target shape is larger than the shape of the tensor.

    #NOTE: padding may be unequal on either side of the map if the target
    #dimension is odd. This is why keras's ZeroPadding2D isn't used.
    #"""
    #pad_0 = {}
    #pad_1 = {}
    #for dim in [2, 3]:
        #pad_0[dim] = (target_shape[dim]-x.shape[dim])//2
        #pad_1[dim] = target_shape[dim]-x.shape[dim]-pad_0[dim]
    #output = T.zeros(target_shape)
    #indices = (slice(None),
               #slice(None),
               #slice(pad_0[2], target_shape[2]-pad_1[2]),
               #slice(pad_0[3], target_shape[3]-pad_1[3]))
    #return T.set_subtensor(output[indices], x)
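
The commented-out helper above is easier to follow with concrete numbers. A hedged NumPy rendering of the same center-padding arithmetic (the function name and shapes are illustrative, not from the project):

import numpy as np

def pad_to_fit_np(x, target_shape):
    """Center-pad feature maps (dims 2 and 3) of x with zeros to target_shape.
    Padding may differ by one on each side when the difference is odd."""
    out = np.zeros(target_shape, dtype=x.dtype)
    pad_0, pad_1 = {}, {}
    for dim in (2, 3):
        pad_0[dim] = (target_shape[dim] - x.shape[dim]) // 2
        pad_1[dim] = target_shape[dim] - x.shape[dim] - pad_0[dim]
    out[:, :,
        pad_0[2]:target_shape[2] - pad_1[2],
        pad_0[3]:target_shape[3] - pad_1[3]] = x
    return out

x = np.ones((1, 1, 3, 3))
print(pad_to_fit_np(x, (1, 1, 6, 5)).shape)  # (1, 1, 6, 5); pads of 1/2 and 1/1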
Project: dsb3    Author: EliasVansteenkiste    | Project source | File source
def get_output_for(self, input, **kwargs):
        a, b, c = self.scale_factor
        upscaled = input
        if self.mode == 'repeat':
            if c > 1:
                upscaled = T.extra_ops.repeat(upscaled, c, 4)
            if b > 1:
                upscaled = T.extra_ops.repeat(upscaled, b, 3)
            if a > 1:
                upscaled = T.extra_ops.repeat(upscaled, a, 2)
        elif self.mode == 'dilate':
            if c > 1 or b > 1 or a > 1:
                output_shape = self.get_output_shape_for(input.shape)
                upscaled = T.zeros(shape=output_shape, dtype=input.dtype)
                upscaled = T.set_subtensor(
                    upscaled[:, :, ::a, ::b, ::c], input)
        return upscaled
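
For intuition, 'dilate' mode writes the input into every a-th/b-th/c-th position of a zero tensor along the three spatial axes. A small NumPy analogue of that strided assignment (shapes made up for illustration):

import numpy as np

a, b, c = 2, 2, 1                      # scale factors for axes 2, 3, 4
inp = np.arange(8.).reshape(1, 1, 2, 2, 2)
out = np.zeros((1, 1, 4, 4, 2))        # zeros everywhere ...
out[:, :, ::a, ::b, ::c] = inp         # ... except at the strided positions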
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def mdclW(num_filters,num_channels,filter_size,winit,name,scales):
    # Coefficient Initializer
    sinit = lasagne.init.Constant(1.0/(1+len(scales)))
    # Total filter size
    size = filter_size + (filter_size-1)*(scales[-1]-1)
    # Multiscale Dilated Filter 
    W = T.zeros((num_filters,num_channels,size,size))
    # Undilated Base Filter
    baseW = theano.shared(lasagne.utils.floatX(winit.sample((num_filters,num_channels,filter_size,filter_size))),name=name+'.W')
    for scale in scales[::-1]: # iterate backwards so that we place the main filter on top
        W = T.set_subtensor(W[:,:,scales[-1]-scale:size-scales[-1]+scale:scale,scales[-1]-scale:size-scales[-1]+scale:scale],
                            baseW*theano.shared(lasagne.utils.floatX(sinit.sample(num_filters)), name+'.coeff_'+str(scale)).dimshuffle(0,'x','x','x'))
    return W

# Subpixel Upsample Layer from (https://arxiv.org/abs/1609.05158)
# This layer uses a set of r^2 set_subtensor calls to reorganize the tensor in a subpixel-layer upscaling style
# as done in Magic Pony's ESPCN paper for super-resolution.
# r is the upscale factor.
# c is the number of output channels.
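
A hedged sketch of the reshuffle those comments describe, assuming an input of shape (batch, c*r*r, h, w) and an output of shape (batch, c, h*r, w*r); this is one reading of the comments, not the project's exact layer code:

import theano.tensor as T

def subpixel_upsample(x, r, c):
    h, w = x.shape[2], x.shape[3]
    out = T.zeros((x.shape[0], c, h * r, w * r))
    for i in range(r):       # r*r set_subtensor calls, one per subpixel offset
        for j in range(r):
            out = T.set_subtensor(out[:, :, i::r, j::r],
                                  x[:, r * i + j::r * r, :, :])
    return out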
Project: Neural-Photo-Editor    Author: ajbrock    | Project source | File source
def reset(self):
        # Set Original ordering
        self.ordering.set_value(np.arange(self._input_size, dtype=theano.config.floatX))

        # Reset RandomStreams
        self._rng.seed(self._random_seed)

        # Initial layer connectivity
        self.layers_connectivity[0].set_value((self.ordering + 1).eval())
        for i in range(1, len(self.layers_connectivity)-1):
            self.layers_connectivity[i].set_value(np.zeros((self._hidden_sizes[i-1]), dtype=theano.config.floatX))
        self.layers_connectivity[-1].set_value(self.ordering.get_value())

        # Reset MRG_RandomStreams (GPU)
        self._mrng.rstate = self._initial_mrng_rstate
        for state, value in zip(self._mrng.state_updates, self._initial_mrng_state_updates):
            state[0].set_value(value)

        self.sample_connectivity()
Project: cortex    Author: rdevon    | Project source | File source
def sample(self, n_samples):
        '''
        Inspired by jbornschein's implementation.
        '''

        z0 = T.zeros((n_samples, self.dim,)).astype(floatX) + T.shape_padleft(self.b)
        rs = self.trng.uniform((self.dim, n_samples), dtype=floatX)

        def _step_sample(i, W_i, r_i, z):
            p_i = T.nnet.sigmoid(z[:, i]) * 0.9999 + 0.000005
            x_i = (r_i <= p_i).astype(floatX)
            z   = z + T.outer(x_i, W_i)
            return z, x_i

        seqs = [T.arange(self.dim), self.W, rs]
        outputs_info = [z0, None]
        non_seqs = []

        (zs, x), updates = scan(_step_sample, seqs, outputs_info, non_seqs,
                                self.dim)

        return x.T, updates
Project: cortex    Author: rdevon    | Project source | File source
def set_params(self):
        self.params = OrderedDict()

        dim_in = self.dim_in
        dim_out = self.dim_h

        for l in xrange(self.n_layers):
            if l > 0:
                dim_in = self.dim_h
            if l == self.n_layers - 1:
                dim_out = self.dim_out

            W = norm_weight(dim_in, dim_out,
                            scale=self.weight_scale, ortho=False)
            b = np.zeros((dim_out,)).astype(floatX)

            self.params['W%d' % l] = W
            self.params['b%d' % l] = b

        b = np.zeros((self.dim_out,)).astype(floatX)
        W = norm_weight(self.dim_out, self.dim_out, scale=self.weight_scale,
                                ortho=False)

        self.params['War'] = W
        self.params['bar'] = b
Project: keras    Author: GeekLiB    | Project source | File source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: keras    Author: GeekLiB    | Project source | File source
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask
Project: NMT    Author: tuzhaopeng    | Project source | File source
def _init_params(self):
        self.iBlocks = 1  # number of blocks in the input (from lower layer)

        W_em = self.init_fn(self.n_in,
                            self.n_class,
                            self.sparsity,
                            self.scale,
                            self.rng)
        self.W_em = theano.shared(W_em,
                                  name='W_%s' % self.name)
        self.b_em = theano.shared(
            self.bias_fn(self.n_class, self.bias_scale, self.rng),
            name='b_%s' % self.name)

        U_em = theano.shared(((self.rng.rand(self.iBlocks, self.n_class, 
            self.n_in, self.n_words_class)-0.5)/(self.n_words_class*self.n_in)
            ).astype(theano.config.floatX), name='U_%s'%self.name)
        self.U_em = U_em
        c_em = numpy.zeros((self.n_class, self.n_words_class), dtype='float32')
        n_words_last_class = self.n_out % self.n_words_class
        #c_em[-1, n_words_last_class:] = -numpy.inf
        self.c_em = theano.shared(c_em, name='c_%s' % self.name)

        self.params = [self.W_em, self.b_em, self.U_em, self.c_em]
        self.params_grad_scale = [self.grad_scale for x in self.params]
Project: NMT    Author: tuzhaopeng    | Project source | File source
def _init_params(self):
        self.iBlocks = 1  # number of blocks in the input (from lower layer)

        W_em = self.init_fn(self.n_in,
                            self.n_class,
                            self.sparsity,
                            self.scale,
                            self.rng)
        self.W_em = theano.shared(W_em,
                                  name='W_%s' % self.name)
        self.b_em = theano.shared(
            self.bias_fn(self.n_class, self.bias_scale, self.rng),
            name='b_%s' % self.name)

        U_em = theano.shared(((self.rng.rand(self.iBlocks, self.n_class, 
            self.n_in, self.n_words_class)-0.5)/(self.n_words_class*self.n_in)
            ).astype(theano.config.floatX), name='U_%s'%self.name)
        self.U_em = U_em
        c_em = numpy.zeros((self.n_class, self.n_words_class), dtype='float32')
        n_words_last_class = self.n_out % self.n_words_class
        #c_em[-1, n_words_last_class:] = -numpy.inf
        self.c_em = theano.shared(c_em, name='c_%s' % self.name)

        self.params = [self.W_em, self.b_em, self.U_em, self.c_em]
        self.params_grad_scale = [self.grad_scale for x in self.params]
Project: NMT    Author: tuzhaopeng    | Project source | File source
def __init__(self, state, rng,
            skip_init=False,
            compute_alignment=False):
        """Constructor.

        :param state:
            A state in the usual groundhog sense.
        :param rng:
            Random number generator. Something like numpy.random.RandomState(seed).
        :param skip_init:
            If True, all the layers are initialized with zeros. Saves time spent on
            parameter initialization if they are loaded later anyway.
        :param compute_alignment:
            If True, the alignment is returned by the decoder.
        """

        self.state = state
        self.rng = rng
        self.skip_init = skip_init
        self.compute_alignment = compute_alignment
Project: NMT    Author: tuzhaopeng    | Project source | File source
def __init__(self, state, rng,
            skip_init=False,
            compute_alignment=True):
        """Constructor.

        :param state:
            A state in the usual groundhog sense.
        :param rng:
            Random number generator. Something like numpy.random.RandomState(seed).
        :param skip_init:
            If True, all the layers are initialized with zeros. Saves time spent on
            parameter initialization if they are loaded later anyway.
        :param compute_alignment:
            If True, the alignment is returned by the decoder.
        """

        self.state = state
        self.rng = rng
        self.skip_init = skip_init
        self.compute_alignment = compute_alignment
Project: NMT    Author: tuzhaopeng    | Project source | File source
def parse_input(state, word2idx, line, raise_unk=False, idx2word=None, unk_sym=-1, null_sym=-1):
    if unk_sym < 0:
        unk_sym = state['unk_sym_source']
    if null_sym < 0:
        null_sym = state['null_sym_source']
    seqin = line.split()
    seqlen = len(seqin)
    seq = numpy.zeros(seqlen+1, dtype='int64')
    for idx,sx in enumerate(seqin):
        seq[idx] = word2idx.get(sx, unk_sym)
        if seq[idx] >= state['n_sym_source']:
            seq[idx] = unk_sym
        if seq[idx] == unk_sym and raise_unk:
            raise Exception("Unknown word {}".format(sx))

    seq[-1] = null_sym
    if idx2word:
        idx2word[null_sym] = '<eos>'
        idx2word[unk_sym] = state['oov']
        parsed_in = [idx2word[sx] for sx in seq]
        return seq, " ".join(parsed_in)

    return seq, seqin
Project: NMT    Author: tuzhaopeng    | Project source | File source
def parse_target(state, word2idx, line, raise_unk=False, idx2word=None, unk_sym=-1, null_sym=-1):
    if unk_sym < 0:
        unk_sym = state['unk_sym_target']
    if null_sym < 0:
        null_sym = state['null_sym_target']
    seqin = line.split()
    seqlen = len(seqin)
    seq = numpy.zeros(seqlen+1, dtype='int64')
    for idx,sx in enumerate(seqin):
        seq[idx] = word2idx.get(sx, unk_sym)
        if seq[idx] >= state['n_sym_target']:
            seq[idx] = unk_sym
        if seq[idx] == unk_sym and raise_unk:
            raise Exception("Unknown word {}".format(sx))

    seq[-1] = null_sym
    if idx2word:
        idx2word[null_sym] = '<eos>'
        idx2word[unk_sym] = state['oov']
        parsed_in = [idx2word[sx] for sx in seq]
        return seq, " ".join(parsed_in)

    return seq, seqin
Project: icml17_knn    Author: taolei87    | Project source | File source
def initialize_params(self, n_in, n_out, activation):
        if USE_XAVIER_INIT:
            if activation == ReLU:
                scale = np.sqrt(4.0/(n_in+n_out), dtype=theano.config.floatX)
                b_vals = np.ones(n_out, dtype=theano.config.floatX) * 0.01
            elif activation == softmax:
                scale = np.float64(0.001).astype(theano.config.floatX)
                b_vals = np.zeros(n_out, dtype=theano.config.floatX)
            else:
                scale = np.sqrt(2.0/(n_in+n_out), dtype=theano.config.floatX)
                b_vals = np.zeros(n_out, dtype=theano.config.floatX)
            W_vals = random_init((n_in,n_out), rng_type="normal") * scale
        else:
            W_vals = random_init((n_in,n_out))
            if activation == softmax:
                W_vals *= 1.0
            if activation == ReLU:
                b_vals = np.ones(n_out, dtype=theano.config.floatX) * 0.01
            else:
                b_vals = np.zeros(n_out, dtype=theano.config.floatX)
        self.W = create_shared(W_vals, name="W")
        if self.has_bias: self.b = create_shared(b_vals, name="b")
Project: supic    Author: Hirico    | Project source | File source
def __init__(self):
        super(DataLoader, self).__init__(daemon=True)
        self.data_ready = threading.Event()
        self.data_copied = threading.Event()

        self.orig_shape, self.seed_shape = args.batch_shape, args.batch_shape // args.zoom

        self.orig_buffer = np.zeros((args.buffer_size, 3, self.orig_shape, self.orig_shape), dtype=np.float32)
        self.seed_buffer = np.zeros((args.buffer_size, 3, self.seed_shape, self.seed_shape), dtype=np.float32)
        self.files = glob.glob(args.train)
        if len(self.files) == 0:
            error("There were no files found to train from searching for `{}`".format(args.train),
                  "  - Try putting all your images in one folder and using `--train=data/*.jpg`")

        self.available = set(range(args.buffer_size))
        self.ready = set()

        self.cwd = os.getcwd()
        self.start()
Project: supic    Author: Hirico    | Project source | File source
def process(self, original):
        # Snap the image to a shape that's compatible with the generator (2x, 4x)
        s = 2 ** max(args.generator_upscale, args.generator_downscale)
        by, bx = original.shape[0] % s, original.shape[1] % s
        original = original[by-by//2:original.shape[0]-by//2,bx-bx//2:original.shape[1]-bx//2,:]

        # Prepare padded input image as well as an output buffer of the zoomed size.
        s, p, z = args.rendering_tile, args.rendering_overlap, args.zoom
        image = np.pad(original, ((p, p), (p, p), (0, 0)), mode='reflect')
        output = np.zeros((original.shape[0] * z, original.shape[1] * z, 3), dtype=np.float32)

        # Iterate through the tile coordinates and pass them through the network.
        for y, x in itertools.product(range(0, original.shape[0], s), range(0, original.shape[1], s)):
            img = np.transpose(image[y:y+p*2+s,x:x+p*2+s,:] / 255.0 - 0.5, (2, 0, 1))[np.newaxis].astype(np.float32)
            *_, repro = self.model.predict(img)
            output[y*z:(y+s)*z,x*z:(x+s)*z,:] = np.transpose(repro[0] + 0.5, (1, 2, 0))[p*z:-p*z,p*z:-p*z,:]
            print('.', end='', flush=True)
        output = output.clip(0.0, 1.0) * 255.0

        # Match color histograms if the user specified this option.
        if args.rendering_histogram:
            for i in range(3):
                output[:,:,i] = self.match_histograms(output[:,:,i], original[:,:,i])

        return scipy.misc.toimage(output, cmin=0, cmax=255)
Project: KGP-ASR    Author: KGPML    | Project source | File source
def _add_blanks(y, blank_symbol, y_mask=None):
    """Add blanks to a matrix and updates mask
    Input shape: output_seq_len x num_batch
    Output shape: 2*output_seq_len+1 x num_batch
    """
    # for y
    y_extended = y.T.dimshuffle(0, 1, 'x')
    blanks = tensor.zeros_like(y_extended) + blank_symbol
    concat = tensor.concatenate([y_extended, blanks], axis=2)
    res = concat.reshape((concat.shape[0],
                          concat.shape[1] * concat.shape[2])).T
    begining_blanks = tensor.zeros((1, res.shape[1])) + blank_symbol
    blanked_y = tensor.concatenate([begining_blanks, res], axis=0)
    # for y_mask
    if y_mask is not None:
        y_mask_extended = y_mask.T.dimshuffle(0, 1, 'x')
        concat = tensor.concatenate([y_mask_extended,
                                     y_mask_extended], axis=2)
        res = concat.reshape((concat.shape[0],
                              concat.shape[1] * concat.shape[2])).T
        begining_blanks = tensor.ones((1, res.shape[1]), dtype=floatX)
        blanked_y_mask = tensor.concatenate([begining_blanks, res], axis=0)
    else:
        blanked_y_mask = None
    return blanked_y.astype('int32'), blanked_y_mask
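
The 2*output_seq_len+1 shape from the docstring is easy to verify with a hedged NumPy analogue of the same interleaving (plain arrays instead of symbolic tensors):

import numpy as np

y = np.array([[3], [1], [2]])                    # output_seq_len=3, num_batch=1
blank = 0
ext = y.T[:, :, None]                            # (B, L, 1)
pairs = np.concatenate([ext, np.zeros_like(ext) + blank], axis=2)
res = pairs.reshape(pairs.shape[0], -1).T        # (2L, B)
blanked = np.concatenate([np.zeros((1, 1), dtype=y.dtype) + blank, res], axis=0)
print(blanked.ravel())                           # [0 3 0 1 0 2 0] -> 2L+1 rows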
Project: KGP-ASR    Author: KGPML    | Project source | File source
def _labeling_batch_to_class_batch(y, y_labeling, num_classes,
                                   y_hat_mask=None):
    # FIXME: y_hat_mask is currently not used
    batch_size = y.shape[1]
    N = y_labeling.shape[0]
    n_labels = y.shape[0]
    # sum over all repeated labels
    # from (T, B, L) to (T, C, B)
    out = T.zeros((num_classes, batch_size, N))
    y_labeling = y_labeling.dimshuffle((2, 1, 0))  # L, B, T
    y_ = y

    def scan_step(index, prev_res, y_labeling, y_):
        res_t = T.inc_subtensor(prev_res[y_[index, T.arange(batch_size)],
                                T.arange(batch_size)],
                                y_labeling[index, T.arange(batch_size)])
        return res_t

    result, updates = theano.scan(scan_step,
                                  sequences=[T.arange(n_labels)],
                                  non_sequences=[y_labeling, y_],
                                  outputs_info=[out])
    # result will be (C, B, T) so we make it (T, B, C)
    return result[-1].dimshuffle(2, 1, 0)
Project: keraflow    Author: ipod825    | Project source | File source
def padding(self, x, pad, pad_dims, output_shape):
        # x shape: (nb_sample, input_depth, rows, cols)

        output = T.zeros((x.shape[0],) + output_shape[1:])
        indices = [slice(None), slice(None)]  # nb_sample, input_depth does not change

        for i in range(2, len(output_shape)):
            if i not in pad_dims:
                indices.append(slice(None))
            else:
                p = pad[i-2]
                if isinstance(p, (tuple,list)):
                    assert len(p)==2
                    assert p[0]!=0 or p[1]!=0
                    indices.append(slice(p[0], -p[1]))
                else:
                    if p==0:
                        indices.append(slice(None))
                    else:
                        indices.append(slice(p, -p))

        return T.set_subtensor(output[indices], x)
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: deep-learning-keras-projects    Author: jasmeetsb    | Project source | File source
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask
Project: ismir2015    Author: f0k    | Project source | File source
def get_output_for(self, input, **kwargs):
        input_shape = input.shape
        if self.dilation[0] > 1:
            # pad such that the time axis length is divisible by the dilation factor
            pad_w = (self.dilation[0] - input_shape[2] % self.dilation[0]) % self.dilation[0]
            input = T.concatenate((input, T.zeros((input_shape[0], input_shape[1], pad_w, input_shape[3]), input.dtype)), axis=2)
            # rearrange data to fold the time axis into the minibatch dimension
            input = input.reshape((input_shape[0], input_shape[1], -1, self.dilation[0], input_shape[3]))
            input = input.transpose(0, 3, 1, 2, 4)
            input = input.reshape((-1,) + tuple(input.shape[2:]))
        output = super(TimeDilatedMaxPool2DLayer, self).get_output_for(input, **kwargs)
        if self.dilation[0] > 1:
            # restore the time axis from the minibatch dimension
            output = output.reshape((input_shape[0], self.dilation[0]) + tuple(output.shape[1:]))
            output = output.transpose(0, 2, 3, 1, 4)
            output = output.reshape((input_shape[0], output.shape[1], -1, output.shape[4]))
            # remove the padding
            output = output[:, :, :output.shape[2] - pad_w]
        return output
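
The fold/unfold trick in this layer is easier to see on a concrete array. A hedged NumPy sketch of folding a dilated time axis into the batch dimension (tiny made-up shapes):

import numpy as np

dilation = 2
x = np.arange(8.).reshape(2, 1, 4, 1)     # (batch, channels, time, freq)
folded = x.reshape(2, 1, -1, dilation, 1) # split time into groups of `dilation`
folded = folded.transpose(0, 3, 1, 2, 4)  # move the phase axis next to batch
folded = folded.reshape(-1, 1, 2, 1)      # (batch*dilation, channels, time/dilation, freq)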
Project: sciDT    Author: edvisees    | Project source | File source
def get_output(self, train=False):
    input = self.get_input(train)
    proj_input = self.activation(T.tensordot(input, self.att_proj, axes=(3,0)))
    if self.context == 'word':
      att_scores = T.tensordot(proj_input, self.att_scorer, axes=(3, 0))
    elif self.context == 'clause':
      def step(a_t, h_tm1, W_in, W, sc):
        h_t = T.tanh(T.tensordot(a_t, W_in, axes=(2,0)) + T.tensordot(h_tm1, W, axes=(2,0)))
        s_t = T.tensordot(h_t, sc, axes=(2,0))
        return h_t, s_t
      [_, scores], _ = theano.scan(step, sequences=[proj_input.dimshuffle(2,0,1,3)], outputs_info=[T.zeros((proj_input.shape[0], self.td1, self.rec_hid_dim)), None], non_sequences=[self.rec_in_weights, self.rec_hid_weights, self.att_scorer])
      att_scores = scores.dimshuffle(1,2,0)
    elif self.context == 'para':
      att_scores = T.tensordot(proj_input, self.att_scorer, axes=(3, 2)).sum(axis=(1, 2))
    # Nested scans. For shame!
    def get_sample_att(sample_input, sample_att):
      sample_att_inp, _ = theano.scan(fn=lambda s_att_i, s_input_i: T.dot(s_att_i, s_input_i), sequences=[T.nnet.softmax(sample_att), sample_input])
      return sample_att_inp

    att_input, _ = theano.scan(fn=get_sample_att, sequences=[input, att_scores])
    return att_input
Project: keras-neural-graph-fingerprint    Author: keiserlab    | Project source | File source
def temporal_padding(x, paddings=(1, 0), padvalue=0):
    '''Pad the middle dimension of a 3D tensor
    with `padding[0]` values left and `padding[1]` values right.

    Modified from keras.backend.temporal_padding
    https://github.com/fchollet/keras/blob/3bf913d/keras/backend/theano_backend.py#L590

    TODO: Implement for tensorflow (supposedly easier)
    '''
    if not isinstance(paddings, (tuple, list, ndarray)):
        paddings = (paddings, paddings)

    input_shape = x.shape
    output_shape = (input_shape[0],
                    input_shape[1] + sum(paddings),
                    input_shape[2])
    output = T.zeros(output_shape)

    # Set pad value and set subtensor of actual tensor
    output = T.set_subtensor(output[:, :paddings[0], :], padvalue)
    output = T.set_subtensor(output[:, paddings[1]:, :], padvalue)
    output = T.set_subtensor(output[:, paddings[0]:x.shape[1] + paddings[0], :], x)
    return output
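
A usage sketch, evaluating the helper on a tiny tensor (this assumes the module's own imports, i.e. theano, theano.tensor as T, and numpy's ndarray):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')
f = theano.function([x], temporal_padding(x, paddings=(2, 1)))
print(f(np.ones((1, 3, 2), dtype=theano.config.floatX)).shape)  # (1, 6, 2)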
Project: NADE    Author: MarcCote    | Project source | File source
def nll_of_x_given_o(self, input, ordering):
        """ Returns the theano graph that computes $-ln p(\bx|o)$.

        Parameters
        ----------
        input: 1D vector
            One image with shape (nb_channels * images_height * images_width).

        ordering: 1D vector of int
            List of pixel indices representing the input ordering.
        """

        D = int(np.prod(self.image_shape))
        mask_o_d = T.zeros((D, D), dtype=theano.config.floatX)
        mask_o_d = T.set_subtensor(mask_o_d[T.arange(D), ordering], 1.)

        mask_o_lt_d = T.cumsum(mask_o_d, axis=0)
        mask_o_lt_d = T.set_subtensor(mask_o_lt_d[1:], mask_o_lt_d[:-1])
        mask_o_lt_d = T.set_subtensor(mask_o_lt_d[0, :], 0.)

        input = T.tile(input[None, :], (D, 1))
        nll = -T.sum(self.lnp_x_o_d_given_x_o_lt_d(input, mask_o_d, mask_o_lt_d))
        return nll
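
The two masks are easier to see with a concrete ordering. A hedged NumPy rendering of the same construction (D=4 chosen purely for illustration):

import numpy as np

D = 4
ordering = np.array([2, 0, 3, 1])              # pixel index visited at step d
mask_o_d = np.zeros((D, D))
mask_o_d[np.arange(D), ordering] = 1.          # row d marks the d-th pixel

mask_o_lt_d = np.cumsum(mask_o_d, axis=0)      # pixels seen up to and including d
mask_o_lt_d = np.vstack([np.zeros((1, D)), mask_o_lt_d[:-1]])  # strictly before d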
Project: Generative-models    Author: aalitaiga    | Project source | File source
def apply(self, input_v, input_h):
        # Vertical stack
        v_nxn_out = self.vertical_conv_nxn.apply(input_v)
        # Different cropping are used depending on the row we wish to condition on
        v_nxn_out_to_h = v_nxn_out[:,:,:-(self.filter_size//2)-2,:]
        v_nxn_out_to_v = v_nxn_out[:,:,1:-(self.filter_size//2)-1,:]
        v_1x1_out = self.vertical_conv_1x1.apply(v_nxn_out_to_h)
        output_v = T.tanh(v_nxn_out_to_v[:,:self.num_filters,:,:]) * \
            T.nnet.sigmoid(v_nxn_out_to_v[:,self.num_filters:,:,:])

        # Horizontal stack
        h_1xn_out = self.horizontal_conv_1xn.apply(input_h)
        h_1xn_out = h_1xn_out[:,:,:,:-(self.filter_size//2)]
        h_sum = h_1xn_out + v_1x1_out
        h_activation = T.tanh(h_sum[:,:self.num_filters,:,:]) * \
            T.nnet.sigmoid(h_sum[:,self.num_filters:,:,:])
        h_1x1_out = self.horizontal_conv_1x1.apply(h_activation)
        if self.res:
            # input_h_padded = T.zeros(input_h.shape, dtype=theano.config.floatX)
            # input_h_padded = T.inc_subtensor(input_h_padded[:,:,3:,3:], input_h[:,:,:-3,:-3])
            # input_h = input_h_padded
            output_h = h_1x1_out #+ input_h
        else:
            output_h = h_1x1_out #h_activation
        return output_v, output_h
Project: lemontree    Author: khshim    | Project source | File source
def set_shared(self):
        """
        This function overrides the parents' one.
        Set shared variables.

        Shared Variables
        ----------------
        W: 4D matrix
            shape is (output channel, input channel, kernel width, kernel height).
        b: 1D vector
            shape is (output channel).
        """
        W = np.zeros((self.output_shape[0], self.input_shape[0], self.kernel_shape[0], self.kernel_shape[1])).astype(theano.config.floatX)
        self.W = theano.shared(W, self.name + '_weight')
        self.W.tags = ['weight', self.name]
        b = np.zeros((self.output_shape[0])).astype(theano.config.floatX)
        self.b = theano.shared(b, self.name + '_bias')
        self.b.tags = ['bias', self.name]
Project: lemontree    Author: khshim    | Project source | File source
def get_output(self, input_):
        """
        This function overrides the parents' one.
        Creates symbolic function to compute output from an input.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        shape = (input_.shape[0],) + (self.input_shape[0], self.input_shape[1] + self.padding[0] + self.padding[1], self.input_shape[2] + self.padding[2] + self.padding[3])
        result = T.zeros(shape, dtype=theano.config.floatX)  # make zero output
        indices = (slice(None),
                   slice(None),
                   slice(self.padding[0], self.input_shape[1] + self.padding[0]),
                   slice(self.padding[2], self.input_shape[2] + self.padding[2])
                   )
        return T.set_subtensor(result[indices], input_)



# TODO: MAKE THIS WORK!
Project: lemontree    Author: khshim    | Project source | File source
def set_shared(self):
        """
        This function overrides the parents' one.
        Set shared variables.

        Shared Variables
        ----------------
        W: 4D matrix
            shape is (input channel, output channel, kernel width, kernel height).
        b: 1D vector
            shape is (output channel).
        """
        W = np.zeros((self.input_shape[0], self.output_shape[0], self.kernel_shape[0], self.kernel_shape[1])).astype(theano.config.floatX)
        self.W = theano.shared(W, self.name + '_weight')
        self.W.tags = ['weight', self.name]
        b = np.zeros((self.output_shape[0])).astype(theano.config.floatX)
        self.b = theano.shared(b, self.name + '_bias')
        self.b.tags = ['bias', self.name]
Project: lemontree    Author: khshim    | Project source | File source
def get_output(self, input_):
        """
        This function overrides the parents' one.
        Creates symbolic function to compute output from an input.

        Parameters
        ----------
        input_: TensorVariable

        Returns
        -------
        TensorVariable
        """
        if self.upscale_mode == 'repeat':
            upscaled = T.extra_ops.repeat(input_, self.scale_factor[0], 2)
            upscaled = T.extra_ops.repeat(upscaled, self.scale_factor[1], 3)
        else:
            upscaled = T.zeros((input_.shape[0], input_.shape[1], \
                input_.shape[2] * self.scale_factor[0], input_.shape[3] * self.scale_factor[1]), dtype=theano.config.floatX)
            upscaled = T.set_subtensor(upscaled[:, :, ::self.scale_factor[0], ::self.scale_factor[1]], input_)

        return upscaled
Project: DeepRes    Author: Aneeshers    | Project source | File source
def __init__(self):
        super(DataLoader, self).__init__(daemon=True)
        self.data_ready = threading.Event()
        self.data_copied = threading.Event()

        self.orig_shape, self.seed_shape = args.batch_shape, args.batch_shape // args.zoom

        self.orig_buffer = np.zeros((args.buffer_size, 3, self.orig_shape, self.orig_shape), dtype=np.float32)
        self.seed_buffer = np.zeros((args.buffer_size, 3, self.seed_shape, self.seed_shape), dtype=np.float32)
        self.files = glob.glob(args.train)
        if len(self.files) == 0:
            error("U Messed UP AGAIN FIND THE FILES`{}`".format(args.train),
                  "  - IF NOT workzzz try TEST2: all your images in one folder and using `--train=data/*.jpg`")

        self.available = set(range(args.buffer_size))
        self.ready = set()

        self.cwd = os.getcwd()
        self.start()
Project: DeepRes    Author: Aneeshers    | Project source | File source
def process(self, original):
        s = 2 ** max(args.generator_upscale, args.generator_downscale)
        by, bx = original.shape[0] % s, original.shape[1] % s
        original = original[by-by//2:original.shape[0]-by//2,bx-bx//2:original.shape[1]-bx//2,:]

        s, p, z = args.rendering_tile, args.rendering_overlap, args.zoom
        image = np.pad(original, ((p, p), (p, p), (0, 0)), mode='reflect')
        output = np.zeros((original.shape[0] * z, original.shape[1] * z, 3), dtype=np.float32)

        for y, x in itertools.product(range(0, original.shape[0], s), range(0, original.shape[1], s)):
            img = np.transpose(image[y:y+p*2+s,x:x+p*2+s,:] / 255.0 - 0.5, (2, 0, 1))[np.newaxis].astype(np.float32)
            *_, repro = self.model.predict(img)
            output[y*z:(y+s)*z,x*z:(x+s)*z,:] = np.transpose(repro[0] + 0.5, (1, 2, 0))[p*z:-p*z,p*z:-p*z,:]
            print('.', end='', flush=True)
        output = output.clip(0.0, 1.0) * 255.0

        if args.rendering_histogram:
            for i in range(3):
                output[:,:,i] = self.match_histograms(output[:,:,i], original[:,:,i])

        return scipy.misc.toimage(output, cmin=0, cmax=255)
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp    | Project source | File source
def forward_all(self, x, masks = None, h0=None, return_c=False, direction = None):
        if h0 is None:
            if x.ndim > 1:
                h0 = T.zeros((x.shape[1], self.n_out*(self.order+1)), dtype=theano.config.floatX)
            else:
                h0 = T.zeros((self.n_out*(self.order+1),), dtype=theano.config.floatX)

        if masks is None:
            masks = T.ones((x.shape[0], x.shape[1]), dtype = theano.config.floatX)
        h, _ = theano.scan(
                    fn = self.forward,
                    sequences = [x, masks],
                    outputs_info = [ h0 ]
                )
        if return_c:
            return h
        elif x.ndim > 1:
            return h[:,:,self.n_out*self.order:]
        else:
            return h[:,self.n_out*self.order:]
Project: SocializedWordEmbeddings    Author: HKUST-KnowComp    | Project source | File source
def initialize_params(self, n_in, n_out, activation):
        if USE_XAVIER_INIT:
            if activation == ReLU:
                scale = np.sqrt(4.0/(n_in+n_out), dtype=theano.config.floatX)
                b_vals = np.ones(n_out, dtype=theano.config.floatX) * 0.01
            elif activation == softmax:
                scale = np.float64(0.001 * self.scale).astype(theano.config.floatX)
                b_vals = np.zeros(n_out, dtype=theano.config.floatX)
            else:
                scale = np.sqrt(2.0/(n_in+n_out), dtype=theano.config.floatX)
                b_vals = np.zeros(n_out, dtype=theano.config.floatX)
            W_vals = random_init((n_in,n_out), rng_type="normal") * scale
        else:
            W_vals = random_init((n_in,n_out))
            if activation == softmax:
                W_vals *= (0.001 * self.scale)
            if activation == ReLU:
                b_vals = np.ones(n_out, dtype=theano.config.floatX) * 0.01
            else:
                b_vals = random_init((n_out,))
        self.W = create_shared(W_vals, name="W")
        if self.has_bias: self.b = create_shared(b_vals, name="b")
Project: keras-customized    Author: ambrite    | Project source | File source
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Project: keras-customized    Author: ambrite    | Project source | File source
def ctc_path_probs(predict, Y, alpha=1e-4):
    smoothed_predict = (1 - alpha) * predict[:, Y] + alpha * np.float32(1.) / Y.shape[0]
    L = T.log(smoothed_predict)
    zeros = T.zeros_like(L[0])
    log_first = zeros

    f_skip_idxs = ctc_create_skip_idxs(Y)
    b_skip_idxs = ctc_create_skip_idxs(Y[::-1])  # there should be a shortcut to calculating this

    def step(log_f_curr, log_b_curr, f_active, log_f_prev, b_active, log_b_prev):
        f_active_next, log_f_next = ctc_update_log_p(f_skip_idxs, zeros, f_active, log_f_curr, log_f_prev)
        b_active_next, log_b_next = ctc_update_log_p(b_skip_idxs, zeros, b_active, log_b_curr, log_b_prev)
        return f_active_next, log_f_next, b_active_next, log_b_next

    [f_active, log_f_probs, b_active, log_b_probs], _ = theano.scan(
        step, sequences=[L, L[::-1, ::-1]], outputs_info=[np.int32(1), log_first, np.int32(1), log_first])

    idxs = T.arange(L.shape[1]).dimshuffle('x', 0)
    mask = (idxs < f_active.dimshuffle(0, 'x')) & (idxs < b_active.dimshuffle(0, 'x'))[::-1, ::-1]
    log_probs = log_f_probs + log_b_probs[::-1, ::-1] - L
    return log_probs, mask
Project: WEARING    Author: nlkim0817    | Project source | File source
def MakeVisual( X_src, X_tar): 
    #LAB pair
    #pdb.set_trace()
    #X_rst = np.zeros( X_src.shape, np.float32)
    #for i in range( X_src.shape[0]):
    #    X_rst[i,:,:,:] = np.concatenate(
    #                    (np.resize( X_src[i,:,:,:], (1,nc,npx,npx/2)),
    #                      np.resize( X_tar[i,:,:,:], (1,nc,npx,npx/2))), axis =3 )


    X_src = np.resize(X_src,(X_src.shape[0],nc,npx,npx/2))
    X_tar = np.resize(X_tar,(X_tar.shape[0],nc,npx,npx/2))

    return X_tar
    #return np.concatenate( (X_src,X_tar), axis = 2) 


# SET PARAMETERS.
Project: WEARING    Author: nlkim0817    | Project source | File source
def MakeVisual( X_src, X_tar): 
    #LAB pair
    #pdb.set_trace()
    #X_rst = np.zeros( X_src.shape, np.float32)
    #for i in range( X_src.shape[0]):
    #    X_rst[i,:,:,:] = np.concatenate(
    #                    (np.resize( X_src[i,:,:,:], (1,nc,npx,npx/2)),
    #                      np.resize( X_tar[i,:,:,:], (1,nc,npx,npx/2))), axis =3 )


    X_src = np.resize(X_src,(X_src.shape[0],nc,npx,npx/2))
    X_tar = np.resize(X_tar,(X_tar.shape[0],nc,npx,npx/2))

    return X_tar
    #return np.concatenate( (X_src,X_tar), axis = 2) 


# SET PARAMETERS.
Project: WEARING    Author: nlkim0817    | Project source | File source
def MakeVisual( X_src, X_tar): 
    #LAB pair
    #pdb.set_trace()
    #X_rst = np.zeros( X_src.shape, np.float32)
    #for i in range( X_src.shape[0]):
    #    X_rst[i,:,:,:] = np.concatenate(
    #                    (np.resize( X_src[i,:,:,:], (1,nc,npx,npx/2)),
    #                      np.resize( X_tar[i,:,:,:], (1,nc,npx,npx/2))), axis =3 )


    X_src = np.resize(X_src,(X_src.shape[0],nc,npx,npx/2))
    X_tar = np.resize(X_tar,(X_tar.shape[0],nc,npx,npx/2))

    return X_tar
    #return np.concatenate( (X_src,X_tar), axis = 2) 


# SET PARAMETERS.
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def stretch_axis(a, axis, factor, original_shape):
    new_shape = [original_shape[0], original_shape[1],
                 original_shape[2], original_shape[3],
                 original_shape[4]]
    new_shape[axis] *= factor
    out_first = T.zeros(new_shape)

    indices_first = [slice(None),] * 5
    indices_first[axis] = slice(0, new_shape[axis], factor*2)
    indices_second = [slice(None),] * 5
    indices_second[axis] = slice(factor*2-1, new_shape[axis], factor*2)

    indices_take_first = [slice(None),] * 5
    indices_take_first[axis] = slice(0, original_shape[axis], factor)
    indices_take_second = [slice(None),] * 5
    indices_take_second[axis] = slice(1, original_shape[axis], factor)

    out_second = T.set_subtensor(out_first[indices_first], a[indices_take_first])
    out = T.set_subtensor(out_second[indices_second], a[indices_take_second])

    return out
Project: CNNbasedMedicalSegmentation    Author: BRML    | Project source | File source
def _forward(self):
        f_loss = lookup(self.loss_ident, _loss)

        self.coord_wise_multi = [f_loss(self.target, self.transfer(pred)) for pred in self.predictions]
        if self.imp_weight is not None:
            self.coord_wise_multi = [coord_wise * self.imp_weight for coord_wise in self.coord_wise_multi]

        self.sample_wise_multi = [coord_wise.sum(self.comp_dim) for coord_wise in self.coord_wise_multi]
        self.total_multi = [sample_wise.mean() for sample_wise in self.sample_wise_multi]

        self.total = T.zeros(self.total_multi[0].shape)
        for tot, pw in zip(self.total_multi, self.p_weights):
            self.total += tot * pw

        if self.mode == 'mean':
            self.total /= len(self.predictions)
Project: mimicry.ai    Author: fizerkhan    | Project source | File source
def initial_states(self, batch_size):
        initial_h1 = self.rnn1.initial_states(batch_size)
        initial_h2 = self.rnn2.initial_states(batch_size)
        initial_h3 = self.rnn3.initial_states(batch_size)

        last_h1 = shared_floatx_zeros((batch_size, self.rnn_h_dim))
        last_h2 = shared_floatx_zeros((batch_size, self.rnn_h_dim))
        last_h3 = shared_floatx_zeros((batch_size, self.rnn_h_dim))

        # Defining for all
        initial_k = tensor.zeros(
            (batch_size, self.attention_size), dtype=floatX)
        last_k = shared_floatx_zeros((batch_size, self.attention_size))

        # Trainable initial state for w. Why not for k?
        initial_w = tensor.repeat(self.initial_w[None, :], batch_size, 0)

        last_w = shared_floatx_zeros((batch_size, self.encoded_input_dim))

        return initial_h1, last_h1, initial_h2, last_h2, initial_h3, last_h3, \
            initial_w, last_w, initial_k, last_k
Project: jointEE-NN    Author: anoperson    | Project source | File source
def rnn_ff(inps, dim, hidden, batSize, prefix, params, names):
    Wx  = theano.shared(randomMatrix(dim, hidden))
    Wh  = theano.shared(randomMatrix(hidden, hidden))
    bh  = theano.shared(numpy.zeros(hidden, dtype=theano.config.floatX))
    #model.container['bi_h0']  = theano.shared(numpy.zeros(model.container['nh'], dtype=theano.config.floatX))

    # bundle
    params += [ Wx, Wh, bh ] #, model.container['bi_h0']
    names += [ prefix + '_Wx', prefix + '_Wh', prefix + '_bh' ] #, 'bi_h0'

    def recurrence(x_t, h_tm1):
        h_t = T.nnet.sigmoid(T.dot(x_t, Wx) + T.dot(h_tm1, Wh) + bh)
        return h_t

    h, _  = theano.scan(fn=recurrence, \
            sequences=inps, outputs_info=[T.alloc(0., batSize, hidden)], n_steps=inps.shape[0])

    return h
Project: sampleRNN_ICLR2017    Author: soroushmehr    | Project source | File source
def __Recurrent(name, hidden_dims, step_fn, inputs, non_sequences=[], h0s=None):
    if not isinstance(inputs, list):
        inputs = [inputs]

    if not isinstance(hidden_dims, list):
        hidden_dims = [hidden_dims]

    if h0s is None:
        h0s = [None]*len(hidden_dims)

    for i in xrange(len(hidden_dims)):
        if h0s[i] is None:
            h0_unbatched = lib.param(
                name + '.h0_' + str(i),
                numpy.zeros((hidden_dims[i],), dtype=theano.config.floatX)
            )
            num_batches = inputs[0].shape[1]
            h0s[i] = T.alloc(h0_unbatched, num_batches, hidden_dims[i])

        h0s[i] = T.patternbroadcast(h0s[i], [False] * h0s[i].ndim)

    outputs, _ = theano.scan(
        step_fn,
        sequences=inputs,
        outputs_info=h0s,
        non_sequences=non_sequences
    )

    return outputs
Project: sampleRNN_ICLR2017    Author: soroushmehr    | Project source | File source
def T_one_hot(inp_tensor, n_classes):
    """
    :todo:
        - Implement other methods from here: 
        - Compare them speed-wise for different sizes
        - Implement N_one_hot for Numpy version, with speed tests.

    Theano one-hot (1-of-k) from an input tensor of indices.
    If the indices are of the shape (a0, a1, ..., an) the output
    shape would be (a0, a1, ..., an, n_classes).

    :params:
        - inp_tensor: any theano tensor with dtype int* as indices and all of
                      them between [0, n_classes-1].
        - n_classes: number of classes which determines the output size.

    :usage:
        >>> idx = T.itensor3()
        >>> idx_val = numpy.array([[[0,1,2,3],[4,5,6,7]]], dtype='int32')
        >>> one_hot = T_one_hot(idx, 8)
        >>> out = one_hot.eval({idx:idx_val})
        >>> print out
        array([[[[ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  1.,  0.,  0.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.]],
        [[ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  1.,  0.],
         [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.]]]])
        >>> print idx_val.shape, out.shape
        (1, 2, 4) (1, 2, 4, 8)
    """
    flattened = inp_tensor.flatten()
    z = T.zeros((flattened.shape[0], n_classes), dtype=theano.config.floatX)
    one_hot = T.set_subtensor(z[T.arange(flattened.shape[0]), flattened], 1)
    out_shape = [inp_tensor.shape[i] for i in xrange(inp_tensor.ndim)] + [n_classes]
    one_hot = one_hot.reshape(out_shape)
    return one_hot
Project: pdnn    Author: petered    | Project source | File source
def __init__(self, ws, hidden_activation, output_activation, optimizer, loss, encdec, encdec_back = None,
             grad_calc='xx', minibatch_size=1):
        """
        :param ws:
        :param encdec: A PDEncoderDecoder pair
        :param hidden_activation:
        :param output_activation:
        :param optimizer:
        :param loss:
        :param encdec_back: Optional PDEncoderDecoder for the backward pass; defaults to encdec.
        :param grad_calc:
        :param minibatch_size:
        :param fwd_quantizer:
        :param back_quantizer:
        """

        if isinstance(encdec, dict):
            encdec = PDEncoderDecoder(kp=encdec['kp'], kd=encdec['kd'], quantization=encdec['quantizer'])
        if encdec_back is None:
            encdec_back = encdec
        elif isinstance(encdec_back, dict):
            encdec_back = PDEncoderDecoder(kp=encdec_back['kp'], kd=encdec_back['kd'], quantization=encdec_back['quantizer'])

        self.layers = [PDHerdingLayer(w, b=np.zeros(w.shape[1]), encdec=encdec if not callable(encdec) else encdec(), encdec_back=encdec_back if not callable(encdec_back) else encdec_back(),
                  nonlinearity=nonlinearity, grad_calc=grad_calc,  minibatch_size=minibatch_size)
                       for w, nonlinearity in izip_equal(ws, [hidden_activation]*(len(ws)-1)+[output_activation])]
        self.optimizer = optimizer
        self.loss = get_named_cost_function(loss) if isinstance(loss, basestring) else loss
        self.minibatch_size = minibatch_size
Project: pdnn    Author: petered    | Project source | File source
def encode(self, x, shape=None):
        if shape is None:
            xp = create_shared_variable(np.zeros((0, )*x.ndim), name='xp')
            delta = ifelse(xp.size>0, x-xp, x)
        else:
            xp = create_shared_variable(np.zeros(shape), name='xp{}'.format(shape))
            delta = x - xp
        add_update(xp, x)
        y = self.kp*x + self.kd*delta
        if self.quantization is None:
            return y
        elif self.quantization=='herd':
            return herd(y, shape=shape)
        else:
            raise Exception('No quantizer: {}'.format(self.quantization))
Project: pdnn    Author: petered    | Project source | File source
def decode(self, y, shape=None):
        xp = shared_like(y, name='xp') if shape is None else create_shared_variable(np.zeros(shape), name='xp{}'.format(shape))
        div = (self.kp+self.kd)
        x = (y+self.kd*xp)/div
        add_update(xp, x)
        return x
Project: pdnn    Author: petered    | Project source | File source
def herd(x, shape = None):
    phi = shared_like(x, name='phi') if shape is None else create_shared_variable(np.zeros(shape), name='phi{}'.format(shape))
    phi_ = phi + x
    s = tt.round(phi_)
    add_update(phi, phi_ - s)
    return s