Python theano.tensor module: lt() code examples

The following code examples, extracted from open-source Python projects, illustrate how to use theano.tensor.lt().
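As a quick primer before the project snippets: T.lt(a, b) is the elementwise "less than" comparison. It returns a symbolic int8 tensor holding 1 where a < b and 0 elsewhere, which is why the examples below routinely multiply its output with other tensors to build masks. A minimal sketch (all names are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
y = T.vector('y')
less = theano.function([x, y], T.lt(x, y))
a = np.array([1.0, 2.0, 3.0], dtype=theano.config.floatX)
b = np.array([3.0, 2.0, 1.0], dtype=theano.config.floatX)
print(less(a, b))  # [1 0 0] (dtype int8)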

Project: LiviaNET | Author: josedolz
def negativeLogLikelihoodWeighted(self, y, weightPerClass):      
        #Weighting the cost of the different classes in the cost-function, in order to counter class imbalance.
        e1 = np.finfo(np.float32).tiny
        addTinyProbMatrix = T.lt(self.p_y_given_x_train, 4*e1) * e1

        weights = weightPerClass.dimshuffle('x', 0, 'x', 'x', 'x')
        log_p_y_given_x_train = T.log(self.p_y_given_x_train + addTinyProbMatrix) 
        weighted_log_probs = log_p_y_given_x_train * weights

        wShape =  weighted_log_probs.shape

        # Re-arrange 
        idx0 = T.arange( wShape[0] ).dimshuffle( 0, 'x','x','x')
        idx2 = T.arange( wShape[2] ).dimshuffle('x', 0, 'x','x')
        idx3 = T.arange( wShape[3] ).dimshuffle('x','x', 0, 'x')
        idx4 = T.arange( wShape[4] ).dimshuffle('x','x','x', 0)

        return -T.mean( weighted_log_probs[ idx0, y, idx2, idx3, idx4] )
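The T.lt() call above builds a sparse additive guard: only entries of p_y_given_x_train below 4 * float32-tiny receive the tiny nudge, so T.log never sees an exact zero while larger probabilities are left untouched. A standalone sketch of the same guard, assuming nothing beyond NumPy and Theano:

import numpy as np
import theano
import theano.tensor as T

e1 = np.finfo(np.float32).tiny
p = T.vector('p')
safe_log = T.log(p + T.lt(p, 4 * e1) * e1)  # nudge only near-zero entries
f = theano.function([p], safe_log)
print(f(np.array([0.0, 0.5], dtype=theano.config.floatX)))  # [log(tiny), log(0.5)]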
Project: dict_based_learning | Author: tombosc
def _encode(self, application_call, text, mask, def_embs=None, def_map=None, text_name=None):
        if not self._random_unk:
            text = (
                tensor.lt(text, self._num_input_words) * text
                + tensor.ge(text, self._num_input_words) * self._vocab.unk)
        if text_name:
            application_call.add_auxiliary_variable(
                unk_ratio(text, mask, self._vocab.unk),
                name='{}_unk_ratio'.format(text_name))
        embs = self._lookup.apply(text)
        if self._random_unk:
            embs = (
                tensor.lt(text, self._num_input_words)[:, :, None] * embs
                + tensor.ge(text, self._num_input_words)[:, :, None] * disconnected_grad(embs))
        if def_embs:
            embs, _, _ = self._combiner.apply(embs, mask, def_embs, def_map)
        add_role(embs, EMBEDDINGS)
        encoded = flip01(
            self._encoder_rnn.apply(
                self._encoder_fork.apply(
                    flip01(embs)),
                mask=mask.T)[0])
        return encoded
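The short-listing expression at the top of _encode, tensor.lt(text, V) * text + tensor.ge(text, V) * unk, relies on the two comparison masks being complementary: word ids below the shortlist size V pass through unchanged, and everything else collapses to the UNK id. The same pattern in isolation (the shortlist size and UNK index below are made up):

import numpy as np
import theano
import theano.tensor as T

num_input_words = 5  # hypothetical shortlist size
unk = 0              # hypothetical UNK index

ids = T.imatrix('ids')
shortlisted = T.lt(ids, num_input_words) * ids + T.ge(ids, num_input_words) * unk
f = theano.function([ids], shortlisted)
print(f(np.array([[1, 4, 7], [9, 2, 5]], dtype='int32')))  # [[1 4 0] [0 2 0]]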
Project: dict_based_learning | Author: tombosc
def apply(self, application_call,
              defs, def_mask):
        """
        Returns a vector for each word in the sequence using the dictionary-based lookup.
        """
        # Short listing
        defs_sl_main = (T.lt(defs, self._num_input_words) * defs
                   + T.ge(defs, self._num_input_words) * self._vocab.unk)
        defs_sl_cache = (T.ge(defs, self._num_input_words) * defs
                   + T.lt(defs, self._num_input_words) * self._vocab.unk)

        application_call.add_auxiliary_variable(
            unk_ratio(defs_sl_main, def_mask, self._vocab.unk),
            name='def_unk_ratio')

        embedded_def_words = self._def_lookup.apply(defs_sl_main)
        cached_embeddings = self._cache.apply(defs_sl_cache) 
        final_embeddings = (T.lt(defs, self._num_input_words).dimshuffle(0,1,'x') * embedded_def_words
                + T.ge(defs, self._num_input_words).dimshuffle(0, 1, 'x') * cached_embeddings)

        def_embeddings = self._def_rnn.apply(
            T.transpose(self._def_fork.apply(final_embeddings), (1, 0, 2)),
            mask=def_mask.T)[0][-1]

        return def_embeddings
Project: deep-learning-for-genomics | Author: chgroenbech
def log_cross_entropy_extended(x, x_theta, log_distribution, k_max, eps = 0.0):

    p_k = x_theta["p_k"]

    F = x.shape[1]

    p_k = T.clip(p_k, eps, 1.0)
    x_k = T.clip(x, 0, k_max)

    p_k = T.reshape(p_k, (-1, k_max + 1))
    x_k = T.reshape(x_k, (-1, 1))

    y_cross_entropy = objectives.categorical_crossentropy(p_k, x_k)
    y_cross_entropy = T.reshape(y_cross_entropy, (-1, F))

    y_log_distribution = T.ge(x, k_max) * log_distribution(x - k_max, x_theta, eps)

    # y = - T.lt(x, 0) * y_cross_entropy + y_log_distribution
    y = - y_cross_entropy + T.lt(x, 0) * y_log_distribution
    # y = - y_cross_entropy + y_log_distribution

    return y
Project: DBQA-KBQA | Author: Lucien-qiang
def dynamic_k_max_pooling(input, sent_sizes, k_max_factor, k_max_final):
  """
    k_max_factor -- multiplied by sent_sizes to give the value of kmax for each sentence
  """
  # Unroll input into (batch_size x nchannels x nwords) x ndim
  nbatches, nchannels, nwords, ndim = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
  x = input.dimshuffle(0,1,3,2)

  sent_sizes = T.cast(T.ceil(sent_sizes * k_max_factor), dtype='int32')
  sent_sizes = T.maximum(sent_sizes, k_max_final)
  # sent_sizes_matrix = T.repeat(sent_sizes, nwords, axis=1)
  sent_sizes_matrix = T.repeat(sent_sizes.dimshuffle(0, 'x'), nwords, axis=1)

  idx = T.arange(nwords).dimshuffle('x', 0)
  idx_matrix = T.repeat(idx, nbatches, axis=0)

  sent_sizes_mask = T.lt(idx_matrix, sent_sizes_matrix)[:,::-1]

  neighborsArgSorted = T.argsort(x, axis=3)
  neighborsArgSorted_masked = ((neighborsArgSorted + 1) * sent_sizes_mask.dimshuffle(0,'x','x',1)) - 1
  neighborsArgSorted_masked_sorted = neighborsArgSorted_masked.sort(axis=3)

  nwords_max = T.cast(T.ceil(nwords * k_max_factor), 'int32')
  # print nwords_max.eval()
  neighborsArgSorted_masked_sorted_clipped = neighborsArgSorted_masked_sorted[:,:,:,-nwords_max:]

  ax0 = T.repeat(T.arange(nbatches), nchannels*ndim*nwords_max)
  ax1 = T.repeat(T.arange(nchannels), ndim * nwords_max).dimshuffle('x', 0)
  ax1 = T.repeat(ax1, nbatches, axis=0).flatten()
  ax2 = T.repeat(T.arange(ndim), nwords_max, axis=0).dimshuffle('x', 'x', 0)
  ax2 = T.repeat(ax2, nchannels, axis=1)
  ax2 = T.repeat(ax2, nbatches, axis=0).flatten()
  ax3 = neighborsArgSorted_masked_sorted_clipped.flatten()

  pooled_out = x[ax0, ax1, ax2, ax3]
  pooled_out = pooled_out.reshape((nbatches, nchannels, ndim, nwords_max)).dimshuffle(0,1,3,2)

  return pooled_out
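The T.lt() line is the heart of this pooling: comparing a 1 x nwords index row with per-sentence lengths produces a 0/1 mask marking the valid word positions, which then protects in-sentence argsort indices from being clipped away. A standalone sketch of just that masking step; it uses broadcasting where the snippet above materializes both operands with T.repeat, but the result is the same:

import numpy as np
import theano
import theano.tensor as T

nwords = 6
lengths = T.ivector('lengths')                 # one length per sentence
idx = T.arange(nwords).dimshuffle('x', 0)      # shape (1, nwords)
mask = T.lt(idx, lengths.dimshuffle(0, 'x'))   # shape (nbatches, nwords)
f = theano.function([lengths], mask)
print(f(np.array([2, 5], dtype='int32')))
# [[1 1 0 0 0 0]
#  [1 1 1 1 1 0]]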
Project: DEEP-CLICK-MODEL | Author: THUIR
def dynamic_k_max_pooling(input, sent_sizes, k_max_factor, k_max_final):
  """
    k_max_factor -- multiplied by sent_sizes to give the value of kmax for each sentence
  """
  # Unroll input into (batch_size x nchannels x nwords) x ndim
  nbatches, nchannels, nwords, ndim = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
  x = input.dimshuffle(0,1,3,2)

  sent_sizes = T.cast(T.ceil(sent_sizes * k_max_factor), dtype='int32')
  sent_sizes = T.maximum(sent_sizes, k_max_final)
  # sent_sizes_matrix = T.repeat(sent_sizes, nwords, axis=1)
  sent_sizes_matrix = T.repeat(sent_sizes.dimshuffle(0, 'x'), nwords, axis=1)

  idx = T.arange(nwords).dimshuffle('x', 0)
  idx_matrix = T.repeat(idx, nbatches, axis=0)

  sent_sizes_mask = T.lt(idx_matrix, sent_sizes_matrix)[:,::-1]

  neighborsArgSorted = T.argsort(x, axis=3)
  neighborsArgSorted_masked = ((neighborsArgSorted + 1) * sent_sizes_mask.dimshuffle(0,'x','x',1)) - 1
  neighborsArgSorted_masked_sorted = neighborsArgSorted_masked.sort(axis=3)

  nwords_max = T.cast(T.ceil(nwords * k_max_factor), 'int32')
  # print nwords_max.eval()
  neighborsArgSorted_masked_sorted_clipped = neighborsArgSorted_masked_sorted[:,:,:,-nwords_max:]

  ax0 = T.repeat(T.arange(nbatches), nchannels*ndim*nwords_max)
  ax1 = T.repeat(T.arange(nchannels), ndim * nwords_max).dimshuffle('x', 0)
  ax1 = T.repeat(ax1, nbatches, axis=0).flatten()
  ax2 = T.repeat(T.arange(ndim), nwords_max, axis=0).dimshuffle('x', 'x', 0)
  ax2 = T.repeat(ax2, nchannels, axis=1)
  ax2 = T.repeat(ax2, nbatches, axis=0).flatten()
  ax3 = neighborsArgSorted_masked_sorted_clipped.flatten()

  pooled_out = x[ax0, ax1, ax2, ax3]
  pooled_out = pooled_out.reshape((nbatches, nchannels, ndim, nwords_max)).dimshuffle(0,1,3,2)

  return pooled_out
Project: Theano-Deep-learning | Author: GeekLiB
def test_inequality_with_self(self):
        x = T.scalar('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison')

        f = theano.function([x], T.lt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.le(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.gt(x, x), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x, x), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.minimum(x, x), mode=mode)
        self.assert_identity(f)

        f = theano.function([x], T.maximum(x, x), mode=mode)
        self.assert_identity(f)
Project: deep-hashtagprediction | Author: jderiu
def dynamic_k_max_pooling(input, sent_sizes, k_max_factor, k_max_final):
  """
    k_max_factor -- multiplied by sent_sizes to give the value of kmax for each sentence
  """
  # Unroll input into (batch_size x nchannels x nwords) x ndim
  nbatches, nchannels, nwords, ndim = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
  x = input.dimshuffle(0,1,3,2)

  sent_sizes = T.cast(T.ceil(sent_sizes * k_max_factor), dtype='int32')
  sent_sizes = T.maximum(sent_sizes, k_max_final)
  # sent_sizes_matrix = T.repeat(sent_sizes, nwords, axis=1)
  sent_sizes_matrix = T.repeat(sent_sizes.dimshuffle(0, 'x'), nwords, axis=1)

  idx = T.arange(nwords).dimshuffle('x', 0)
  idx_matrix = T.repeat(idx, nbatches, axis=0)

  sent_sizes_mask = T.lt(idx_matrix, sent_sizes_matrix)[:,::-1]

  neighborsArgSorted = T.argsort(x, axis=3)
  neighborsArgSorted_masked = ((neighborsArgSorted + 1) * sent_sizes_mask.dimshuffle(0,'x','x',1)) - 1
  neighborsArgSorted_masked_sorted = neighborsArgSorted_masked.sort(axis=3)

  nwords_max = T.cast(T.ceil(nwords * k_max_factor), 'int32')
  # print nwords_max.eval()
  neighborsArgSorted_masked_sorted_clipped = neighborsArgSorted_masked_sorted[:,:,:,-nwords_max:]

  ax0 = T.repeat(T.arange(nbatches), nchannels*ndim*nwords_max)
  ax1 = T.repeat(T.arange(nchannels), ndim * nwords_max).dimshuffle('x', 0)
  ax1 = T.repeat(ax1, nbatches, axis=0).flatten()
  ax2 = T.repeat(T.arange(ndim), nwords_max, axis=0).dimshuffle('x', 'x', 0)
  ax2 = T.repeat(ax2, nchannels, axis=1)
  ax2 = T.repeat(ax2, nbatches, axis=0).flatten()
  ax3 = neighborsArgSorted_masked_sorted_clipped.flatten()

  pooled_out = x[ax0, ax1, ax2, ax3]
  pooled_out = pooled_out.reshape((nbatches, nchannels, ndim, nwords_max)).dimshuffle(0,1,3,2)

  return pooled_out
Project: Precise-CTC | Author: Michlong
def _editdist(s, t):
        """
        Levenshtein's edit distance function
        :param s: vector, source string
        :param t: vector, target string
        :return:  edit distance, scalar
        """
        def update(x, previous_row):
            current_row = previous_row + 1
            current_row = tensor.set_subtensor(current_row[1:], tensor.minimum(current_row[1:], tensor.add(previous_row[:-1], tensor.neq(target,x))))
            current_row = tensor.set_subtensor(current_row[1:], tensor.minimum(current_row[1:], current_row[0:-1] + 1))
            return current_row
        source, target = ifelse(tensor.lt(s.shape[0], t.shape[0]), (t, s), (s, t))
        previous_row = tensor.arange(target.size + 1, dtype=theano.config.floatX)
        result, updates = theano.scan(fn=update, sequences=source, outputs_info=previous_row, name='editdist')
        return result[-1,-1]
Project: lightML | Author: jfzhang95
def scale(X, max_norm):
    curr_norm = T.sum(T.abs_(X))
    return ifelse(T.lt(curr_norm, max_norm), X, max_norm * (X / curr_norm))
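T.lt() here drives ifelse, which evaluates lazily: the rescaling branch is only computed when the L1 norm actually exceeds max_norm. A hypothetical usage sketch, reusing the scale function defined above (the imports cover what its body needs):

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

X = T.vector('X')
clipped = scale(X, max_norm=5.0)
f = theano.function([X], clipped)
v = np.array([1.0, 2.0, 3.0], dtype=theano.config.floatX)
print(f(v))  # L1 norm is 6 > 5, so the vector is rescaled to [0.833, 1.667, 2.5]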
Project: keras | Author: GeekLiB
def lesser(x, y):
    return T.lt(x, y)
Project: dict_based_learning | Author: tombosc
def apply(self, application_call,
              defs, def_mask):
        """
        Returns a vector for each word in the sequence using the dictionary-based lookup.
        """
        # Short listing
        defs = (T.lt(defs, self._num_input_words) * defs
                + T.ge(defs, self._num_input_words) * self._vocab.unk)
        # Memory bottleneck:
        # For instance (16101,52,300) ~= 32GB.
        # [(16786, 52, 1), (16786, 52, 100)]
        # TODO: Measure memory consumption here and check if it is in sensible range
        # or maybe introduce some control in Retrieval?
        defs_emb = self._def_lookup.apply(defs)
        application_call.add_auxiliary_variable(
            unk_ratio(defs, def_mask, self._vocab.unk),
            name='def_unk_ratio')

        if self._translate:
            logger.info("Translating in MeanPoolReadDefinitions")
            # Translate. Crucial for recovering useful information from embeddings
            defs_emb = self._def_translate.apply(defs_emb)

        def_emb_mask = def_mask[:, :, None]
        defs_emb = (def_emb_mask * defs_emb).sum(axis=1)
        if self._normalize:
            defs_emb = defs_emb / def_emb_mask.sum(axis=1)

        return defs_emb
Project: deep-learning-keras-projects | Author: jasmeetsb
def lesser(x, y):
    return T.lt(x, y)
Project: NNBuilder | Author: aeloyq
def lt(self, l, r):
            return T.lt(l, r)
Project: deepAI | Author: kaiu85
def inner_fn_sample(stm1):

    prior_stmu = T.tanh( T.dot(Wl_stmu_stm1, stm1) + bl_stmu )
    prior_stsig = T.nnet.softplus( T.dot(Wl_stsig_stm1, stm1) + bl_stsig ) + sig_min_states

    # Set explicit prior on score during last time step
    #prior_stmu = ifelse(T.lt(t,n_run_steps - 5),prior_stmu, T.set_subtensor(prior_stmu[0,:],0.1))
    #prior_stsig = ifelse(T.lt(t,n_run_steps - 5),prior_stsig, T.set_subtensor(prior_stsig[0,:],0.001))    

    st = prior_stmu + theano_rng.normal((n_s,n_samples))*prior_stsig

    ost = T.nnet.relu( T.dot(Wl_ost_st,st) + bl_ost )
    ost2 = T.nnet.relu( T.dot(Wl_ost2_ost,ost) + bl_ost2 )
    ost3 = T.nnet.relu( T.dot(Wl_ost3_ost2,ost2) + bl_ost3 )

    otmu = T.dot(Wl_otmu_st, ost3) + bl_otmu
    otsig = T.nnet.softplus(T.dot(Wl_otsig_st, ost3) + bl_otsig) + sig_min_obs

    ohtmu = T.dot(Wl_ohtmu_st, ost3) + bl_ohtmu
    ohtsig = T.nnet.softplus( T.dot(Wl_ohtsig_st, ost3) + bl_ohtsig ) + sig_min_obs

    oatmu = T.dot(Wl_oatmu_st, ost3) + bl_oatmu
    oatsig = T.nnet.softplus( T.dot(Wl_oatsig_st, ost3) + bl_oatsig ) + sig_min_obs

    ot = otmu + theano_rng.normal((n_o,n_samples))*otsig
    oht = ohtmu + theano_rng.normal((n_oh,n_samples))*ohtsig   
    oat = oatmu + theano_rng.normal((n_oa,n_samples))*oatsig   

    return st, ohtmu, ohtsig, ot, oht, oat, prior_stmu, prior_stsig

# Define initial state and action
Project: keras-customized | Author: ambrite
def lesser(x, y):
    return T.lt(x, y)
Project: keras | Author: NVIDIA
def lesser(x, y):
    return T.lt(x, y)
Project: seq2graph | Author: masterkeywikz
def _get_updates_for(self, param, grad):
        grad_tm1 = shared_like(param, 'grad')
        step_tm1 = shared_like(param, 'step', self.learning_rate.eval())
        test = grad * grad_tm1
        diff = TT.lt(test, 0)
        steps = step_tm1 * (TT.eq(test, 0) +
                            TT.gt(test, 0) * self.step_increase +
                            diff * self.step_decrease)
        step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
        grad = grad - diff * grad
        yield param, param - TT.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step
Project: keras_superpixel_pooling | Author: parag2489
def less(x, y):
    return T.lt(x, y)
Project: dl4mt-cdec | Author: nyu-dl
def gradient_clipping(grads, tparams, clip_c=10):
    g2 = 0.
    for g in grads:
        g2 += (g**2).sum()

    g2 = tensor.sqrt(g2)
    not_finite = tensor.or_(tensor.isnan(g2), tensor.isinf(g2))
    new_grads = []

    for p, g in zip(tparams.values(), grads):
        new_grads.append(tensor.switch(g2 > clip_c,
                                       g * (clip_c / g2),
                                       g))

    return new_grads, not_finite, tensor.lt(clip_c, g2)
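The third return value, tensor.lt(clip_c, g2), is a 0/1 flag recording whether the global gradient norm exceeded the threshold; callers can return it from the training function to monitor how often clipping fires. A hedged usage sketch with a toy one-parameter model (every name below is a placeholder, not part of the original project):

import numpy
import theano
from theano import tensor
from collections import OrderedDict

w = theano.shared(numpy.ones(3, dtype=theano.config.floatX), name='w')
x = tensor.vector('x')
cost = tensor.sum((w * x) ** 2)
tparams = OrderedDict([('w', w)])
grads = tensor.grad(cost, wrt=list(tparams.values()))
# new_grads would feed an optimizer; here we only inspect the clipping flag.
new_grads, not_finite, was_clipped = gradient_clipping(grads, tparams, clip_c=10)
f = theano.function([x], [cost, was_clipped])
print(f(numpy.array([10.0, 0.0, 0.0], dtype=theano.config.floatX)))
# gradient norm is 200 > 10, so was_clipped is 1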
Project: Theano-Deep-learning | Author: GeekLiB
def test_shape_inequality_with_self(self):
        x = T.vector('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
                                                           'local_shape_to_shape_i',
                                                           'local_track_shape_i',
                                                           'local_subtensor_make_vector')
        f = theano.function([x], T.lt(x.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x], T.ge(x.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 1)

        f = theano.function([x], T.maximum(x.shape[0], 0), mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, Shape_i), topo[0].op
        x_val = numpy.ones(100, dtype=config.floatX)
        assert f(x_val) == x_val.shape[0]

        f = theano.function([x], T.maximum(0, x.shape[0]), mode=mode)
        topo = f.maker.fgraph.toposort()
        assert len(topo) == 1
        assert isinstance(topo[0].op, Shape_i), topo[0].op
        x_val = numpy.ones(100, dtype=config.floatX)
        assert f(x_val) == x_val.shape[0]

        f = theano.function([x], T.minimum(x.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 0)
        assert f(x_val) == 0

        f = theano.function([x], T.minimum(0, x.shape[0]), mode=mode)
        self.assert_eqs_const(f, 0)
        assert f(x_val) == 0
        f = theano.function([x], T.minimum([0, 0], x.shape[0]), mode=mode)
        # This case isn't optimized.
#        self.assert_eqs_const(f, 0)
        utt.assert_allclose(f(x_val), [0, 0])
Project: Theano-Deep-learning | Author: GeekLiB
def test_shape_add_inequality(self):
        x = T.vector('x', dtype=config.floatX)
        mode = theano.compile.get_default_mode().including('local_useless_elemwise_comparison',
                                                           'local_shape_to_shape_i',
                                                           'local_track_shape_i',
                                                           'local_subtensor_make_vector')

        y = T.vector('y', dtype=config.floatX)

        f = theano.function([x, y], T.lt(x.shape[0]+y.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 0)

        f = theano.function([x, y], T.ge(x.shape[0]+y.shape[0], 0), mode=mode)
        self.assert_eqs_const(f, 1)
Project: Theano-Deep-learning | Author: GeekLiB
def test_elemwise(self):
        # float Ops
        mats = theano.tensor.matrices('cabxy')
        c, a, b, x, y = mats
        s1 = T.switch(c, a, b)
        s2 = T.switch(c, x, y)
        for op in (T.add, T.sub, T.mul, T.true_div, T.int_div, T.floor_div,
                   T.minimum, T.maximum, T.gt, T.lt, T.ge, T.le, T.eq, T.neq,
                   T.pow):
            g = optimize(FunctionGraph(mats, [op(s1, s2)]))
            assert str(g).count('Switch') == 1
        # integer Ops
        mats = theano.tensor.imatrices('cabxy')
        c, a, b, x, y = mats
        s1 = T.switch(c, a, b)
        s2 = T.switch(c, x, y)
        for op in (T.and_, T.or_, T.xor,
                   T.bitwise_and, T.bitwise_or, T.bitwise_xor):
            g = optimize(FunctionGraph(mats, [op(s1, s2)]))
            assert str(g).count('Switch') == 1
        # add/mul with more than two inputs
        u, v = theano.tensor.matrices('uv')
        s3 = T.switch(c, u, v)
        for op in (T.add, T.mul):
            g = optimize(FunctionGraph(mats + [u, v], [op(s1, s2, s3)]))
            assert str(g).count('Switch') == 1
Project: Theano-Deep-learning | Author: GeekLiB
def test_ifelse(self):
        config1 = theano.config.profile
        config2 = theano.config.profile_memory

        try:
            theano.config.profile = True
            theano.config.profile_memory = True

            a, b = T.scalars('a', 'b')
            x, y = T.scalars('x', 'y')

            z = ifelse(T.lt(a, b), x * 2, y * 2)

            p = theano.ProfileStats(False)

            if theano.config.mode in ["DebugMode", "DEBUG_MODE", "FAST_COMPILE"]:
                m = "FAST_RUN"
            else:
                m = None

            f_ifelse = theano.function([a, b, x, y], z, profile=p, name="test_ifelse",
                                       mode=m)

            val1 = 0.
            val2 = 1.
            big_mat1 = 10
            big_mat2 = 11

            f_ifelse(val1, val2, big_mat1, big_mat2)

        finally:
            theano.config.profile = config1
            theano.config.profile_memory = config2
Project: Theano-Deep-learning | Author: GeekLiB
def test_elemwise_composite_float64():
    # Test that we don't fuse composite elemwise ops with float64 somewhere inside:
    # nvcc by default downcasts them to float32. We would need to tell it not
    # to do so, but that is possible only on some devices.
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    def get_all_basic_scalar(composite_op):
        l = []
        for i in composite_op.fgraph.toposort():
            if isinstance(i, theano.scalar.Composite):
                l += get_all_basic_scalar(i)
            else:
                l.append(i)
        return l
    for mode in [mode_with_gpu, mode_with_gpu.excluding('gpu_after_fusion'),
                 mode_with_gpu.excluding('elemwise_fusion')]:
        f = pfunc(
            [a, b],
            tensor.cast(
                tensor.lt(tensor.cast(a, 'float64') ** 2, b), 'float32'),
            mode=mode)

        out = f(av, bv)
        assert numpy.all(out == ((av ** 2) < bv))
        for node in f.maker.fgraph.toposort():
            if isinstance(node.op, cuda.GpuElemwise):
                if isinstance(node.op.scalar_op, theano.scalar.Composite):
                    scals = get_all_basic_scalar(node.op.scalar_op)
                    for s in scals:
                        assert not any([i.type.dtype == 'float64'
                                        for i in s.inputs + s.outputs])
Project: WaveNet | Author: ritheshkumar95
def unpool1d(input,upsample,desired_length,pool_indices=None):
    out = T.extra_ops.repeat(input,upsample,axis=2)[:,:,:desired_length]
    if pool_indices:
        mask = T.lt(pool_indices,0)
        return mask*out
    return out
Project: Hat | Author: qiuqiangkong
def lt(a, b):
    return T.lt(a, b)
Project: InnerOuterRNN | Author: Chemoinformatics
def lesser(x, y):
    return T.lt(x, y)
Project: CQA-CNN | Author: 3141bishwa
def dynamic_k_max_pooling(input, sent_sizes, k_max_factor, k_max_final):
  """
    k_max_factor -- multiplied by sent_sizes to give the value of kmax for each sentence
  """
  # Unroll input into (batch_size x nchannels x nwords) x ndim
  nbatches, nchannels, nwords, ndim = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
  x = input.dimshuffle(0,1,3,2)

  sent_sizes = T.cast(T.ceil(sent_sizes * k_max_factor), dtype='int32')
  sent_sizes = T.maximum(sent_sizes, k_max_final)
  # sent_sizes_matrix = T.repeat(sent_sizes, nwords, axis=1)
  sent_sizes_matrix = T.repeat(sent_sizes.dimshuffle(0, 'x'), nwords, axis=1)

  idx = T.arange(nwords).dimshuffle('x', 0)
  idx_matrix = T.repeat(idx, nbatches, axis=0)

  sent_sizes_mask = T.lt(idx_matrix, sent_sizes_matrix)[:,::-1]

  neighborsArgSorted = T.argsort(x, axis=3)
  neighborsArgSorted_masked = ((neighborsArgSorted + 1) * sent_sizes_mask.dimshuffle(0,'x','x',1)) - 1
  neighborsArgSorted_masked_sorted = neighborsArgSorted_masked.sort(axis=3)

  nwords_max = T.cast(T.ceil(nwords * k_max_factor), 'int32')
  # print nwords_max.eval()
  neighborsArgSorted_masked_sorted_clipped = neighborsArgSorted_masked_sorted[:,:,:,-nwords_max:]

  ax0 = T.repeat(T.arange(nbatches), nchannels*ndim*nwords_max)
  ax1 = T.repeat(T.arange(nchannels), ndim * nwords_max).dimshuffle('x', 0)
  ax1 = T.repeat(ax1, nbatches, axis=0).flatten()
  ax2 = T.repeat(T.arange(ndim), nwords_max, axis=0).dimshuffle('x', 'x', 0)
  ax2 = T.repeat(ax2, nchannels, axis=1)
  ax2 = T.repeat(ax2, nbatches, axis=0).flatten()
  ax3 = neighborsArgSorted_masked_sorted_clipped.flatten()

  pooled_out = x[ax0, ax1, ax2, ax3]
  pooled_out = pooled_out.reshape((nbatches, nchannels, ndim, nwords_max)).dimshuffle(0,1,3,2)

  return pooled_out
Project: Precise-CTC | Author: Michlong
def editdist(s, t):
    def update(x, previous_row):
        current_row = previous_row + 1
        current_row = tensor.set_subtensor(current_row[1:], tensor.minimum(current_row[1:], tensor.add(previous_row[:-1], tensor.neq(target,x))))
        current_row = tensor.set_subtensor(current_row[1:], tensor.minimum(current_row[1:], current_row[0:-1] + 1))
        return current_row
    source, target = ifelse(tensor.lt(s.shape[0], t.shape[0]), (t, s), (s, t))
    previous_row = tensor.arange(target.size + 1, dtype=theano.config.floatX)
    result, updates = theano.scan(fn = update, sequences=source, outputs_info=previous_row, name='editdist')
    return result[-1,-1]

# numpy version
# from [https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python, the 5th version]
Project: dl4mt-c2c | Author: nyu-dl
def gradient_clipping(grads, tparams, clip_c=10):
    g2 = 0.
    for g in grads:
        g2 += (g**2).sum()

    g2 = tensor.sqrt(g2)
    not_finite = tensor.or_(tensor.isnan(g2), tensor.isinf(g2))
    new_grads = []

    for p, g in zip(tparams.values(), grads):
        new_grads.append(tensor.switch(g2 > clip_c,
                                       g * (clip_c / g2),
                                       g))

    return new_grads, not_finite, tensor.lt(clip_c, g2)
Project: odin_old | Author: trungnt13
def lt(a, b):
    """a < b"""
    return T.lt(a, b)
Project: hnmt | Author: robertostling
def encode(self, inputs, inputs_mask, chars, chars_mask):
        # First run a bidirectional LSTM encoder over the unknown word
        # character sequences.
        embedded_chars = self.src_char_embeddings(chars)
        fwd_char_h_seq, fwd_char_c_seq = self.fwd_char_encoder(
                embedded_chars, chars_mask)
        back_char_h_seq, back_char_c_seq = self.back_char_encoder(
                T.concatenate([embedded_chars, fwd_char_h_seq], axis=-1),
                chars_mask)

        # Concatenate the final states of the forward and backward character
        # encoders. These form a matrix of size:
        #   n_chars x src_embedding_dims
        # NOTE: the batch size here is n_chars, which is the total number of
        # unknown words in all the sentences in the inputs matrix.
        # Create an empty matrix if there are no unknown words
        # (e.g. pure word-level encoder)
        char_vectors = theano.ifelse.ifelse(T.gt(chars.shape[0], 0),
                T.concatenate([fwd_char_h_seq[-1], back_char_h_seq[0]], axis=-1),
                T.zeros([0, self.config['src_embedding_dims']],
                dtype=theano.config.floatX))

        # Compute separate masks for known words (with input symbol >= 0)
        # and unknown words (with input symbol < 0).
        known_mask = inputs_mask * T.ge(inputs, 0)
        unknown_mask = inputs_mask * T.lt(inputs, 0)
        # Split the inputs matrix into two, one indexing unknown words (from
        # the char_vectors matrix) and the other known words (from the source
        # word embeddings).
        unknown_indexes = (-inputs-1) * unknown_mask
        known_indexes = inputs * known_mask

        # Compute the final embedding sequence by mixing the known word
        # vectors with the character encoder output of the unknown words.
        # If there is no character encoder, just use the known word vectors.
        embedded_unknown = char_vectors[unknown_indexes]
        embedded_known = self.src_embeddings(known_indexes)
        embedded_inputs = theano.ifelse.ifelse(T.gt(chars.shape[0], 0),
                (unknown_mask.dimshuffle(0,1,'x').astype(
                    theano.config.floatX) * embedded_unknown) + \
                (known_mask.dimshuffle(0,1,'x').astype(
                    theano.config.floatX) * embedded_known),
                known_mask.dimshuffle(0,1,'x').astype(
                    theano.config.floatX) * embedded_known)

        # Forward encoding pass
        fwd_h_seq, fwd_c_seq = self.fwd_encoder(embedded_inputs, inputs_mask)
        # Backward encoding pass, using hidden states from forward encoder
        back_h_seq, back_c_seq = self.back_encoder(
                T.concatenate([embedded_inputs, fwd_h_seq], axis=-1),
                inputs_mask)
        # Initial states for decoder
        h_0 = T.tanh(self.proj_h0(back_h_seq[0]))
        c_0 = T.tanh(self.proj_c0(back_c_seq[0]))
        # Attention on concatenated forward/backward sequences
        attended = T.concatenate([fwd_h_seq, back_h_seq], axis=-1)
        return h_0, c_0, attended
Project: deepAI | Author: kaiu85
def inner_fn(t, stm1, oat, ot, oht, pos, vt):

    hst =  T.nnet.relu( T.dot(Wq_hst_stm1,stm1) + T.dot(Wq_hst_ot,ot) + T.dot(Wq_hst_oht,oht) + T.dot(Wq_hst_oat,oat) + bq_hst )
    hst2 =  T.nnet.relu( T.dot(Wq_hst2_hst,hst) + bq_hst2 )

    stmu =  T.tanh( T.dot(Wq_stmu_hst2,hst2) + bq_stmu )
    stsig = T.nnet.softplus( T.dot(Wq_stsig_hst2,hst2) + bq_stsig ) + sig_min_states

    # Rescale representation to fit within linear response of the tanh-nonlinearity
    stmu = T.set_subtensor(stmu[0,:],0.1*ot[0,:])
    stsig = T.set_subtensor(stsig[0,:],0.01)

    st = stmu + theano_rng.normal((n_s,n_proc))*stsig

    ost = T.nnet.relu( T.dot(Wl_ost_st,st) + bl_ost )
    ost2 = T.nnet.relu( T.dot(Wl_ost2_ost,ost) + bl_ost2 )
    ost3 = T.nnet.relu( T.dot(Wl_ost3_ost2,ost2) + bl_ost3 )

    otmu = T.dot(Wl_otmu_st, ost3) + bl_otmu
    otsig = T.nnet.softplus(T.dot(Wl_otsig_st, ost3) + bl_otsig) + sig_min_obs

    ohtmu = T.dot(Wl_ohtmu_st, ost3) + bl_ohtmu
    ohtsig = T.nnet.softplus( T.dot(Wl_ohtsig_st, ost3) + bl_ohtsig ) + sig_min_obs

    oatmu = T.dot(Wl_oatmu_st, ost3) + bl_oatmu
    oatsig = T.nnet.softplus( T.dot(Wl_oatsig_st, ost3) + bl_oatsig ) + sig_min_obs

    p_ot  = GaussianNLL(ot, otmu, otsig)
    p_oht = GaussianNLL(oht, ohtmu, ohtsig)    
    p_oat = GaussianNLL(oat, oatmu, oatsig)

    prior_stmu = T.tanh( T.dot(Wl_stmu_stm1, stm1) + bl_stmu )
    prior_stsig = T.nnet.softplus( T.dot(Wl_stsig_stm1, stm1) + bl_stsig ) + sig_min_states

    prior_stmu = ifelse(T.lt(t,20),prior_stmu, T.set_subtensor(prior_stmu[0,:],0.1))
    prior_stsig = ifelse(T.lt(t,20),prior_stsig, T.set_subtensor(prior_stsig[0,:],0.01))    

    KL_st = KLGaussianGaussian(stmu, stsig, prior_stmu, prior_stsig)

    FEt =  KL_st + p_ot + p_oht + p_oat

    oat_mu = T.dot(Wa_atmu_st, st) + ba_atmu
    oat_sig = T.nnet.softplus( T.dot(Wa_atsig_st, st) + ba_atsig ) + sig_min_action

    oat_new = 0.0*oat + oat_mu + theano_rng.normal((n_oa,n_proc))*oat_sig

    action_force = T.tanh( oat_new )
    force = T.switch(T.lt(pos,0.0),-2*pos - 1,-T.pow(1+5*T.sqr(pos),-0.5)-T.sqr(pos)*T.pow(1 + 5*T.sqr(pos),-1.5)-T.pow(pos,4)/16.0) - 0.25*vt
    vt_new = vt + 0.05*force + 0.03*action_force
    pos_new = pos + vt_new     

    ot_new = pos_new + theano_rng.normal((n_o,n_samples))*0.01

    oht_new = T.exp(-T.sqr(pos_new-1.0)/2.0/0.3/0.3)

    return st, oat_new, ot_new, oht_new, pos_new, vt_new, FEt, KL_st, stmu, stsig, force, p_ot, p_oht, p_oat