Python tensorflow module: while_loop() source code examples

We have extracted the following 50 code examples from open-source Python projects to illustrate how to use tensorflow.while_loop().
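
Before the project excerpts, here is a minimal, self-contained sketch of a basic tf.while_loop call in the TF 1.x graph style used throughout these examples; all names in it are illustrative.

import tensorflow as tf

# Minimal sketch: sum the integers 0..9 with tf.while_loop.
# cond and body take and return the loop variables in the same order,
# with the same dtypes and shapes.
i = tf.constant(0)
total = tf.constant(0)

cond = lambda i, total: tf.less(i, 10)
body = lambda i, total: (i + 1, total + i)

final_i, final_total = tf.while_loop(cond, body, loop_vars=[i, total])

with tf.Session() as sess:
    print(sess.run([final_i, final_total]))  # [10, 45]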

Project: zhusuan    Author: thu-ml    | project source | file source
def _leapfrog(self, q, p, step_size, get_gradient, mass):
        def loop_cond(i, q, p):
            return i < self.n_leapfrogs + 1

        def loop_body(i, q, p):
            step_size1 = tf.cond(i > 0,
                                 lambda: step_size,
                                 lambda: tf.constant(0.0, dtype=tf.float32))

            step_size2 = tf.cond(tf.logical_and(tf.less(i, self.n_leapfrogs),
                                                tf.less(0, i)),
                                 lambda: step_size,
                                 lambda: step_size / 2)

            q, p = leapfrog_integrator(q, p, step_size1, step_size2,
                                       lambda q: get_gradient(q), mass)
            return [i + 1, q, p]

        i = tf.constant(0)
        _, q, p = tf.while_loop(loop_cond,
                                loop_body,
                                [i, q, p],
                                back_prop=False,
                                parallel_iterations=1)
        return q, p
Project: a-nice-mc    Author: ermongroup    | project source | file source
def simulate_dynamics(initial_pos, initial_vel, stepsize, n_steps, energy_fn):
    def leapfrog(pos, vel, step, i):
        de_dp_ = tf.gradients(tf.reduce_sum(energy_fn(pos)), pos)[0]
        new_vel_ = vel - step * de_dp_
        new_pos_ = pos + step * new_vel_
        return [new_pos_, new_vel_, step, tf.add(i, 1)]

    def condition(pos, vel, step, i):
        return tf.less(i, n_steps)

    de_dp = tf.gradients(tf.reduce_sum(energy_fn(initial_pos)), initial_pos)[0]
    vel_half_step = initial_vel - 0.5 * stepsize * de_dp
    pos_full_step = initial_pos + stepsize * vel_half_step

    i = tf.constant(0)
    final_pos, new_vel, _, _ = tf.while_loop(condition, leapfrog, [pos_full_step, vel_half_step, stepsize, i])
    de_dp = tf.gradients(tf.reduce_sum(energy_fn(final_pos)), final_pos)[0]
    final_vel = new_vel - 0.5 * stepsize * de_dp
    return final_pos, final_vel
Project: tensorflow-adversarial    Author: gongzhitaao    | project source | file source
def _deepfool2(model, x, epochs, eta, clip_min, clip_max, min_prob):
    y0 = tf.stop_gradient(tf.reshape(model(x), [-1])[0])
    y0 = tf.to_int32(tf.greater(y0, 0.5))

    def _cond(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.stop_gradient(tf.reshape(model(xadv), [-1])[0])
        y = tf.to_int32(tf.greater(y, 0.5))
        return tf.logical_and(tf.less(i, epochs), tf.equal(y0, y))

    def _body(i, z):
        xadv = tf.clip_by_value(x + z*(1+eta), clip_min, clip_max)
        y = tf.reshape(model(xadv), [-1])[0]
        g = tf.gradients(y, xadv)[0]
        dx = - y * g / tf.norm(g)
        return i+1, z+dx

    _, noise = tf.while_loop(_cond, _body, [0, tf.zeros_like(x)],
                             name='_deepfool2_impl', back_prop=False)
    return noise
Project: sonnet    Author: deepmind    | project source | file source
def _body(self, x, cumul_out, prev_state, cumul_state,
            cumul_halting, iteration, remainder, halting_linear, x_ones):
    """The `body` of `tf.while_loop`."""
    # Increase iteration count only for those elements that are still running.
    all_ones = tf.constant(1, shape=(self._batch_size, 1), dtype=self._dtype)
    is_iteration_over = tf.equal(cumul_halting, all_ones)
    next_iteration = tf.where(is_iteration_over, iteration, iteration + 1)
    out, next_state = self._core(x, prev_state)
    # Get part of state used to compute halting values.
    halting_input = halting_linear(self._get_state_for_halting(next_state))
    halting = tf.sigmoid(halting_input, name="halting")
    next_cumul_halting_raw = cumul_halting + halting
    over_threshold = next_cumul_halting_raw > self._threshold
    next_cumul_halting = tf.where(over_threshold, all_ones,
                                  next_cumul_halting_raw)
    next_remainder = tf.where(over_threshold, remainder,
                              1 - next_cumul_halting_raw)
    p = next_cumul_halting - cumul_halting
    next_cumul_state = _nested_add(cumul_state,
                                   _nested_unary_mul(next_state, p))
    next_cumul_out = cumul_out + p * out

    return (x_ones, next_cumul_out, next_state, next_cumul_state,
            next_cumul_halting, next_iteration, next_remainder)
Project: LiTeFlow    Author: petrux    | project source | file source
def _call_helper(self):
        time = tf.constant(0, dtype=tf.int32)
        inp = self._decoder.init_input()
        state = self._decoder.init_state()
        finished = tf.tile([False], [utils.get_dimension(inp, 0)])
        output_ta = tf.TensorArray(dtype=tf.float32, size=0, dynamic_size=True)
        loop_vars = [time, inp, state, finished, output_ta]
        results = tf.while_loop(
            cond=self.cond, body=self.body, loop_vars=loop_vars,
            parallel_iterations=self._parallel_iterations,
            swap_memory=self._swap_memory)
        output_ta = results[-1]
        output = output_ta.stack()
        output = tf.transpose(output, [1, 0, 2])
        state = results[2]
        return output, state
Project: hart    Author: akosiorek    | project source | file source
def broadcast_against(tensor, against_expr):
    """Adds trailing dimensions to mask to enable broadcasting against data

    :param tensor: tensor to be broadcasted
    :param against_expr: tensor will be broadcasted against it
    :return: mask expr with tf.rank(mask) == tf.rank(data)
    """

    def cond(data, tensor):
        return tf.less(tf.rank(tensor), tf.rank(data))

    def body(data, tensor):
        return data, tf.expand_dims(tensor, -1)

    shape_invariants = [against_expr.get_shape(), tf.TensorShape(None)]
    _, tensor = tf.while_loop(cond, body, [against_expr, tensor], shape_invariants)
    return tensor
Project: tensorflow-rl    Author: steveKapturowski    | project source | file source
def _build_ops(self):
        i0 = tf.constant(0, dtype=tf.int32)
        loop_condition = lambda i, inputs, state: tf.less(i, self.max_steps)

        def body(i, inputs, full_state):
            idx = i % self.num_cores
            prev_state = full_state[idx]
            inputs, full_state[idx] = self.shared_cell(inputs, prev_state)

            return i+1, inputs, full_state

        _, inputs, full_state = tf.while_loop(
            loop_condition,
            body,
            loop_vars=[i0,
                       self.inputs,
                       self.initial_state])
Project: neuralmonkey    Author: ufal    | project source | file source
def empty_attention_loop_state() -> AttentionLoopStateTA:
    """Create an empty attention loop state.

    The attention loop state is a technical object for storing the attention
    distributions and the context vectors in time. It is used with the
    ``tf.while_loop`` dynamic implementation of the decoder.

    This function returns an empty attention loop state which means there are
    two empty arrays, one for attention distributions in time, and one for
    the attention context vectors in time.
    """
    return AttentionLoopStateTA(
        contexts=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="contexts"),
        weights=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="distributions", clear_after_read=False))
Project: neuralmonkey    Author: ufal    | project source | file source
def empty_attention_loop_state() -> AttentionLoopStateTA:
    """Create an empty attention loop state.

    The attention loop state is a technical object for storing the attention
    distributions and the context vectors in time. It is used with the
    ``tf.while_loop`` dynamic implementation of the decoder.

    This function returns an empty attention loop state which means there are
    two empty arrays, one for attention distributions in time, and one for
    the attention context vectors in time.
    """
    return AttentionLoopStateTA(
        contexts=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="contexts"),
        weights=tf.TensorArray(
            dtype=tf.float32, size=0, dynamic_size=True,
            name="distributions", clear_after_read=False))
Project: learning-to-learn    Author: deepmind    | project source | file source
def testWhileLoopProblem(self):
    """Tests L2L applied to problem with while loop."""
    def while_loop_problem():
      x = tf.get_variable("x", shape=[], initializer=tf.ones_initializer())

      # Strange way of squaring the variable.
      _, x_squared = tf.while_loop(
          cond=lambda t, _: t < 1,
          body=lambda t, x: (t + 1, x * x),
          loop_vars=(0, x),
          name="loop")
      return x_squared

    optimizer = meta.MetaOptimizer(net=dict(
        net="CoordinateWiseDeepLSTM",
        net_options={"layers": ()}))
    minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      train(sess, minimize_ops, 1, 2)
Project: MGP-RNN    Author: jfutoma    | project source | file source
def get_probs_and_accuracy(preds,O):
    """
    helper function. we have a prediction for each MC sample of each observation
    in this batch.  need to distill the multiple preds from each MC into a single
    pred for this observation.  also get accuracy. use true probs to get ROC, PR curves in sklearn
    """
    all_probs = tf.exp(preds[:,1] - tf.reduce_logsumexp(preds, axis = 1)) #normalize; and drop a dim so only prob of positive case
    N = tf.cast(tf.shape(preds)[0]/n_mc_smps,tf.int32) #actual number of observations in preds, collapsing MC samples                    

    #predicted probability per observation; collapse the MC samples
    probs = tf.zeros([0]) #store all samples in a list, then concat into tensor at end
    #setup tf while loop (have to use this bc loop size is variable)
    def cond(i,probs):
        return i < N
    def body(i,probs):
        probs = tf.concat([probs,[tf.reduce_mean(tf.slice(all_probs,[i*n_mc_smps],[n_mc_smps]))]],0)
        return i+1,probs    
    i = tf.constant(0)
    i,probs = tf.while_loop(cond,body,loop_vars=[i,probs],shape_invariants=[i.get_shape(),tf.TensorShape([None])])

    #compare to truth; just use cutoff of 0.5 for right now to get accuracy
    correct_pred = tf.equal(tf.cast(tf.greater(probs,0.5),tf.int32), O)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 
    return probs,accuracy
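
The shape_invariants argument above is what lets probs grow by one element per iteration; a stripped-down sketch of that pattern, with illustrative names only:

import tensorflow as tf

# Minimal sketch: a loop variable whose leading dimension grows each iteration
# must be declared with a partially unknown shape via shape_invariants.
i = tf.constant(0)
vals = tf.zeros([0])  # empty 1-D tensor that will be extended

def cond(i, vals):
    return tf.less(i, 4)

def body(i, vals):
    vals = tf.concat([vals, [tf.to_float(i)]], axis=0)
    return i + 1, vals

_, vals = tf.while_loop(
    cond, body, loop_vars=[i, vals],
    shape_invariants=[i.get_shape(), tf.TensorShape([None])])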
Project: Constituent-Centric-Neural-Architecture-for-Reading-Comprehension    Author: shrshore    | project source | file source
def get_candidates_representations_in_sentence(self, sentence_candidate_answers, sentence_attentioned_hidden_states):
        candidate_answer_num=tf.gather(tf.shape(sentence_candidate_answers), 0)
        logging.warn('candidate_answer_num:{}'.format(candidate_answer_num))
        logging.warn('sentence_candidate_answers:{}'.format(sentence_candidate_answers))
        candidate_answer_nodeids=tf.gather(sentence_candidate_answers, 0) #a node idx list
        candidate_answer_hidden_list=tf.gather(sentence_attentioned_hidden_states, candidate_answer_nodeids)
        candidate_final_representations=self.get_candidate_answer_final_representations(candidate_answer_hidden_list)
        candidates_final_representations=tf.expand_dims(candidate_final_representations, 0)
        idx_cand=tf.constant(1)
        def _recurse_candidate_answer(candidate_final_representations, idx_cand):
            cur_candidate_answer_nodeids=tf.gather(sentence_candidate_answers, idx_cand)
            cur_candidate_answer_hidden_list=tf.gather(sentence_attentioned_hidden_states, cur_candidate_answer_nodeids)
            cur_candidate_final_representations=tf.expand_dims( 
                self.get_candidate_answer_final_representations(cur_candidate_answer_hidden_list), 0)
            candidate_final_representations=tf.concat([candidate_final_representations, cur_candidate_final_representations], axis=0)
            idx_cand=tf.add(idx_cand,1)
            return candidate_final_representations, idx_cand
        loop_cond=lambda a1,idx:tf.less(idx, candidate_answer_num)
        loop_vars=[candidates_final_representations, idx_cand]
        candidates_final_representations, idx_cand=tf.while_loop(loop_cond, _recurse_candidate_answer, loop_vars,
            shape_invariants=[tf.TensorShape([None, 2*self.config.hidden_dim]),idx_cand.get_shape()])
        return candidates_final_representations
Project: tf-tutorial    Author: zchen0211    | project source | file source
def testWhileLoopProblem(self):
    """Tests L2L applied to problem with while loop."""
    def while_loop_problem():
      x = tf.get_variable("x", shape=[], initializer=tf.ones_initializer())

      # Strange way of squaring the variable.
      _, x_squared = tf.while_loop(
          cond=lambda t, _: t < 1,
          body=lambda t, x: (t + 1, x * x),
          loop_vars=(0, x),
          name="loop")
      return x_squared

    optimizer = meta.MetaOptimizer(net=dict(
        net="CoordinateWiseDeepLSTM",
        net_options={"layers": ()}))
    minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      train(sess, minimize_ops, 1, 2)
Project: section-detection    Author: gulfaraz    | project source | file source
def meanShift(n_updates=-1):
    X1 = tf.expand_dims(tf.transpose(input_X), 0)
    X2 = tf.expand_dims(input_X, 0)
    C = init_C

    sbs_C = tf.TensorArray(dtype=tf.float32, size=10000, infer_shape=False)
    sbs_C = sbs_C.write(0, init_C)

    def _mean_shift_step(C):
        C = tf.expand_dims(C, 2)
        Y = tf.reduce_sum(tf.pow((C - X1) / window_radius, 2), axis=1)
        gY = tf.exp(-Y)
        num = tf.reduce_sum(tf.expand_dims(gY, 2) * X2, axis=1)
        denom = tf.reduce_sum(gY, axis=1, keep_dims=True)
        C = num / denom
        return C

    if n_updates > 0:
        for i in range(n_updates):
            C = _mean_shift_step(C)
            sbs_C = sbs_C.write(i + 1, C)
    else:
        def _mean_shift(i, C, sbs_C, max_diff):
            new_C = _mean_shift_step(C)
            max_diff = tf.reshape(tf.reduce_max(tf.sqrt(tf.reduce_sum(tf.pow(new_C - C, 2), axis=1))), [])
            sbs_C = sbs_C.write(i + 1, new_C)
            return i + 1, new_C, sbs_C, max_diff

        def _cond(i, C, sbs_C, max_diff):
            return max_diff > 1e-5

        n_updates, C, sbs_C, _ = tf.while_loop(cond=_cond,
                                       body=_mean_shift,
                                       loop_vars=(tf.constant(0), C, sbs_C, tf.constant(1e10)))

        n_updates = tf.Print(n_updates, [n_updates])


    return C, sbs_C.gather(tf.range(n_updates + 1))
Project: cwt-tensorflow    Author: nickgeoca    | project source | file source
def cwt(wav, widthCwt, wavelet):
    length = wav.shape[0]
    wav = tf.to_float(wav)
    wav = tf.reshape(wav, [1,length,1,1])

    # While loop functions
    def body(i, m): 
        v = conv1DWavelet(wav, i, wavelet)
        v = tf.reshape(v, [length, 1])

        m = tf.concat([m,v], 1)

        return [1 + i, m]

    def cond_(i, m):
        return tf.less_equal(i, widthCwt)

    # Initialize and run while loop
    emptyCwtMatrix = tf.zeros([length, 0], dtype='float32') 
    i = tf.constant(1)
    _, result = tf.while_loop(
            cond_,
            body,
            [i, emptyCwtMatrix],
            shape_invariants=[i.get_shape(), tf.TensorShape([length, None])],
            back_prop=False,
            parallel_iterations=1024,
            )
    result = tf.transpose(result)

    return result

# ------------------------------------------------------
#                 wavelets
Project: DNC    Author: bgavran    | project source | file source
def init_memory(self, batch_size):
        """
        Returns the memory state for step 0. Used in DNC for the argument to tf.while_loop

        :return: 
        """
        read_weightings = tf.fill([batch_size, self.memory_size, self.num_read_heads], Memory.epsilon)
        write_weighting = tf.fill([batch_size, self.memory_size], Memory.epsilon, name="Write_weighting")
        precedence_weighting = tf.zeros([batch_size, self.memory_size], name="Precedence_weighting")
        m = tf.fill([batch_size, self.memory_size, self.word_size], Memory.epsilon)  # initial memory matrix
        usage_vector = tf.zeros([batch_size, self.memory_size], name="Usage_vector")
        link_matrix = tf.zeros([batch_size, self.memory_size, self.memory_size])
        read_vectors = tf.fill([batch_size, self.num_read_heads, self.word_size], Memory.epsilon)

        return [read_weightings, write_weighting, usage_vector, precedence_weighting, m, link_matrix, read_vectors]
Project: DNC    Author: bgavran    | project source | file source
def step(self, x, state, step):
        """
        Returns the output vector for just one time step.
        But I'm not sure anymore how much of this still works, because of the way tf.while_loop is implemented...

        :param x: one vector representing input for one time step
        :param state: state of the controller
        :param step: current time step
        :return: output of the controller and its current state
        """
        raise NotImplementedError()
Project: tf_classification    Author: visipedia    | project source | file source
def get_distorted_inputs(original_image, bboxes, cfg, add_summaries):

    distorter = DistortedInputs(cfg, add_summaries)
    num_bboxes = tf.shape(bboxes)[0]
    distorted_inputs = tf.TensorArray(
        dtype=tf.float32,
        size=num_bboxes,
        element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
    )

    if add_summaries:
        image_summaries = tf.TensorArray(
            dtype=tf.float32,
            size=4,
            element_shape=tf.TensorShape([1, cfg.INPUT_SIZE, cfg.INPUT_SIZE, 3])
        )
    else:
        image_summaries = tf.constant([])

    current_index = tf.constant(0, dtype=tf.int32)

    loop_vars = [original_image, bboxes, distorted_inputs, image_summaries, current_index]
    original_image, bboxes, distorted_inputs, image_summaries, current_index = tf.while_loop(
        cond=bbox_crop_loop_cond,
        body=distorter.apply,
        loop_vars=loop_vars,
        parallel_iterations=10, back_prop=False, swap_memory=False
    )

    distorted_inputs = distorted_inputs.concat()

    if add_summaries:
        tf.summary.image('0.original_image', image_summaries.read(0))
        tf.summary.image('1.image_with_random_crop', image_summaries.read(1))
        tf.summary.image('2.cropped_resized_image', image_summaries.read(2))
        tf.summary.image('3.final_distorted_image', image_summaries.read(3))


    return distorted_inputs
Project: fold    Author: tensorflow    | project source | file source
def eval(self, inp, feed_dict=None, session=None, tolist=False,
           use_while_loop=True):
    """Evaluates this block on `inp` in a TF session.

    Intended for testing and interactive development. If there are any
    uninitialized variables, they will be initialized prior to evaluation.

    Args:
      inp: An input to the block.
      feed_dict: A dictionary that maps `Tensor` objects to feed values.
      session: The TF session to be used. Defaults to the default session.
      tolist: A bool; whether to return (possibly nested) Python lists
        in place of NumPy arrays.
      use_while_loop: A bool; whether to use a `tf.while_loop` in evaluation
        (default) or to unroll the loop. Provided for testing and debugging,
        should not affect the result.

    Returns:
      The result of running the block. If `output_type` is tensor, then a
      NumPy array (or Python list, if `tolist` is true). If a tuple, then a
      tuple. If a sequence, then a list, or an instance of itertools.repeat
      in the case of an infinite sequence. If metrics are defined then `eval`
      returns a `(result, metrics)` tuple, where `metrics` is a dict mapping
      metric names to NumPy arrays.

    Raises:
      ValueError: If `session` is none and no default session is registered.
        If the block contains no TF tensors or ops then a session is not
        required.
    """
    # pylint: disable=protected-access
    return tensorflow_fold.blocks.block_compiler.Compiler._interactive(  # pylint: disable=line-too-long
        self)._eval(inp, feed_dict, session, tolist, use_while_loop)
Project: zhusuan    Author: thu-ml    | project source | file source
def _init_step_size(self, q, p, mass, get_gradient, get_log_posterior):
        factor = 1.5

        def loop_cond(step_size, last_acceptance_rate, cond):
            return cond

        def loop_body(step_size, last_acceptance_rate, cond):
            # Calculate acceptance_rate
            new_q, new_p = leapfrog_integrator(
                q, p, tf.constant(0.0), step_size / 2,
                get_gradient, mass)
            new_q, new_p = leapfrog_integrator(
                new_q, new_p, step_size, step_size / 2,
                get_gradient, mass)
            __, _, _, _, acceptance_rate = get_acceptance_rate(
                q, p, new_q, new_p,
                get_log_posterior, mass, self.data_axes)

            acceptance_rate = tf.reduce_mean(acceptance_rate)

            # Change step size and stopping criteria
            new_step_size = tf.cond(
                tf.less(acceptance_rate,
                        self.target_acceptance_rate),
                lambda: step_size * (1.0 / factor),
                lambda: step_size * factor)

            cond = tf.logical_not(tf.logical_xor(
                tf.less(last_acceptance_rate, self.target_acceptance_rate),
                tf.less(acceptance_rate, self.target_acceptance_rate)))
            return [new_step_size, acceptance_rate, cond]

        new_step_size, _, _ = tf.while_loop(
            loop_cond,
            loop_body,
            [self.step_size, tf.constant(1.0), tf.constant(True)]
        )
        return new_step_size
Project: zhusuan    Author: thu-ml    | project source | file source
def _sample(self, n_samples):
        try:
            # tf.random_poisson is implemented after v1.2
            random_poisson = tf.random_poisson
        except AttributeError:
            # This algorithm to generate random Poisson-distributed numbers is
            # given by Knuth [1]
            # [1]: https://en.wikipedia.org/wiki/
            #      Poisson_distribution#Generating_Poisson-distributed_random_variables
            shape = tf.concat([[n_samples], self.batch_shape], 0)
            static_n_samples = n_samples if isinstance(n_samples,
                                                       int) else None
            static_shape = tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape())
            enlam = tf.exp(-self.rate)
            x = tf.zeros(shape, dtype=self.dtype)
            prod = tf.ones(shape, dtype=self.param_dtype)

            def loop_cond(prod, x):
                return tf.reduce_any(tf.greater_equal(prod, enlam))

            def loop_body(prod, x):
                prod *= tf.random_uniform(tf.shape(prod), minval=0, maxval=1)
                x += tf.cast(tf.greater_equal(prod, enlam), dtype=self.dtype)
                return prod, x

            _, samples = tf.while_loop(
                loop_cond, loop_body, loop_vars=[prod, x],
                shape_invariants=[static_shape, static_shape])

            samples.set_shape(static_shape)
        else:
            samples = random_poisson(self.rate, [n_samples],
                                     dtype=self.param_dtype)
            if self.param_dtype != self.dtype:
                samples = tf.cast(samples, self.dtype)
        return samples
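
For reference, the Knuth algorithm that the fallback branch above vectorizes can be written in plain Python roughly as follows (a textbook sketch, not zhusuan code):

import math
import random

def knuth_poisson(rate):
    # Multiply uniform draws until the running product drops below exp(-rate);
    # the number of multiplications after which it was still >= exp(-rate)
    # is the Poisson sample, matching the prod/x update in the TF loop above.
    enlam = math.exp(-rate)
    k = 0
    prod = 1.0
    while prod >= enlam:
        prod *= random.random()
        k += 1
    return k - 1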
Project: sonnet    Author: deepmind    | project source | file source
def _cond(self, unused_x, unused_cumul_out, unused_prev_state,
            unused_cumul_state, cumul_halting, unused_iteration,
            unused_remainder):
    """The `cond` of the `tf.while_loop`."""
    return tf.reduce_any(cumul_halting < 1)
Project: transform    Author: tensorflow    | project source | file source
def testCreatePhasesWithLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    # i = 0
    # while i < 10:
    #   i += 1
    #   x -= 1
    #
    # To get an error in the case where apply_function is not called, we have
    # to call an analyzer first (see testCreatePhasesWithUnwrappedLoop).  So
    # we also do so here.
    def preprocessing_fn(inputs):
      def _subtract_ten(x):
        i = tf.constant(0)
        c = lambda i, x: tf.less(i, 10)
        b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
        return tf.while_loop(c, b, [i, x])[1]
      scaled_to_0_1 = mappers.scale_to_0_1(
          api.apply_function(_subtract_ten, inputs['x']))
      return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    phases = impl_helper.create_phases(graph)
    self.assertEqual(len(phases), 1)
    self.assertEqual(len(phases[0].analyzers), 2)
Project: transform    Author: tensorflow    | project source | file source
def testCreatePhasesWithUnwrappedLoop(self):
    # Test a preprocessing function with control flow.
    #
    # The loop represents
    #
    # i = 0
    # while i < 10:
    #   i += 1
    #   x -= 1
    #
    # We need to call an analyzer after the loop because only the transitive
    # parents of analyzers are inspected by create_phases
    def preprocessing_fn(inputs):
      def _subtract_ten(x):
        i = tf.constant(0)
        c = lambda i, x: tf.less(i, 10)
        b = lambda i, x: (tf.add(i, 1), tf.add(x, -1))
        return tf.while_loop(c, b, [i, x])[1]
      scaled_to_0_1 = mappers.scale_to_0_1(_subtract_ten(inputs['x']))
      return {'x_scaled': scaled_to_0_1}

    input_schema = sch.Schema({
        'x': sch.ColumnSchema(tf.int32, [], sch.FixedColumnRepresentation())
    })
    graph, _, _ = impl_helper.run_preprocessing_fn(
        preprocessing_fn, input_schema)
    with self.assertRaisesRegexp(ValueError, 'Cycle detected'):
      _ = impl_helper.create_phases(graph)
Project: tf.rasterizer    Author: vahidk    | project source | file source
def sequential_for(fn, begin, end):

    def _cond(i):
        return tf.less(i, end)

    def _body(i):
        ops = fn(i)
        with tf.control_dependencies(ops):
            return i + 1

    return tf.while_loop(_cond, _body, [begin])
Project: GPflow    Author: GPflow    | project source | file source
def _while_loop(cond, body, args):
    return tf.while_loop(cond, body, args, parallel_iterations=1, back_prop=False)
Project: SSD_tensorflow_VOC    Author: LevinJ    | project source | file source
def __match_no_miss(self,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores,jaccard,gt_labels,gt_bboxes, num_anchors):
        #make sure every ground truth box can be matched to at least one anchor box
        max_inds = tf.cast(tf.argmax(jaccard, axis=1),tf.int32)
        def cond(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):
            r = tf.less(i, tf.shape(gt_labels)[0])
            return r
        def body(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):

            #upate gt_anchors_labels
            updates = tf.reshape(gt_labels[i], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])


            new_labels = tf.scatter_nd(indices, updates, shape)
            new_mask = tf.cast(new_labels, tf.bool)
            gt_anchors_labels = tf.where(new_mask, new_labels, gt_anchors_labels)

            #update gt_anchors_bboxes
            updates = tf.reshape(gt_bboxes[i], [1,-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.shape(gt_anchors_bboxes)
            new_bboxes = tf.scatter_nd(indices, updates, shape)
            gt_anchors_bboxes = tf.where(new_mask, new_bboxes, gt_anchors_bboxes)

            #update gt_anchors_scores
            updates = tf.reshape(jaccard[i, max_inds[i]], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])
            new_scores = tf.scatter_nd(indices, updates, shape)
            gt_anchors_scores = tf.where(new_mask, new_scores, gt_anchors_scores)



            return [i+1,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores]


        i = 0
        [i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores] = tf.while_loop(cond, body,[i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores])

        return gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores
Project: act-rte-inference    Author: DeNeutoy    | project source | file source
def do_act_steps(self, premise, hypothesis):


        self.rep_size = premise.get_shape()[-1].value

        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        initial_state = tf.zeros([self.batch_size, 2*self.rep_size], tf.float32, name="state")
        acc_states = tf.zeros([self.batch_size,2*self.rep_size], tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # While loop stops when this predicate is FALSE.
        # Ie all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop if all of the batch have passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state, premise, hypothesis, acc_states])

        return state, remainders, iterations
Project: act-rte-inference    Author: DeNeutoy    | project source | file source
def do_inference_steps(self, initial_state, premise, hypothesis):


        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        acc_states = tf.zeros_like(initial_state, tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # While loop stops when this predicate is FALSE.
        # Ie all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop if all of the batch have passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state,premise, hypothesis, acc_states])

        return state, remainders, iterations
Project: act-rte-inference    Author: DeNeutoy    | project source | file source
def do_inference_steps(self, initial_state, premise, hypothesis):


        self.one_minus_eps = tf.constant(1.0 - self.config.eps, tf.float32,[self.batch_size])
        self.N = tf.constant(self.config.max_computation, tf.float32,[self.batch_size])


        prob = tf.constant(0.0,tf.float32,[self.batch_size], name="prob")
        prob_compare = tf.constant(0.0,tf.float32,[self.batch_size], name="prob_compare")
        counter = tf.constant(0.0, tf.float32,[self.batch_size], name="counter")
        acc_states = tf.zeros_like(initial_state, tf.float32, name="state_accumulator")
        batch_mask = tf.constant(True, tf.bool,[self.batch_size])

        # While loop stops when this predicate is FALSE.
        # Ie all (probability < 1-eps AND counter < N) are false.

        pred = lambda batch_mask,prob_compare,prob,\
                      counter,state,premise, hypothesis ,acc_state:\
            tf.reduce_any(
                tf.logical_and(
                    tf.less(prob_compare,self.one_minus_eps),
                    tf.less(counter,self.N)))
                # only stop if all of the batch have passed either threshold

            # Do while loop iterations until predicate above is false.
        _,_,remainders,iterations,_,_,_,state = \
            tf.while_loop(pred,self.inference_step,
            [batch_mask,prob_compare,prob,
             counter,initial_state,premise, hypothesis, acc_states])

        return state, remainders, iterations
Project: document-qa    Author: allenai    | project source | file source
def best_span_from_bounds(start_logits, end_logits, bound=None):
    """
    Brute force approach to finding the best span from start/end logits in tensorflow, still usually
    faster than the Python dynamic-programming version
    """
    b = tf.shape(start_logits)[0]

    # Using `top_k` to get the index and value at once is faster
    # than using argmax and then gather to get the value
    top_k = tf.nn.top_k(start_logits + end_logits, k=1)
    values, indices = [tf.squeeze(x, axis=[1]) for x in top_k]

    # Convert to (start_position, length) format
    indices = tf.stack([indices, tf.fill((b,), 0)], axis=1)

    # TODO Might be better to build the batch x n_word x n_word
    # matrix and use tf.matrix_band to zero out the unwanted ones...

    if bound is None:
        n_lengths = tf.shape(start_logits)[1]
    else:
        # take the min in case the bound > the context
        n_lengths = tf.minimum(bound, tf.shape(start_logits)[1])

    def compute(i, values, indices):
        top_k = tf.nn.top_k(start_logits[:, :-i] + end_logits[:, i:])
        b_values, b_indices = [tf.squeeze(x, axis=[1]) for x in top_k]

        b_indices = tf.stack([b_indices, tf.fill((b, ), i)], axis=1)
        indices = tf.where(b_values > values, b_indices, indices)
        values = tf.maximum(values, b_values)
        return i+1, values, indices

    _, values, indices = tf.while_loop(
        lambda ix, values, indices: ix < n_lengths,
        compute,
        [1, values, indices],
        back_prop=False)

    spans = tf.stack([indices[:, 0], indices[:, 0] + indices[:, 1]], axis=1)
    return spans, values
Project: neuralmonkey    Author: ufal    | project source | file source
def decoding_loop(self, train_mode: bool, sample: bool = False) -> Tuple[
            tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs tf.while_loop
        with the continuation criterion returned from loop_continue_criterion,
        and body function returned from get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking TensorArrays containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
        """
        initial_loop_state = self.get_initial_loop_state()
        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(train_mode, sample),
            initial_loop_state)

        self.finalize_loop(final_loop_state, train_mode)

        logits = final_loop_state.histories.logits.stack()
        decoder_outputs = final_loop_state.histories.decoder_outputs.stack()
        decoded = final_loop_state.histories.outputs.stack()

        # TODO mask should include also the end symbol
        mask = final_loop_state.histories.mask.stack()

        return logits, decoder_outputs, mask, decoded
Project: neuralmonkey    Author: ufal    | project source | file source
def _decoding_loop(self) -> BeamSearchOutput:
        # collect attention objects
        beam_body = self.get_body()

        initial_loop_state = self.get_initial_loop_state()

        def cond(*args) -> tf.Tensor:
            bsls = BeamSearchLoopState(*args)
            return tf.less(
                bsls.decoder_loop_state.feedables.step - 1, self._max_steps)

        # First step has to be run manually because while_loop needs the same
        # shapes between steps and the first beam state is not beam-sized, but
        # just a single state.
        #
        # When running ensembles, we want to provide
        # ensembled logprobs to the beam_body before manually running
        # the first step
        next_bs_loop_state = tf.cond(
            cond(*initial_loop_state),
            lambda: beam_body(*initial_loop_state),
            lambda: initial_loop_state)

        final_state = tf.while_loop(cond, beam_body, next_bs_loop_state)
        dec_loop_state = final_state.decoder_loop_state
        bs_state = final_state.bs_state

        scores = final_state.bs_output.scores.stack()
        parent_ids = final_state.bs_output.parent_ids.stack()
        token_ids = final_state.bs_output.token_ids.stack()

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=SearchStepOutput(
                scores=scores,
                parent_ids=parent_ids,
                token_ids=token_ids),
            last_dec_loop_state=dec_loop_state.feedables,
            last_search_state=bs_state,
            attention_loop_states=[])
Project: neuralmonkey    Author: ufal    | project source | file source
def decoding_loop(self, train_mode: bool, sample: bool = False) -> Tuple[
            tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs tf.while_loop
        with the continuation criterion returned from loop_continue_criterion,
        and body function returned from get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking TensorArrays containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
        """
        initial_loop_state = self.get_initial_loop_state()
        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(train_mode, sample),
            initial_loop_state)

        self.finalize_loop(final_loop_state, train_mode)

        logits = final_loop_state.histories.logits.stack()
        decoder_outputs = final_loop_state.histories.decoder_outputs.stack()
        decoded = final_loop_state.histories.outputs.stack()

        # TODO mask should include also the end symbol
        mask = final_loop_state.histories.mask.stack()

        return logits, decoder_outputs, mask, decoded
Project: neuralmonkey    Author: ufal    | project source | file source
def _decoding_loop(self) -> BeamSearchOutput:
        # collect attention objects
        beam_body = self.get_body()

        initial_loop_state = self.get_initial_loop_state()

        def cond(*args) -> tf.Tensor:
            bsls = BeamSearchLoopState(*args)
            return tf.less(
                bsls.decoder_loop_state.feedables.step - 1, self._max_steps)

        # First step has to be run manually because while_loop needs the same
        # shapes between steps and the first beam state is not beam-sized, but
        # just a single state.
        #
        # When running ensembles, we want to provide
        # ensembled logprobs to the beam_body before manually running
        # the first step
        next_bs_loop_state = tf.cond(
            cond(*initial_loop_state),
            lambda: beam_body(*initial_loop_state),
            lambda: initial_loop_state)

        final_state = tf.while_loop(cond, beam_body, next_bs_loop_state)
        dec_loop_state = final_state.decoder_loop_state
        bs_state = final_state.bs_state

        scores = final_state.bs_output.scores.stack()
        parent_ids = final_state.bs_output.parent_ids.stack()
        token_ids = final_state.bs_output.token_ids.stack()

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=SearchStepOutput(
                scores=scores,
                parent_ids=parent_ids,
                token_ids=token_ids),
            last_dec_loop_state=dec_loop_state.feedables,
            last_search_state=bs_state,
            attention_loop_states=[])
Project: neuralmonkey    Author: ufal    | project source | file source
def decoding_loop(self, train_mode: bool, sample: bool = False) -> Tuple[
            tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:
        """Run the decoding while loop.

        Calls get_initial_loop_state and constructs tf.while_loop
        with the continuation criterion returned from loop_continue_criterion,
        and body function returned from get_body.

        After finishing the tf.while_loop, it calls finalize_loop
        to further postprocess the final decoder loop state (usually
        by stacking TensorArrays containing decoding histories).

        Arguments:
            train_mode: Boolean flag, telling whether this is
                a training run.
            sample: Boolean flag, telling whether we should sample
                the output symbols from the output distribution instead
                of using argmax or gold data.
        """
        initial_loop_state = self.get_initial_loop_state()
        final_loop_state = tf.while_loop(
            self.loop_continue_criterion,
            self.get_body(train_mode, sample),
            initial_loop_state)

        self.finalize_loop(final_loop_state, train_mode)

        logits = final_loop_state.histories.logits.stack()
        decoder_outputs = final_loop_state.histories.decoder_outputs.stack()
        decoded = final_loop_state.histories.outputs.stack()

        # TODO mask should include also the end symbol
        mask = final_loop_state.histories.mask.stack()

        return logits, decoder_outputs, mask, decoded
项目:neuralmonkey    作者:ufal    | 项目源码 | 文件源码
def _decoding_loop(self) -> BeamSearchOutput:
        # collect attention objects
        beam_body = self.get_body()

        initial_loop_state = self.get_initial_loop_state()

        def cond(*args) -> tf.Tensor:
            bsls = BeamSearchLoopState(*args)
            return tf.less(
                bsls.decoder_loop_state.feedables.step - 1, self._max_steps)

        # First step has to be run manually because while_loop needs the same
        # shapes between steps and the first beam state is not beam-sized, but
        # just a single state.
        #
        # When running ensembles, we want to provide
        # ensembled logprobs to the beam_body before manually running
        # the first step
        next_bs_loop_state = tf.cond(
            cond(*initial_loop_state),
            lambda: beam_body(*initial_loop_state),
            lambda: initial_loop_state)

        final_state = tf.while_loop(cond, beam_body, next_bs_loop_state)
        dec_loop_state = final_state.decoder_loop_state
        bs_state = final_state.bs_state

        scores = final_state.bs_output.scores.stack()
        parent_ids = final_state.bs_output.parent_ids.stack()
        token_ids = final_state.bs_output.token_ids.stack()

        # TODO: return att_loop_states properly
        return BeamSearchOutput(
            last_search_step_output=SearchStepOutput(
                scores=scores,
                parent_ids=parent_ids,
                token_ids=token_ids),
            last_dec_loop_state=dec_loop_state.feedables,
            last_search_state=bs_state,
            attention_loop_states=[])
Project: tensorforce    Author: reinforceio    | project source | file source
def tf_solve(self, fn_x, x_init, *args):
        """
        Iteratively solves an equation/optimization for $x$ involving an expression $f(x)$.

        Args:
            fn_x: A callable returning an expression $f(x)$ given $x$.
            x_init: Initial solution guess $x_0$.
            *args: Additional solver-specific arguments.

        Returns:
            A solution $x$ to the problem as given by the solver.
        """
        self.fn_x = fn_x

        # Initialization step
        args = self.initialize(x_init, *args)

        # Iteration loop with termination condition
        if self.unroll_loop:
            # Unrolled for loop
            for _ in range(self.max_iterations):
                next_step = self.next_step(*args)
                step = (lambda: self.step(*args))
                do_nothing = (lambda: args)
                args = tf.cond(pred=next_step, true_fn=step, false_fn=do_nothing)

        else:
            # TensorFlow while loop
            args = tf.while_loop(cond=self.next_step, body=self.step, loop_vars=args)

        # First argument contains solution
        return args[0]
Project: yolo-tensorflow    Author: persistforever    | project source | file source
def calculate_loss(self, logits):
        # get class_pred and box_pred
        self.box_preds = logits

        # loop over each example
        results = tf.while_loop(
            cond=self._one_example_cond, 
            body=self._one_example_body, 
            loop_vars=[tf.constant(0), self.batch_size,
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0), tf.constant(0.0),
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0), 
                       tf.constant(0.0), tf.constant(0.0)])
        coord_loss = results[2]
        object_loss = results[3]
        noobject_loss = results[4]
        class_loss = results[5]
        iou_value = results[6]
        object_value = results[7]
        anyobject_value = results[8]
        recall_value = results[9]
        class_value = results[10]

        # scale the losses
        coord_loss = coord_loss * self.coord_scale / self.batch_size
        object_loss = object_loss * self.object_scale / self.batch_size
        noobject_loss = noobject_loss * self.noobject_scale / self.batch_size
        class_loss = class_loss * self.class_scale / self.batch_size
        # evaluation metrics
        iou_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        object_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        anyobject_value /= (self.batch_size * self.cell_size * self.cell_size * self.n_boxes)
        recall_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        class_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])

        return coord_loss, object_loss, noobject_loss, class_loss, \
            iou_value, object_value, anyobject_value, recall_value, class_value
Project: yolo-tensorflow    Author: persistforever    | project source | file source
def calculate_loss(self, logits):
        logits = tf.reshape(
            logits, shape=[self.batch_size, self.cell_size, self.cell_size, 
                           self.n_boxes, 5])

        # get class_pred and box_pred
        self.box_preds = tf.concat(
            [tf.sigmoid(logits[:,:,:,:,0:2]),
             logits[:,:,:,:,2:4],
             tf.sigmoid(logits[:,:,:,:,4:5])], axis=4)

        # loop over each example
        results = tf.while_loop(
            cond=self._one_example_cond, 
            body=self._one_example_body, 
            loop_vars=[tf.constant(0), self.batch_size,
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0),
                       tf.constant(0.0), tf.constant(0.0), tf.constant(0.0), tf.constant(0.0)])
        coord_loss = results[2]
        object_loss = results[3]
        noobject_loss = results[4]
        iou_value = results[5]
        object_value = results[6]
        anyobject_value = results[7]
        recall_value = results[8]

        # scale the losses
        coord_loss = coord_loss * self.coord_scale / self.batch_size
        object_loss = object_loss * self.object_scale / self.batch_size
        noobject_loss = noobject_loss * self.noobject_scale / self.batch_size
        # evaluation metrics
        iou_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        object_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])
        anyobject_value /= (self.batch_size * self.cell_size * self.cell_size * self.n_boxes)
        recall_value /= tf.reduce_sum(tf.cast(self.object_nums, tf.float32), axis=[0])

        return coord_loss, object_loss, noobject_loss, \
            iou_value, object_value, anyobject_value, recall_value
Project: THUMT    Author: thumt    | project source | file source
def sampler(symbols_to_logits_fn, initial_ids, sample_num, decode_length,
            vocab_size, eos_id, features=None):
    batch_size = tf.shape(initial_ids)[0]

    # Expand each batch to sample_num
    seqlen = tf.constant(0)
    alive_seq = tf.tile(tf.expand_dims(initial_ids, 1), [1, sample_num])
    alive_seq = tf.expand_dims(alive_seq, 2)  # (batch_size, sample_num, 1)
    sa = tf.shape(alive_seq)
    alive_seq = tf.reshape(alive_seq, [sa[0]*sa[1],1])

    def _is_finished(i, alive_seq):
        return i < decode_length

    def inner_loop(i, alive_seq):
        logit = symbols_to_logits_fn(alive_seq)[0]
        new_samples = tf.multinomial(logit, 1)
        new_samples = tf.to_int32(new_samples)
        alive_seq = tf.concat([alive_seq, new_samples], 1)
        return (i + 1, alive_seq)

    (_, alive_seq) = tf.while_loop(
        _is_finished,
        inner_loop,
        [seqlen, alive_seq],
        shape_invariants=[
            tf.TensorShape([]),
            tf.TensorShape([None, None])
        ],
        parallel_iterations=1,
        back_prop=False
    )
    alive_seq.set_shape((sample_num, None))

    return alive_seq
Project: mist-rnns    Author: rdipietro    | project source | file source
def _compute_states(self):
    """ Compute hidden states.

    Returns:
      A tuple, (outputs, states).
    """

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
      return tf.less(t, self.length)

    def body(t, h, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('simple_rnn'):
        h_new = self.activation(self._linear(h, x, num_units, scope='simple_rnn'))

      h_ta_new = h_ta.write(t, h_new)
      return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
Project: mist-rnns    Author: rdipietro    | project source | file source
def _compute_states(self):

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)
    c_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, c, h, c_ta, h_ta):
      return tf.less(t, self.length)

    def body(t, c, h, c_ta, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('lstm'):
        c_tilde = self.activation(self._linear(h, x, num_units, scope='c'))
        i = tf.nn.sigmoid(self._linear(h, x, num_units, scope='i'))
        f = tf.nn.sigmoid(self._linear(h, x, num_units, shift=self.optional_bias_shift, scope='f'))
        o = tf.nn.sigmoid(self._linear(h, x, num_units, scope='o'))
        c_new = i * c_tilde + f * c
        h_new = o * self.activation(c_new)

      c_ta_new = c_ta.write(t, c_new)
      h_ta_new = h_ta.write(t, h_new)
      return t + 1, c_new, h_new, c_ta_new, h_ta_new

    t = tf.constant(0)
    c, h = tf.split(tf.squeeze(self.initial_states, [1]), 2, axis=1)
    _, _, _, c_ta, h_ta = tf.while_loop(cond, body, [t, c, h, c_ta, h_ta])

    outputs = tf.transpose(h_ta.stack(), [1, 0, 2], name='outputs')
    cells = tf.transpose(c_ta.stack(), [1, 0, 2])
    states = tf.concat([cells, outputs], axis=2, name='states')
    return outputs, states
Project: mist-rnns    Author: rdipietro    | project source | file source
def _compute_states(self):

    _inputs = tf.transpose(self.inputs, [1, 0, 2])
    x_ta = tf.TensorArray(tf.float32, size=self.length).unstack(_inputs)
    h_ta = tf.TensorArray(tf.float32, size=self.length)

    def cond(t, h, h_ta):
      return tf.less(t, self.length)

    def body(t, h, h_ta):

      x = x_ta.read(t)
      num_units, input_size = self.num_hidden_units, self.input_size

      with tf.variable_scope('gru'):
        r = tf.nn.sigmoid(self._linear(h, x, num_units, scope='r'))
        h_pre_act = r * h
        h_tilde = self.activation(self._linear(h_pre_act, x, num_units, scope='h'))

        z = tf.nn.sigmoid(self._linear(h, x, num_units, shift=self.optional_bias_shift, scope='z'))
        h_new = z * h + (1 - z) * h_tilde

      h_ta_new = h_ta.write(t, h_new)
      return t + 1, h_new, h_ta_new

    t = tf.constant(0)
    h = tf.squeeze(self.initial_states, [1])
    _, _, h_ta = tf.while_loop(cond, body, [t, h, h_ta])

    states = tf.transpose(h_ta.stack(), [1, 0, 2], name='states')
    outputs = tf.identity(states, name='outputs')
    return outputs, states
Project: MGP-RNN    Author: jfutoma    | project source | file source
def get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,
                   num_rnn_grid_times,med_cov_grid):
    """
    returns samples from GP at evenly-spaced gridpoints
    """ 
    grid_max = tf.shape(X)[1]
    Z = tf.zeros([0,grid_max,input_dim])

    N = tf.shape(T)[0] #number of observations

    #setup tf while loop (have to use this bc loop size is variable)
    def cond(i,Z):
        return i<N

    def body(i,Z):
        Yi = tf.reshape(tf.slice(Y,[i,0],[1,num_obs_values[i]]),[-1])
        Ti = tf.reshape(tf.slice(T,[i,0],[1,num_obs_times[i]]),[-1])
        ind_kfi = tf.reshape(tf.slice(ind_kf,[i,0],[1,num_obs_values[i]]),[-1])
        ind_kti = tf.reshape(tf.slice(ind_kt,[i,0],[1,num_obs_values[i]]),[-1])
        Xi = tf.reshape(tf.slice(X,[i,0],[1,num_rnn_grid_times[i]]),[-1])
        X_len = num_rnn_grid_times[i]

        GP_draws = draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti)
        pad_len = grid_max-X_len #pad by this much
        padded_GP_draws = tf.concat([GP_draws,tf.zeros((n_mc_smps,pad_len,M))],1) 

        medcovs = tf.slice(med_cov_grid,[i,0,0],[1,-1,-1])
        tiled_medcovs = tf.tile(medcovs,[n_mc_smps,1,1])
        padded_GPdraws_medcovs = tf.concat([padded_GP_draws,tiled_medcovs],2)

        Z = tf.concat([Z,padded_GPdraws_medcovs],0)        

        return i+1,Z  

    i = tf.constant(0)
    i,Z = tf.while_loop(cond,body,loop_vars=[i,Z],
                shape_invariants=[i.get_shape(),tf.TensorShape([None,None,None])])

    return Z
Project: MGP-RNN    Author: jfutoma    | project source | file source
def CG(A,b):
    """ Conjugate gradient, to get solution x = A^-1 * b,
    can be faster than using the Cholesky for large scale problems
    """
    b = tf.reshape(b,[-1])
    n = tf.shape(A)[0]
    x = tf.zeros([n]) 
    r_ = b 
    p = r_ 

    #These settings are somewhat arbitrary
    #You might want to test sensitivity to these
    CG_EPS = tf.cast(n/1000,"float")
    MAX_ITER = tf.div(n,250) + 3

    def cond(i,x,r,p):
        return tf.logical_and(i < MAX_ITER, tf.norm(r) > CG_EPS)

    def body(i,x,r_,p):        
        p_vec = tf.reshape(p,[-1,1])
        Ap = tf.reshape(tf.matmul(A,p_vec),[-1]) #make a vector

        alpha = dot(r_,r_)/dot(p,Ap)
        x = x + alpha*p
        r = r_ - alpha*Ap
        beta = dot(r,r)/dot(r_,r_)
        p = r + beta*p

        return i+1,x,r,p

    i = tf.constant(0)
    i,x,r,p = tf.while_loop(cond,body,loop_vars=[i,x,r_,p])

    return tf.reshape(x,[-1,1])
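
The loop body above follows the standard conjugate-gradient recurrences, which in the usual textbook notation (not notation taken from the repository) are:

\alpha_k = \frac{r_k^\top r_k}{p_k^\top A p_k}, \qquad
x_{k+1} = x_k + \alpha_k p_k, \qquad
r_{k+1} = r_k - \alpha_k A p_k, \qquad
\beta_k = \frac{r_{k+1}^\top r_{k+1}}{r_k^\top r_k}, \qquad
p_{k+1} = r_{k+1} + \beta_k p_k.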
Project: MGP-RNN    Author: jfutoma    | project source | file source
def block_CG(A_,B_):
    """
    block version of CG. Get solution to matrix equation AX = B, ie
    X = A^-1 * B. Will be much faster than Cholesky for large-scale problems.
    """
    n = tf.shape(B_)[0]
    m = tf.shape(B_)[1]

    X = tf.zeros((n,m))
    V_ = tf.zeros((n,m))
    R = B_
    R_ = tf.matrix_set_diag(tf.zeros((n,m)),tf.ones([m]))

    #somewhat arbitrary again, may want to check sensitivity
    CG_EPS = tf.cast(n/1000,"float")
    MAX_ITER = tf.div(n,250) + 3

    def cond(i,X,R_,R,V_):
        return tf.logical_and(i < MAX_ITER, tf.norm(R) > CG_EPS)

    def body(i,X,R_,R,V_):   
        S = tf.matrix_solve(tf.matmul(tf.transpose(R_),R_),
                            tf.matmul(tf.transpose(R),R))
        V = R + tf.matmul(V_,S)
        T = tf.matrix_solve(tf.matmul(tf.transpose(V),tf.matmul(A_,V)),
                            tf.matmul(tf.transpose(R),R))
        X = X + tf.matmul(V,T)
        V_ = V
        R_ = R
        R = R - tf.matmul(A_,tf.matmul(V,T))
        return i+1,X,R_,R,V_

    i = tf.constant(0)
    i,X,_,_,_ = tf.while_loop(cond,body,[i,X,R_,R,V_])
    return X
Project: tensorflow-DDT    Author: wangchao66    | project source | file source
def source_distance(x,y):
    y = tf.cast(tf.argmax(y,axis=1),tf.float32)
    y1,_,_ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32,size=1, dynamic_size=True,clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32,size=size)
    i = tf.constant(0)
    def should_continue(i, *args):
        return i < size
    def loop(i,output):
        y_class = x_array.read(i)
        idx_i = tf.where(tf.equal(y,y_class))
        xi = tf.gather_nd(x,idx_i)
        initial_outputs1 = tf.TensorArray(dtype=tf.float32,size=size)
        j = tf.constant(0)
        def should_continue1(j,*args):
            return j<size
        def loop1(j,output1):
            y2=x_array.read(j)
            idx_j = tf.where(tf.equal(y,y2))
            xj = tf.gather_nd(x,idx_j)
            dis = tf.reduce_mean (tf.square(tf.reduce_mean(xi,0)
                        -tf.reduce_mean(xj,0)))
            output1 = output1.write(j,dis)
            return j+1,output1
        j,r1=tf.while_loop(should_continue1,loop1,[j,initial_outputs1])
        output = output.write(i,r1.stack())
        return i+1,output
    i,r = tf.while_loop(should_continue,loop,[i,initial_outputs])
    out = r.stack()
    return out
Project: tensorflow-DDT    Author: wangchao66    | project source | file source
def source_distance(x,y):
    y = tf.cast(tf.argmax(y,axis=1),tf.float32)
    y1,_,_ = tf.unique_with_counts(y)
    TensorArr = tf.TensorArray(tf.float32,size=1, dynamic_size=True,clear_after_read=False)
    x_array = TensorArr.unstack(y1)
    size = x_array.size()
    initial_outputs = tf.TensorArray(dtype=tf.float32,size=size)
    i = tf.constant(0)
    def should_continue(i, *args):
        return i < size
    def loop(i,output):
        y_class = x_array.read(i)
        idx_i = tf.where(tf.equal(y,y_class))
        xi = tf.gather_nd(x,idx_i)
        initial_outputs1 = tf.TensorArray(dtype=tf.float32,size=size)
        j = tf.constant(0)
        def should_continue1(j,*args):
            return j<size
        def loop1(j,output1):
            y2=x_array.read(j)
            idx_j = tf.where(tf.equal(y,y2))
            xj = tf.gather_nd(x,idx_j)
            dis = tf.reduce_mean (tf.square(tf.reduce_mean(xi,0)
                        -tf.reduce_mean(xj,0)))
            output1 = output1.write(j,dis)
            return j+1,output1
        j,r1=tf.while_loop(should_continue1,loop1,[j,initial_outputs1])
        output = output.write(i,r1.stack())
        return i+1,output
    i,r = tf.while_loop(should_continue,loop,[i,initial_outputs])
    out = r.stack()
    return out
Project: Constituent-Centric-Neural-Architecture-for-Reading-Comprehension    Author: shrshore    | project source | file source
def process_leafs(self,inodes_h,inodes_c,emb_leaves):
        num_leaves = self.num_leaves
        embx=tf.gather(emb_leaves,tf.range(num_leaves))
        leaf_parent=tf.gather(self.t_par_leaf,tf.range(num_leaves))
        node_h=tf.identity(inodes_h)
        node_c=tf.identity(inodes_c)
        with tf.variable_scope('td_Composition',reuse=True):
            cW=tf.get_variable('cW',[self.hidden_dim+self.emb_dim,4*self.hidden_dim])
            cb=tf.get_variable('cb',[4*self.hidden_dim])
            bu,bo,bi,bf=tf.split(axis=0,num_or_size_splits=4,value=cb)
            idx_var=tf.constant(0)
            logging.warn('begin enumerate the idx_var')
            def _recurceleaf(node_h, node_c,idx_var):
                node_info=tf.gather(leaf_parent, idx_var)
                cur_embed=tf.gather(embx, idx_var)
                #initial node_h:[inode_size, dim_hidden]
                parent_h=tf.gather(node_h, node_info)
                parent_c=tf.gather(node_c, node_info)
                cur_input=tf.concat(values=[parent_h, cur_embed],axis=0)
                flat_=tf.reshape(cur_input, [-1])

                tmp=tf.matmul(tf.expand_dims(flat_,0),cW)

                u,o,i,f=tf.split(axis=1,num_or_size_splits=4,value=tmp)
                i=tf.nn.sigmoid(i+bi)
                o=tf.nn.sigmoid(o+bo)
                u=tf.nn.sigmoid(u+bu)
                f=tf.nn.sigmoid(f+bf)
                c=i*u+tf.reduce_sum(f*parent_c,[0])
                h=o*tf.nn.tanh(c)

                node_h=tf.concat(axis=0,values=[node_h,h])
                node_c=tf.concat(axis=0,values=[node_c,c])
                idx_var=tf.add(idx_var,1)
                return node_h, node_c, idx_var
            loop_cond=lambda a1,b1,idx_var:tf.less(idx_var,num_leaves)
            loop_vars=[node_h,node_c,idx_var]
            node_h,node_c,idx_var=tf.while_loop(loop_cond, _recurceleaf,loop_vars,shape_invariants=[tf.TensorShape([None,self.hidden_dim]),tf.TensorShape([None,self.hidden_dim]),idx_var.get_shape()])
            logging.warn('return new node_h, finished')
            return node_h,node_c