Python tensorflow module: case() code examples

The following 28 code examples, extracted from open-source Python projects, illustrate how to use tensorflow.case().
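
For reference, here is a minimal sketch of the tf.case() API (TF 1.x) before the project examples; the placeholder and constants are illustrative only. tf.case() takes a list of (predicate, callable) pairs plus a default callable, and executes the first callable whose predicate evaluates to true.

import tensorflow as tf

x = tf.placeholder(tf.int32, shape=())
# Runs the first branch whose predicate is True; `default` runs otherwise.
sign = tf.case(
    [(tf.less(x, 0), lambda: tf.constant(-1)),
     (tf.equal(x, 0), lambda: tf.constant(0))],
    default=lambda: tf.constant(1))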

Project: yoctol-keras-layer-zoo    Author: Yoctol    | project source | file source
def step_with_training(self, training=None):

        def step(inputs, states):
            input_shape = K.int_shape(inputs)
            y_tm1 = self.layer.preprocess_input(
                K.expand_dims(states[0], axis=1),
                training
            )
            y_tm1 = K.reshape(y_tm1, (-1, input_shape[-1]))

            inputs_sum = tf.reduce_sum(inputs)

            def inputs_f(): return inputs
            def output_f(): return y_tm1
            current_inputs = tf.case(
                [(tf.equal(inputs_sum, 0.0), output_f)],
                default=inputs_f
            )

            return self.layer.step(
                current_inputs,
                states
            )

        return step
Project: unsupervised-2017-cvprw    Author: imatge-upc    | project source | file source
def generate_mask(img_mask_list, h, w, l):
    img_masks, loss_masks = [], []

    for i in range(l):
        # generate image mask
        img_mask = img_mask_list[i]
        img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
        img_mask = tf.reshape(img_mask, (h, w))
        img_masks.append(img_mask)

        # generate loss mask
        s_total   = h * w
        s_mask    = tf.reduce_sum(img_mask)
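        # Balance the loss: f1 weights masked pixels by (s_total - s_mask) / s_mask
        # and the rest by 1 when the mask covers less than half the image;
        # an empty mask yields all zeros (f2), a dominant mask all ones (f3).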
        def f1(): return img_mask*((s_total-s_mask)/s_mask-1)+1
        def f2(): return tf.zeros_like(img_mask)
        def f3(): return tf.ones_like(img_mask)
        loss_mask = tf.case([(tf.equal(s_mask, 0), f2),
                             (tf.less(s_mask, s_total/2), f1)],
                            default=f3)

        loss_masks.append(loss_mask)

    return tf.stack(img_masks), tf.stack(loss_masks)
Project: tensorfx    Author: TensorLab    | project source | file source
def _bucketize(instances, feature, schema, metadata):
  """Applies the bucketize transform to a numeric field.
  """
  field = schema[feature.field]
  if not field.numeric:
    raise ValueError('A bucketize transform cannot be applied to non-numerical field "%s".' %
                     feature.field)

  transform = feature.transform
  boundaries = list(map(float, transform['boundaries'].split(',')))  # list() so len() works on Python 3

  # TODO: Figure out how to use tf.case instead of this contrib op
  from tensorflow.contrib.layers.python.ops.bucketization_op import bucketize

  # Create a one-hot encoded tensor. Its depth is the number of buckets, which
  # for N boundaries is N + 1.
  # A squeeze is needed to remove the extra dimension added to the shape.
  value = instances[feature.field]

  value = tf.squeeze(tf.one_hot(bucketize(value, boundaries, name='bucket'),
                                depth=len(boundaries) + 1, on_value=1.0, off_value=0.0,
                                name='one_hot'),
                     axis=1, name='bucketize')
  value.set_shape((None, len(boundaries) + 1))
  return value
Project: neuralmonkey    Author: ufal    | project source | file source
def piecewise_function(param, values, changepoints, name=None,
                       dtype=tf.float32):
    """Compute a piecewise function.

    Arguments:
        param: The function parameter.
        values: List of function values (numbers or tensors).
        changepoints: Sorted list of points where the function changes from
            one value to the next. Must be one item shorter than `values`.
    """

    if len(changepoints) != len(values) - 1:
        raise ValueError("changepoints has length {}, expected {} (values "
                         "has length {})".format(len(changepoints),
                                                 len(values) - 1,
                                                 len(values)))

    with tf.name_scope(name, "PiecewiseFunction",
                       [param, values, changepoints]) as s_name:
        values = [tf.convert_to_tensor(y, dtype=dtype) for y in values]
        # this is a trick to make each lambda return a different y:
        lambdas = [lambda y=y: y for y in values]
        predicates = [tf.less(param, x) for x in changepoints]
        return tf.case(list(zip(predicates, lambdas[:-1])), lambdas[-1],
                       name=s_name)
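
An illustrative call (a hypothetical step tensor and made-up schedule values), decaying a value at changepoints 1000 and 2000:

step = tf.placeholder(tf.float32, shape=())
lr = piecewise_function(step, values=[0.1, 0.01, 0.001],
                        changepoints=[1000, 2000])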
Project: keras-mtcnn    Author: xiangrufan    | project source | file source
def data_input(input):
    x = input[0]
    y = input[1]
    z = input[2]
    random_int = tf.random_uniform([1])
    condition1 = random_int[0] > tf.constant(rand_threshold[1])
    condition0 = random_int[0] > tf.constant(rand_threshold[0])
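    # tf.case also accepts a dict mapping each predicate to a branch callable.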
    val = tf.case({condition1: lambda: x,
                   condition0: lambda: y
                   },
                  default=lambda: z)
    val.set_shape(z.shape)
    return [val, random_int]  # a tuple (output, random_int) is NOT allowed
Project: tf-crnn    Author: solivr    | project source | file source
def data_loader(csv_filename: str, params: Params, batch_size: int=128, data_augmentation: bool=False,
                num_epochs: int=None, image_summaries: bool=False):

    def input_fn():
        # Choose case: a single csv file or a list of csv files
        if not isinstance(csv_filename, list):
            filename_queue = tf.train.string_input_producer([csv_filename], num_epochs=num_epochs, name='filename_queue')
        else:
            filename_queue = tf.train.string_input_producer(csv_filename, num_epochs=num_epochs, name='filename_queue')

        # Skip lines that have already been processed
        reader = tf.TextLineReader(name='CSV_Reader', skip_header_lines=0)
        key, value = reader.read(filename_queue, name='file_reading_op')

        default_line = [['None'], ['None']]
        path, label = tf.decode_csv(value, record_defaults=default_line, field_delim=params.csv_delimiter,
                                    name='csv_reading_op')

        image, img_width = image_reading(path, resized_size=params.input_shape,
                                         data_augmentation=data_augmentation, padding=True)

        to_batch = {'images': image, 'images_widths': img_width, 'filenames': path, 'labels': label}
        prepared_batch = tf.train.shuffle_batch(to_batch,
                                                batch_size=batch_size,
                                                min_after_dequeue=500,
                                                num_threads=15, capacity=4000,
                                                allow_smaller_final_batch=False,
                                                name='prepared_batch_queue')

        if image_summaries:
            tf.summary.image('input/image', prepared_batch.get('images'), max_outputs=1)
        tf.summary.text('input/labels', prepared_batch.get('labels')[:10])
        tf.summary.text('input/widths', tf.as_string(prepared_batch.get('images_widths')))

        return prepared_batch, prepared_batch.get('labels')

    return input_fn
Project: fold    Author: tensorflow    | project source | file source
def _create_variables(self):
    """Creates the variables associated with this layer.

    Guaranteed to be called at most once, either when the layer's call operator
    is invoked for the first time, in which case the input type will have been
    set, or when the public method create_variables is called for the first
    time. Scope will be set to this layer's vscope.

    Raises:
      TypeError: If `input_type` is invalid for this layer or isn't set.
    """
    pass
Project: fold    Author: tensorflow    | project source | file source
def _instantiate_subnet(self, batch, block_idx, seq_prefix):
    def zeros_fn():
      return tf.zeros_like(batch)
    def base_case_fn():
      return self._children[block_idx, seq_prefix](batch)
    def recursive_case_fn():
      first_subnet = self._instantiate_subnet(
          batch, block_idx, seq_prefix + (0,))
      return self._instantiate_subnet(
          first_subnet, block_idx, seq_prefix + (1,))
    if len(seq_prefix) == self._fractal_block_depth:
      return base_case_fn()
    else:
      choice = self._drop_path_choices[self._choice_id[(block_idx, seq_prefix)]]
      base_case = tf.cond(
          tf.not_equal(choice, self._JUST_RECURSE), base_case_fn, zeros_fn)
      base_case.set_shape(batch.get_shape())
      recursive_case = tf.cond(
          tf.not_equal(choice, self._JUST_BASE), recursive_case_fn, zeros_fn)
      recursive_case.set_shape(batch.get_shape())
      cases = [
          (tf.equal(choice, self._BOTH),
           lambda: self._mixer(base_case, recursive_case)),
          (tf.equal(choice, self._JUST_BASE), lambda: base_case),
          (tf.equal(choice, self._JUST_RECURSE), lambda: recursive_case)]
      result = tf.case(cases, lambda: base_case)
      result.set_shape(batch.get_shape())
      return result
Project: fold    Author: tensorflow    | project source | file source
def _tf_nth(fns, n):
  """Runs only the nth element of fns, where n is a scalar integer tensor."""
  cases = [(tf.equal(tf.constant(i, n.dtype), n), fn)
           for i, fn in enumerate(fns)]
  final_pred, final_fn = cases.pop()
  def default():
    with tf.control_dependencies([
        tf.Assert(final_pred, [n, len(fns)], name='nth_index_error')]):
      return final_fn()
  if len(fns) == 1: return default()
  return tf.case(cases, default)
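
An illustrative usage sketch (the branch list and n are made up): dispatch on a scalar index tensor, with an assertion firing if n is out of range:

n = tf.placeholder(tf.int32, shape=())
branches = [lambda: tf.constant(10), lambda: tf.constant(20), lambda: tf.constant(30)]
value = _tf_nth(branches, n)  # 10, 20 or 30 for n = 0, 1, 2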
Project: sonnet    Author: deepmind    | project source | file source
def _build(self, inputs, index, is_training, test_local_stats):
      """Add the IndexedStatsBatchNorm module to the graph.

      Args:
        inputs: Tensor to apply batch norm to.
        index: Scalar TensorFlow int32 value to select the batch norm index.
        is_training: Boolean to indicate to `snt.BatchNorm` if we are
          currently training.
        test_local_stats: Boolean to indicate to `snt.BatchNorm` if batch
          normalization should  use local batch statistics at test time.

      Returns:
        Output of batch norm operation.
      """
      def create_batch_norm():
        return batch_norm.BatchNorm(offset=False, scale=False)(
            inputs, is_training, test_local_stats)

      if self._max_unique_stats > 1:
        pred_fn_pairs = [(tf.equal(i, index), create_batch_norm)
                         for i in xrange(self._max_unique_stats - 1)]
        out = tf.case(pred_fn_pairs, create_batch_norm)
        out.set_shape(inputs.get_shape())  # needed for tf.case shape inference
        return out
      else:
        return create_batch_norm()
Project: miccai-2016-surgical-activity-rec    Author: rdipietro    | project source | file source
def piecewise_constant(x, boundaries, values):
    """ Piecewise constant function.

    Arguments:
        x: A 0-D Tensor.
        boundaries: A 1-D NumPy array with strictly increasing entries.
        values: A 1-D NumPy array that specifies the values for the intervals
            defined by `boundaries`. (It should therefore have one more entry
            than `boundaries`.)

    Returns: A 0-D Tensor. Its value is `values[0]` when `x <= boundaries[0]`,
        `values[1]` when `x > boundaries[0]` and `x <= boundaries[1]`, ..., and
        `values[-1]` when `x > boundaries[-1]`.
    """

    pred_fn_pairs = {}
    pred_fn_pairs[x <= boundaries[0]] = lambda: tf.constant(values[0])
    pred_fn_pairs[x > boundaries[-1]] = lambda: tf.constant(values[-1])
    for lower, upper, value in zip(boundaries[:-1],
                                   boundaries[1:],
                                   values[1:-1]):
        # We need to bind value here; can do this with lambda value=value: ...
        pred = (x > lower) & (x <= upper)
        pred_fn_pairs[pred] = lambda value=value: tf.constant(value)

    return tf.case(pred_fn_pairs, lambda: tf.constant(values[0]),
                   exclusive=True)
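
For example (schedule values are made up), stepping a learning rate down at global steps 1000 and 2000:

import numpy as np
gstep = tf.train.get_or_create_global_step()
lr = piecewise_constant(gstep, np.array([1000, 2000]),
                        np.array([0.1, 0.01, 0.001], dtype=np.float32))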
Project: Dialog-System-with-GAN-model    Author: drcut    | project source | file source
def generator(encoder_inputs,decoder_inputs,target_weights,bucket_id,seq_len):
    def seq2seq_f(encoder,decoder):
        cell = tf.contrib.rnn.BasicLSTMCell(embedding_size)
        if num_layers > 1:
            cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers)
        w = tf.get_variable("proj_w", [embedding_size, num_symbols])
        b = tf.get_variable("proj_b", [num_symbols])
        output_projection = (w, b)
        outputs, state = tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(encoder,
                decoder,cell,num_symbols,num_symbols,embedding_size,output_projection=output_projection,
                feed_previous = True)
        trans_output = []
        for output in outputs:
            trans_output.append(tf.matmul(output,w) + b)
        return trans_output, state

    targets = decoder_inputs
    outputs, losses = tf.contrib.legacy_seq2seq.model_with_buckets(
            encoder_inputs, decoder_inputs, targets, 
            target_weights, buckets, seq2seq_f, 
            softmax_loss_function=None, 
            per_example_loss=False, name='model_with_buckets')
    patch = tf.convert_to_tensor([[0.0]*num_symbols] * batch_size)
    def f0(): 
        for _ in range(0,max_len-buckets[0][1]):
            outputs[0].append(patch)
        return tf.convert_to_tensor(outputs[0],dtype = tf.float32)
    def f1(): 
        for _ in range(0,max_len-buckets[1][1]):
            outputs[1].append(patch)
        return tf.convert_to_tensor(outputs[1],dtype = tf.float32)
    def f2(): 
        for _ in range(0,max_len-buckets[2][1]):
            outputs[2].append(patch)
        return tf.convert_to_tensor(outputs[2],dtype = tf.float32)
    r = tf.case({tf.equal(bucket_id, 0): f0,
                 tf.equal(bucket_id, 1): f1},
                default=f2, exclusive=True)
    return tf.nn.softmax(tf.reshape(r,[max_len,batch_size,num_symbols]))
Project: shuttleNet    Author: shiyemin    | project source | file source
def adjust_max(start, stop, start_value, stop_value, name=None):
    with ops.name_scope(name, "AdjustMax",
                        [start, stop, name]) as name:
        global_step = tf.train.get_global_step()
        if global_step is not None:
            start = tf.convert_to_tensor(start, dtype=tf.int64)
            stop = tf.convert_to_tensor(stop, dtype=tf.int64)
            start_value = tf.convert_to_tensor(start_value, dtype=tf.float32)
            stop_value = tf.convert_to_tensor(stop_value, dtype=tf.float32)

            pred_fn_pairs = {}
            pred_fn_pairs[global_step <= start] = lambda: start_value
            pred_fn_pairs[(global_step > start) & (global_step <= stop)] = lambda: tf.train.polynomial_decay(
                                        start_value, global_step-start, stop-start,
                                        end_learning_rate=stop_value, power=1.0, cycle=False)
            default = lambda: stop_value
            return tf.case(pred_fn_pairs, default, exclusive=True)
        else:
            return None
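
An illustrative call (the numbers are made up), linearly ramping a value from 0.9 to 0.999 between global steps 1000 and 10000; note the function returns None unless a global step exists:

beta_max = adjust_max(1000, 10000, 0.9, 0.999)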
Project: indoor_localization    Author: kyeongsoo    | project source | file source
def bld_idx(x):
        def b0(): return tf.constant(0, dtype=x.dtype)
        def b1(): return tf.constant(1, dtype=x.dtype)
        def b2(): return tf.constant(2, dtype=x.dtype)
        return tf.case([(tf.less(x, tf.constant(4, dtype=x.dtype)), b0),
                        (tf.less(x, tf.constant(8, dtype=x.dtype)), b1)],
                       default=b2, exclusive=False)
Project: indoor_localization    Author: kyeongsoo    | project source | file source
def flr_idx(x):
        def f0(): return x
        def f1(): return tf.subtract(x, tf.constant(4, dtype=x.dtype))
        def f2(): return tf.subtract(x, tf.constant(8, dtype=x.dtype))
        return tf.case([(tf.less(x, tf.constant(4, dtype=x.dtype)), f0),
                        (tf.less(x, tf.constant(8, dtype=x.dtype)), f1)],
                       default=f2, exclusive=False)
Project: ntm_keras    Author: flomlo    | project source | file source
def _cosine_distance(M, k):
    # this is equation (6), or as I like to call it: The NaN factory.
    # TODO: Find it in a library (keras cosine loss?)
    # normalizing first as it is better conditioned.
    nk = K.l2_normalize(k, axis=-1)
    nM = K.l2_normalize(M, axis=-1)
    cosine_distance = K.batch_dot(nM, nk)
    # TODO: Do successful error handling
    #cosine_distance_error_handling = tf.Print(cosine_distance, [cosine_distance], message="NaN occurred in _cosine_distance")
    #cosine_distance_error_handling = K.ones(cosine_distance_error_handling.shape)
    #cosine_distance = tf.case({K.any(tf.is_nan(cosine_distance)) : (lambda: cosine_distance_error_handling)},
    #        default = lambda: cosine_distance, strict=True)
    return cosine_distance
Project: ntm_keras    Author: flomlo    | project source | file source
def build(self, input_shape):
        bs, input_length, input_dim = input_shape

        self.controller_input_dim, self.controller_output_dim = controller_input_output_shape(
                input_dim, self.units, self.m_depth, self.n_slots, self.shift_range, self.read_heads,
                self.write_heads)

        # Now that we've calculated the shape of the controller, we have to add it to the layer/model.
        if self.controller is None:
            self.controller = Dense(
                name = "controller",
                activation = 'linear',
                bias_initializer = 'zeros',
                units = self.controller_output_dim,
                input_shape = (bs, input_length, self.controller_input_dim))
            self.controller.build(input_shape=(self.batch_size, input_length, self.controller_input_dim))
            self.controller_with_state = False


        # This is a fixed shift matrix
        self.C = _circulant(self.n_slots, self.shift_range)

        self.trainable_weights = self.controller.trainable_weights 

        # We need to declare the number of states we want to carry around.
        # In our case the dimension seems to be 6 (LSTM) or 5 (GRU) or 4 (FF),
        # see self.get_initial_states; these correspond to:
        # [old_ntm_output] + [init_M, init_wr, init_ww] + [init_h] (LSTM and GRU) + [init_c] (LSTM only)
        # old_ntm_output does not make sense in our world, but is required by the definition of the step function we
        # intend to use.
        # WARNING: What self.state_spec does is only poorly understood,
        # I only copied it from keras/recurrent.py.
        self.states = [None, None, None, None]
        self.state_spec = [InputSpec(shape=(None, self.output_dim)),                            # old_ntm_output
                            InputSpec(shape=(None, self.n_slots, self.m_depth)),                # Memory
                            InputSpec(shape=(None, self.read_heads, self.n_slots)),   # weights_read
                            InputSpec(shape=(None, self.write_heads, self.n_slots))]  # weights_write

        super(NeuralTuringMachine, self).build(input_shape)
Project: tf-tutorial    Author: zchen0211    | project source | file source
def _build(self, inputs, index, is_training, test_local_stats):
      """Add the IndexedStatsBatchNorm module to the graph.

      Args:
        inputs: Tensor to apply batch norm to.
        index: Scalar TensorFlow int32 value to select the batch norm index.
        is_training: Boolean to indicate to `nn.BatchNorm` if we are
          currently training.
        test_local_stats: Boolean to indicate to `nn.BatchNorm` if batch
          normalization should  use local batch statistics at test time.

      Returns:
        Output of batch norm operation.
      """
      def create_batch_norm():
        return batch_norm.BatchNorm(offset=False, scale=False)(
            inputs, is_training, test_local_stats)

      if self._max_unique_stats > 1:
        pred_fn_pairs = [(tf.equal(i, index), create_batch_norm)
                         for i in xrange(self._max_unique_stats - 1)]
        out = tf.case(pred_fn_pairs, create_batch_norm)
        out.set_shape(inputs.get_shape())  # needed for tf.case shape inference
        return out
      else:
        return create_batch_norm()
Project: cmcl    Author: chhwang    | project source | file source
def variable_scheduler(var_list, pivot_list, gstep, name=None):
    """Schedule variable according to the global step.
       e.g. var_list = [0.1, 0.01, 0.001], pivot_list = [0, 1000, 2000] then
         0    <= gstep < 1000 --> return 0.1
         1000 <= gstep < 2000 --> return 0.01
         2000 <= gstep        --> return 0.001
    Args:
      var_list: List of variables to return.
      pivot_list: List of pivots when to change the variable.
      gstep: Global step (# of batches trained so far).
      name(Optional): Name of the operation.
    """
    assert(len(var_list) == len(pivot_list))
    if len(var_list) == 1:
        return tf.constant(var_list[0])

    def between(x, a, b):
        return tf.logical_and(tf.greater_equal(x, a), tf.less(x, b))

    # This class is necessary to declare constant lambda expressions
    class temp(object):
        def __init__(self, var):
            self.func = lambda: tf.constant(var)

    gstep = tf.to_int32(gstep)
    conds = {}
    for idx in range(len(pivot_list)-1):
        min_val = tf.constant(pivot_list[idx], tf.int32)
        max_val = tf.constant(pivot_list[idx+1], tf.int32)
        conds[between(gstep, min_val, max_val)] = temp(var_list[idx]).func
    return tf.case(conds, default=temp(var_list[-1]).func, exclusive=True, name=name)
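
An illustrative call matching the docstring example (assuming a global step has been created):

lr = variable_scheduler([0.1, 0.01, 0.001], [0, 1000, 2000],
                        tf.train.get_global_step())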
Project: tf-sparql    Author: derdav3    | project source | file source
def _build(self, inputs, index, is_training, test_local_stats):
      """Add the IndexedStatsBatchNorm module to the graph.

      Args:
        inputs: Tensor to apply batch norm to.
        index: Scalar TensorFlow int32 value to select the batch norm index.
        is_training: Boolean to indicate to `nn.BatchNorm` if we are
          currently training.
        test_local_stats: Boolean to indicate to `nn.BatchNorm` if batch
          normalization should  use local batch statistics at test time.

      Returns:
        Output of batch norm operation.
      """
      def create_batch_norm():
        return batch_norm.BatchNorm(offset=False, scale=False)(
            inputs, is_training, test_local_stats)

      if self._max_unique_stats > 1:
        pred_fn_pairs = [(tf.equal(i, index), create_batch_norm)
                         for i in xrange(self._max_unique_stats - 1)]
        out = tf.case(pred_fn_pairs, create_batch_norm)
        out.set_shape(inputs.get_shape())  # needed for tf.case shape inference
        return out
      else:
        return create_batch_norm()
Project: hdrnet_legacy    Author: mgharbi    | project source | file source
def _augment_data(self, inout, nchan=6):
    """Flip, crop and rotate samples randomly."""

    with tf.name_scope('data_augmentation'):
      if self.fliplr:
        inout = tf.image.random_flip_left_right(inout, seed=1234)
      if self.flipud:
        inout = tf.image.random_flip_up_down(inout, seed=3456)
      if self.rotate:
        angle = tf.random_uniform((), minval=0, maxval=4, dtype=tf.int32, seed=4567)
        inout = tf.case([(tf.equal(angle, 1), lambda: tf.image.rot90(inout, k=1)),
                         (tf.equal(angle, 2), lambda: tf.image.rot90(inout, k=2)),
                         (tf.equal(angle, 3), lambda: tf.image.rot90(inout, k=3))],
                        lambda: inout)

      inout.set_shape([None, None, nchan])

      with tf.name_scope('crop'):
        shape = tf.shape(inout)
        new_height = tf.to_int32(self.output_resolution[0])
        new_width = tf.to_int32(self.output_resolution[1])
        height_ok = tf.assert_less_equal(new_height, shape[0])
        width_ok = tf.assert_less_equal(new_width, shape[1])
        with tf.control_dependencies([height_ok, width_ok]):
          if self.random_crop:
            inout = tf.random_crop(
                inout, tf.stack([new_height, new_width, nchan]))
          else:
            height_offset = tf.to_int32((shape[0]-new_height)/2)
            width_offset = tf.to_int32((shape[1]-new_width)/2)
            inout = tf.image.crop_to_bounding_box(
                inout, height_offset, width_offset,
                new_height, new_width)

      inout.set_shape([None, None, nchan])
      inout = tf.image.resize_images(
          inout, [self.output_resolution[0], self.output_resolution[1]])
      fullres = inout

      with tf.name_scope('resize'):
        new_size = 256
        inout = tf.image.resize_images(
            inout, [new_size, new_size],
            method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

      return fullres, inout
Project: keras-mtcnn    Author: xiangrufan    | project source | file source
def myloss(random_int, type):  # need to make sure of the input type
    if print_progress: random_int = tf.Print(random_int, ['random in cls', random_int])
    condition1 = random_int[0] > tf.constant(rand_threshold[1])
    condition0 = random_int[0] > tf.constant(rand_threshold[0])
    condition_default = condition0 & condition1
    if type == 'cls':
        def lossfun(y_true, y_pred):
            if print_progress: condition = tf.Print(condition1, ['rand int[0]:', random_int[0],
                                                    ' tf.constant:', tf.constant(rand_threshold[1]),
                                                    ' condition1:', condition1 ])
            val = tf.case({condition1: lambda: K.mean(K.square(y_pred - y_true), axis=-1),
                           condition0: lambda: 0 * K.mean(K.square(y_true), axis=-1)
                           },
                          default=lambda: 0 * K.mean(K.square(y_true), axis=-1),
                          exclusive=False)

            if print_progress: val = tf.Print(val, ['cls loss out:',val,
                                                    ' rand int received:',random_int,
                                                    'condition',condition1])
            val.set_shape(K.mean(K.square(y_true), axis=-1).shape)
            return val
    elif type == 'roi':
        def lossfun(y_true, y_pred):
            if print_progress: condition = tf.Print(condition1, ['rand int[0]:', random_int[0],
                                                                ' tf.constant:', tf.constant(rand_threshold),
                                                                ' condition:', condition1])
            val = tf.case({condition1: lambda: 0 * K.mean(K.square(y_true), axis=-1),
                           condition0: lambda: K.mean(K.square(y_pred - y_true), axis=-1)
                           },
                          default=lambda: 0 * K.mean(K.square(y_true), axis=-1), exclusive=False)
            if print_progress: val = tf.Print(val, ['roi loss out :', val,
                                                    ' rand int received:', random_int,
                                                    'condition', condition1])
            val.set_shape(K.mean(K.square(y_true), axis=-1).shape)
            return val
    else:
        def lossfun(y_true, y_pred):
            if print_progress: condition = tf.Print(condition1, ['rand int[0]:', random_int[0],
                                                                 ' tf.constant:', tf.constant(rand_threshold),
                                                                 ' condition:', condition1])
            val = tf.case({condition1: lambda: 0 * K.mean(K.square(y_true), axis=-1),
                           condition0: lambda: 0 * K.mean(K.square(y_true), axis=-1)
                           },
                          default=lambda: K.mean(K.square(y_pred - y_true), axis=-1),exclusive=False)
            val.set_shape(K.mean(K.square(y_true), axis=-1).shape)
            if print_progress: val = tf.Print(val, ['pts loss out :', val,
                                                    ' rand int received:', random_int,
                                                    'condition', condition1])
            return val
    return lossfun
Project: fold    Author: tensorflow    | project source | file source
def __init__(self, num_buckets, num_units_out, initializer=None, name=None,
               trainable=True, mod_inputs=True):
    """Initializes the layer.

    Args:
      num_buckets: How many buckets the embedding has.
      num_units_out: The number of output units in the layer.
      initializer: the initializer for the weights. Defaults to uniform unit
        scaling. The initializer can also be a Tensor or numpy array, in which
        case the weights are initialized to this value and shape. Note that in
        this case the weights will still be trainable unless you also pass
        `trainable=False`.
      name: An optional string name. Defaults to
        `Embedding_%d_%d % (num_buckets, num_units_out)`. Used to name the
        variable scope where the variables for the layer live.
      trainable: Whether or not to make the weights trainable.
      mod_inputs: Whether or not to mod the input by the number of buckets.

    Raises:
      ValueError: If the shape of `weights` is not
        `(num_buckets, num_units_out)`.
    """

    self.set_constructor_args('td.Embedding',
                              *get_local_arguments(Embedding.__init__, True))

    self._weights_shape = (num_buckets, num_units_out)
    if name is None: name = 'Embedding_%d_%d' % self._weights_shape
    if initializer is None:
      initializer = tf.uniform_unit_scaling_initializer(1.0)
    elif isinstance(initializer, np.ndarray):
      initializer = tf.convert_to_tensor(initializer)
    if isinstance(initializer, tf.Tensor):
      initializer.set_shape(self._weights_shape)
      self._weights_shape = None  # otherwise get_variable barfs
    self._initializer = initializer
    self._num_buckets = num_buckets
    self._num_units_out = num_units_out
    self._trainable = trainable
    self._mod_inputs = bool(mod_inputs)
    super(Embedding, self).__init__(
        output_type=tdt.TensorType([num_units_out]), name_or_scope=name)
Project: fold    Author: tensorflow    | project source | file source
def __init__(self, num_fractal_blocks, fractal_block_depth,
               base_layer_builder, mixer=None, drop_path=False,
               p_local_drop_path=0.5, p_drop_base_case=0.25,
               p_drop_recursive_case=0.25, name=None):
    """Initializes the FractalNet.

    Args:
      num_fractal_blocks: The number of fractal blocks the net is made from.
        This variable is named `B` in the FractalNet paper.  This argument uses
        the word `block` in the sense that the FractalNet paper uses it.
      fractal_block_depth: How deeply nested the blocks are.  This variable is
        `C-1` in the paper.
      base_layer_builder: A callable that takes a name and returns a `Layer`
        object.  We would pass in a convolutional layer to reproduce the results
        in the paper.
      mixer: The join operation in the paper.  Assumed to have two arguments.
        Defaults to element-wise averaging.  Mixing doesn't occur if either path
        gets dropped.
      drop_path: A boolean, whether or not to do drop-path.  Defaults to False.
        If selected, we do drop path as described in the paper (unless drop-path
        choices are provided, in which case the user can further customize how
        drop path is done).
      p_local_drop_path: A probability between 0.0 and 1.0.  0.0 means always do
        global drop path.  1.0 means always do local drop path.  Default: 0.5,
        as in the paper.
      p_drop_base_case: The probability, when doing local drop path, to drop the
        base case.
      p_drop_recursive_case: The probability, when doing local drop path, to
        drop the recursive case. (Requires: `p_drop_base_case +
        p_drop_recursive_case < 1`)
      name: An optional string name.
    """
    self.set_constructor_args('td.FractalNet',
                              *get_local_arguments(FractalNet.__init__, True))

    if mixer is None:
      mixer = lambda a, b: tf.add(a, b)/2.0
    self._num_fractal_blocks = num_fractal_blocks
    self._fractal_block_depth = fractal_block_depth
    self._mixer = mixer
    self._drop_path = drop_path
    self._p_local_drop_path = p_local_drop_path
    self._p_drop_base_case = p_drop_base_case
    self._p_drop_recursive_case = p_drop_recursive_case
    self._drop_path_choices = None

    super(FractalNet, self).__init__(name_or_scope=name)
    self._children = {}
    self._choice_id = {}
    self._choices = []
    with tf.variable_scope(self._vscope):
      for block_idx in xrange(num_fractal_blocks):
        for binary_seq in _binary_sequences_of_at_most(fractal_block_depth):
          child_name = 'block_' + '_'.join(
              [str(block_idx)] + [str(b) for b in binary_seq])
          self._children[block_idx, binary_seq] = base_layer_builder(
              name=child_name)
          if len(binary_seq) < fractal_block_depth:
            self._choice_id[(block_idx, binary_seq)] = len(self._choices)
            self._choices.append((block_idx, binary_seq))
    self._propagate_types()
Project: magenta    Author: tensorflow    | project source | file source
def _provide_data(input_tensors, truncated_length, hparams):
  """Returns tensors for reading batches from provider."""
  (spec, labels, label_weights, length, onsets, filename,
   note_sequence) = input_tensors

  length = tf.to_int32(length)
  labels = tf.reshape(labels, (-1, constants.MIDI_PITCHES))
  label_weights = tf.reshape(label_weights, (-1, constants.MIDI_PITCHES))
  onsets = tf.reshape(onsets, (-1, constants.MIDI_PITCHES))
  spec = tf.reshape(spec, (-1, hparams_frame_size(hparams)))

  truncated_length = (tf.reduce_min([truncated_length, length])
                      if truncated_length else length)

  # Pad or slice specs and labels tensors to have the same lengths,
  # truncating after truncated_length.
  spec_delta = tf.shape(spec)[0] - truncated_length
  spec = tf.case(
      [(spec_delta < 0,
        lambda: tf.pad(spec, tf.stack([(0, -spec_delta), (0, 0)]))),
       (spec_delta > 0, lambda: spec[0:-spec_delta])],
      default=lambda: spec)
  labels_delta = tf.shape(labels)[0] - truncated_length
  labels = tf.case(
      [(labels_delta < 0,
        lambda: tf.pad(labels, tf.stack([(0, -labels_delta), (0, 0)]))),
       (labels_delta > 0, lambda: labels[0:-labels_delta])],
      default=lambda: labels)
  label_weights = tf.case(
      [(labels_delta < 0,
        lambda: tf.pad(label_weights, tf.stack([(0, -labels_delta), (0, 0)]))
       ), (labels_delta > 0, lambda: label_weights[0:-labels_delta])],
      default=lambda: label_weights)
  onsets = tf.case(
      [(labels_delta < 0,
        lambda: tf.pad(onsets, tf.stack([(0, -labels_delta), (0, 0)]))),
       (labels_delta > 0, lambda: onsets[0:-labels_delta])],
      default=lambda: onsets)

  truncated_note_sequence = truncate_note_sequence_op(
      note_sequence, truncated_length, hparams)

  batch_tensors = {
      'spec': tf.reshape(
          spec, (truncated_length, hparams_frame_size(hparams), 1)),
      'labels': tf.reshape(labels, (truncated_length, constants.MIDI_PITCHES)),
      'label_weights': tf.reshape(
          label_weights, (truncated_length, constants.MIDI_PITCHES)),
      'lengths': truncated_length,
      'onsets': tf.reshape(onsets, (truncated_length, constants.MIDI_PITCHES)),
      'filenames': filename,
      'note_sequences': truncated_note_sequence,
  }

  return batch_tensors
Project: sonnet    Author: deepmind    | project source | file source
def initial_state(self, batch_size, dtype=tf.float32, trainable=False,
                    trainable_initializers=None, trainable_regularizers=None,
                    name=None):
    """Builds the default start state tensor of zeros.

    Args:
      batch_size: An int, float or scalar Tensor representing the batch size.
      dtype: The data type to use for the state.
      trainable: Boolean that indicates whether to learn the initial state.
      trainable_initializers: An optional pair of initializers for the
          initial hidden state and cell state.
      trainable_regularizers: Optional regularizer function or nested structure
        of functions with the same structure as the `state_size` property of the
        core, to be used as regularizers of the initial state variable. A
        regularizer should be a function that takes a single `Tensor` as an
        input and returns a scalar `Tensor` output, e.g. the L1 and L2
        regularizers in `tf.contrib.layers`.
      name: Optional string used to prefix the initial state variable names, in
          the case of a trainable initial state. If not provided, defaults to
          the name of the module.

    Returns:
      A tensor tuple `([batch_size, state_size], [batch_size, state_size], ?)`
      filled with zeros, with the third entry present when batch norm is enabled
      with `max_unique_stats > 1`, with value `0` (representing the time step).
    """
    if self._max_unique_stats == 1:
      return super(BatchNormLSTM, self).initial_state(
          batch_size, dtype=dtype, trainable=trainable,
          trainable_initializers=trainable_initializers,
          trainable_regularizers=trainable_regularizers, name=name)
    else:
      with tf.name_scope(self._initial_state_scope(name)):
        if not trainable:
          state = self.zero_state(batch_size, dtype)
        else:
          # We have to manually create the state ourselves so we don't create a
          # variable that never gets used for the third entry.
          state = rnn_core.trainable_initial_state(
              batch_size,
              (tf.TensorShape([self._hidden_size]),
               tf.TensorShape([self._hidden_size])),
              dtype=dtype,
              initializers=trainable_initializers,
              regularizers=trainable_regularizers,
              name=self._initial_state_scope(name))
        return (state[0], state[1], tf.constant(0, dtype=tf.int32))
Project: hdrnet    Author: google    | project source | file source
def _augment_data(self, inout, nchan=6):
    """Flip, crop and rotate samples randomly."""

    with tf.name_scope('data_augmentation'):
      if self.fliplr:
        inout = tf.image.random_flip_left_right(inout, seed=1234)
      if self.flipud:
        inout = tf.image.random_flip_up_down(inout, seed=3456)
      if self.rotate:
        angle = tf.random_uniform((), minval=0, maxval=4, dtype=tf.int32, seed=4567)
        inout = tf.case([(tf.equal(angle, 1), lambda: tf.image.rot90(inout, k=1)),
                         (tf.equal(angle, 2), lambda: tf.image.rot90(inout, k=2)),
                         (tf.equal(angle, 3), lambda: tf.image.rot90(inout, k=3))],
                        lambda: inout)

      inout.set_shape([None, None, nchan])

      with tf.name_scope('crop'):
        shape = tf.shape(inout)
        new_height = tf.to_int32(self.output_resolution[0])
        new_width = tf.to_int32(self.output_resolution[1])
        height_ok = tf.assert_less_equal(new_height, shape[0])
        width_ok = tf.assert_less_equal(new_width, shape[1])
        with tf.control_dependencies([height_ok, width_ok]):
          if self.random_crop:
            inout = tf.random_crop(
                inout, tf.stack([new_height, new_width, nchan]))
          else:
            height_offset = tf.to_int32((shape[0]-new_height)/2)
            width_offset = tf.to_int32((shape[1]-new_width)/2)
            inout = tf.image.crop_to_bounding_box(
                inout, height_offset, width_offset,
                new_height, new_width)

      inout.set_shape([None, None, nchan])
      inout = tf.image.resize_images(
          inout, [self.output_resolution[0], self.output_resolution[1]])
      fullres = inout

      with tf.name_scope('resize'):
        new_size = 256
        inout = tf.image.resize_images(
            inout, [new_size, new_size],
            method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

      return fullres, inout
Project: ntm_keras    Author: flomlo    | project source | file source
def _split_and_apply_activations(self, controller_output):
        """ This takes the controller output, splits it in ntm_output, read and wright adressing data.
            It returns a triple of ntm_output, controller_instructions_read, controller_instructions_write.
            ntm_output is a tensor, controller_instructions_read and controller_instructions_write are lists containing
            the adressing instruction (k, beta, g, shift, gamma) and in case of write also the writing constructions,
            consisting of an erase and an add vector. 

            As it is necesseary for stable results,
            k and add_vector is activated via tanh, erase_vector via sigmoid (this is critical!),
            shift via softmax,
            gamma is sigmoided, inversed and clipped (probably not ideal)
            g is sigmoided,
            beta is linear (probably not ideal!) """

        # splitting
        ntm_output, controller_instructions_read, controller_instructions_write = tf.split(
                    controller_output,
                    np.asarray([self.output_dim,
                                self.read_heads * self.controller_read_head_emitting_dim,
                                self.write_heads * self.controller_write_head_emitting_dim]),
                    axis=1)

        controller_instructions_read = tf.split(controller_instructions_read, self.read_heads, axis=1)
        controller_instructions_write = tf.split(controller_instructions_write, self.write_heads, axis=1)

        controller_instructions_read = [
                tf.split(single_head_data, np.asarray([self.m_depth, 1, 1, 3, 1]), axis=1) for 
                single_head_data in controller_instructions_read]

        controller_instructions_write = [
                tf.split(single_head_data, np.asarray([self.m_depth, 1, 1, 3, 1, self.m_depth, self.m_depth]), axis=1) for 
                single_head_data in controller_instructions_write]

        # activation
        ntm_output = self.activation(ntm_output)
        controller_instructions_read = [(tanh(k), hard_sigmoid(beta)+0.5, sigmoid(g), softmax(shift), 1 + 9*sigmoid(gamma)) for
                (k, beta, g, shift, gamma) in controller_instructions_read]
        controller_instructions_write = [
                (tanh(k), hard_sigmoid(beta)+0.5, sigmoid(g), softmax(shift), 1 + 9*sigmoid(gamma), hard_sigmoid(erase_vector), tanh(add_vector))  for 
                (k, beta, g, shift, gamma, erase_vector, add_vector) in controller_instructions_write]

        return (ntm_output, controller_instructions_read, controller_instructions_write)