Python keras.backend module: dtype() example source code

We extracted the following 20 code examples from open-source Python projects to illustrate how to use keras.backend.dtype().
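
As a quick orientation, here is a minimal sketch (assuming the TensorFlow backend; the tensors are illustrative) of what keras.backend.dtype() returns:

from keras import backend as K

x = K.placeholder(shape=(None, 10), dtype='float32')
print(K.dtype(x))  # 'float32': the dtype string of a tensor or variable
v = K.variable(1.0, dtype='float64')
print(K.dtype(v))  # 'float64'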

Project: keras-contrib    Author: farizrahman4u    | project source | file source
def _preprocess_conv2d_input(x, data_format):
    """Transpose and cast the input before the conv2d.
    # Arguments
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.
    # Returns
        A tensor.
    """
    if dtype(x) == 'float64':
        x = tf.cast(x, 'float32')
    if data_format == 'channels_first':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = tf.transpose(x, (0, 2, 3, 1))
    return x
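
As a sanity check on the transpose above, this standalone sketch (TF 1.x style; shapes are illustrative) shows the channel axis moving from position 1 to the last position:

import tensorflow as tf

x = tf.placeholder('float32', shape=(None, 3, 32, 32))  # (samples, channels, rows, cols)
y = tf.transpose(x, (0, 2, 3, 1))
print(y.get_shape())  # (?, 32, 32, 3): channels are now the last dimension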
Project: keras-contrib    Author: farizrahman4u    | project source | file source
def clip(x, min_value, max_value):
    """Element-wise value clipping.

    If min_value > max_value, the clipping range collapses to [min_value, min_value].

    # Arguments
        x: Tensor or variable.
        min_value: Tensor, float, int, or None.
            If min_value is None, defaults to -infinity.
        max_value: Tensor, float, int, or None.
            If max_value is None, defaults to infinity.

    # Returns
        A tensor.
    """
    if max_value is None:
        max_value = np.inf
    if min_value is None:
        min_value = -np.inf
    min_value = _to_tensor(min_value, x.dtype.base_dtype)
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    max_value = tf.maximum(min_value, max_value)
    return tf.clip_by_value(x, min_value, max_value)
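
The tf.maximum guard above is what produces the [min_value, min_value] behaviour documented in the docstring; a standalone sketch (TF 1.x, illustrative values):

import tensorflow as tf

x = tf.constant([-2.0, 0.5, 3.0])
# An inverted range [2.0, 1.0] is collapsed to [2.0, 2.0] before clipping.
clipped = tf.clip_by_value(x, 2.0, tf.maximum(2.0, 1.0))
with tf.Session() as sess:
    print(sess.run(clipped))  # [2. 2. 2.]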
Project: LIE    Author: EmbraceLife    | project source | file source
def keras_wrap(model, target, output, loss):
    """ Convenience function for wrapping a Keras loss function.
    """
    # pylint: disable=import-error
    import keras.objectives as O
    import keras.backend as K
    # pylint: enable=import-error
    if isinstance(loss, str):
        loss = O.get(loss)
    shape = model.outputs[target].value._keras_shape # pylint: disable=protected-access
    ins = [
        (target, K.placeholder(
            ndim=len(shape),
            dtype=K.dtype(model.outputs[target].value),
            name=target
        ))
    ]
    out = loss(ins[0][1], output)
    return ins, out
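
For orientation, here is a hedged sketch of the plumbing keras_wrap performs, outside of any model (assumes an older Keras where keras.objectives exists, as imported above; names and shapes are illustrative):

import keras.objectives as O
import keras.backend as K

loss_fn = O.get('categorical_crossentropy')              # resolve the loss by name
y_true = K.placeholder(ndim=2, dtype='float32', name='target')
y_pred = K.placeholder(ndim=2, dtype='float32')
loss = loss_fn(y_true, y_pred)                           # a symbolic loss tensor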

###############################################################################
Project: LIE    Author: EmbraceLife    | project source | file source
def derive(self, inputs):
        # Break it apart
        sizes, = inputs
        if sizes.ndim < 2:
            sizes = numpy.expand_dims(sizes, -1)
        outputs = numpy.array(
            [
                self.model.get_shape_at_layer(
                    name=self.to_this,
                    assumptions={
                        self.relative_to : \
                            (x[0], ) + tuple(self.normal_shape[1:])
                    }
                )[0]
                for x in sizes
            ],
            dtype='int32'
        )
        return numpy.expand_dims(outputs, axis=1)

    ###############################################################
Project: kur    Author: deepgram    | project source | file source
def keras_wrap(model, target, output, loss):
    """ Convenience function for wrapping a Keras loss function.
    """
    # pylint: disable=import-error
    import keras.objectives as O
    import keras.backend as K
    # pylint: enable=import-error
    if isinstance(loss, str):
        loss = O.get(loss)
    shape = model.outputs[target].value._keras_shape # pylint: disable=protected-access
    ins = [
        (target, K.placeholder(
            ndim=len(shape),
            dtype=K.dtype(model.outputs[target].value),
            name=target
        ))
    ]
    out = loss(ins[0][1], output)
    return ins, out

###############################################################################
Project: kur    Author: deepgram    | project source | file source
def derive(self, inputs):
        # Break it apart
        sizes, = inputs
        if sizes.ndim < 2:
            sizes = numpy.expand_dims(sizes, -1)
        outputs = numpy.array(
            [
                self.model.get_shape_at_layer(
                    name=self.to_this,
                    assumptions={
                        self.relative_to : \
                            (x[0], ) + tuple(self.normal_shape[1:])
                    }
                )[0]
                for x in sizes
            ],
            dtype='int32'
        )
        return numpy.expand_dims(outputs, axis=1)

    ###############################################################
Project: Named-Entity-Recognition    Author: vishal1796    | project source | file source
def step(self, input_energy_t, states, return_logZ=True):
        # Note: in the following, `prev_target_val` has shape = (B, F)
        # where B = batch_size, F = output feature dim
        # Note: `i` is of float32, due to the behavior of `K.rnn`
        prev_target_val, i, chain_energy = states[:3]
        t = K.cast(i[0, 0], dtype='int32')
        if len(states) > 3:
            if K.backend() == 'theano':
                m = states[3][:, t:(t + 2)]
            else:
                m = K.tf.slice(states[3], [0, t], [-1, 2])
            input_energy_t = input_energy_t * K.expand_dims(m[:, 0])
            chain_energy = chain_energy * K.expand_dims(K.expand_dims(m[:, 0] * m[:, 1]))  # (1, F, F)*(B, 1, 1) -> (B, F, F)
        if return_logZ:
            energy = chain_energy + K.expand_dims(input_energy_t - prev_target_val, 2)  # shapes: (1, B, F) + (B, F, 1) -> (B, F, F)
            new_target_val = K.logsumexp(-energy, 1)  # shapes: (B, F)
            return new_target_val, [new_target_val, i + 1]
        else:
            energy = chain_energy + K.expand_dims(input_energy_t + prev_target_val, 2)
            min_energy = K.min(energy, 1)
            argmin_table = K.cast(K.argmin(energy, 1), K.floatx())  # cast for tf-version `K.rnn`
            return argmin_table, [min_energy, i + 1]
Project: DIL    Author: FoxRow    | project source | file source
def yolo_eval(yolo_outputs, image_shape, max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
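
The non-max-suppression step above can be exercised on its own; a minimal sketch (TF 1.x, hypothetical boxes and scores):

import tensorflow as tf

boxes = tf.constant([[0., 0., 10., 10.],
                     [1., 1., 11., 11.],    # heavily overlaps the first box
                     [50., 50., 60., 60.]])
scores = tf.constant([0.9, 0.8, 0.7])
keep = tf.image.non_max_suppression(boxes, scores, max_output_size=2, iou_threshold=.5)
with tf.Session() as sess:
    print(sess.run(keep))  # [0 2]: the overlapping lower-score box is suppressed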
Project: academic    Author: xinchrome    | project source | file source
def __init__(self, sequences_value, pred_length, delta = 1., sequence_weights=None, proxy_layer=None, sample_stddev=None, **kwargs):
        """
        Can only be used as the first layer of an architecture.

        sequences_value is indexed as [sequence, event, type, feature].

        Sequences contain only training events.
        """
        self.sequences_value = np.array(sequences_value, dtype='float32')
        self.sequences_initializer = Constant(self.sequences_value)
        shape = self.sequences_value.shape
        self.nb_sequence = shape[0]
        self.nb_event = shape[1]
        self.nb_type = shape[2]
        self.nb_feature = shape[3]
        self.pred_length = pred_length
        self.delta = delta
        self.proxy_layer = proxy_layer
        self.sample_stddev = sample_stddev

        if self.proxy_layer:
            super(HawkesLayer, self).__init__(**kwargs)
            return

        if sequence_weights:
            assert len(sequence_weights) == self.nb_sequence
            assert len(sequence_weights[0]['spont']) == self.nb_type

            self.spont_initializer = Constant(np.array([x['spont'] for x in sequence_weights]))
            self.Theta_initializer = Constant(np.array([x['theta'] for x in sequence_weights]))
            self.W_initializer = Constant(np.array([x['w'] for x in sequence_weights]))
            self.Alpha_initializer = Constant(np.array([x['alpha'] for x in sequence_weights]))
        else:
            self.spont_initializer = Constant(np.array([[1.3 for j in range(self.nb_type)] for i in range(self.nb_sequence)]))
            self.Theta_initializer = Constant(np.array([[0.05 for j in range(self.nb_type)] for i in range(self.nb_sequence)]))
            self.W_initializer = Constant(np.array([[1. for j in range(self.nb_type)] for i in range(self.nb_sequence)]))
            self.Alpha_initializer = Constant(np.array([[[1. for k in range(self.nb_type)] for j in range(self.nb_type)] for i in range(self.nb_sequence)]))

        super(HawkesLayer, self).__init__(**kwargs)
Project: YAD2K    Author: allanzelener    | project source | file source
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
Project: mcv-m5    Author: david-vazquez    | project source | file source
def _preprocess_conv2d_input(x, dim_ordering):
    if K.dtype(x) == 'float64':
        x = tf.cast(x, 'float32')
    if dim_ordering == 'th':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = tf.transpose(x, (0, 2, 3, 1))
    return x
Project: mcv-m5    Author: david-vazquez    | project source | file source
def _preprocess_conv2d_kernel(kernel, dim_ordering):
    if K.dtype(kernel) == 'float64':
        kernel = tf.cast(kernel, 'float32')
    if dim_ordering == 'th':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        kernel = tf.transpose(kernel, (2, 3, 1, 0))
    return kernel
Project: Named-Entity-Recognition    Author: vishal1796    | project source | file source
def _get_accuracy(y_true, y_pred, mask, sparse_target=False):
        y_pred = K.argmax(y_pred, -1)
        if sparse_target:
            y_true = K.cast(y_true[:, :, 0], K.dtype(y_pred))
        else:
            y_true = K.argmax(y_true, -1)
        judge = K.cast(K.equal(y_pred, y_true), K.floatx())
        if mask is None:
            return K.mean(judge)
        else:
            mask = K.cast(mask, K.floatx())
            return K.sum(judge * mask) / K.sum(mask)
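
A tiny NumPy sketch (illustrative values) of the masked branch above: correct predictions are averaged only over unmasked timesteps:

import numpy as np

judge = np.array([[1., 0., 1.], [1., 1., 1.]])  # 1 where y_pred == y_true
mask  = np.array([[1., 1., 0.], [1., 1., 1.]])  # 0 marks padding timesteps
print((judge * mask).sum() / mask.sum())        # 0.8, vs. the unmasked mean 0.833...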
Project: keras_zoo    Author: david-vazquez    | project source | file source
def _preprocess_conv2d_input(x, dim_ordering):
    if K.dtype(x) == 'float64':
        x = tf.cast(x, 'float32')
    if dim_ordering == 'th':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        x = tf.transpose(x, (0, 2, 3, 1))
    return x
Project: keras_zoo    Author: david-vazquez    | project source | file source
def _preprocess_conv2d_kernel(kernel, dim_ordering):
    if K.dtype(kernel) == 'float64':
        kernel = tf.cast(kernel, 'float32')
    if dim_ordering == 'th':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF kernel shape: (rows, cols, input_depth, depth)
        kernel = tf.transpose(kernel, (2, 3, 1, 0))
    return kernel
Project: PiCamNN    Author: PiSimo    | project source | file source
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
Project: keras-neural-graph-fingerprint    Author: keiserlab    | project source | file source
def neighbour_lookup(atoms, edges, maskvalue=0, include_self=False):
    ''' Looks up the features of all atoms' neighbours, for a batch of molecules.

    # Arguments:
        atoms (K.tensor): of shape (batch_n, max_atoms, num_atom_features)
        edges (K.tensor): of shape (batch_n, max_atoms, max_degree) with neighbour
            indices and -1 as padding value
        maskvalue (numerical): the masking value that should be used for empty atoms
            or atoms that have no neighbours (does not affect the input padding value,
            which should always be -1!)
        include_self (bool): if True, the feature vector of each atom will be added
            to the list of feature vectors of its neighbours

    # Returns:
        neighbour_features (K.tensor): of shape (batch_n, max_atoms(+1), max_degree,
            num_atom_features), depending on the value of include_self

    # Todo:
        - make this function compatible with TensorFlow; it should be quite trivial
            because there is an equivalent of `T.arange` in TensorFlow.
    '''

    # The lookup masking trick: We add 1 to all indices, converting the
    #   masking value of -1 to a valid 0 index.
    masked_edges = edges + 1
    # We then add a padding vector at index 0 by padding to the left of the
    #   lookup matrix with the value that the new mask should get
    masked_atoms = temporal_padding(atoms, (1,0), padvalue=maskvalue)


    # Import dimensions
    atoms_shape = K.shape(masked_atoms)
    batch_n = atoms_shape[0]
    lookup_size = atoms_shape[1]
    num_atom_features = atoms_shape[2]

    edges_shape = K.shape(masked_edges)
    max_atoms = edges_shape[1]
    max_degree = edges_shape[2]

    # create broadcastable offset
    offset_shape = (batch_n, 1, 1)
    offset = K.reshape(T.arange(batch_n, dtype=K.dtype(masked_edges)), offset_shape)
    offset *= lookup_size

    # apply offset to account for the fact that after reshape, all individual
    #   batch_n indices will be combined into a single big index
    flattened_atoms = K.reshape(masked_atoms, (-1, num_atom_features))
    flattened_edges = K.reshape(masked_edges + offset, (batch_n, -1))

    # Gather flattened
    flattened_result = K.gather(flattened_atoms, flattened_edges)

    # Unflatten result
    output_shape = (batch_n, max_atoms, max_degree, num_atom_features)
    output = T.reshape(flattened_result, output_shape)

    if include_self:
        return K.concatenate([K.expand_dims(atoms, dim=2), output], axis=2)
    return output
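
The "-1 padding to index 0" trick above is easy to see in plain NumPy (a sketch, not from the project; values are illustrative):

import numpy as np

atoms = np.array([[1., 1.], [2., 2.], [3., 3.]])      # (max_atoms, num_features)
edges = np.array([[1, 2], [0, -1], [0, -1]])          # -1 pads missing neighbours

masked_edges = edges + 1                               # -1 becomes the valid index 0
masked_atoms = np.vstack([np.zeros((1, 2)), atoms])    # row 0 holds the mask value
print(masked_atoms[masked_edges])                      # padded slots come back as zeros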
Project: huffmax    Author: farizrahman4u    | project source | file source
def call(self, x, mask=None):
        input_vector = x[0]
        target_classes = x[1]
        nb_req_classes = self.input_spec[1].shape[1]
        if nb_req_classes is None:
            nb_req_classes = K.shape(target_classes)
        if K.dtype(target_classes) != 'int32':
            target_classes = K.cast(target_classes, 'int32')
        if self.mode == 0:
            # One giant matrix mul
            input_dim = self.input_spec[0].shape[1]
            nb_req_classes = self.input_spec[1].shape[1]
            path_lengths = map(len, self.paths)
            huffman_codes = K.variable(np.array(self.huffman_codes))
            req_nodes = K.gather(self.class_path_map, target_classes)
            req_W = K.gather(self.W, req_nodes)
            y = K.batch_dot(input_vector, req_W, axes=(1, 3))
            if self.bias:
                req_b = K.gather(self.b, req_nodes)
                y += req_b
            y = K.sigmoid(y[:, :, :, 0])
            req_huffman_codes = K.gather(huffman_codes, target_classes)
            return K.prod(req_huffman_codes + y - 2 * req_huffman_codes * y, axis=-1)  # Thug life
        elif self.mode == 1:
            # Many tiny matrix muls
            probs = []
            for i in range(len(self.paths)):
                huffman_code = self.huffman_codes[i]
                path = self.paths[i]
                prob = 1.
                for j in range(len(path)):
                    node = path[j]
                    node_index = self.node_indices[node]
                    p = K.dot(input_vector, self.W[node_index, :, :])[:, 0]
                    if self.bias:
                        p += self.b[node_index, :][0]
                    h = huffman_code[j]
                    p = K.sigmoid(p)
                    prob *= h + p - 2 * p * h
                probs += [prob]
            probs = K.pack(probs)
            req_probs = K.gather(probs, target_classes)
            req_probs = K.permute_dimensions(req_probs, (0, 2, 1))
            req_probs = K.reshape(req_probs, (-1, nb_req_classes))
            batch_size = K.shape(input_vector)[0]
            indices = arange(batch_size * batch_size, batch_size + 1)
            req_probs = K.gather(req_probs, indices)
            return req_probs
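
The expression h + p - 2 * p * h used in both modes above is the per-node Bernoulli factor of a Huffman path: it equals p when the code bit h is 0, and 1 - p when h is 1. A one-line NumPy check (illustrative values):

import numpy as np

p = np.array([0.9, 0.2, 0.7])   # sigmoid outputs along a path
h = np.array([0., 1., 0.])      # the path's Huffman code bits
print(h + p - 2 * p * h)        # [0.9 0.8 0.7]: p where h == 0, 1 - p where h == 1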
Project: Named-Entity-Recognition    Author: vishal1796    | project source | file source
def recursion(self, input_energy, mask=None, go_backwards=False, return_sequences=True, return_logZ=True, input_length=None):
        """Forward (alpha) or backward (beta) recursion

        If `return_logZ = True`, compute logZ, the normalization constant:

        \[ Z = \sum_{y1, y2, y3} \exp(-E)  # E is the total energy
             = \sum_{y1, y2, y3} \exp(-(u1' y1 + y1' W y2 + u2' y2 + y2' W y3 + u3' y3))
             = \sum_{y2, y3} \exp(-(u2' y2 + y2' W y3 + u3' y3)) \sum_{y1} \exp(-(u1' y1 + y1' W y2)) \]

        Denote:
            \[ S(y2) := \sum_{y1} \exp(-(u1' y1 + y1' W y2)) \]
            \[ \log S(y2) = \mathrm{logsumexp}_{y1}(-(u1' y1 + y1' W y2)) \]
            \[ Z = \sum_{y2, y3} \exp(\log S(y2) - (u2' y2 + y2' W y3 + u3' y3)) \]
        Note that:
              yi's are one-hot vectors
              u1, u3: boundary energies have been merged

        If `return_logZ = False`, compute the Viterbi's best path lookup table.
        """
        chain_energy = self.chain_kernel
        chain_energy = K.expand_dims(chain_energy, 0)  # shape=(1, F, F): F=num of output features. 1st F is for t-1, 2nd F for t
        prev_target_val = K.zeros_like(input_energy[:, 0, :])  # shape=(B, F), dtype=float32

        if go_backwards:
            input_energy = K.reverse(input_energy, 1)
            if mask is not None:
                mask = K.reverse(mask, 1)

        initial_states = [prev_target_val, K.zeros_like(prev_target_val[:, :1])]
        constants = [chain_energy]

        if mask is not None:
            mask2 = K.cast(K.concatenate([mask, K.zeros_like(mask[:, :1])], axis=1), K.floatx())
            constants.append(mask2)

        def _step(input_energy_i, states):
            return self.step(input_energy_i, states, return_logZ)

        target_val_last, target_val_seq, _ = K.rnn(_step, input_energy, initial_states, constants=constants,
                                                   input_length=input_length, unroll=self.unroll)

        if return_sequences:
            if go_backwards:
                target_val_seq = K.reverse(target_val_seq, 1)
            return target_val_seq
        else:
            return target_val_last
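
A compact NumPy sketch (not from the project; F and the energies are illustrative) of the forward logZ recursion the docstring derives, for a length-3 chain:

import numpy as np

def logsumexp(a, axis):
    m = a.max(axis=axis, keepdims=True)
    return (m + np.log(np.exp(a - m).sum(axis=axis, keepdims=True))).squeeze(axis)

F = 4
u = np.random.randn(3, F)   # unary energies u1, u2, u3 (boundary terms already merged)
W = np.random.randn(F, F)   # chain (transition) energies, shared across timesteps

alpha = -u[0]                # log-potentials over y1
for t in (1, 2):
    # logsumexp over the previous label, then add the current unary energy
    alpha = logsumexp(alpha[:, None] - W, axis=0) - u[t]
logZ = logsumexp(alpha, axis=0)
print(logZ)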
Project: DIL    Author: FoxRow    | project source | file source
def yolo_head(feats, anchors, num_classes):
    """Convert final layer features to bounding box parameters.

    Parameters
    ----------
    feats : tensor
        Final convolutional layer features.
    anchors : array-like
        Anchor box widths and heights.
    num_classes : int
        Number of target classes.

    Returns
    -------
    box_xy : tensor
        x, y box predictions adjusted by spatial location in conv layer.
    box_wh : tensor
        w, h box predictions adjusted by anchors and conv spatial resolution.
    box_conf : tensor
        Probability estimate for whether each box contains any object.
    box_class_pred : tensor
        Probability distribution estimate for each box over class labels.
    """
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.variable(anchors), [1, 1, 1, num_anchors, 2])

    # Dynamic implementation of conv dims for fully convolutional model.
    conv_dims = K.shape(feats)[1:3]  # assuming channels last
    # In YOLO the height index is the innermost iteration.
    conv_height_index = K.arange(0, stop=conv_dims[0])
    conv_width_index = K.arange(0, stop=conv_dims[1])
    conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

    conv_width_index = K.tile(K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
    conv_width_index = K.flatten(K.transpose(conv_width_index))
    conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
    conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
    conv_index = K.cast(conv_index, K.dtype(feats))

    feats = K.reshape(feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
    conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))

    box_xy = K.sigmoid(feats[..., :2])
    box_wh = K.exp(feats[..., 2:4])
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.softmax(feats[..., 5:])

    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over height index before width index.
    box_xy = (box_xy + conv_index) / conv_dims
    box_wh = box_wh * anchors_tensor / conv_dims

    return box_xy, box_wh, box_confidence, box_class_probs
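
The grid-offset construction above translates directly to NumPy; a small sketch for a 2x3 grid (illustrative sizes) showing that the height index iterates fastest, as the comment notes:

import numpy as np

H, W = 2, 3
conv_height_index = np.tile(np.arange(H), [W])                          # [0 1 0 1 0 1]
conv_width_index = np.tile(np.arange(W)[None, :], [H, 1]).T.flatten()   # [0 0 1 1 2 2]
conv_index = np.stack([conv_height_index, conv_width_index]).T          # (6, 2) pairs
print(conv_index)  # [[0 0] [1 0] [0 1] [1 1] [0 2] [1 2]]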