Python numpy module: NINF usage examples

The following 18 code examples, extracted from open-source Python projects, illustrate how to use numpy.NINF.
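
For reference, numpy.NINF is simply an alias for IEEE-754 negative infinity, identical to -np.inf and float('-inf'); it is typically used as an initial value when searching for a maximum, since every finite number compares greater than it. (The alias was removed in NumPy 2.0, where -np.inf should be used instead.) A minimal sketch:

import numpy as np

# np.NINF is negative infinity (NumPy < 2.0; use -np.inf on newer releases).
assert np.NINF == -np.inf == float('-inf')

# Every finite float compares greater than it, which makes it a safe
# starting value for a running maximum.
assert 0.0 > np.NINF and -1e308 > np.NINF

# The standard infinity checks apply.
assert np.isinf(np.NINF) and np.isneginf(np.NINF)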

Project: MAP-IT    Author: alexmarder    | project source | file source
def max2(iterable, key=lambda x: x):
    first = None
    second = None
    first_value = np.NINF
    second_value = np.NINF
    for v in iterable:
        n = key(v)
        if n > first_value:
            second = first
            second_value = first_value
            first = v
            first_value = n
        elif n > second_value:
            second = v
            second_value = n
    return first, first_value, second, second_value
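
A quick usage sketch (hypothetical data, assuming the max2 above is in scope): because both running maxima start at np.NINF, any finite element immediately displaces them.

best, best_val, runner_up, runner_val = max2([3, 9, 4, 7])
# best == 9, best_val == 9, runner_up == 7, runner_val == 7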
Project: MAP-IT    Author: alexmarder    | project source | file source
def max2(iterable, key=None):
    first = None
    second = None
    first_value = np.NINF
    second_value = np.NINF
    for v in iterable:
        n = key(v) if key is not None else v
        if n > first_value:
            second = first
            second_value = first_value
            first = v
            first_value = n
        elif n > second_value:
            second = v
            second_value = n
    return first, first_value, second, second_value
Project: dstk    Author: jotterbach    | project source | file source
def _plot_bucket_values(splits, values, title=None, class_labels={0: '0', 1: '1'}):
    class_0 = [val[0] for val in values]
    class_1 = [val[1] for val in values]
    sp = np.asarray(splits)
    non_na = sp[~np.isnan(sp)]
    non_na = np.insert(non_na, 0, np.NINF)
    label = ['({0:6.2f}, {1:6.2f}]'.format(tup[0], tup[1]) for tup in zip(non_na[:-1], non_na[1:])] + ['nan']
    ind = np.arange(len(class_0))
    w = 0.5
    plt.bar(ind, class_0, w, label=class_labels[0])
    plt.bar(ind, class_1, w, bottom=class_0, color='g', label=class_labels[1])
    plt.xticks(ind + w / 2., label, size=16, rotation=75)
    plt.yticks(size=16)
    plt.legend(fontsize=16)
    if title:
        plt.title(title, size=16)
    plt.xlabel('bucket', size=18)
    plt.ylabel('bucket value', size=18)
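
A minimal invocation sketch (hypothetical splits and values; assumes the function above and matplotlib are importable). The trailing NaN in splits stands for the dedicated NaN bucket, and np.NINF is inserted as the open left edge of the first interval label:

import numpy as np
import matplotlib.pyplot as plt

splits = [1.5, 3.0, np.nan]                      # hypothetical bucket edges
values = [[0.8, 0.2], [0.4, 0.6], [0.5, 0.5]]    # one [class_0, class_1] pair per bucket
_plot_bucket_values(splits, values, title='toy example')
plt.show()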
Project: dstk    Author: jotterbach    | project source | file source
def _recurse_tree(tree, lst, mdlp, node_id=0, depth=0, min_val=np.NINF, max_val=np.PINF):
    left_child = tree.children_left[node_id]
    right_child = tree.children_right[node_id]

    if left_child == sklearn.tree._tree.TREE_LEAF:
        lst.append(((min_val, max_val), tree.value[node_id].flatten().tolist()))
        return
    else:
        if mdlp and _check_mdlp_stop(tree, node_id):
            lst.append(((min_val, max_val), tree.value[node_id].flatten().tolist()))
            return
        _recurse_tree(tree, lst, mdlp, left_child, depth=depth + 1, min_val=min_val, max_val=tree.threshold[node_id])

    if right_child == sklearn.tree._tree.TREE_LEAF:
        lst.append(((min_val, max_val), tree.value[node_id].flatten().tolist()))
        return
    else:
        if mdlp and _check_mdlp_stop(tree, node_id):
            lst.append(((min_val, max_val), tree.value[node_id].flatten().tolist()))
            return
        _recurse_tree(tree, lst, mdlp, right_child, depth=depth + 1, min_val=tree.threshold[node_id], max_val=max_val)
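
A usage sketch on a toy fitted tree (hypothetical data; assumes the snippet's own imports of numpy and sklearn are in place). With a single split at roughly 1.5, the recursion appends one (interval, leaf values) pair per leaf, with np.NINF and np.PINF as the open outer edges:

from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier(max_depth=1).fit([[0.0], [1.0], [2.0], [3.0]], [0, 0, 1, 1])
buckets = []
_recurse_tree(clf.tree_, buckets, mdlp=False)
# buckets pairs each interval with its leaf's class distribution:
# [((-inf, 1.5), [...]), ((1.5, inf), [...])]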
Project: deep_srl    Author: luheng    | project source | file source
def get_transition_params(label_strs):
  '''Construct transition scores (0 for allowed, -inf for invalid).
  Args:
    label_strs: A [num_tags,] sequence of BIO-tags.
  Returns:
    A [num_tags, num_tags] matrix of transition scores.  
  '''
  num_tags = len(label_strs)
  transition_params = numpy.zeros([num_tags, num_tags], dtype=numpy.float32)
  for i, prev_label in enumerate(label_strs):
    for j, label in enumerate(label_strs):
      if i != j and label[0] == 'I' and not prev_label == 'B' + label[1:]:
        transition_params[i,j] = numpy.NINF
  return transition_params
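
A usage sketch with a hypothetical BIO tag set: an I- tag may only follow its matching B- tag or itself, so every other transition into it scores -inf.

tags = ['B-ARG0', 'I-ARG0', 'B-ARG1', 'I-ARG1', 'O']
params = get_transition_params(tags)
# params[0, 1] == 0.0     # B-ARG0 -> I-ARG0: allowed
# params[2, 1] == -inf    # B-ARG1 -> I-ARG0: invalid
# params[4, 3] == -inf    # O -> I-ARG1: invalid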
Project: radar    Author: amoose136    | project source | file source
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
Project: krpcScripts    Author: jwvanderbeck    | project source | file source
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
Project: AequilibraE    Author: AequilibraE    | project source | file source
def factor(self, marginals, targets):
        f = np.divide(targets, marginals)  # Compute the factors,
        f[f == np.NINF] = 1  # then treat the error cases: -inf is remapped first
        f = f + 1  # and NaN second -- shift everything by +1 so that
        f = np.nan_to_num(f)  # nan_to_num turns the NaN entries into 0,
        f[f == 0] = 2  # remap those to 2, and shift back: -inf and NaN both
        f = f - 1  # end up at the neutral factor 1, using only numpy calls
        return f
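
A sketch of the edge cases this handles (hypothetical arrays): a zero marginal yields a -inf or NaN factor, and both end up remapped to the neutral factor 1.

import numpy as np

marginals = np.array([2.0, 0.0, 0.0])
targets = np.array([4.0, 0.0, -3.0])
with np.errstate(divide='ignore', invalid='ignore'):
    f = np.divide(targets, marginals)   # [2.0, nan, -inf]
# After the remapping in factor(), the nan and -inf entries both become 1.0.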
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | project source | file source
def test_frame_from_json_nones(self):
        df = DataFrame([[1, 2], [4, 5, 6]])
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))

        df = DataFrame([['1', '2'], ['4', '5', '6']])
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(), dtype=False)
        self.assertTrue(unser[2][0] is None)
        unser = read_json(df.to_json(), convert_axes=False, dtype=False)
        self.assertTrue(unser['2']['0'] is None)

        unser = read_json(df.to_json(), numpy=False)
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(), numpy=False, dtype=False)
        self.assertTrue(unser[2][0] is None)
        unser = read_json(df.to_json(), numpy=False,
                          convert_axes=False, dtype=False)
        self.assertTrue(unser['2']['0'] is None)

        # infinities get mapped to nulls which get mapped to NaNs during
        # deserialisation
        df = DataFrame([[1, 2], [4, 5, 6]])
        df.loc[0, 2] = np.inf
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(), dtype=False)
        self.assertTrue(np.isnan(unser[2][0]))

        df.loc[0, 2] = np.NINF
        unser = read_json(df.to_json())
        self.assertTrue(np.isnan(unser[2][0]))
        unser = read_json(df.to_json(), dtype=False)
        self.assertTrue(np.isnan(unser[2][0]))
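
The behavior under test, in isolation (a minimal sketch): pandas serializes infinities as JSON null, which read_json then deserializes as NaN.

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [np.NINF]})
df.to_json()        # '{"a":{"0":null}}' -- the -inf is written as null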
Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda    Author: SignalMedia    | project source | file source
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
Project: aws-lambda-numpy    Author: vitolimandibhrata    | project source | file source
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
Project: lambda-numba    Author: rlhotovy    | project source | file source
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
Project: cpnest    Author: johnveitch    | project source | file source
def compute_weights(data, Nlive):
    """Returns log_ev, log_wts for the log-likelihood samples in data,
    assumed to be a result of nested sampling with Nlive live points."""

    start_data=np.concatenate(([float('-inf')], data[:-Nlive]))
    end_data=data[-Nlive:]

    log_wts=np.zeros(data.shape[0])

    log_vols_start=np.cumsum(np.ones(len(start_data)+1)*np.log1p(-1./Nlive))-np.log1p(-1./Nlive)
    log_vols_end=np.zeros(len(end_data))
    log_vols_end[-1]=np.NINF
    log_vols_end[0]=log_vols_start[-1]+np.log1p(-1.0/Nlive)
    for i in range(len(end_data)-1):
        log_vols_end[i+1]=log_vols_end[i]+np.log1p(-1.0/(Nlive-i))

    log_likes = np.concatenate((start_data,end_data,[end_data[-1]]))

    log_vols=np.concatenate((log_vols_start,log_vols_end))
    log_ev = log_integrate_log_trap(log_likes, log_vols)

    log_dXs = logsubexp(log_vols[:-1], log_vols[1:])
    log_wts = log_likes[1:-1] + log_dXs[:-1]

    log_wts -= log_ev

    return log_ev, log_wts
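
compute_weights relies on two helpers not shown in this snippet (logsubexp and log_integrate_log_trap). A plausible sketch of logsubexp, under the assumption that it computes log(exp(a) - exp(b)) stably for a >= b:

import numpy as np

def logsubexp(a, b):
    # log(exp(a) - exp(b)) = a + log1p(-exp(b - a)); factoring out exp(a)
    # keeps the subtraction from overflowing for large a.
    return a + np.log1p(-np.exp(b - a))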
Project: deliver    Author: orchestor    | project source | file source
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
Project: dstk    Author: jotterbach    | project source | file source
def _recurse(tree, feature_vec):

    assert isinstance(tree, Tree), "Tree is not a sklearn Tree"

    break_idx = 0
    node_id = 0

    if not isinstance(feature_vec, list):
        feature_vec = list([feature_vec])

    leaf_node_id = 0
    lower = np.NINF
    upper = np.PINF

    while (node_id != TREE_LEAF) & (tree.feature[node_id] != TREE_UNDEFINED):
        feature_idx = tree.feature[node_id]
        threshold = tree.threshold[node_id]

        if np.float32(feature_vec[feature_idx]) <= threshold:
            upper = threshold
            if (tree.children_left[node_id] != TREE_LEAF) and (tree.children_left[node_id] != TREE_UNDEFINED):
                leaf_node_id = tree.children_left[node_id]
            node_id = tree.children_left[node_id]
        else:
            lower = threshold
            if (tree.children_right[node_id] != TREE_LEAF) and (tree.children_right[node_id] != TREE_UNDEFINED):
                leaf_node_id = tree.children_right[node_id]
            node_id = tree.children_right[node_id]

        break_idx += 1
        if break_idx > 2 * tree.node_count:
            raise RuntimeError("infinite recursion!")

    return leaf_node_id, lower, upper
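
A usage sketch on a toy fitted tree (hypothetical data; assumes the function and its module's sklearn Tree/TREE_LEAF/TREE_UNDEFINED imports are in scope):

from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier(max_depth=1).fit([[0.0], [1.0], [2.0], [3.0]], [0, 0, 1, 1])
leaf_id, lower, upper = _recurse(clf.tree_, [2.5])
# 2.5 falls right of the ~1.5 split: lower == 1.5, upper == np.PINF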
Project: Alfred    Author: jkachhadia    | project source | file source
def test_any_ninf(self):
        # atan2(+-y, -infinity) returns +-pi for finite y > 0.
        assert_almost_equal(ncu.arctan2(1, np.NINF),  np.pi)
        assert_almost_equal(ncu.arctan2(-1, np.NINF), -np.pi)
Project: pandas-profiling    Author: JosPolfliet    | project source | file source
def describe_1d(data, **kwargs):
    leng = len(data)  # number of observations in the Series
    count = data.count()  # number of non-NaN observations in the Series

    # Replace infinite values with NaNs to avoid issues with
    # histograms later.
    data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)

    n_infinite = count - data.count()  # number of infinite observations in the Series

    distinct_count = data.nunique(dropna=False)  # number of unique elements in the Series
    if count > distinct_count > 1:
        mode = data.mode().iloc[0]
    else:
        mode = data[0]

    results_data = {'count': count,
                    'distinct_count': distinct_count,
                    'p_missing': 1 - count / leng,
                    'n_missing': leng - count,
                    'p_infinite': n_infinite / leng,
                    'n_infinite': n_infinite,
                    'is_unique': distinct_count == leng,
                    'mode': mode,
                    'p_unique': distinct_count / leng}
    try:
        # pandas 0.17 onwards
        results_data['memorysize'] = data.memory_usage()
    except AttributeError:
        results_data['memorysize'] = 0

    result = pd.Series(results_data, name=data.name)

    vartype = get_vartype(data)
    if vartype == 'CONST':
        result = result.append(describe_constant_1d(data))
    elif vartype == 'BOOL':
        result = result.append(describe_boolean_1d(data, **kwargs))
    elif vartype == 'NUM':
        result = result.append(describe_numeric_1d(data, **kwargs))
    elif vartype == 'DATE':
        result = result.append(describe_date_1d(data, **kwargs))
    elif vartype == 'UNIQUE':
        result = result.append(describe_unique_1d(data, **kwargs))
    else:
        result = result.append(describe_categorical_1d(data))
    return result
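
The infinity-handling trick above, in isolation (a minimal sketch): replacing inf and -inf with NaN lets the difference between the two count() calls measure how many infinite values were present.

import numpy as np
import pandas as pd

data = pd.Series([1.0, np.inf, np.NINF, np.nan])
count_before = data.count()                               # 3 non-NaN values
data.replace(to_replace=[np.inf, np.NINF, np.PINF], value=np.nan, inplace=True)
n_infinite = count_before - data.count()                  # 2 infinite values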
Project: dilated-cnn-ner    Author: iesl    | project source | file source
def compute_loss(self, scores, scores_no_dropout, labels):

        loss = tf.constant(0.0)

        if self.viterbi:
            zero_elements = tf.equal(self.sequence_lengths, tf.zeros_like(self.sequence_lengths))
            count_zeros_per_row = tf.reduce_sum(tf.to_int32(zero_elements), axis=1)
            flat_sequence_lengths = tf.add(tf.reduce_sum(self.sequence_lengths, 1),
                                           tf.scalar_mul(2, count_zeros_per_row))

            log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(scores, labels, flat_sequence_lengths,
                                                                                  transition_params=self.transition_params)
            loss += tf.reduce_mean(-log_likelihood)
        else:
            if self.which_loss == "mean" or self.which_loss == "block":
                losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=scores, labels=labels)
                masked_losses = tf.multiply(losses, self.input_mask)
                loss += tf.div(tf.reduce_sum(masked_losses), tf.reduce_sum(self.input_mask))
            elif self.which_loss == "sum":
                losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=scores, labels=labels)
                masked_losses = tf.multiply(losses, self.input_mask)
                loss += tf.reduce_sum(masked_losses)
            elif self.which_loss == "margin":
                # todo put into utils
                # also todo put idx-into-3d as sep func
                flat_labels = tf.reshape(labels, [-1])
                batch_offsets = tf.multiply(tf.range(self.batch_size), self.max_seq_len * self.num_classes)
                repeated_batch_offsets = tf_utils.repeat(batch_offsets, self.max_seq_len)
                tok_offsets = tf.multiply(tf.range(self.max_seq_len), self.num_classes)
                tiled_tok_offsets = tf.tile(tok_offsets, [self.batch_size])
                indices = tf.add(tf.add(repeated_batch_offsets, tiled_tok_offsets), flat_labels)

                # scores w/ true label set to -inf
                sparse = tf.sparse_to_dense(indices, [self.batch_size * self.max_seq_len * self.num_classes], np.NINF)
                loss_augmented_flat = tf.add(tf.reshape(scores, [-1]), sparse)
                loss_augmented = tf.reshape(loss_augmented_flat, [self.batch_size, self.max_seq_len, self.num_classes])

                # maxes excluding true label
                max_scores = tf.reshape(tf.reduce_max(loss_augmented, [-1]), [-1])
                sparse = tf.sparse_to_dense(indices, [self.batch_size * self.max_seq_len * self.num_classes],
                                            -self.margin)
                loss_augmented_flat = tf.add(tf.reshape(scores, [-1]), sparse)
                label_scores = tf.gather(loss_augmented_flat, indices)
                # margin + max_logit - correct_logit == max_logit - (correct - margin)
                max2_diffs = tf.subtract(max_scores, label_scores)
                mask = tf.reshape(self.input_mask, [-1])
                loss += tf.reduce_mean(tf.multiply(mask, tf.nn.relu(max2_diffs)))
        loss += self.l2_penalty * self.l2_loss

        drop_loss = tf.nn.l2_loss(tf.subtract(scores, scores_no_dropout))
        loss += self.drop_penalty * drop_loss
        return loss
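
The margin-loss trick above (masking the true label's score with np.NINF before taking a max, so the max runs over the other classes only), sketched in isolation with plain NumPy for a single token:

import numpy as np

scores = np.array([2.0, 5.0, 3.0])   # hypothetical per-class scores
true_label = 1
margin = 1.0

masked = scores.copy()
masked[true_label] = np.NINF         # exclude the true class from the max
max_other = masked.max()             # 3.0

# hinge = relu(margin + max_other - true_score); zero here, since the
# true class wins by more than the margin.
hinge = max(0.0, margin + max_other - scores[true_label])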