Python numpy module: float() example source code

The following code examples, extracted from open-source Python projects, illustrate how numpy.float() is used.

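A note before the examples: numpy.float is (and always was) a plain alias for Python's built-in float, not a distinct NumPy scalar type. It was deprecated in NumPy 1.20 and removed in NumPy 1.24, so the snippets below only run unmodified on older NumPy versions. A minimal sketch of the modern replacements:

import numpy as np

# On NumPy >= 1.24, spell the dtype explicitly instead of np.float:
a = np.array([1, 2, 3], dtype=float)       # built-in float maps to float64
b = np.array([1, 2, 3], dtype=np.float64)  # explicit 64-bit float
print(a.dtype, b.dtype)                    # float64 float64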
Project: uwb_tracker_ros    Author: eth-ait    | project source | file source
def _compute_process_and_covariance_matrices(self, dt):
        """Computes the transition and covariance matrix of the process model and measurement model.

        Args:
             dt (float): Timestep of the discrete transition.

        Returns:
            F (numpy.ndarray): Transition matrix.
            Q (numpy.ndarray): Process covariance matrix.
            R (numpy.ndarray): Measurement covariance matrix.
        """
        F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)], [np.zeros((3, 3)), np.eye(3)]]))
        self.process_matrix = F
        q_p = self.process_covariance_position
        q_v = self.process_covariance_velocity
        Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
        r = self.measurement_covariance
        R = r * np.eye(4)
        self.process_covariance = Q
        self.measurement_covariance = R
        return F, Q, R
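The block matrix F above is the standard constant-velocity transition model. A standalone sketch of the same construction, with dt and the covariance scalars chosen purely for illustration (not project defaults):

import numpy as np

dt, q_p, q_v, r = 0.1, 0.01, 1.0, 0.05 ** 2   # illustrative values only
F = np.array(np.bmat([[np.eye(3), dt * np.eye(3)],
                      [np.zeros((3, 3)), np.eye(3)]]))
Q = np.diag([q_p, q_p, q_p, q_v, q_v, q_v]) ** 2 * dt
R = r * np.eye(4)
print(F.shape, Q.shape, R.shape)   # (6, 6) (6, 6) (4, 4)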
Project: RasterFairy    Author: Quasimondo    | project source | file source
def rasterMaskToGrid( rasterMask ):
    grid = []
    mask = rasterMask['mask']
    for y in range(rasterMask['height']):
        for x in range(rasterMask['width']):
            if mask[y,x]==0:
                grid.append([x,y])

    grid = np.array(grid,dtype=np.float)
    if rasterMask['hex'] is True:  # the earlier None check was dead code: rasterMask is already dereferenced above
        f = math.sqrt(3.0)/2.0 
        offset = -0.5
        if np.argmin(rasterMask['mask'][0]) > np.argmin(rasterMask['mask'][1]):
            offset = 0.5
        for i in range(len(grid)):
            if (grid[i][1]%2.0==0.0):
                grid[i][0]-=offset
            grid[i][1] *= f
    return grid
Project: RasterFairy    Author: Quasimondo    | project source | file source
def getBestCircularMatch(n):
    bestc = n*2
    bestr = 0
    bestrp = 0.0

    minr = int(math.sqrt(n / math.pi))
    for rp in range(0,10):
        rpf = float(rp)/10.0
        for r in range(minr,minr+3):
            rlim = (r+rpf)*(r+rpf)
            c = 0
            for y in range(-r,r+1):
                yy = y*y
                for x in range(-r,r+1):
                    if x*x+yy<rlim:
                        c+=1
            if c == n:
                return r,rpf,c

            if c>n and c < bestc:
                bestrp = rpf
                bestr = r
                bestc = c
    return bestr,bestrp,bestc
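A quick usage sketch of the search above: for a target count n it returns a radius r, a fractional radius offset rp (in tenths), and the lattice-point count c of the best circular fit:

r, rp, c = getBestCircularMatch(100)
print(r, rp, c)   # c == 100 for an exact fit, otherwise the closest count above 100 found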
Project: table-compositor    Author: InvestmentSystems    | project source | file source
def df_type_to_str(i):
    '''
    Convert into simple datatypes from pandas/numpy types
    '''
    if isinstance(i, np.bool_):
        return bool(i)
    if isinstance(i, np.int_):
        return int(i)
    if isinstance(i, np.float):
        if np.isnan(i):
            return 'NaN'
        elif np.isinf(i):
            return str(i)
        return float(i)
    if isinstance(i, np.uint):
        return int(i)
    if type(i) == bytes:
        return i.decode('UTF-8')
    if isinstance(i, (tuple, list)):
        return str(i)
    if i is pd.NaT:  # not identified as a float null
        return 'NaN'
    return str(i)
Project: cellranger    Author: 10XGenomics    | project source | file source
def add(self, x):
        x = float(x)
        n1 = self.count

        self.count += 1

        if self.min is None or x < self.min:
            self.min = x

        if self.max is None or x > self.max:
            self.max = x

        delta = x - self.M1
        delta_n = delta / self.count
        delta_n2 = delta_n * delta_n
        term = delta * delta_n * n1
        self.M1 += delta_n
        self.M4 += term * delta_n2 * \
                ( self.count * self.count - 3*self.count + 3 ) + \
                6 * delta_n2 * self.M2 - 4 * delta_n * self.M3
        self.M3 += term * delta_n * (self.count - 2) - 3 * delta_n * self.M2
        self.M2 += term

        return
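The M1 to M4 accumulators above implement the standard one-pass (Welford-style) update for central moments. Assuming that convention, the usual summary statistics can be recovered as follows; this helper is a sketch, not part of the original class:

def summarize(stats):
    """Derive summary statistics from one-pass moment accumulators
    (assumes the M1..M4 convention of the add() method above, count > 1)."""
    mean = stats.M1
    variance = stats.M2 / (stats.count - 1)                       # sample variance
    skewness = (stats.count ** 0.5) * stats.M3 / stats.M2 ** 1.5
    kurtosis = stats.count * stats.M4 / (stats.M2 * stats.M2) - 3.0  # excess kurtosis
    return mean, variance, skewness, kurtosis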
Project: cellranger    Author: 10XGenomics    | project source | file source
def get_depth_info_json(info):
    fixed_info = {int(x): y for (x, y) in info.items()}

    total_depth_counts = sum(fixed_info.values())
    median_depth = None
    sorted_depths = sorted(fixed_info.keys())
    seen_depth_count = 0
    mean_depth = 0.0
    for depth in sorted_depths:
        seen_depth_count += fixed_info[depth]
        mean_depth += float(depth*fixed_info[depth])/float(total_depth_counts)
        if seen_depth_count > total_depth_counts/2 and median_depth is None:
            median_depth = depth
    zero_cov_fract = tk_stats.robust_divide(float(fixed_info.get(0, 0.0)), float(total_depth_counts))

    return (mean_depth, median_depth, zero_cov_fract)
Project: MachineLearningProjects    Author: geallen    | project source | file source
def computeStep(X, y, theta):
    '''YOUR CODE HERE'''
    function_result = np.array([0,0], dtype= np.float)
    m = float(len(X))

    d1 = 0.0
    d2 = 0.0
    for i in range(len(X)):
        h1 = np.dot(theta.transpose(), X[i])
        c1 = h1 - y[i]
        d1 = d1 + c1
    j1 = d1/m
    for u in range(len(X)):
        h2 = np.dot(theta.transpose(), X[u])
        c2 = (h2 - y[u]) * X[u][1]
        d2 = d2 + c2
    j2 = d2/m

    function_result[0] = j1
    function_result[1] = j2
    return function_result



# Part 4: Implement the cost function calculation
Project: MachineLearningProjects    Author: geallen    | project source | file source
def computeCost(X, y, theta):
    '''YOUR CODE HERE'''
    m = float(len(X))

    d = 0
    for i in range(len(X)):
        h = np.dot(theta.transpose(), X[i])
        c = (h - y[i])

        c = (c **2)
        d = (d + c)
    j = (1.0 / (2 * m)) * d
    return j
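Both loops (the gradient step in computeStep above and this cost) collapse into matrix operations. A hedged vectorized equivalent, assuming X is an m x 2 NumPy design matrix whose first column is all ones and theta is a length-2 vector:

import numpy as np

def computeStepVectorized(X, y, theta):
    # Mean gradient of the squared-error cost: X^T (X theta - y) / m
    m = float(len(X))
    return X.T.dot(X.dot(theta) - y) / m

def computeCostVectorized(X, y, theta):
    residual = X.dot(theta) - y
    return residual.dot(residual) / (2.0 * float(len(X)))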


# Part 5: Prepare the data so that the input X has two columns: first a column of ones to accommodate theta0 and then a column of city population data
Project: HandDetection    Author: YunqiuXu    | project source | file source
def _coco_results_one_category(self, boxes, cat_id):
    results = []
    for im_ind, index in enumerate(self.image_index):
      dets = boxes[im_ind].astype(np.float)
      if len(dets) == 0:
        continue
      scores = dets[:, -1]
      xs = dets[:, 0]
      ys = dets[:, 1]
      ws = dets[:, 2] - xs + 1
      hs = dets[:, 3] - ys + 1
      results.extend(
        [{'image_id': index,
          'category_id': cat_id,
          'bbox': [xs[k], ys[k], ws[k], hs[k]],
          'score': scores[k]} for k in range(dets.shape[0])])
    return results
Project: pybot    Author: spillai    | project source | file source
def get_xyz_points(cloud_array, remove_nans=True):
    '''
    Pulls out x, y, and z columns from the cloud recordarray and returns an Nx3 matrix.
    '''
    # drop points with non-finite (NaN/inf) coordinates
    if remove_nans:
        mask = np.isfinite(cloud_array['x']) & np.isfinite(cloud_array['y']) & np.isfinite(cloud_array['z'])
        cloud_array = cloud_array[mask]

    # pull out x, y, and z values
    points = np.zeros(list(cloud_array.shape) + [3], dtype=np.float)
    points[...,0] = cloud_array['x']
    points[...,1] = cloud_array['y']
    points[...,2] = cloud_array['z']

    return points
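A quick check with a synthetic point-cloud record array (field names x/y/z as the function expects; values illustrative):

import numpy as np

cloud = np.zeros(4, dtype=[('x', np.float32), ('y', np.float32), ('z', np.float32)])
cloud['x'] = [1.0, 2.0, np.nan, 4.0]
cloud['y'] = [0.0, 1.0, 2.0, 3.0]
cloud['z'] = [5.0, 6.0, 7.0, 8.0]
points = get_xyz_points(cloud)   # NaN row dropped, so points.shape == (3, 3)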
Project: Neural_Artistic_Style    Author: everfor    | project source | file source
def parse_arguments():
    parser = argparse.ArgumentParser()

    parser.add_argument('-c', '--content', dest = 'content', help = 'Input content image', required = True)
    parser.add_argument('-s', '--styles', dest = 'styles', nargs = '+', help = 'Style image(s)', required = True)
    parser.add_argument('-o', '--output', dest = 'output', help = 'Output image', default = _default_output)
    parser.add_argument('--vgg', dest = 'vgg', help = 'Path to pretrained vgg19 network', default = _default_vgg)
    parser.add_argument('--content-weight', type = float, dest = 'content_weight', help = 'Weight for content (input) image', default = _default_content_weight)
    parser.add_argument('--style-weight', type = float, dest = 'style_weight', help = 'Weight for style image', default = _default_style_weight)
    parser.add_argument('--style-merge-weight', type = float, dest = 'style_merge_weight', nargs = '+', help = 'Weights for style merges', default = None)
    parser.add_argument('--check-per-iteration', type = int, dest = 'check_per_iteration', help = 'Frequency of checking current loss', default = _default_check_per_iteration)
    parser.add_argument('-a', '--learning-rate', type = float, dest = 'learning_rate', help = 'Learning rate for neural network', default = _default_learning_rate)
    parser.add_argument('-i', '--iterations', type = int, dest = 'iterations', help = 'Max iterations', default = _default_iterations)
    # NB: argparse's type=bool is a known pitfall: any non-empty string (including 'False') parses as True.
    parser.add_argument('--preserve-color', type = bool, dest = 'preserve_color', help = 'Preserve color scheme of original content', default = _default_preserve_color)


    return parser.parse_args()
Project: DeepWorks    Author: daigo0927    | project source | file source
def combine_images(generated_images):

    total, width, height, ch = generated_images.shape
    cols = int(math.sqrt(total))
    rows = math.ceil(float(total)/cols)

    # NB: the slicing below indexes rows by width and columns by height,
    # so it implicitly assumes square images (width == height).
    combined_image = np.zeros((height*rows, width*cols, 3),
                              dtype = generated_images.dtype)

    for index, image in enumerate(generated_images):
        i = int(index/cols)
        j = index % cols
        combined_image[width*i:width*(i+1), height*j:height*(j+1), :]\
            = image

    return combined_image
Project: DeepWorks    Author: daigo0927    | project source | file source
def get_image(filepath, image_target, image_size):

    img = imread(filepath).astype(np.float)
    h_origin, w_origin = img.shape[:2]

    if image_target > h_origin or image_target > w_origin:
        image_target = min(h_origin, w_origin)

    h_drop = int((h_origin - image_target)/2)    
    w_drop = int((w_origin - image_target)/2)

    if img.ndim == 2:
        img = np.tile(img.reshape(h_origin, w_origin, 1), (1,1,3))

    img_crop = img[h_drop:h_drop+image_target, w_drop:w_drop+image_target, :]

    img_resize = imresize(img_crop, [image_size, image_size])

    return np.array(img_resize)/127.5 - 1.
Project: slim-python    Author: ustunb    | project source | file source
def easy_type(data_value):
    type_name = type(data_value).__name__
    if type_name in {"list", "set"}:
        types = {easy_type(item) for item in data_value}
        if len(types) == 1:
            return next(iter(types))
        elif types.issubset({"int", "float"}):
            return "float"
        else:
            return "multiple"
    elif type_name == "str":
        if data_value in {'True', 'TRUE'}:
            return "bool"
        elif data_value in {'False', 'FALSE'}:
            return "bool"
        else:
            return "str"
    elif type_name == "int":
        return "int"
    elif type_name == "float":
        return "float"
    elif type_name == "bool":
        return "bool"
    else:
        return "unknown"
Project: CausalGAN    Author: mkocaoglu    | project source | file source
def make_grid(tensor, nrow=8, padding=2,
              normalize=False, scale_each=False):
    """Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
    nmaps = tensor.shape[0]
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
    grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2, 3], dtype=np.uint8)
    k = 0
    for y in range(ymaps):
        for x in range(xmaps):
            if k >= nmaps:
                break
            h, h_width = y * height + 1 + padding // 2, height - padding
            w, w_width = x * width + 1 + padding // 2, width - padding

            grid[h:h+h_width, w:w+w_width] = tensor[k]
            k = k + 1
    return grid
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def test_read_float(self):
        """
        Tests if spike times are stored as floats if they
        are stored as floats in the file.
        """
        filename = get_test_file_full_path(
                ioclass=NestIO,
                filename='0gid-1time-1256-0.gdf',
                directory=self.local_test_dir, clean=False)
        r = NestIO(filenames=filename)
        st = r.read_spiketrain(gdf_id=1, t_start=400. * pq.ms,
                               t_stop=500. * pq.ms,
                               lazy=False, id_column=0, time_column=1)
        self.assertTrue(st.magnitude.dtype == np.float)
        seg = r.read_segment(gid_list=[1], t_start=400. * pq.ms,
                             t_stop=500. * pq.ms,
                             lazy=False, id_column_gdf=0, time_column_gdf=1)
        sts = seg.spiketrains
        self.assertTrue(all([s.magnitude.dtype == np.float for s in sts]))
Project: NeoAnalysis    Author: neoanalysis    | project source | file source
def __init__(self, times, t_stop, units=None,  dtype=np.float,
                 copy=True, sampling_rate=1.0 * pq.Hz, t_start=0.0 * pq.s,
                 waveforms=None, left_sweep=None, name=None, file_origin=None,
                 description=None, **annotations):
        '''
        Initializes a newly constructed :class:`SpikeTrain` instance.
        '''
        # This method is only called when constructing a new SpikeTrain,
        # not when slicing or viewing. We use the same call signature
        # as __new__ for documentation purposes. Anything not in the call
        # signature is stored in annotations.

        # Calls parent __init__, which grabs universally recommended
        # attributes and sets up self.annotations
        BaseNeo.__init__(self, name=name, file_origin=file_origin,
                         description=description, **annotations)
Project: systematic-metafeatures    Author: fhpinto    | project source | file source
def test__replace_objects_by_integers(self):
        data = pd.DataFrame([{'column1': 1.3, 'column2': "Bla"},
                             {'column1': 3.2, 'column2': "Bla"},
                             {'column1': 2.7, 'column2': "Aha"}])
        data = metafeatures.core.object_analyzer \
            ._replace_objects_by_integers(data,
                                          {0: {'type': 'numerical',
                                               'name': 'column1',
                                               'is_target': False},
                                           1: {'type': 'categorical',
                                               'name': 'column2',
                                               'is_target': False}})
        print(data)
        self.assertEqual(data.dtypes[0], np.float)
        self.assertEqual(data.dtypes[1], np.float)
        np.testing.assert_allclose(data.iloc[:, 1], [0, 0, 1])
Project: ConfigSpace    Author: automl    | project source | file source
def __getitem__(self, item: str) -> Any:
        if self._query_values or item in self._values:
            return self._values.get(item)

        hyperparameter = self.configuration_space._hyperparameters[item]
        item_idx = self.configuration_space._hyperparameter_idx[item]

        if not np.isfinite(self._vector[item_idx]):
            raise KeyError()

        value = hyperparameter._transform(self._vector[item_idx])
        # Truncate the float's repr so that its length is stable
        # across Python versions
        if isinstance(hyperparameter, FloatHyperparameter):
            value = float(repr(value))
        # TODO make everything faster, then it'll be possible to init all values
        # at the same time and use an OrderedDict instead of only a dict here to
        # support iterating that dict in the same order as the actual order of
        # hyperparameters
        self._values[item] = value
        return self._values[item]
Project: pytorch-nec    Author: mjacar    | project source | file source
def step(self, action):
    screens = []
    total_reward = 0

    for t in range(4):
      screen = self.get_screen()
      screens.append(screen)
      _, reward, done, info = self.env.step(self.action_mapping[action])
      total_reward += reward
      if done or total_reward:
        if done:
          self.env.reset()
        for _ in range(20):
          self.env.step(0)
        for _ in range(3 - t):
          screens.append(screen)
        break

    screens = np.asarray(screens).astype(np.float)
    return screens, total_reward, done, info
Project: deep-prior    Author: moberweger    | project source | file source
def calculateCoM(self, dpt):
        """
        Calculate the center of mass
        :param dpt: depth image
        :return: (x,y,z) center of mass
        """

        dc = dpt.copy()
        dc[dc < self.minDepth] = 0
        dc[dc > self.maxDepth] = 0
        cc = ndimage.measurements.center_of_mass(dc > 0)
        num = numpy.count_nonzero(dc)
        com = numpy.array((cc[1]*num, cc[0]*num, dc.sum()), numpy.float)

        if num == 0:
            return numpy.array((0, 0, 0), numpy.float)
        else:
            return com/num
Project: dc_stat_think    Author: justinbois    | project source | file source
def diff_of_means(data_1, data_2):
    """
    Difference in means of two arrays.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        np.mean(data_1) - np.mean(data_2)
    """
    data_1 = _convert_data(data_1)
    data_2 = _convert_data(data_2)

    return _diff_of_means(data_1, data_2)


# @numba.jit(nopython=True)
Project: dc_stat_think    Author: justinbois    | project source | file source
def _diff_of_means(data_1, data_2):
    """
    Difference in means of two arrays.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        np.mean(data_1) - np.mean(data_2)
    """
    return np.mean(data_1) - np.mean(data_2)
Project: dc_stat_think    Author: justinbois    | project source | file source
def swap_random(a, b):
    """
    Randomly swap entries in two arrays.

    Parameters
    ----------
    a : array_like
        1D array of entries to be swapped.
    b : array_like
        1D array of entries to be swapped. Must have the same lengths
        as `a`.

    Returns
    -------
    a_out : ndarray, dtype float
        Array with random entries swapped.
    b_out : ndarray, dtype float
        Array with random entries swapped.
    """
    a, b = _convert_two_data(a, b)

    return _swap_random(a, b)


# @numba.jit(nopython=True)
Project: dc_stat_think    Author: justinbois    | project source | file source
def heritability(parents, offspring):
    """
    Compute the heritability from parent and offspring samples.

    Parameters
    ----------
    parents : array_like
        Array of data for trait of parents.
    offspring : array_like
        Array of data for trait of offspring.

    Returns
    -------
    output : float
        Heritability of trait.
    """
    par, off = _convert_two_data(parents, offspring)
    covariance_matrix = np.cov(par, off)
    return covariance_matrix[0,1] / covariance_matrix[0,0]
Project: dc_stat_think    Author: justinbois    | project source | file source
def _draw_bs_reps_mean(data, size=1):
    """
    Generate bootstrap replicates of the mean out of `data`.

    Parameters
    ----------
    data : array_like
        One-dimensional array of data.
    size : int, default 1
        Number of bootstrap replicates to generate.

    Returns
    -------
    output : ndarray
        Bootstrap replicates of the mean computed from `data`.
    """
    # Set up output array
    bs_reps = np.empty(size)

    # Draw replicates
    n = len(data)
    for i in range(size):
        bs_reps[i] = np.mean(np.random.choice(data, size=n))

    return bs_reps
Project: dc_stat_think    Author: justinbois    | project source | file source
def _draw_bs_reps_median(data, size=1):
    """
    Generate bootstrap replicates of the median out of `data`.

    Parameters
    ----------
    data : array_like
        One-dimensional array of data.
    size : int, default 1
        Number of bootstrap replicates to generate.

    Returns
    -------
    output : ndarray
        Bootstrap replicates of the median computed from `data`.
    """
    # Set up output array
    bs_reps = np.empty(size)

    # Draw replicates
    n = len(data)
    for i in range(size):
        bs_reps[i] = np.median(np.random.choice(data, size=n))

    return bs_reps
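Both bootstrap helpers above loop in Python; a hedged vectorized sketch that draws all replicates in a single call and reduces per row (same semantics, NumPy only):

import numpy as np

def draw_bs_reps(data, stat_fun=np.mean, size=1):
    # One (size, n) resample matrix, then reduce each row with the statistic.
    data = np.asarray(data)
    samples = np.random.choice(data, size=(size, len(data)))
    return stat_fun(samples, axis=1)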
Project: dc_stat_think    Author: justinbois    | project source | file source
def studentized_diff_of_means(data_1, data_2):
    """
    Studentized difference in means of two arrays.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        Studentized difference of means.

    Notes
    -----
    .. If the variance of both `data_1` and `data_2` is zero, returns
       np.nan.
    """
    data_1 = _convert_data(data_1)
    data_2 = _convert_data(data_2)

    return _studentized_diff_of_means(data_1, data_2)
Project: dc_stat_think    Author: justinbois    | project source | file source
def pearson_r(data_1, data_2):
    """
    Compute the Pearson correlation coefficient between two samples.

    Parameters
    ----------
    data_1 : array_like
        One-dimensional array of data.
    data_2 : array_like
        One-dimensional array of data.

    Returns
    -------
    output : float
        The Pearson correlation coefficient between `data_1`
        and `data_2`.

    Notes
    -----
    .. Only entries where both `data_1` and `data_2` are not NaN are
       used.
    .. If the variance of `data_1` or `data_2` is zero, return NaN.
    """
    x, y = _convert_two_data(data_1, data_2, inf_ok=False, min_len=2)
    return _pearson_r(x, y)
Project: dc_stat_think    Author: justinbois    | project source | file source
def frac_yay_dems(dems, reps):
    """
    Compute fraction of yay votes from Democrats. This function is
    specific to exercises in Statistical Thinking in Python Part I.
    It is only included here for completeness.

    Parameters
    ----------
    dems : array_like, dtype bool
        Votes for democrats, True for yay vote, False for nay.
    reps : ignored
        Ignored; only needed for a specific application of the permutation
        test in Statistical Thinking I.

    Returns
    -------
    output : float
        Fraction of Democrats who voted yay.
    """
    if dems.dtype != bool:
        raise RuntimeError('`dems` must be array of bools.')

    return np.sum(dems) / len(dems)
Project: SourceFilterContoursMelody    Author: juanjobosch    | project source | file source
def normalize(x, axis=None):
    """Normalize the values of an ndarray to sum to 1 along the given axis.
    Parameters
    ----------
    x : np.ndarray
        Input multidimensional array to normalize.
    axis : int, default=None
        Axis to normalize along, otherwise performed over the full array.
    Returns
    -------
    z : np.ndarray, shape=x.shape
        Normalized array.
    """
    if axis is not None:
        shape = list(x.shape)
        shape[axis] = 1
        scalar = x.astype(float).sum(axis=axis).reshape(shape)
        scalar[scalar == 0] = 1.0
    else:
        scalar = x.sum()
        scalar = 1 if scalar == 0 else scalar
    return x / scalar
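The axis semantics in a quick example (values illustrative):

import numpy as np

x = np.array([[1.0, 3.0],
              [2.0, 2.0]])
print(normalize(x, axis=1))   # rows sum to 1: [[0.25 0.75] [0.5 0.5]]
print(normalize(x))           # the whole array sums to 1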
Project: mx-rfcn    Author: giorking    | project source | file source
def bbox_overlaps(boxes, query_boxes):
    """
    determine overlaps between boxes and query_boxes
    :param boxes: n * 4 bounding boxes
    :param query_boxes: k * 4 bounding boxes
    :return: overlaps: n * k overlaps
    """
    n_ = boxes.shape[0]
    k_ = query_boxes.shape[0]
    overlaps = np.zeros((n_, k_), dtype=np.float)
    for k in range(k_):
        query_box_area = (query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)
        for n in range(n_):
            iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1
            if iw > 0:
                ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1
                if ih > 0:
                    box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)
                    all_area = float(box_area + query_box_area - iw * ih)
                    overlaps[n, k] = iw * ih / all_area
    return overlaps
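The double loop above is O(n*k) in pure Python; a hedged vectorized NumPy equivalent (a sketch using the same +1 pixel-area convention):

import numpy as np

def bbox_overlaps_vectorized(boxes, query_boxes):
    """IoU matrix via broadcasting; same +1 pixel-area convention as above."""
    b_area = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    q_area = (query_boxes[:, 2] - query_boxes[:, 0] + 1) * (query_boxes[:, 3] - query_boxes[:, 1] + 1)
    iw = (np.minimum(boxes[:, None, 2], query_boxes[None, :, 2])
          - np.maximum(boxes[:, None, 0], query_boxes[None, :, 0]) + 1).clip(min=0)
    ih = (np.minimum(boxes[:, None, 3], query_boxes[None, :, 3])
          - np.maximum(boxes[:, None, 1], query_boxes[None, :, 1]) + 1).clip(min=0)
    inter = iw * ih                                   # (n, k) intersection areas
    return inter / (b_area[:, None] + q_area[None, :] - inter)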
Project: adversarial-frcnn    Author: xiaolonw    | project source | file source
def _coco_results_one_category(self, boxes, cat_id):
        results = []
        for im_ind, index in enumerate(self.image_index):
            dets = boxes[im_ind].astype(np.float)
            if len(dets) == 0:
                continue
            scores = dets[:, -1]
            xs = dets[:, 0]
            ys = dets[:, 1]
            ws = dets[:, 2] - xs + 1
            hs = dets[:, 3] - ys + 1
            results.extend(
              [{'image_id' : index,
                'category_id' : cat_id,
                'bbox' : [xs[k], ys[k], ws[k], hs[k]],
                'score' : scores[k]} for k in range(dets.shape[0])])
        return results
Project: TDOSE    Author: kasperschmidt    | project source | file source
def SExtractorCat2fits(sextractorfiles,stringcols=[1],header=73,verbose=True):
    """
    Convert an ascii catalog whose columns are defined in a SExtractor-format header, i.e. one column
    name per row preceded by a "#" and a column number, and followed by a description (or any ascii file
    with the same setup), to a fits binary table

    --- INPUT ---
    sextractorfiles   List of ascii files to convert to fits
    stringcols        Columns to use a string format for (all other columns will be set to double float)
    header            Header containing the column names of the catalogs following the "SExtractor notation"
    verbose           Toggle verbosity

    --- EXAMPLE OF USE ---
    import glob
    import tdose_utilities as tu
    catalogs = glob.glob('/Volumes/DATABCKUP2/MUSE-Wide/catalogs_photometry/catalog_photometry_candels-cdfs-*.cat')
    tu.SExtractorCat2fits(catalogs,stringcols=[1],header=73,verbose=True)

    """
    for sexcat_ascii in sextractorfiles:
        asciiinfo = open(sexcat_ascii,'r')
        photcols = []
        for line in asciiinfo:
            if line.startswith('#'):
                colname = line.split()[2]
                photcols.append(colname)

        photfmt = ['D']*len(photcols)
        for stringcol in stringcols:
            photfmt[stringcol] = 'A60'

        sexcat_fits   = tu.ascii2fits(sexcat_ascii,asciinames=photcols,skip_header=header,fitsformat=photfmt,verbose=verbose)

# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: TDOSE    Author: kasperschmidt    | project source | file source
def galfit_getheadervalue(compnumber,key,headerinfo):
    """
    Return the parameters of a GALFIT model header

    --- INPUT ---
    compnumber      A string containing the component number to extract info for (number after "COMP_" in header)
    key             The key to extract (keyword after "COMPNUMBER_" in header)
    headerinfo      Header to extract info from.

    """
    hdrinfo = headerinfo[compnumber+'_'+key]

    if '*' in hdrinfo: # handling parameters fixed in GALFIT run
        hdrinfo = hdrinfo.replace('*','')

    if '+/-' in hdrinfo:
        value   = float(hdrinfo.split('+/-')[0])
        error   = float(hdrinfo.split('+/-')[1])
    else:
        value   = float(hdrinfo[1:-1])
        error   = None

    if (key == 'XC') or (key == 'YC'):
        xrange, yrange = headerinfo['FITSECT'][1:-1].split(',')
        xrange = np.asarray(xrange.split(':')).astype(float)
        yrange = np.asarray(yrange.split(':')).astype(float)
        if key == 'XC':
            value = value - xrange[0] + 1.0
        if key == 'YC':
            value = value - yrange[0] + 1.0

    return value, error
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: TDOSE    Author: kasperschmidt    | project source | file source
def galfit_getcentralcoordinate(modelfile,coordorigin=1,verbose=True):
    """
    Return the central coordinates of a GALFIT model extracted using the reference image WCS and the FITSECT keyword

    --- INPUT ---
    modelfile       Path and name to GALFIT model fits file to retrieve central coordinates for
    coordorigin     Origin of coordinates in reference image to use when converting pixels to degrees (skycoord)
    verbose         Toggle verbosity

    --- EXAMPLE OF USE ---
    fileG   = '/Volumes/DATABCKUP2/TDOSEextractions/models_cutouts/model8685multicomponent/model_acs_814w_candels-cdfs-02_cut_v1.0_id8685_cutout7p0x7p0arcsec.fits' # Gauss components
    fileS   = '/Volumes/DATABCKUP2/TDOSEextractions/models_cutouts/model8685multicomponent/model_acs_814w_candels-cdfs-02_cut_v1.0_id9262_cutout2p0x2p0arcsec.fits' # Sersic components

    xpix, ypix, ra_model, dec_model = tu.galfit_getcentralcoordinate(fileG,coordorigin=1)

    """
    if verbose: print(' - Will extract central coordinates from '+modelfile)
    refimg_hdr     = pyfits.open(modelfile)[1].header
    model_hdr      = pyfits.open(modelfile)[2].header
    imgwcs         = wcs.WCS(tu.strip_header(refimg_hdr.copy()))

    fit_region     = model_hdr['FITSECT']
    cutrange_low_x = int(float(fit_region.split(':')[0].split('[')[-1]))
    cutrange_low_y = int(float(fit_region.split(',')[-1].split(':')[0]))
    xsize          = model_hdr['NAXIS1']
    ysize          = model_hdr['NAXIS2']

    xpix           = cutrange_low_x + int(xsize/2.)
    ypix           = cutrange_low_y + int(ysize/2.)

    if verbose: print(' - Converting pixel position to coordinates using a pixel origin='+str(coordorigin))
    skycoord    = wcs.utils.pixel_to_skycoord(xpix,ypix,imgwcs,origin=coordorigin)

    ra_model    = skycoord.ra.value
    dec_model   = skycoord.dec.value

    return xpix,ypix,ra_model,dec_model
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
Project: numerai    Author: gansanay    | project source | file source
def get_user(self, username):
        leaderboard, status_code = self.get_leaderboard()
        if status_code!=200:
            return (None, None, None, None, status_code)

        for user in leaderboard[0]['leaderboard']:
            if user['username']==username:
                return (user['username'], np.float(user['logloss']['public']),  user['rank']['public'],  user['earned'], status_code)
        return (None, None, None, None, status_code)
Project: cnn-graph-classification    Author: giannisnik    | project source | file source
def load_data(ds_name, use_node_labels):
    node2graph = {}
    Gs = []

    with open("../datasets/%s/%s_graph_indicator.txt"%(ds_name,ds_name), "r") as f:
        c = 1
        for line in f:
            node2graph[c] = int(line[:-1])
            if not node2graph[c] == len(Gs):
                Gs.append(nx.Graph())
            Gs[-1].add_node(c)
            c += 1

    with open("../datasets/%s/%s_A.txt"%(ds_name,ds_name), "r") as f:
        for line in f:
            edge = line[:-1].split(",")
            edge[1] = edge[1].replace(" ", "")
            Gs[node2graph[int(edge[0])]-1].add_edge(int(edge[0]), int(edge[1]))

    if use_node_labels:
        with open("../datasets/%s/%s_node_labels.txt"%(ds_name,ds_name), "r") as f:
            c = 1
            for line in f:
                node_label = int(line[:-1])
                Gs[node2graph[c]-1].node[c]['label'] = node_label
                c += 1

    labels = []
    with open("../datasets/%s/%s_graph_labels.txt"%(ds_name,ds_name), "r") as f:
        for line in f:
            labels.append(int(line[:-1]))

    labels  = np.array(labels, dtype = np.float)
    return Gs, labels
Project: mpiFFT4py    Author: spectralDNS    | project source | file source
def empty(N, dtype=np.float, bytes=16):
        return pyfftw.empty_aligned(N, dtype=dtype, n=bytes)
Project: mpiFFT4py    Author: spectralDNS    | project source | file source
def zeros(N, dtype=np.float, bytes=16):
        return pyfftw.zeros_aligned(N, dtype=dtype, n=bytes)
Project: mpiFFT4py    Author: spectralDNS    | project source | file source
def empty(N, dtype=np.float, bytes=None):
        return Empty(N, dtype=dtype)
Project: mpiFFT4py    Author: spectralDNS    | project source | file source
def zeros(N, dtype=np.float, bytes=None):
        return Zeros(N, dtype=dtype)
Project: uwb_tracker_ros    Author: eth-ait    | project source | file source
def _read_configuration(self):
        """Initialize configuration from ROS parameters.
        """
        self.uwb_multi_range_topic = rospy.get_param('~multi_range_raw_topic', '/uwb/multi_range_with_offsets')
        self.uwb_tracker_topic = rospy.get_param('~tracker_topic', '/uwb/tracker')
        self.tracker_frame = rospy.get_param('~tracker_frame', 'uwb')
        self.target_frame = rospy.get_param('~target_frame', 'target')

        # Get parameters for covariance matrices
        self.initial_position_covariance = rospy.get_param('~initial_position_covariance', 10)
        self.process_covariance_position = rospy.get_param('~process_covariance_position', 0)
        self.process_covariance_velocity = rospy.get_param('~process_covariance_velocity', 1)
        self.measurement_covariance = rospy.get_param('~measurement_covariance', 0.1 ** 2)

        # Get parameters for filter update and initial gauss-newton estimation
        self.ignore_z_position = rospy.get_param('~ignore_z_position', True)
        # The outlier threshold is drawn from a Chi-Square distribution with
        # 4 degrees of freedom (4 UWB measurements); e.g. the 0.9 quantile
        # corresponds to a threshold of 7.779.
        self.outlier_threshold_quantile = rospy.get_param('~outlier_threshold_quantile', 0.1)
        self.ikf_iterations = rospy.get_param('~ikf_iterations', 2)
        self.initial_guess_position = np.empty((3, 1), dtype=np.float)
        self.initial_guess_position[0] = rospy.get_param('~initial_guess_position_x', 0)
        self.initial_guess_position[1] = rospy.get_param('~initial_guess_position_y', 0)
        self.initial_guess_position[2] = rospy.get_param('~initial_guess_position_z', 0)
        self.initial_guess_iterations = rospy.get_param('~initial_guess_iterations', 200)
        self.initial_guess_tolerance = rospy.get_param('~initial_guess_tolerance', 1e-5)
        self.initial_guess_residuals_threshold = rospy.get_param('~initial_guess_residuals_threshold', 0.1)
        self.ikf_max_outlier_count = rospy.get_param('~ikf_max_outlier_count', 200)
Project: uwb_tracker_ros    Author: eth-ait    | project source | file source
def update_filter(self, timestep, estimate, ranges):
        """Update position filter.

        Args:
             timestep (float): Time elapsed since last update.
             estimate (StateEstimate): Position estimate to update.
             ranges (list of floats): Range measurements.

        Returns:
            new_estimate (StateEstimate): Updated position estimate.
            outlier_flag (bool): Flag indicating whether the measurement was rejected as an outlier.
        """
        num_of_units = len(ranges)
        x = estimate.state
        P = estimate.covariance
        # Compute process matrix and covariance matrices
        F, Q, R = self._compute_process_and_covariance_matrices(timestep)
        # rospy.logdebug('F: {}'.format(F))
        # rospy.logdebug('Q: {}'.format(Q))
        # rospy.logdebug('R: {}'.format(R))
        # Prediction
        x = np.dot(F, x)
        P = np.dot(F, np.dot(P, F.T)) + Q
        # Update
        n = np.copy(x)
        H = np.zeros((num_of_units, x.size))
        z = np.zeros((num_of_units, 1))
        h = np.zeros((num_of_units, 1))
        for i in range(self.ikf_iterations):
            n, K, outlier_flag = self._ikf_iteration(x, n, ranges, h, H, z, estimate, R)
        if outlier_flag:
            new_estimate = estimate
        else:
            new_state = n
            new_covariance = np.dot((np.eye(6) - np.dot(K, H)), P)
            new_estimate = UWBTracker.StateEstimate(new_state, new_covariance)
        return new_estimate, outlier_flag
Project: RasterFairy    Author: Quasimondo    | project source | file source
def arrangement_sort(x, y):
    return int(100000000*(abs(float(min(x['width'],x['height'])) / float(max(x['width'],x['height']))) - abs(float(min(y['width'],y['height'])) / float(max(y['width'],y['height'])))))
Project: RasterFairy    Author: Quasimondo    | project source | file source
def proportion_sort(x, y):
    return int(100000000*(abs(float(min(x[0],x[1])) / float(max(x[0],x[1]))) - abs(float(min(y[0],y[1])) / float(max(y[0],y[1])))))
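Both functions above are Python 2 cmp-style comparators (returning negative/zero/positive). Under Python 3 they can still be used via functools.cmp_to_key; a hedged sketch with an illustrative rects list:

import functools

rects = [{'width': 4, 'height': 3}, {'width': 10, 'height': 2}]   # illustrative data
rects.sort(key=functools.cmp_to_key(arrangement_sort))            # orders by squareness (min/max side ratio)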